def has_overflow(self, params):
    is_not_finite = 0
    for param in params:
        if param.grad_req != 'null':
            grad = param.list_grad()[0]
            is_not_finite += mx.nd.contrib.isnan(grad).sum()
            is_not_finite += mx.nd.contrib.isinf(grad).sum()
    if is_not_finite == 0:
        return False
    else:
        return True
detect inf and nan
def get_decoded_tile(codec, stream, imagep, tile_index):
    OPENJP2.opj_get_decoded_tile.argtypes = [CODEC_TYPE,
                                             STREAM_TYPE_P,
                                             ctypes.POINTER(ImageType),
                                             ctypes.c_uint32]
    OPENJP2.opj_get_decoded_tile.restype = check_error
    OPENJP2.opj_get_decoded_tile(codec, stream, imagep, tile_index)
Get the decoded tile from the codec.

Wraps the openjp2 library function opj_get_decoded_tile.

Parameters
----------
codec : CODEC_TYPE
    The JPEG 2000 codec.
stream : STREAM_TYPE_P
    The input stream.
imagep : ImageType
    Output image structure.
tile_index : int
    Index of the tile which will be decoded.

Raises
------
RuntimeError
    If the OpenJPEG library routine opj_get_decoded_tile fails.
def json(self):
    return json.dumps(self.dict_rules, sort_keys=True,
                      indent=2, separators=(',', ': '))
Output the security rules as a JSON string.

Returns:
    str
def _parse_args(self, args):
    parser = ArgumentParser(description="Runs pylint recursively on a directory")
    parser.add_argument(
        "-v", "--verbose",
        dest="verbose",
        action="store_true",
        default=False,
        help="Verbose mode (report which files were found for testing).",
    )
    parser.add_argument(
        "--rcfile",
        dest="rcfile",
        action="store",
        default=".pylintrc",
        help="A relative or absolute path to your pylint rcfile. Defaults to "
             "`.pylintrc` at the current working directory",
    )
    parser.add_argument(
        "-V", "--version",
        action="version",
        version="%(prog)s ({0}) for Python {1}".format(__version__, PYTHON_VERSION),
    )
    options, _ = parser.parse_known_args(args)
    self.verbose = options.verbose
    if options.rcfile:
        if not os.path.isfile(options.rcfile):
            options.rcfile = os.getcwd() + "/" + options.rcfile
        self.rcfile = options.rcfile
    return options
Parses any supplied command-line args and provides help text.
def calc_dihedral(point1, point2, point3, point4):
    points = np.array([point1, point2, point3, point4])
    x = np.cross(points[1] - points[0], points[2] - points[1])
    y = np.cross(points[2] - points[1], points[3] - points[2])
    return angle(x, y)
Calculates a dihedral angle.

Here, two planes are defined by (point1, point2, point3) and
(point2, point3, point4). The angle between them is returned.

Parameters
----------
point1, point2, point3, point4 : array-like, shape=(3,), dtype=float
    Four points that define two planes

Returns
-------
float
    The dihedral angle between the two planes defined by the four points.
def _exec(**kwargs):
    if 'ignore_retcode' not in kwargs:
        kwargs['ignore_retcode'] = True
    if 'output_loglevel' not in kwargs:
        kwargs['output_loglevel'] = 'quiet'
    return salt.modules.cmdmod.run_all(**kwargs)
Simple internal wrapper for cmdmod.run
def binned_entropy(x, max_bins):
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    # Bin the values and turn counts into probabilities
    hist, bin_edges = np.histogram(x, bins=max_bins)
    probs = hist / x.size
    # Sum over a list (np.sum on a generator is deprecated) and skip empty bins
    return -np.sum([p * np.log(p) for p in probs if p != 0])
First bins the values of x into max_bins equidistant bins.
Then calculates the value of

.. math::

    - \\sum_{k=0}^{min(max\\_bins, len(x))} p_k log(p_k) \\cdot \\mathbf{1}_{(p_k > 0)}

where :math:`p_k` is the percentage of samples in bin :math:`k`.

:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param max_bins: the maximal number of bins
:type max_bins: int
:return: the value of this feature
:return type: float
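A quick worked example (a sketch, assuming the function above is in scope): four evenly spread values split into two bins give two probabilities of 0.5, so the entropy is ln(2).

    import numpy as np
    binned_entropy(np.array([1.0, 2.0, 3.0, 4.0]), max_bins=2)  # -> ~0.693 (= ln 2)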
def validate(self):
    if not self.file.exists():
        raise ValueError("File \"%s\" doesn't exist" % self.file)
    if not self.search:
        raise ValueError("Search cannot be empty")
    if not self.replace:
        raise ValueError("Replace cannot be empty")
    if self.match not in ('file', 'line'):
        raise ValueError("Match must be one of: file, line")
    try:
        codecs.lookup(self.encoding)
    except LookupError:
        raise ValueError("Unknown encoding: \"%s\"" % self.encoding)
Validate current file configuration.

:raise ValueError:
def write_file(self, filename, file_format="xyz"):
    mol = pb.Molecule(self._obmol)
    return mol.write(file_format, filename, overwrite=True)
Uses OpenBabel to output all supported formats.

Args:
    filename: Filename of file to output
    file_format: String specifying any OpenBabel supported formats.
def service_restart(service_name):
    if host.service_available(service_name):
        if host.service_running(service_name):
            host.service_restart(service_name)
        else:
            host.service_start(service_name)
Wrapper around host.service_restart to prevent spurious "unknown service" messages in the logs.
def sign(self, key, network_id=None):
    if network_id is None:
        rawhash = utils.sha3(rlp.encode(unsigned_tx_from_tx(self),
                                        UnsignedTransaction))
    else:
        assert 1 <= network_id < 2**63 - 18
        rlpdata = rlp.encode(rlp.infer_sedes(self).serialize(self)[:-3] +
                             [network_id, b'', b''])
        rawhash = utils.sha3(rlpdata)

    key = normalize_key(key)
    v, r, s = ecsign(rawhash, key)
    if network_id is not None:
        v += 8 + network_id * 2

    ret = self.copy(v=v, r=r, s=s)
    ret._sender = utils.privtoaddr(key)
    return ret
Sign this transaction with a private key. A potentially already existing signature would be overridden.
def check_pow(block_number, header_hash, mixhash, nonce, difficulty):
    log.debug('checking pow', block_number=block_number)
    if len(mixhash) != 32 or len(header_hash) != 32 or len(nonce) != 8:
        return False
    cache = get_cache(block_number)
    mining_output = hashimoto_light(block_number, cache, header_hash, nonce)
    if mining_output[b'mix digest'] != mixhash:
        return False
    return utils.big_endian_to_int(
        mining_output[b'result']) <= 2**256 // (difficulty or 1)
Check if the proof-of-work of the block is valid.

:param nonce: if given the proof of work function will be evaluated with
    this nonce instead of the one already present in the header
:returns: `True` or `False`
def register(cls, package_type):
    if not issubclass(package_type, cls):
        raise TypeError('package_type must be a subclass of Package.')
    cls._REGISTRY.add(package_type)
Register a concrete implementation of a Package to be recognized by pex.
def lmx_base():
    hparams = transformer.transformer_tpu()
    hparams.shared_embedding_and_softmax_weights = False
    hparams.label_smoothing = 0.0
    hparams.max_length = 256
    hparams.batch_size = 4096
    hparams.activation_dtype = "bfloat16"
    return hparams
Transformer on languagemodel_lm1b32k_packed. 50M Params.
def create_audio_mp3_profile(apps, schema_editor):
    Profile = apps.get_model('edxval', 'Profile')
    Profile.objects.get_or_create(profile_name=AUDIO_MP3_PROFILE)
Create audio_mp3 profile
def claim_watches(user):
    Watch.objects.filter(email=user.email).update(email=None, user=user)
Attach any anonymous watches having a user's email to that user. Call this from your user registration process if you like.
def is_human(data, builds=None):
    def has_build37_contigs(data):
        for contig in ref.file_contigs(dd.get_ref_file(data)):
            if contig.name.startswith("GL") or contig.name.find("_gl") >= 0:
                if contig.name in naming.GMAP["hg19"] or contig.name in naming.GMAP["GRCh37"]:
                    return True
        return False

    if not builds and tz.get_in(["genome_resources", "aliases", "human"], data):
        return True
    if not builds or "37" in builds:
        target_builds = ["hg19", "GRCh37"]
        if any([dd.get_genome_build(data).startswith(b) for b in target_builds]):
            return True
        elif has_build37_contigs(data):
            return True
    if not builds or "38" in builds:
        target_builds = ["hg38"]
        if any([dd.get_genome_build(data).startswith(b) for b in target_builds]):
            return True
    return False
Check if the input is human, optionally for specific builds, searching by genome build name or by extra GL contigs.
def add_cut(problem, indicators, bound, Constraint):
    cut = Constraint(sympy.Add(*indicators), ub=bound)
    problem.add(cut)
    return cut
Add an integer cut to the problem.

Ensure that the same solution involving these indicator variables cannot
be found by enforcing their sum to be less than before.

Parameters
----------
problem : optlang.Model
    Specific optlang interface Model instance.
indicators : iterable
    Binary indicator `optlang.Variable`s.
bound : int
    Should be one less than the sum of indicators. Corresponds to P - 1
    in equation (14) in [1]_.
Constraint : optlang.Constraint
    Constraint class for a specific optlang interface.

References
----------
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
       "Detection of Stoichiometric Inconsistencies in Biomolecular Models."
       Bioinformatics 24, no. 19 (2008): 2245.
def trimpath(attributes):
    if 'pathdepth' in attributes:
        if attributes['pathdepth'] != 'full':
            pathelements = []
            remainder = attributes['file']
            limit = int(attributes['pathdepth'])
            while len(pathelements) < limit and remainder:
                remainder, pe = os.path.split(remainder)
                pathelements.insert(0, pe)
            return os.path.join(*pathelements)
        return attributes['file']
    return os.path.basename(attributes['file'])
Simplifies the given path.

If pathdepth is in attributes, the last pathdepth elements will be
returned. If pathdepth is "full", the full path will be returned.
Otherwise the filename only will be returned.

Args:
    attributes: The element attributes.

Returns:
    The trimmed path.
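Two illustrative calls (the attribute values are hypothetical):

    trimpath({'file': '/var/log/app/run.log', 'pathdepth': '2'})  # -> 'app/run.log'
    trimpath({'file': '/var/log/app/run.log'})                    # -> 'run.log'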
def search(table: LdapObjectClass, query: Optional[Q] = None,
           database: Optional[Database] = None,
           base_dn: Optional[str] = None) -> Iterator[LdapObject]:
    fields = table.get_fields()
    db_fields = {
        name: field
        for name, field in fields.items()
        if field.db_field
    }

    database = get_database(database)
    connection = database.connection

    search_options = table.get_search_options(database)

    iterator = tldap.query.search(
        connection=connection,
        query=query,
        fields=db_fields,
        base_dn=base_dn or search_options.base_dn,
        object_classes=search_options.object_class,
        pk=search_options.pk_field,
    )

    for dn, data in iterator:
        python_data = _db_to_python(data, table, dn)
        python_data = table.on_load(python_data, database)
        yield python_data
Search for an object of the given type in the database.
def _hash_of_file(path, algorithm):
    with open(path, 'rb') as archive:
        hash = hashlib.new(algorithm)
        for chunk in read_chunks(archive):
            hash.update(chunk)
    return hash.hexdigest()
Return the hash digest of a file.
def add_custom_fields(cls, *args, **kw):
    for factory in config.custom_field_factories:
        for field in factory():
            setattr(cls, field.name, field)
Add any custom fields defined in the configuration.
def percentile(self, percent):
    # Sanity check: any percentile over 100 is treated as 100.
    if percent >= 100:
        percent = 100
    # How many samples are allowed to sit above the requested percentile.
    target = len(self) - len(self) * (percent / 100)
    # Walk the histogram from the largest bucket down until we pass the target.
    for k in reversed(sorted(self._data.keys())):
        target -= self._data[k]
        if target < 0:
            return k
    # No data recorded: fall back to the default value of 10.
    return 10
Return the value that is the Nth percentile in the histogram.

Args:
    percent (Union[int, float]): The percentile being sought. The
        default consumer implementations consistently use ``99``.

Returns:
    int: The value corresponding to the requested percentile.
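A small worked example of the top-down scan (hypothetical bucket contents; here `self._data` maps value -> count):

    # 7 samples total: self._data = {1: 4, 5: 2, 10: 1}
    # percentile(99): target = 7 - 7 * 0.99 = 0.07
    # Walking down from the largest key: 0.07 - 1 < 0 at key 10,
    # so percentile(99) returns 10.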
def colorize(text, messageType=None):
    formattedText = str(text)
    # Guard against the default None so the substring checks below cannot fail
    messageType = messageType or ""
    if "ERROR" in messageType:
        formattedText = colorama.Fore.RED + formattedText
    elif "WARNING" in messageType:
        formattedText = colorama.Fore.YELLOW + formattedText
    elif "SUCCESS" in messageType:
        formattedText = colorama.Fore.GREEN + formattedText
    elif "INFO" in messageType:
        formattedText = colorama.Fore.BLUE + formattedText
    if "BOLD" in messageType:
        formattedText = colorama.Style.BRIGHT + formattedText
    return formattedText + colorama.Style.RESET_ALL
Function that colorizes a message.

Args:
-----
text: The string to be colorized.
messageType: Possible options include "ERROR", "WARNING", "SUCCESS",
    "INFO" or "BOLD".

Returns:
--------
string: Colorized if the option is correct, including a tag at the end
    to reset the formatting.
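A usage sketch (assumes colorama is initialized): because each check is a substring test, tags can be combined in a single string.

    print(colorize('Build finished', 'SUCCESS'))
    print(colorize('Build finished', 'SUCCESS BOLD'))  # green and bright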
def append_responder(self, matcher, *args, **kwargs):
    return self._insert_responder("bottom", matcher, *args, **kwargs)
Add a responder of last resort. Like `.autoresponds`, but instead of adding a responder to the top of the stack, add it to the bottom. This responder will be called if no others match.
def generate_timeline(usnjrnl, filesystem_content):
    journal_content = defaultdict(list)
    for event in usnjrnl:
        journal_content[event.inode].append(event)
    for event in usnjrnl:
        try:
            dirent = lookup_dirent(event, filesystem_content, journal_content)
            yield UsnJrnlEvent(
                dirent.inode, dirent.path, dirent.size, dirent.allocated,
                event.timestamp, event.changes, event.attributes)
        except LookupError as error:
            LOGGER.debug(error)
Aggregates the data collected from the USN journal and the filesystem content.
def get_link_domain(link, dist):
    domain = np.array([-np.inf, -1, 0, 1, np.inf])
    domain = domain[~np.isnan(link.link(domain, dist))]
    return [domain[0], domain[-1]]
Tool to identify the domain of a given monotonic link function.

Parameters
----------
link : Link object
dist : Distribution object

Returns
-------
domain : list of length 2, representing the interval of the domain.
def add(self, synchronous=True, **kwargs):
    kwargs = kwargs.copy()
    if 'data' not in kwargs:
        kwargs['data'] = dict()
    if 'component_ids' not in kwargs['data']:
        kwargs['data']['components'] = [_payload(self.get_fields(), self.get_values())]
    kwargs.update(self._server_config.get_client_kwargs())
    response = client.put(self.path('add'), **kwargs)
    return _handle_response(response, self._server_config, synchronous)
Add provided Content View Component.

:param synchronous: What should happen if the server returns an HTTP 202
    (accepted) status code? Wait for the task to complete if ``True``.
    Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with an
    HTTP 4XX or 5XX message.
def to_satoshis(input_quantity, input_type):
    assert input_type in UNIT_CHOICES, input_type
    if input_type in ('btc', 'mbtc', 'bit'):
        satoshis = float(input_quantity) * float(UNIT_MAPPINGS[input_type]['satoshis_per'])
    elif input_type == 'satoshi':
        satoshis = input_quantity
    else:
        raise Exception('Invalid Unit Choice: %s' % input_type)
    return int(satoshis)
Convert to satoshis, no rounding.
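For example, assuming the conventional mapping of 1e8 satoshis per BTC in UNIT_MAPPINGS:

    to_satoshis(0.5, 'btc')      # -> 50000000
    to_satoshis(123, 'satoshi')  # -> 123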
def complete(text, state):
    for cmd in COMMANDS:
        if cmd.startswith(text):
            if not state:
                return cmd
            else:
                state -= 1
Auto complete scss constructions in interactive mode.
def close(self):
    if not self._closed:
        self._closed = True
        if self._pool is not None:
            self._pool.close()
            self._pool = None
Shut down, closing any open connections in the pool.
def contribute_to_class(self, model, name):
    super(SearchableManager, self).contribute_to_class(model, name)
    setattr(model, name, ManagerDescriptor(self))
Newer versions of Django explicitly prevent managers being accessed from abstract classes, which is behaviour the search API has always relied on. Here we reinstate it.
def replace_url_query_values(url, replace_vals):
    if '?' not in url:
        return url
    parsed_url = urlparse(url)
    query = dict(parse_qsl(parsed_url.query))
    query.update(replace_vals)
    return '{0}?{1}'.format(url.split('?')[0], urlencode(query))
Replace querystring values in a url string.

>>> url = 'http://helloworld.com/some/path?test=5'
>>> replace_vals = {'test': 10}
>>> replace_url_query_values(url=url, replace_vals=replace_vals)
'http://helloworld.com/some/path?test=10'
def get_num_shares(self) -> Decimal:
    from pydatum import Datum
    today = Datum().today()
    return self.get_num_shares_on(today)
Returns the number of shares at this time
def sort_file_tabs_alphabetically(self):
    while self.sorted() is False:
        for i in range(0, self.tabs.tabBar().count()):
            if (self.tabs.tabBar().tabText(i) >
                    self.tabs.tabBar().tabText(i + 1)):
                self.tabs.tabBar().moveTab(i, i + 1)
Sort open tabs alphabetically.
def filter_queryset(self, request, queryset, view):
    applicable_filters, applicable_exclusions = self.build_filters(
        view, filters=self.get_request_filters(request))
    return self.apply_filters(
        queryset=queryset,
        applicable_filters=self.process_filters(applicable_filters, queryset, view),
        applicable_exclusions=self.process_filters(applicable_exclusions, queryset, view)
    )
Return the filtered queryset.
def create_comment(self, body, sha, path=None, position=None, line=1):
    json = None
    if body and sha and (line and int(line) > 0):
        data = {'body': body, 'line': line, 'path': path,
                'position': position}
        self._remove_none(data)
        url = self._build_url('commits', sha, 'comments', base_url=self._api)
        json = self._json(self._post(url, data=data), 201)
    return RepoComment(json, self) if json else None
Create a comment on a commit.

:param str body: (required), body of the message
:param str sha: (required), commit id
:param str path: (optional), relative path of the file to comment on
:param str position: (optional), line index in the diff to comment on
:param int line: (optional), line number of the file to comment on,
    default: 1
:returns: :class:`RepoComment <github3.repos.comment.RepoComment>` if
    successful, otherwise None
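A hypothetical call (the repository object, sha, and path are illustrative):

    comment = repo.create_comment(body='Nice catch!', sha='6dcb09b',
                                  path='src/main.py', position=4)
    # -> RepoComment on success, otherwise None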
def hooks_factory(identifier, configuration, context):
    manager = HookManager(identifier, configuration)
    manager.load_hooks(context)
    return manager
Returns the initialized hooks.
def _try_close_dirty_tabs(self, exept=None):
    widgets, filenames = self._collect_dirty_tabs(exept=exept)
    if not len(filenames):
        return True
    dlg = DlgUnsavedFiles(self, files=filenames)
    if dlg.exec_() == dlg.Accepted:
        if not dlg.discarded:
            for item in dlg.listWidget.selectedItems():
                filename = item.text()
                widget = None
                for widget in widgets:
                    if widget.file.path == filename:
                        break
                if widget != exept:
                    self._save_editor(widget)
                    self.removeTab(self.indexOf(widget))
        return True
    return False
Tries to close dirty tabs. Uses DlgUnsavedFiles to ask the user what he wants to do.
def _process_data(self, obj):
    assert len(self._waiters) > 0, (type(obj), obj)
    waiter, encoding, cb = self._waiters.popleft()
    if isinstance(obj, RedisError):
        if isinstance(obj, ReplyError):
            if obj.args[0].startswith('READONLY'):
                obj = ReadOnlyError(obj.args[0])
        _set_exception(waiter, obj)
        if self._in_transaction is not None:
            self._transaction_error = obj
    else:
        if encoding is not None:
            try:
                obj = decode(obj, encoding)
            except Exception as exc:
                _set_exception(waiter, exc)
                return
        if cb is not None:
            try:
                obj = cb(obj)
            except Exception as exc:
                _set_exception(waiter, exc)
                return
        _set_result(waiter, obj)
        if self._in_transaction is not None:
            self._in_transaction.append((encoding, cb))
Processes command results.
def get_max_recv_data_size(self, target):
    fname = "get_max_recv_data_size"
    cname = self.__class__.__module__ + '.' + self.__class__.__name__
    raise NotImplementedError("%s.%s() is required" % (cname, fname))
Returns the maximum number of data bytes for receiving.

The maximum number of data bytes acceptable for receiving with either
:meth:`send_cmd_recv_rsp` or :meth:`send_rsp_recv_cmd`. The value
reflects the local device capabilities for receiving in the mode
determined by *target*. It does not relate to any protocol capabilities
and negotiations.

Arguments:
    target (nfc.clf.Target): The current local or remote communication
        target.

Returns:
    int: Maximum number of data bytes supported for receiving.
def generate(self, id_or_uri):
    uri = self._client.build_uri(id_or_uri) + "/generate"
    return self._client.get(uri)
Generates and returns a random range.

Args:
    id_or_uri: ID or URI of range.

Returns:
    dict: A dict containing a list with IDs.
def addcommenttomergerequest(self, project_id, mergerequest_id, note):
    request = requests.post(
        '{0}/{1}/merge_request/{2}/comments'.format(
            self.projects_url, project_id, mergerequest_id),
        data={'note': note},
        headers=self.headers, verify=self.verify_ssl,
        auth=self.auth, timeout=self.timeout)
    return request.status_code == 201
Add a comment to a merge request.

:param project_id: ID of the project originating the merge request
:param mergerequest_id: ID of the merge request to comment on
:param note: Text of comment
:return: True if success
def connect(self, address, **kws):
    return yield_(Connect(self, address, timeout=self._timeout, **kws))
Connect to a remote socket at _address_.
@contextmanager  # needed so this generator actually works as a context manager
def _silence():
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    exception_occurred = False
    try:
        yield
    except:
        exception_occurred = True
        # Restore the streams before re-raising so exception output is visible
        sys.stdout = old_stdout
        sys.stderr = old_stderr
        raise
    if not exception_occurred:
        sys.stdout = old_stdout
        sys.stderr = old_stderr
A context manager that silences sys.stdout and sys.stderr.
def selected_objects(self):
    return [
        obj for obj in self.text_objects
        if contains_or_overlap(self.table_bbox, obj.bbox)
    ]
Filter out objects outside table boundaries
def put(self, key, value):
    key = self._service_key(key)
    self._service_ops['put'](key, value)
Stores the object `value` named by `key` in `service`.

Args:
    key: Key naming `value`.
    value: the object to store.
def pan_delta(self, d):
    dx, dy = d
    pan_x, pan_y = self.pan
    zoom_x, zoom_y = self._zoom_aspect(self._zoom)
    self.pan = (pan_x + dx / zoom_x, pan_y + dy / zoom_y)
    self.update()
Pan the view by a given amount.
def get_host_template(resource_root, name, cluster_name):
    return call(resource_root.get,
                HOST_TEMPLATE_PATH % (cluster_name, name),
                ApiHostTemplate, api_version=3)
Lookup a host template by name in the specified cluster.

@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@return: An ApiHostTemplate object.
@since: API v3
def tournament(self, negative=False):
    if self.generation <= self._random_generations and not negative:
        return self.random_selection()
    if not self._negative_selection and negative:
        return self.random_selection(negative=negative)
    vars = self.random()
    fit = [(k, self.population[x].fitness) for k, x in enumerate(vars)]
    if negative:
        fit = min(fit, key=lambda x: x[1])
    else:
        fit = max(fit, key=lambda x: x[1])
    index = fit[0]
    return vars[index]
Tournament selection; when negative is True it performs negative tournament selection.
def parse_timers(self):
    filenames = list(filter(os.path.exists,
                            [task.output_file.path for task in self]))
    parser = AbinitTimerParser()
    parser.parse(filenames)
    return parser
Parse the TIMER section reported in the ABINIT output files.

Returns:
    :class:`AbinitTimerParser` object
def _request(method, url, content_type=None, _data=None, user=None, passwd=None):
    opener = _build_opener(_HTTPHandler)
    request = _Request(url, data=_data)
    if content_type:
        request.add_header('Content-Type', content_type)
    if user and passwd:
        auth_encode = '{0}:{1}'.format(user, passwd).encode('base64')[:-1]
        auth_basic = "Basic {0}".format(auth_encode)
        request.add_header('Authorization', auth_basic)
    request.add_header('Accept', 'application/json')
    request.get_method = lambda: method
    try:
        handler = opener.open(request)
    except HTTPError as exc:
        return {'error': '{0}'.format(exc)}
    return salt.utils.json.loads(handler.read())
Makes an HTTP request. Returns the parsed JSON, or an object with an error.
def breakpoint_set(self, addr, thumb=False, arm=False):
    flags = enums.JLinkBreakpoint.ANY

    if thumb:
        flags = flags | enums.JLinkBreakpoint.THUMB
    elif arm:
        flags = flags | enums.JLinkBreakpoint.ARM

    handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)
    if handle <= 0:
        raise errors.JLinkException('Breakpoint could not be set.')

    return handle
Sets a breakpoint at the specified address.

If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.

Args:
    self (JLink): the ``JLink`` instance
    addr (int): the address where the breakpoint will be set
    thumb (bool): boolean indicating to set the breakpoint in THUMB mode
    arm (bool): boolean indicating to set the breakpoint in ARM mode

Returns:
    An integer specifying the breakpoint handle. This handle should be
    retained for future breakpoint operations.

Raises:
    TypeError: if the given address is not an integer.
    JLinkException: if the breakpoint could not be set.
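A brief usage sketch (the `jlink` instance and address are hypothetical):

    handle = jlink.breakpoint_set(0x08001234, thumb=True)
    # retain `handle` for later breakpoint operations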
def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
    ref_loc = (ref_loc_lon, ref_loc_lat)
    pole = (pole_plon, pole_plat)
    paleo_lat = 90 - pmag.angle(pole, ref_loc)
    return float(paleo_lat)
Calculate paleolatitude for a reference location based on a paleomagnetic pole.

Required Parameters
-------------------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees
def base_url(klass, space_id, resource_id=None, public=False, environment_id=None, **kwargs):
    if public:
        environment_slug = ""
        if environment_id is not None:
            environment_slug = "/environments/{0}".format(environment_id)
        return "spaces/{0}{1}/public/content_types".format(space_id, environment_slug)
    return super(ContentType, klass).base_url(
        space_id,
        resource_id=resource_id,
        environment_id=environment_id,
        **kwargs
    )
Returns the URI for the content type.
def get_child_files(path):
    path = FileHelper.abspath(path)
    return [filename for filename in os.listdir(path)
            if os.path.isfile(os.path.join(path, filename))]
Get all child files of a folder
def draw_variable_local(self, size):
    return ss.norm.rvs(loc=self.mu0, scale=self.sigma0, size=size)
Simulate from the Normal distribution using instance values.

Parameters
----------
size : int
    How many simulations to perform

Returns
-------
np.ndarray of Normal random variable
def clean_dict(d0, clean_item_fn=None):
    clean_item_fn = clean_item_fn if clean_item_fn else clean_item
    d = dict()
    for key in d0:
        cleaned_item = clean_item_fn(d0[key])
        if cleaned_item is not None:
            d[key] = cleaned_item
    return d
Return a json-clean dict. Will log info message for failures.
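A minimal example with a hypothetical clean_item_fn that keeps only JSON-friendly values:

    keep = lambda v: v if isinstance(v, (int, float, str)) else None
    clean_dict({'a': 1, 'b': object()}, clean_item_fn=keep)  # -> {'a': 1}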
def attempt(self, *kinds):
    if self._error:
        raise self._error
    token = self.next_token
    if not token:
        return None
    if kinds and token.kind not in kinds:
        return None
    self._advance()
    return token
Try to get the next token if it matches one of the kinds given, otherwise returning None. If no kinds are given, any kind is accepted.
def DbExportEvent(self, argin):
    self._log.debug("In DbExportEvent()")
    if len(argin) < 5:
        self.warn_stream("DataBase::db_export_event(): insufficient export info for event ")
        th_exc(DB_IncorrectArguments,
               "insufficient export info for event",
               "DataBase::ExportEvent()")
    event, IOR, host, pid, version = argin[:5]
    event = replace_wildcard(event.lower())
    self.db.export_event(event, IOR, host, pid, version)
Export Event channel to database.

:param argin:
    Str[0] = event channel name (or factory name)
    Str[1] = CORBA IOR
    Str[2] = Notifd host name
    Str[3] = Notifd pid
    Str[4] = Notifd version
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid
def set_extractor_processor_inputs(self, extractor_processors, sub_output=None):
    if not (isinstance(extractor_processors, ExtractorProcessor) or
            isinstance(extractor_processors, types.ListType)):
        raise ValueError(
            "extractor_processors must be an ExtractorProcessor or a list")
    if isinstance(extractor_processors, ExtractorProcessor):
        extractor_processor = extractor_processors
        self.input_fields = self.__get_jp(extractor_processor, sub_output)
    elif isinstance(extractor_processors, types.ListType):
        self.input_fields = list()
        for extractor_processor in extractor_processors:
            if isinstance(extractor_processor, ExtractorProcessor):
                self.input_fields.append(
                    self.__get_jp(extractor_processor, sub_output))
            elif isinstance(extractor_processor, list):
                self.input_fields.append(
                    reduce(lambda a, b: "{}|{}".format(a, b),
                           ["({})".format(self.__get_jp(x, sub_output))
                            for x in extractor_processor]))
    self.generate_json_paths()
    return self
Instead of specifying fields in the source document to rename for the extractor, allows the user to specify ExtractorProcessors that are executed earlier in the chain, and generates json paths from their output fields.
def _build(self, inputs_list):
    outputs = []
    for idx, tensor in enumerate(inputs_list):
        outputs.append(
            Linear(
                self._output_size,
                initializers=self._initializers,
                partitioners=self._partitioners,
                regularizers=self._regularizers,
                use_bias=(idx == 0 and self._use_bias))(tensor))
    return tf.add_n(outputs)
Connects the module into the graph.

If this is not the first time the module has been connected to the
graph, the Tensors provided here must have the same final dimensions as
when called the first time, in order for the existing variables to be
the correct size for the multiplication. The batch size may differ for
each connection.

Args:
    inputs_list: A list of 2D Tensors of rank 2, with leading batch
        dimension.

Returns:
    A 2D Tensor of size [batch_size, output_size].
def fix_repeat_dt(dt_list, offset_s=0.001):
    idx = (np.diff(dt_list) == timedelta(0))
    while np.any(idx):
        dt_list[idx.nonzero()[0] + 1] += timedelta(seconds=offset_s)
        idx = (np.diff(dt_list) == timedelta(0))
    return dt_list
Add some small offset to remove duplicate times.

Needed for xarray interp, which expects monotonically increasing times.
def _SetHeader(self, values):
    if self._values and len(values) != len(self._values):
        raise ValueError('Header values not equal to existing data width.')
    if not self._values:
        for _ in range(len(values)):
            self._values.append(None)
    self._keys = list(values)
    self._BuildIndex()
Set the row's header from a list.
def start_listener_thread(self, timeout_ms=30000, exception_handler=None):
    try:
        thread = Thread(target=self.listen_forever,
                        args=(timeout_ms, exception_handler))
        thread.daemon = True
        self.sync_thread = thread
        self.should_listen = True
        thread.start()
    except RuntimeError:
        e = sys.exc_info()[0]
        logger.error("Error: unable to start thread. %s", str(e))
Start a listener thread to listen for events in the background.

Args:
    timeout_ms (int): How long to poll the Home Server for before
        retrying.
    exception_handler (func(exception)): Optional exception handler
        function which can be used to handle exceptions in the caller
        thread.
def is_event_of_key_string(event, key_string):
    return len(event) >= 2 and not isinstance(event[1], Gdk.ModifierType) \
        and event[0] == Gtk.accelerator_parse(key_string)[0]
Condition check whether the key string represents the key value of the handed event and whether the event is of the right type.

The function checks for constructed event tuples that are generated by the
rafcon.gui.shortcut_manager.ShortcutManager.

:param tuple event: Event tuple generated by the ShortcutManager
:param str key_string: Key string parsed to a key value and for condition check
def bdd_common_after_scenario(context_or_world, scenario, status):
    if status == 'skipped':
        return
    elif status == 'passed':
        test_status = 'Pass'
        test_comment = None
        context_or_world.logger.info("The scenario '%s' has passed", scenario.name)
    else:
        test_status = 'Fail'
        test_comment = "The scenario '%s' has failed" % scenario.name
        context_or_world.logger.error("The scenario '%s' has failed", scenario.name)
        context_or_world.global_status['test_passed'] = False
    DriverWrappersPool.close_drivers(scope='function',
                                     test_name=scenario.name,
                                     test_passed=status == 'passed',
                                     context=context_or_world)
    add_jira_status(get_jira_key_from_scenario(scenario), test_status, test_comment)
Clean method that will be executed after each scenario in behave or lettuce.

:param context_or_world: behave context or lettuce world
:param scenario: running scenario
:param status: scenario status (passed, failed or skipped)
def configure(self, kubernetes_host, kubernetes_ca_cert='', token_reviewer_jwt='',
              pem_keys=None, mount_point=DEFAULT_MOUNT_POINT):
    if pem_keys is None:
        pem_keys = []
    list_of_pem_params = {
        'kubernetes_ca_cert': kubernetes_ca_cert,
        'pem_keys': pem_keys
    }
    for param_name, param_argument in list_of_pem_params.items():
        validate_pem_format(
            param_name=param_name,
            param_argument=param_argument,
        )
    params = {
        'kubernetes_host': kubernetes_host,
        'kubernetes_ca_cert': kubernetes_ca_cert,
        'token_reviewer_jwt': token_reviewer_jwt,
        'pem_keys': pem_keys,
    }
    api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
    return self._adapter.post(
        url=api_path,
        json=params,
    )
Configure the connection parameters for Kubernetes.

This path honors the distinction between the create and update capabilities inside ACL policies.

Supported methods:
    POST: /auth/{mount_point}/config. Produces: 204 (empty body)

:param kubernetes_host: Host must be a host string, a host:port pair, or a URL to the base of the
    Kubernetes API server. Example: https://k8s.example.com:443
:type kubernetes_host: str | unicode
:param kubernetes_ca_cert: PEM encoded CA cert for use by the TLS client used to talk with the
    Kubernetes API. NOTE: Every line must end with a newline: \n
:type kubernetes_ca_cert: str | unicode
:param token_reviewer_jwt: A service account JWT used to access the TokenReview API to validate
    other JWTs during login. If not set the JWT used for login will be used to access the API.
:type token_reviewer_jwt: str | unicode
:param pem_keys: Optional list of PEM-formatted public keys or certificates used to verify the
    signatures of Kubernetes service account JWTs. If a certificate is given, its public key will
    be extracted. Not every installation of Kubernetes exposes these keys.
:type pem_keys: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the configure_method request.
:rtype: requests.Response
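A hypothetical call in the hvac style this method appears to come from (host and cert path are illustrative):

    client.auth.kubernetes.configure(
        kubernetes_host='https://k8s.example.com:443',
        kubernetes_ca_cert=open('ca.crt').read(),
    )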
def assemble(self):
    first_block = ray.get(self.objectids[(0,) * self.ndim])
    dtype = first_block.dtype
    result = np.zeros(self.shape, dtype=dtype)
    for index in np.ndindex(*self.num_blocks):
        lower = DistArray.compute_block_lower(index, self.shape)
        upper = DistArray.compute_block_upper(index, self.shape)
        # Index with a tuple of slices (a list of slices is deprecated in numpy)
        result[tuple(slice(l, u) for (l, u) in zip(lower, upper))] = ray.get(
            self.objectids[index])
    return result
Assemble an array from a distributed array of object IDs.
def _msToString(self, ms):
    hr, ms = divmod(ms, 3600000)
    mins, ms = divmod(ms, 60000)
    secs, mill = divmod(ms, 1000)
    return "%ihr %imin %isecs %ims" % (hr, mins, secs, mill)
Change milliseconds to hours min sec ms format.
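For instance:

    self._msToString(3725004)  # -> '1hr 2min 5secs 4ms'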
def _compile_fragment_ast(schema, current_schema_type, ast, location, context):
    query_metadata_table = context['metadata']

    coerces_to_type_name = ast.type_condition.name.value
    coerces_to_type_obj = schema.get_type(coerces_to_type_name)

    basic_blocks = []

    is_same_type_as_scope = current_schema_type.is_same_type(coerces_to_type_obj)
    equivalent_union_type = context['type_equivalence_hints'].get(coerces_to_type_obj, None)
    is_base_type_of_union = (
        isinstance(current_schema_type, GraphQLUnionType) and
        current_schema_type.is_same_type(equivalent_union_type)
    )

    if not (is_same_type_as_scope or is_base_type_of_union):
        query_metadata_table.record_coercion_at_location(location, coerces_to_type_obj)
        basic_blocks.append(blocks.CoerceType({coerces_to_type_name}))

    inner_basic_blocks = _compile_ast_node_to_ir(
        schema, coerces_to_type_obj, ast, location, context)
    basic_blocks.extend(inner_basic_blocks)

    return basic_blocks
Return a list of basic blocks corresponding to the inline fragment at this AST node.

Args:
    schema: GraphQL schema object, obtained from the graphql library
    current_schema_type: GraphQLType, the schema type at the current location
    ast: GraphQL AST node, obtained from the graphql library.
    location: Location object representing the current location in the query
    context: dict, various per-compilation data (e.g. declared tags, whether
        the current block is optional, etc.). May be mutated in-place in this
        function!

Returns:
    list of basic blocks, the compiled output of the vertex AST node
def extract_geometry(self):
    gf = vtk.vtkCompositeDataGeometryFilter()
    gf.SetInputData(self)
    gf.Update()
    return wrap(gf.GetOutputDataObject(0))
Combines the geometry of all blocks into a single ``PolyData`` object.

Place this filter at the end of a pipeline before a polydata consumer
such as a polydata mapper to extract geometry from all blocks and append
them to one polydata object.
def _get_files(file_patterns, top=HERE):
    if not isinstance(file_patterns, (list, tuple)):
        file_patterns = [file_patterns]
    for i, p in enumerate(file_patterns):
        if os.path.isabs(p):
            file_patterns[i] = os.path.relpath(p, top)
    matchers = [_compile_pattern(p) for p in file_patterns]
    files = set()
    for root, dirnames, filenames in os.walk(top):
        if 'node_modules' in dirnames:
            dirnames.remove('node_modules')
        for m in matchers:
            for filename in filenames:
                fn = os.path.relpath(_glob_pjoin(root, filename), top)
                fn = fn.replace(os.sep, '/')
                if m(fn):
                    files.add(fn.replace(os.sep, '/'))
    return list(files)
Expand file patterns to a list of paths.

Parameters
----------
file_patterns: list or str
    A list of glob patterns for the data file locations. The globs can be
    recursive if they include a `**`. They should be relative paths from
    the top directory or absolute paths.
top: str
    the directory to consider for data files

Note: Files in `node_modules` are ignored.
def get_grades(self, login=None, promotion=None, **kwargs):
    _login = kwargs.get('login', login or self._login)
    _promotion_id = kwargs.get('promotion', promotion)
    _grades_url = GRADES_URL.format(login=_login, promo_id=_promotion_id)
    return self._request_api(url=_grades_url).json()
Get a user's grades on a single promotion based on his login.
Either use the `login` param, or the client's login if unset.

:return: JSON
def register(self, plugin):
    if not plugin or not isinstance(plugin, BasePlugin):
        raise ValueError("Plugin must be implemented as a subclass of BasePlugin class")
    if self.is_registered(plugin.name):
        raise ValueError("Plugin with name {} is already registered".format(plugin.name))
    self._plugins.append(plugin)
Register a plugin. New plugins are added to the end of the plugins list.

:param samtranslator.plugins.BasePlugin plugin: Instance/subclass of
    BasePlugin class that implements hooks
:raises ValueError: If plugin is not an instance of
    samtranslator.plugins.BasePlugin or if it is already registered
:return: None
def load_psd():
    psd = np.loadtxt("ZERO_DET_high_P_PSD.txt")[:, 1]
    # Downsample by averaging groups of 3 samples, padding with NaN so the
    # array length is divisible by the downsampling factor
    down_factor = 3
    pad_size = int(np.ceil(float(psd.size) / down_factor) * down_factor - psd.size)
    psd_padded = np.append(psd, np.zeros(pad_size) * np.NaN)
    psd = sp.nanmean(psd_padded.reshape(-1, down_factor), axis=1)
    return psd
Resamples advLIGO noise PSD to 4096 Hz
def all_entity_classes():
    persistent_classes = Entity._decl_class_registry.values()
    return [
        cls for cls in persistent_classes
        if isclass(cls) and issubclass(cls, Entity)
    ]
Return the list of all concrete persistent classes that are subclasses of Entity.
def is_valid(self):
    if not self.total:
        return False
    if not self.contributor.freelanceprofile.is_freelance:
        return False
    return True
returns `True` if the report should be sent.
def get_collection(self, request, **resources):
    if self._meta.queryset is None:
        return []

    filters = self.get_filters(request, **resources)
    filters.update(self.get_default_filters(**resources))
    qs = self._meta.queryset
    for key, (value, exclude) in filters.items():
        try:
            if exclude:
                qs = qs.exclude(**{key: value})
            else:
                qs = qs.filter(**{key: value})
        except FieldError as e:
            logger.warning(e)

    sorting = self.get_sorting(request, **resources)
    if sorting:
        qs = qs.order_by(*sorting)

    return qs
Get filters and return filtered result.

:return collection: collection of related resources.
def load_servers_from_env(self, filter=[], dynamic=None):
    if dynamic is None:
        dynamic = self._dynamic
    if NAMESERVERS_ENV_VAR in os.environ:
        servers = [s for s in os.environ[NAMESERVERS_ENV_VAR].split(';')
                   if s]
        self._parse_name_servers(servers, filter, dynamic)
Load the name servers environment variable and parse each server in the list.

@param filter Restrict the parsed objects to only those in this path.
    For example, setting filter to [['/', 'localhost', 'host.cxt', 'comp1.rtc']]
    will prevent 'comp2.rtc' in the same naming context from being parsed.
@param dynamic Override the tree-wide dynamic setting. If not provided,
    the value given when the tree was created will be used.
def _start_server(self, *args):
    self.log("Starting server", args)

    secure = self.certificate is not None
    if secure:
        self.log("Running SSL server with cert:", self.certificate)
    else:
        self.log("Running insecure server without SSL. "
                 "Do not use without SSL proxy in production!", lvl=warn)

    try:
        self.server = Server(
            (self.host, self.port),
            secure=secure,
            certfile=self.certificate
        ).register(self)
    except PermissionError:
        self.log('Could not open (privileged?) port, check '
                 'permissions!', lvl=critical)
Run the node local server
def cores(self):
    if self._cores is not None:
        return self._cores
    elif self._config is not None:
        return self._config.defaultCores
    else:
        raise AttributeError("Default value for 'cores' cannot be determined")
The number of CPU cores required.
def _update_recording(self, frame, config):
    should_record = config['is_recording']
    if should_record:
        if not self.is_recording:
            self.is_recording = True
            logger.info('Starting recording using %s',
                        self.video_writer.current_output().name())
        self.video_writer.write_frame(frame)
    elif self.is_recording:
        self.is_recording = False
        self.video_writer.finish()
        logger.info('Finished recording')
Adds a frame to the current video output.
def get(self):
    key = self.get_key_from_request()
    result = self.get_storage().get(key)
    return result if result else None
Get the item from redis.
def request(self, apdu):
    if _debug:
        ClientSSM._debug("request %r", apdu)

    apdu.pduSource = None
    apdu.pduDestination = self.pdu_address
    self.ssmSAP.request(apdu)
This function is called by client transaction functions when it wants to send a message to the device.
def partial_fit(self, X):
    opt, cost = self.sess.run((self.optimizer, self.cost),
                              feed_dict={self.x: X})
    return cost
Train model based on mini-batch of input data. Return cost of mini-batch.
def AAAA(host, nameserver=None):
    dig = ['dig', '+short', six.text_type(host), 'AAAA']

    if nameserver is not None:
        dig.append('@{0}'.format(nameserver))

    cmd = __salt__['cmd.run_all'](dig, python_shell=False)
    if cmd['retcode'] != 0:
        log.warning(
            'dig returned exit code \'%s\'. Returning empty list as fallback.',
            cmd['retcode']
        )
        return []

    return [x for x in cmd['stdout'].split('\n') if check_ip(x)]
Return the AAAA record for ``host``.

Always returns a list.

CLI Example:

.. code-block:: bash

    salt ns1 dig.AAAA www.google.com
def on_view_not_found(
    self,
    _,
    start_response: Callable[[str, List[Tuple[str, str]]], None],
) -> Iterable[bytes]:
    start_response(
        "405 Method Not Allowed",
        [('Content-type', 'text/plain')])
    return [b"Method Not Allowed"]
Called when a valid view is not found.
def insert(self, schema, fields, **kwargs):
    r = 0
    with self.connection(**kwargs) as connection:
        kwargs['connection'] = connection
        try:
            with self.transaction(**kwargs):
                r = self._insert(schema, fields, **kwargs)
        except Exception as e:
            exc_info = sys.exc_info()
            if self.handle_error(schema, e, **kwargs):
                r = self._insert(schema, fields, **kwargs)
            else:
                self.raise_error(e, exc_info)
    return r
Persist fields into the db.

schema -- Schema()
fields -- dict -- the values to persist

return -- int -- the primary key of the row just inserted
def find_span_binsearch(degree, knot_vector, num_ctrlpts, knot, **kwargs):
    tol = kwargs.get('tol', 10e-6)
    # Number of knots = m + 1, number of control points = n + 1, m = p + n + 1
    n = num_ctrlpts - 1
    if abs(knot_vector[n + 1] - knot) <= tol:
        return n
    # Set up the binary search
    low = degree
    high = num_ctrlpts
    mid = (low + high) / 2
    # Round the midpoint, nudging by tol to avoid float rounding surprises
    mid = int(round(mid + tol))
    while (knot < knot_vector[mid]) or (knot >= knot_vector[mid + 1]):
        if knot < knot_vector[mid]:
            high = mid
        else:
            low = mid
        mid = int((low + high) / 2)
    return mid
Finds the span of the knot over the input knot vector using binary search.

Implementation of Algorithm A2.1 from The NURBS Book by Piegl & Tiller.

The NURBS Book states that the knot span index always starts from zero,
i.e. for a knot vector [0, 0, 1, 1]; if FindSpan returns 1, then the knot
is between the interval [0, 1).

:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param num_ctrlpts: number of control points, :math:`n + 1`
:type num_ctrlpts: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:return: knot span
:rtype: int
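A worked call (a small clamped knot vector of degree 2 with five control points):

    U = [0, 0, 0, 1, 2, 3, 3, 3]
    find_span_binsearch(2, U, 5, 1.5)  # -> 3, since U[3] <= 1.5 < U[4]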
def name(self):
    return self._meta.name if self._meta.name else \
        'Rule @%s' % self.tag
Name attribute of rule element
def reset(self):
    self.resetRNG()
    sNow = np.zeros(self.pop_size)
    Shk = self.RNG.rand(self.pop_size)
    sNow[Shk < self.p_init] = 1
    self.sNow = sNow
Resets this agent type to prepare it for a new simulation run. This includes resetting the random number generator and initializing the style of each agent of this type.
def get_token(code, token_service, client_id, client_secret, redirect_uri, grant_type):
    data = {
        'code': code,
        'client_id': client_id,
        'client_secret': client_secret,
        'redirect_uri': redirect_uri,
        'grant_type': grant_type,
    }
    resp = requests.post(token_service, data, verify=False)
    return resp.json()
Fetches an OAuth 2 token.
def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
    "Set `val` inside the optimizer dictionary at `key`."
    if is_tuple(val):
        val = [(v1, v2) for v1, v2 in zip(*val)]
    for v, pg1, pg2 in zip(val, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
        pg1[key] = v
        if bn_groups:
            pg2[key] = v
    return val
Set `val` inside the optimizer dictionary at `key`.
def is_valid_delta_name(file):
    filename = basename(file)
    pattern = re.compile(Delta.FILENAME_PATTERN)
    if re.match(pattern, filename):
        return True
    return False
Return if a file has a valid name.

A delta file name can be:
- pre-all.py
- pre-all.sql
- delta_x.x.x_ddmmyyyy.pre.py
- delta_x.x.x_ddmmyyyy.pre.sql
- delta_x.x.x_ddmmyyyy.py
- delta_x.x.x_ddmmyyyy.sql
- delta_x.x.x_ddmmyyyy.post.py
- delta_x.x.x_ddmmyyyy.post.sql
- post-all.py
- post-all.sql

where x.x.x is the version number and _ddmmyyyy is an optional
description, usually representing the date of the delta file.
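Two illustrative checks (assuming Delta.FILENAME_PATTERN encodes exactly the forms listed above):

    is_valid_delta_name('delta_1.2.0_01012019.pre.sql')  # -> True
    is_valid_delta_name('notes.txt')                     # -> False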
def update(self, test_path, number):
    GRAPH_WIDTH = 14
    num_filled = int(round(min(1.0, float(number) / self.max) * GRAPH_WIDTH))
    graph = ''.join([
        self._fill_cap(' ' * num_filled),
        self._empty_cap(self._empty_char * (GRAPH_WIDTH - num_filled))
    ])
    cols_for_path = self.cols - GRAPH_WIDTH - 2
    if len(test_path) > cols_for_path:
        test_path = test_path[len(test_path) - cols_for_path:]
    else:
        test_path += ' ' * (cols_for_path - len(test_path))
    self.last = self._term.bold(test_path) + ' ' + graph
    with self._at_last_line():
        self.stream.write(self.last)
    self.stream.flush()
Draw an updated progress bar.

At the moment, the graph takes a fixed width, and the test identifier
takes the rest of the row, truncated from the left to fit.

test_path -- the selector of the test being run
number -- how many tests have been run so far, including this one
def dump(self, fh, value, context=None):
    value = self.dumps(value)
    fh.write(value)
    return len(value)
Attempt to transform and write a string-based foreign value to the given file-like object. Returns the length written.
def add_density_option_group(parser):
    density_group = parser.add_argument_group(
        "Options for configuring the contours and density color map")
    density_group.add_argument(
        "--density-cmap", type=str, default='viridis',
        help="Specify the colormap to use for the density. "
             "Default is viridis.")
    density_group.add_argument(
        "--contour-color", type=str, default=None,
        help="Specify the color to use for the contour lines. Default is "
             "white for density plots and black for scatter plots.")
    density_group.add_argument(
        '--use-kombine-kde', default=False, action="store_true",
        help="Use kombine's KDE for determining contours. "
             "Default is to use scipy's gaussian_kde.")
    return density_group
Adds the options needed to configure contours and density colour map.

Parameters
----------
parser : object
    ArgumentParser instance.
def instruction_ROR_register(self, opcode, register):
    a = register.value
    r = self.ROR(a)
    register.set(r)
Rotate accumulator right
def export_public_keys(self, identities):
    public_keys = []
    with self.device:
        for i in identities:
            pubkey = self.device.pubkey(identity=i)
            vk = formats.decompress_pubkey(pubkey=pubkey,
                                           curve_name=i.curve_name)
            public_key = formats.export_public_key(vk=vk,
                                                   label=i.to_string())
            public_keys.append(public_key)
    return public_keys
Export SSH public keys from the device.