code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def distributions_route(self, request):
    """Serve compressed histograms for a (tag, run) pair as an HTTP response.

    Responds 200 with the payload from ``distributions_impl``, or 400 with
    the error text when the (tag, run) pair is invalid.
    """
    requested_tag = request.args.get('tag')
    requested_run = request.args.get('run')
    try:
        body, mime_type = self.distributions_impl(requested_tag, requested_run)
    except ValueError as err:
        body, mime_type, code = str(err), 'text/plain', 400
    else:
        code = 200
    return http_util.Respond(request, body, mime_type, code=code)
Given a tag and single run, return an array of compressed histograms.
def _accumulate(self, old_accum, next_val): return old_accum * (1 - self.alpha) + next_val * self.alpha
Implement exponential moving average
def get_module(app, modname, verbose=False, failfast=False):
    """Load ``modname`` from a single app, returning None when unavailable.

    Taken from https://github.com/ojii/django-load.

    :param app: dotted app path the module lives under
    :param modname: module name inside the app
    :param verbose: print load successes/failures
    :param failfast: re-raise ImportError instead of returning None
    """
    dotted = '%s.%s' % (app, modname)
    try:
        loaded = import_module(dotted)
    except ImportError as exc:
        if failfast:
            raise exc
        if verbose:
            print("Could not load %r from %r: %s" % (modname, app, exc))
        return None
    if verbose:
        print("Loaded %r from %r" % (modname, app))
    return loaded
Internal function to load a module from a single app. taken from https://github.com/ojii/django-load.
def isBirthday(self):
    """Return True when the user's birthdate matches today's month and day."""
    if not self.birthday:
        return False
    born = self.birthdate()
    now = date.today()
    return born.month == now.month and born.day == now.day
Is it the user's birthday today?
def add_link(self):
    """Allocate a new internal link id, initialised to (0, 0), and return it."""
    link_id = len(self.links) + 1
    self.links[link_id] = (0, 0)
    return link_id
Create a new internal link
def wait(self, limit=None):
    """Go into consume mode, processing messages until interrupted.

    Mostly for testing and simple programs; prefer ``iterconsume`` or
    ``iterqueue``. Runs an infinite loop applying each incoming message
    to all registered callbacks via ``receive``.

    Bug fix: the original called ``it.next()``, which only exists on
    Python 2 iterators; the builtin ``next(it)`` works on both Python 2
    and 3.

    :param limit: optional maximum number of messages to consume.
    """
    it = self.iterconsume(limit)
    while True:
        next(it)
Go into consume mode. Mostly for testing purposes and simple programs, you probably want :meth:`iterconsume` or :meth:`iterqueue` instead. This runs an infinite loop, processing all incoming messages using :meth:`receive` to apply the message to all registered callbacks.
def fetch_credential_report(self, credentials, ignore_exception = False):
    """Fetch and parse the IAM credential report into ``self.credential_report``.

    The report is a CSV (bytes): the first line holds the column names and
    each following line is one IAM user, keyed by its first field (the user
    name).

    :param credentials: AWS credentials passed to ``connect_service``
    :param ignore_exception: fail silently; report creation is asynchronous
        and not always ready
    """
    iam_report = {}
    try:
        api_client = connect_service('iam', credentials, silent = True)
        response = api_client.generate_credential_report()
        # Report generation is asynchronous; bail out if it is not ready yet.
        if response['State'] != 'COMPLETE':
            if not ignore_exception:
                printError('Failed to generate a credential report.')
            return
        report = api_client.get_credential_report()['Content']
        lines = report.splitlines()
        # First CSV line: column names. Remaining lines: one row per user.
        keys = lines[0].decode('utf-8').split(',')
        for line in lines[1:]:
            values = line.decode('utf-8').split(',')
            # values[0] is the user name, used as the per-user dict key.
            manage_dictionary(iam_report, values[0], {})
            for key, value in zip(keys, values):
                iam_report[values[0]][key] = value
        self.credential_report = iam_report
        self.fetchstatuslogger.counts['credential_report']['fetched'] = 1
    except Exception as e:
        if ignore_exception:
            return
        printError('Failed to download a credential report.')
        printException(e)
Fetch the credential report. :param credentials: AWS credentials used to connect to the IAM API :param ignore_exception: fail silently when the report cannot be generated or downloaded (credential report creation is asynchronous and not always ready) :type ignore_exception: bool
def _handleDecodeHextileRAW(self, block, bg, color, x, y, width, height, tx, ty, tw, th):
    """Handle a hextile subrectangle that uses raw encoding.

    (tx, ty, tw, th) is the subrectangle being painted and ``block`` holds
    its raw pixel data; the remaining parameters describe the enclosing
    hextile update and are passed through unchanged.
    """
    self.updateRectangle(tx, ty, tw, th, block)
    # Continue with the next hextile subrectangle of the enclosing update.
    self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
the tile is in raw encoding
def get_handler_classes(self):
    """Return the handler classes to use when receiving RPC requests.

    Resolves the dotted paths from ``settings.MODERNRPC_HANDLERS`` and,
    unless this view accepts ALL protocols, keeps only the handlers whose
    protocol matches.

    Fix: ``ensure_sequence(self.protocol)`` was evaluated once per handler
    class inside the comprehension; it is loop-invariant, so it is now
    hoisted out.
    """
    handler_classes = [import_string(handler_cls) for handler_cls in settings.MODERNRPC_HANDLERS]
    if self.protocol == ALL:
        return handler_classes
    allowed_protocols = ensure_sequence(self.protocol)
    return [cls for cls in handler_classes if cls.protocol in allowed_protocols]
Return the list of handlers to use when receiving RPC requests.
def display(self) -> typing.Union[None, report.Report]:
    """The display report for the current project, or None when there is
    no project or no current step."""
    if not self._project:
        return None
    step = self._project.current_step
    return step.report if step else None
The display report for the current project.
def elements_are_numbers(array):
    """Test whether the elements of ``array`` are numbers.

    Returns 0 for an empty array, False as soon as a non-number is found,
    and otherwise the maximum truthy value produced by ``is_a_number``
    across all elements (at least 1).
    """
    if len(array) == 0:
        return 0
    best = 1
    for element in array:
        verdict = is_a_number(element)
        if not verdict:
            return False
        best = max(best, verdict)
    return best
Tests whether the elements of the supplied array are numbers.
def write(self, filepath, fully_qualified=True, pretty_print=False, encoding="UTF-8"):
    """Serialize this METS document and write it to ``filepath``.

    The default encoding is UTF-8; with ``encoding="unicode"`` lxml
    returns text and no XML declaration is emitted.

    :param str filepath: Path to write the METS document to
    """
    document_root = self.serialize(fully_qualified=fully_qualified)
    tree = document_root.getroottree()
    write_kwargs = {"pretty_print": pretty_print, "encoding": encoding}
    if encoding != "unicode":
        # lxml only allows an XML declaration when writing encoded bytes.
        write_kwargs["xml_declaration"] = True
    tree.write(filepath, **write_kwargs)
Serialize and write this METS document to `filepath`. The default encoding is ``UTF-8``. This method will return a unicode string when ``encoding`` is set to ``unicode``. :param str filepath: Path to write the METS document to
def leaky_relu(x, name=None):
    """Creates a leaky_relu: returns x where x > 0 and 0.01 * x elsewhere.

    The leaky part may prevent dead neurons since the gradient never goes
    completely to 0.

    Args:
        x: The input tensor.
        name: Optional name for this op's name scope.

    Returns:
        x if x > 0 otherwise 0.01 * x.
    """
    # TF1-style name_scope(name, default_name, values) signature.
    with tf.name_scope(name, 'leaky_relu', [x]) as scope:
        x = tf.convert_to_tensor(x, name='x')
        return tf.where(tf.less(x, 0.0), 0.01 * x, x, name=scope)
Creates a leaky_relu. This is an alternate non-linearity to relu. The leaky part of the relu may prevent dead Neurons in a model since the gradient doesn't go completely to 0. Args: x: The input tensor. name: Optional name for this op. Returns: x if x > 0 otherwise 0.01 * x.
def ping(host=None, core_name=None):
    """Health-check solr.

    When no ``core_name`` is given and cores are configured, every core in
    ``__opts__['solr.cores']`` is pinged and the results are merged into a
    single return dict; otherwise a single admin ping is issued.
    """
    ret = _get_return_dict()
    if _get_none_or_value(core_name) is None and _check_for_cores():
        success = True
        for name in __opts__['solr.cores']:
            resp = _get_admin_info('ping', host=host, core_name=name)
            if resp['success']:
                data = {name: {'status': resp['data']['status']}}
            else:
                # One failing core marks the whole check as failed.
                success = False
                data = {name: {'status': None}}
            ret = _update_return_dict(ret, success, data, resp['errors'])
        return ret
    return _get_admin_info('ping', host=host, core_name=core_name)
Does a health check on solr, makes sure solr can talk to the indexes. host : str (None) The solr host to query. __opts__['host'] is default. core_name : str (None) The name of the solr core if using cores. Leave this blank if you are not using cores or if you want to check all cores. Return : dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} CLI Example: .. code-block:: bash salt '*' solr.ping music
def add_instance(self, role, instance, username='root', key_filename=None, output_shell=False):
    """Register a Connection for ``instance`` under ``role``.

    :param role: instance's role (bucket key in ``self.Instances``)
    :param instance: host parameters dict (must contain ``private_hostname``
        and ``public_hostname``)
    :param username: ssh user name
    :param key_filename: ssh private key file
    :param output_shell: echo output from this connection to stdout
    """
    if role not in self.Instances:
        self.Instances[role] = []
    self.logger.debug('Adding ' + role + ' with private_hostname ' + instance['private_hostname'] + ', public_hostname ' + instance['public_hostname'])
    connection = Connection(instance, username, key_filename, output_shell=output_shell)
    self.Instances[role].append(connection)
Add instance to the setup @param role: instance's role @type role: str @param instance: host parameters we would like to establish connection to @type instance: dict @param username: user name for creating ssh connection @type username: str @param key_filename: file name with ssh private key @type key_filename: str @param output_shell: write output from this connection to standard output @type output_shell: bool
def stop(self):
    """Mark this connection stopped and close the transport, if any."""
    self.state = STATE_STOPPED
    transport = self.transport
    if transport:
        transport.close()
Close websocket connection.
def _remove_deprecated_options(self, old_version):
    """Remove options present in the old .ini file but absent from the
    current defaults.

    Walks the defaults file of ``old_version``; every (section, option)
    that no longer has a current default is deleted, and sections that
    become empty are removed entirely.
    """
    old_defaults = self._load_old_defaults(old_version)
    for section in old_defaults.sections():
        for option, _ in old_defaults.items(section, raw=self.raw):
            # No current default means the option is deprecated.
            if self.get_default(section, option) is NoDefault:
                try:
                    self.remove_option(section, option)
                    # Drop the section once it holds no options at all.
                    if len(self.items(section, raw=self.raw)) == 0:
                        self.remove_section(section)
                except cp.NoSectionError:
                    self.remove_section(section)
Remove options which are present in the .ini file but not in defaults
def entries(self, region, queue, tier, division):
    """Get all league entries for a queue/tier/division.

    :param string region: the region to execute this request on
    :param string queue: the queue to query, e.g. RANKED_SOLO_5x5
    :param string tier: the tier to query, e.g. DIAMOND
    :param string division: the division to query, e.g. III
    :returns: Set[LeagueEntryDTO]
    """
    url, query = LeagueApiV4Urls.entries(
        region=region,
        queue=queue,
        tier=tier,
        division=division,
    )
    endpoint_name = self.entries.__name__
    return self._raw_request(endpoint_name, region, url, query)
Get all the league entries :param string region: the region to execute this request on :param string queue: the queue to query, i.e. RANKED_SOLO_5x5 :param string tier: the tier to query, i.e. DIAMOND :param string division: the division to query, i.e. III :returns: Set[LeagueEntryDTO]
def _batch_call_watchers(self_):
    """Batch-dispatch all queued Events to their registered watchers.

    Loops until the event queue is empty because watcher callbacks may
    themselves queue further events, which then form the next batch.
    """
    while self_.self_or_cls.param._events:
        # Index queued events by (parameter name, what) for O(1) lookup below.
        event_dict = OrderedDict([((event.name, event.what), event)
                                  for event in self_.self_or_cls.param._events])
        # Snapshot and clear the queues before dispatching, so callbacks
        # that trigger new events start a fresh batch instead of mutating
        # the one being processed.
        watchers = self_.self_or_cls.param._watchers[:]
        self_.self_or_cls.param._events = []
        self_.self_or_cls.param._watchers = []
        for watcher in watchers:
            # Collect only the events this watcher subscribed to, tagging
            # their trigger type.
            events = [self_._update_event_type(watcher, event_dict[(name, watcher.what)],
                                               self_.self_or_cls.param._TRIGGER)
                      for name in watcher.parameter_names
                      if (name, watcher.what) in event_dict]
            with batch_watch(self_.self_or_cls, run=False):
                if watcher.mode == 'args':
                    watcher.fn(*events)
                else:
                    # kwargs mode: map each changed parameter to its new value.
                    watcher.fn(**{c.name:c.new for c in events})
Batch call a set of watchers based on the parameter value settings in kwargs using the queued Event and watcher objects.
def _get_container_id(self, labels):
    """Resolve a container id from a container-scoped metric's labels.

    Should only be called on a container-scoped metric: looks up the
    (namespace, pod name, container name) tuple in the pod list.

    :param labels: metric labels
    :return: container id string, or None when not found
    """
    get_label = CadvisorPrometheusScraperMixin._get_container_label
    name_tuple = (
        get_label(labels, "namespace"),
        get_label(labels, "pod_name"),
        get_label(labels, "container_name"),
    )
    return self.pod_list_utils.get_cid_by_name_tuple(name_tuple)
Should only be called on a container-scoped metric It gets the container id from the podlist using the metrics labels :param labels :return str or None
def maybe_copy_file_to_directory(source_filepath, target_directory):
    """Copy a file to a directory if it is not already there.

    Args:
        source_filepath: a string
        target_directory: a string

    Returns:
        a string: the target filepath.
    """
    if not tf.gfile.Exists(target_directory):
        tf.logging.info("Creating directory %s" % target_directory)
        # NOTE(review): existence is checked via tf.gfile but the directory is
        # created with os.mkdir (local FS only, single level) — confirm this
        # is never called with remote paths or missing parent directories.
        os.mkdir(target_directory)
    target_filepath = os.path.join(target_directory, os.path.basename(source_filepath))
    if not tf.gfile.Exists(target_filepath):
        tf.logging.info("Copying %s to %s" % (source_filepath, target_filepath))
        tf.gfile.Copy(source_filepath, target_filepath)
        statinfo = os.stat(target_filepath)
        tf.logging.info("Successfully copied %s, %s bytes." % (target_filepath, statinfo.st_size))
    else:
        tf.logging.info("Not copying, file already found: %s" % target_filepath)
    return target_filepath
Copy a file to a directory if it is not already there. Returns the target filepath. Args: source_filepath: a string target_directory: a string Returns: a string
def get_sequence_rule_lookup_session_for_bank(self, bank_id, proxy):
    """Get the ``OsidSession`` for the sequence rule lookup service for the
    given bank.

    arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
    arg: proxy (osid.proxy.Proxy): a proxy
    return: a ``SequenceRuleLookupSession`` scoped to ``bank_id``
    raise: Unimplemented - ``supports_sequence_rule_lookup()`` is false
    """
    if not self.supports_sequence_rule_lookup():
        raise errors.Unimplemented()
    # NOTE(review): bank_id is passed through without a NotFound/NullArgument
    # check here; presumably the session constructor validates it — confirm.
    return sessions.SequenceRuleLookupSession(bank_id, proxy, self._runtime)
Gets the ``OsidSession`` associated with the sequence rule lookup service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.authoring.SequenceRuleLookupSession) - a ``SequenceRuleLookupSession`` raise: NotFound - no ``Bank`` found by the given ``Id`` raise: NullArgument - ``bank_id or proxy is null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_sequence_rule_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_sequence_rule_lookup()`` and ``supports_visible_federation()`` are ``true``.*
def store_object(self, obj_name, data, content_type=None, etag=None,
                 content_encoding=None, ttl=None, return_none=False,
                 headers=None, extra_info=None):
    """Create a new object in this container populated with ``data``.

    Thin backwards-compatible wrapper around ``create``. Returns a
    StorageObject reference unless ``return_none`` is True.

    ``extra_info`` is accepted for backwards compatibility only and is
    ignored entirely.
    """
    return self.create(
        obj_name=obj_name,
        data=data,
        content_type=content_type,
        etag=etag,
        content_encoding=content_encoding,
        ttl=ttl,
        return_none=return_none,
        headers=headers,
    )
Creates a new object in this container, and populates it with the given data. A StorageObject reference to the uploaded file will be returned, unless 'return_none' is set to True. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more.
def discriminator2(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
    """Second part of the discriminator.

    Takes a feature map (e.g. 256x8x8) as input and produces the logistic
    loss for the real/fake decision.

    :param ndf: base number of discriminator filters (this stage uses ndf*8)
    :param no_bias: disable bias terms in the convolution
    :param fix_gamma: freeze BatchNorm gamma
    :param eps: BatchNorm epsilon
    """
    BatchNorm = mx.sym.BatchNorm
    data = mx.sym.Variable('data')
    label = mx.sym.Variable('label')
    # Strided 5x5 convolution downsamples the feature map by 2.
    d4 = mx.sym.Convolution(data, name='d4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*8, no_bias=no_bias)
    dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
    dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)
    h = mx.sym.Flatten(dact4)
    # Single logit, turned into a binary real/fake loss.
    d5 = mx.sym.FullyConnected(h, num_hidden=1, name="d5")
    dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
    return dloss
Second part of the discriminator which takes a 256x8x8 feature map as input and generates the loss based on whether the input image was a real one or fake one
def _decode(hashid, salt, alphabet, separators, guards):
    """Yield the integer values encoded in ``hashid`` (no argument checks)."""
    # Strip guard characters: when guards split the string into 2 or 3
    # parts, the payload is the middle part; otherwise use the whole string.
    parts = tuple(_split(hashid, guards))
    hashid = parts[1] if 2 <= len(parts) <= 3 else parts[0]
    if not hashid:
        return
    # The first character is the "lottery" char chosen during encoding; it
    # seeds the alphabet reshuffle for every value.
    lottery_char = hashid[0]
    hashid = hashid[1:]
    hash_parts = _split(hashid, separators)
    for part in hash_parts:
        # Re-derive the shuffled alphabet exactly as the encoder did, then
        # un-hash this part against it. Note ``alphabet`` carries over
        # between iterations on purpose.
        alphabet_salt = (lottery_char + salt + alphabet)[:len(alphabet)]
        alphabet = _reorder(alphabet, alphabet_salt)
        yield _unhash(part, alphabet)
Helper method that restores the values encoded in a hashid without argument checks.
def _get_webapi_requests(self):
    """Attach browser-like headers to the shared webapi Requests session
    and return it."""
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Referer': 'http://music.163.com',
        'Host': 'music.163.com',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
    }
    session = NCloudBot.req
    session.headers.update(headers)
    return session
Update headers of webapi for Requests.
def get_model_indexes(model):
    """Return the names of all search indexes in which ``model`` is configured.

    A model may appear in multiple indexes; useful for knowing which
    indexes need updating when a model is saved.

    Args:
        model: a Django model class.
    """
    return [
        index
        for index in get_index_names()
        for app_model in get_index_models(index)
        if app_model == model
    ]
Return list of all indexes in which a model is configured. A model may be configured to appear in multiple indexes. This function will return the names of the indexes as a list of strings. This is useful if you want to know which indexes need updating when a model is saved. Args: model: a Django model class.
def parse_cdhit_clstr_file(lines):
    """Parse cd-hit ``.clstr`` lines into a list of clusters, each a list
    of cleaned sequence ids."""
    clusters = []
    current = []
    for line in lines:
        if line.startswith('>Cluster'):
            # A new cluster header flushes the previous cluster, if any.
            if current:
                clusters.append(current)
                current = []
        else:
            # Member lines look like "0  123aa, >seq_id... *"; field 2 is the id.
            current.append(clean_cluster_seq_id(line.split()[2]))
    if current:
        clusters.append(current)
    return clusters
Returns a list of list of sequence ids representing clusters
def _split_list(cls, items, separator=",", last_separator=" and "): if items is None: return None items = items.split(separator) last_item = items[-1] last_split = last_item.split(last_separator) if len(last_split) > 1: items[-1] = last_split[0] items.append(last_split[1]) return [e.strip() for e in items]
Splits a string listing elements into an actual list. Parameters ---------- items: :class:`str` A string listing elements. separator: :class:`str` The separator between each item. A comma by default. last_separator: :class:`str` The separator used for the last item. ' and ' by default. Returns ------- :class:`list` of :class:`str` A list containing each one of the items.
def volreg(dset, suffix='_volreg', base=3, tshift=3, dfile_suffix='_volreg.1D'):
    """Simple interface to 3dvolreg.

    :param suffix: suffix appended to ``dset`` for the registered output
    :param base: a volume number or ``dset[#]`` to register to
    :param tshift: number of images to ignore for -tshift; None disables it
    :param dfile_suffix: suffix appended to ``dset`` for the motion params file
    """
    cmd = [
        '3dvolreg',
        '-prefix', nl.suffix(dset, suffix),
        '-base', base,
        '-dfile', nl.prefix(dset) + dfile_suffix,
    ]
    if tshift:
        cmd += ['-tshift', tshift]
    cmd.append(dset)
    nl.run(cmd, products=nl.suffix(dset, suffix))
simple interface to 3dvolreg :suffix: suffix to add to ``dset`` for volreg'ed file :base: either a number or ``dset[#]`` of the base image to register to :tshift: if a number, then tshift ignoring that many images, if ``None`` then don't tshift :dfile_suffix: suffix to add to ``dset`` to save the motion parameters to
def write(self, b):
    """Append bytes ``b`` to the write buffer and flush.

    Raises TypeError for str input (this is a binary stream); returns the
    number of bytes written.
    """
    self._checkClosed()
    if isinstance(b, str):
        raise TypeError("can't write str to binary stream")
    with self._write_lock:
        self._write_buf.extend(b)
        self._flush_unlocked()
        return len(b)
Write bytes to buffer.
def DeleteList(self, listName):
    """Delete the SharePoint list with the given name.

    Returns the response body text on HTTP 200, otherwise the raw
    Response object.

    Bug fix: the original compared the Response object itself to 200
    (``response == 200``), which is always False, so the success branch
    was unreachable; compare ``response.status_code`` instead.
    """
    soap_request = soap('DeleteList')
    soap_request.add_parameter('listName', listName)
    self.last_request = str(soap_request)
    response = self._session.post(url=self._url('Lists'),
                                  headers=self._headers('DeleteList'),
                                  data=str(soap_request),
                                  verify=self._verify_ssl,
                                  timeout=self.timeout)
    if response.status_code == 200:
        return response.text
    return response
Delete a List with given name
def search_mode_provides(self, product, pipeline='default'):
    """Search the mode that provides ``product`` in the given pipeline.

    Returns a ProductEntry (name, mode key, field); raises ValueError when
    no mode provides the product.
    """
    selected_pipeline = self.pipelines[pipeline]
    for obj, mode, field in self.iterate_mode_provides(self.modes, selected_pipeline):
        if obj.name() == product:
            return ProductEntry(obj.name(), mode.key, field)
    raise ValueError('no mode provides %s' % product)
Search the mode that provides a given product
def group_envs(envlist):
    """Group Tox environments for Travis CI builds.

    Environments are bucketed by (python version, category) — taken from
    the first two dash-separated tokens of each name — so each bucket can
    become a separate Travis job. The 'lint' category is folded into
    'unit' so linting runs alongside the unit tests.
    """
    buckets = {}
    for env in envlist:
        envpy, category = env.split('-')[0:2]
        if category == 'lint':
            category = 'unit'
        buckets.setdefault((envpy, category), []).append(env)
    return sorted(
        (envpy, category, envs)
        for (envpy, category), envs in buckets.items()
    )
Group Tox environments for Travis CI builds Separate by Python version so that they can go in different Travis jobs: >>> group_envs(['py37-int-snappy', 'py36-int']) [('py36', 'int', ['py36-int']), ('py37', 'int', ['py37-int-snappy'])] Group unit tests and linting together: >>> group_envs(['py27-unit', 'py27-lint']) [('py27', 'unit', ['py27-unit', 'py27-lint'])]
def _get_repr(obj, pretty=False, indent=1): if pretty: repr_value = pformat(obj, indent) else: repr_value = repr(obj) if sys.version_info[0] == 2: try: repr_value = repr_value.decode('raw_unicode_escape') except UnicodeError: repr_value = repr_value.decode('utf-8', 'replace') return repr_value
Get string representation of an object :param obj: object :type obj: object :param pretty: use pretty formatting :type pretty: bool :param indent: indentation for pretty formatting :type indent: int :return: string representation :rtype: str
def process_boolean(self, tag):
    """Assign the next free bit address to a Boolean tag and advance the
    normal register's bit cursor."""
    register = self.normal_register
    tag.set_address(register.current_bit_address)
    register.move_to_next_bit_address()
Process Boolean type tags
def _output_work(self, work, root):
    """Save the TEI XML document ``root`` to ``<output_dir>/<work>``."""
    destination = os.path.join(self._output_dir, work)
    document = etree.ElementTree(root)
    document.write(destination, encoding='utf-8', pretty_print=True)
Saves the TEI XML document `root` at the path `work`.
def run(self):
    """Queue manager job callback: fetch the configured view's items and
    process them."""
    try:
        self.proxy = config_ini.engine.open()
        # Fetch a fresh (uncached) item list for this job's view.
        items = list(config_ini.engine.items(self.VIEWNAME, cache=False))
        if self.sort_key:
            items.sort(key=self.sort_key)
        self._start(items)
        self.LOG.debug("%s - %s" % (config_ini.engine.engine_id, self.proxy))
    except (error.LoggableError, xmlrpc.ERRORS) as exc:
        # Known/expected errors are only logged at debug level.
        self.LOG.debug(str(exc))
Queue manager job callback.
def namespace(self):
    """Build, cache and return the dictionary namespace exposed to the user."""
    ns = {
        'db': self.store,
        'store': store,
        'autocommit': False,
    }
    self._ns = ns
    return ns
Return a dictionary representing the namespace which should be available to the user.
def get_model_agents(self):
    """Return a list of all non-None Agents from all Statements.

    Returns
    -------
    agents : list[indra.statements.Agent]
        A list of Agents that are in the model.
    """
    return [
        agent
        for statement in self.get_statements()
        for agent in statement.agent_list()
        if agent is not None
    ]
Return a list of all Agents from all Statements. Returns ------- agents : list[indra.statements.Agent] A list of Agents that are in the model.
def read(self, address, size):
    """Read ``size`` bytes starting at ``address`` and assemble them as a
    little-endian integer (byte i contributes to bits i*8..i*8+7)."""
    value = 0
    for offset in range(size):
        value |= self._read_byte(address + offset) << (offset * 8)
    return value
Read arbitrary size content from memory.
def open_sciobj_file_by_pid(pid, write=False):
    """Open the local file holding the Science Object bytes for ``pid``.

    :param pid: persistent identifier of the Science Object
    :param write: open for writing, creating any missing parent directories
    :return: an open file object for the resolved path
    """
    abs_path = get_abs_sciobj_file_path_by_pid(pid)
    if write:
        # The object's directory tree may not exist yet when writing.
        d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
    return open_sciobj_file_by_path(abs_path, write)
Open the file containing the Science Object bytes at the custom location ``abs_path`` in the local filesystem for read.
def get_parent_families(self, family_id):
    """Get the parent families of the given ``family_id``.

    Delegates to the underlying catalog session when one is available;
    otherwise resolves the parent ids through a FamilyLookupSession.
    """
    if self._catalog_session is not None:
        return self._catalog_session.get_parent_catalogs(catalog_id=family_id)
    parent_ids = list(self.get_parent_family_ids(family_id))
    lookup = FamilyLookupSession(self._proxy, self._runtime)
    return lookup.get_families_by_ids(parent_ids)
Gets the parent families of the given ``id``. arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` to query return: (osid.relationship.FamilyList) - the parent families of the ``id`` raise: NotFound - a ``Family`` identified by ``Id is`` not found raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def execute_return_success(cmd):
    """Execute ``cmd``; return True when it succeeds.

    :param str cmd: The command to run
    :return: True if successful
    :rtype: bool
    :raises CommandExecutionError: when the command fails or reports
        'not supported' in its output
    """
    ret = _run_all(cmd)
    failed = ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower()
    if failed:
        msg = 'Command Failed: {0}\n'.format(cmd)
        msg += 'Return Code: {0}\n'.format(ret['retcode'])
        msg += 'Output: {0}\n'.format(ret['stdout'])
        msg += 'Error: {0}\n'.format(ret['stderr'])
        raise CommandExecutionError(msg)
    return True
Executes the passed command. Returns True if successful :param str cmd: The command to run :return: True if successful, otherwise False :rtype: bool :raises: Error if command fails or is not supported
def clone(self, klass=None, memo=None, **kwargs):
    """Create a copy of the current instance.

    ``kwargs`` lets callers override attributes on the copy after the
    standard fields have been duplicated.
    """
    duplicate = Empty()
    duplicate.__class__ = klass or self.__class__
    duplicate.resource = self.resource
    # Shallow-copy the filters dict so the clone can diverge safely.
    duplicate.filters = self.filters.copy()
    duplicate.order_by = self.order_by
    duplicate.low_mark = self.low_mark
    duplicate.high_mark = self.high_mark
    duplicate.__dict__.update(kwargs)
    return duplicate
Creates a copy of the current instance. The 'kwargs' parameter can be used by clients to update attributes after copying has taken place.
def default(self, obj):
    """Serialize Sensor and ChildSensor objects into JSON-able dicts;
    defer everything else to the base encoder (which raises TypeError)."""
    if isinstance(obj, Sensor):
        return {
            'sensor_id': obj.sensor_id,
            'children': obj.children,
            'type': obj.type,
            'sketch_name': obj.sketch_name,
            'sketch_version': obj.sketch_version,
            'battery_level': obj.battery_level,
            'protocol_version': obj.protocol_version,
            'heartbeat': obj.heartbeat,
        }
    if isinstance(obj, ChildSensor):
        return {
            'id': obj.id,
            'type': obj.type,
            'description': obj.description,
            'values': obj.values,
        }
    # Unknown types fall through to the stock encoder behaviour.
    return json.JSONEncoder.default(self, obj)
Serialize obj into JSON.
def get_assignable_bank_ids(self, bank_id):
    """Get the list of bank Ids to which an assessment part can be assigned.

    arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
    return: (osid.id.IdList) - list of assignable bank ``Ids``

    NOTE(review): ``bank_id`` is accepted but not used to filter; every
    known bank's Id is returned — confirm this is the intended behaviour.
    """
    mgr = self._get_provider_manager('ASSESSMENT', local=True)
    lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
    bank_ids = [bank.get_id() for bank in lookup_session.get_banks()]
    return IdList(bank_ids)
Gets a list of bank including and under the given bank node in which any assessment part can be assigned. arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` return: (osid.id.IdList) - list of assignable bank ``Ids`` raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
def track_exists(self, localdir):
    """Return True when a non-empty file for this track already exists
    under ``localdir`` (matched by generated filename prefix)."""
    pattern = self.gen_localdir(localdir) + self.gen_filename() + "*"
    matches = glob.glob(pattern)
    return bool(matches) and os.path.getsize(matches[0]) > 0
Check if track exists in local directory.
def notify_listeners(self, msg_type, params):
    """Broadcast ``(msg_type, params)`` to every registered listener."""
    for listener in self.listeners:
        listener.notify(msg_type, params)
Send a message to all the observers.
def parse_singular_alphabetic_character(t, tag_name):
    """Parse the single alphabetic character held by the sole ``tag_name``
    child element of ``t``.

    Heavy-handed with the asserts, by design.

    Bug fix: the original's range check read ``'v' <= 'z'`` — a comparison
    of two literals that is always True — instead of ``v <= 'z'``, so the
    upper bound was never actually enforced.
    """
    nodes = t.getElementsByTagName(tag_name)
    assert len(nodes) == 1
    node = nodes[0]
    assert len(node.childNodes) == 1
    v = node.childNodes[0].data
    assert len(v) == 1 and 'A' <= v <= 'z'
    return v
Parses the sole alphabetic character value with name tag_name in tag t. Heavy-handed with the asserts.
def decode_string(self, string, cache, as_map_key):
    """Decode a transit string; arguments follow the same convention as the
    top-level ``decode`` function.

    Cache keys are resolved through the cache; cacheable strings are
    recorded before parsing.
    """
    if is_cache_key(string):
        resolved = cache.decode(string, as_map_key)
        return self.parse_string(resolved, cache, as_map_key)
    if is_cacheable(string, as_map_key):
        cache.encode(string, as_map_key)
    return self.parse_string(string, cache, as_map_key)
Decode a string - arguments follow the same convention as the top-level 'decode' function.
def _make_table_formatter(f, offset=None):
    """Closure-ize a table argument: capture a format ``f`` (and optional
    ``offset``) and return a one-argument resolver."""
    return lambda value: _resolve_table(value, f, offset)
A closure-izer for table arguments that include a format and possibly an offset.
def animation_add(sequence_number, animation_id, name):
    """Create an ``animation.add`` protocol message."""
    msg = MessageWriter()
    msg = msg.string("animation.add")
    msg = msg.uint64(sequence_number)
    msg = msg.uint32(animation_id)
    msg = msg.string(name)
    return msg.get()
Create an animation.add message
def dchisq(psr,formbats=False,renormalize=True):
    """Return the gradient of total chisq for the current timing solution,
    after removing the noise-averaged mean residual and ignoring deleted
    points.

    :param psr: tempo2 pulsar object
    :param formbats: recompute barycentric arrival times first
    :param renormalize: normalize design-matrix columns to unit norm
        (the gradient is scaled back accordingly)
    """
    if formbats:
        psr.formbats()
    # Residuals and TOA errors restricted to non-deleted points.
    res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0]
    # Subtract the 1/err^2-weighted mean residual.
    res -= numpy.sum(res/err**2) / numpy.sum(1/err**2)
    # Design matrix without the constant-phase column (index 0).
    M = psr.designmatrix(updatebats=False,fixunits=True,fixsigns=True)[psr.deleted==0,1:]
    if renormalize:
        norm = numpy.sqrt(numpy.sum(M**2,axis=0))
        M /= norm
    else:
        norm = 1.0
    # d(chi^2)/d(param); the 1e-12 converts err (us) consistently with the
    # residual units used elsewhere — NOTE(review): confirm units.
    dr = -2 * numpy.dot(M.T,res / (1e-12 * err**2)) * norm
    return dr
Return gradient of total chisq for the current timing solution, after removing noise-averaged mean residual, and ignoring deleted points.
def positive_report(binary_report, sha256hash, project, patch_file):
    """Log the details of a positive (infected) VirusTotal match.

    Fix: removed the unused local ``failure = True`` that the original
    assigned and never read.

    :param binary_report: VirusTotal report dict (uses 'permalink' and
        'scan_date')
    :param sha256hash: hash of the scanned file (unused here; kept for
        interface compatibility)
    :param project: project name (unused here; kept for interface
        compatibility)
    :param patch_file: the file that was scanned
    """
    report_url = binary_report['permalink']
    scan_date = binary_report['scan_date']
    logger.error("Virus Found!")
    # NOTE(review): the log messages below contain typos ("a infected",
    # "avaliable"); left unchanged because they are runtime output.
    logger.info('File scan date for %s shows a infected status on: %s', patch_file, scan_date)
    logger.info('Full report avaliable here: %s', report_url)
If a Positive match is found
def scroll_one_line_up(event):
    """Scroll the current window's content one line up (scroll_offset -= 1),
    moving the cursor first so it stays inside the visible area."""
    w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
    b = event.cli.current_buffer
    if w:
        # Window dimensions are only known after it has been rendered.
        if w.render_info:
            info = w.render_info
            if w.vertical_scroll > 0:
                first_line_height = info.get_height_for_line(info.first_visible_line())
                # Rows the cursor must move up to remain visible once the
                # view scrolls, respecting the configured bottom offset.
                cursor_up = info.cursor_position.y - (info.window_height - 1 - first_line_height - info.configured_scroll_offsets.bottom)
                # Move the cursor up before scrolling the viewport.
                for _ in range(max(0, cursor_up)):
                    b.cursor_position += b.document.get_cursor_up_position()
                w.vertical_scroll -= 1
Scroll the window content one line up (scroll_offset -= 1), moving the cursor first so it stays within the visible area.
def ParseByteStream( self, parser_mediator, byte_stream, parent_path_segments=None, codepage='cp1252'): if parent_path_segments and isinstance(parent_path_segments, list): self._path_segments = list(parent_path_segments) else: self._path_segments = [] shell_item_list = pyfwsi.item_list() parser_mediator.AppendToParserChain(self) try: shell_item_list.copy_from_byte_stream( byte_stream, ascii_codepage=codepage) for shell_item in iter(shell_item_list.items): self._ParseShellItem(parser_mediator, shell_item) finally: parser_mediator.PopFromParserChain()
Parses the shell items from the byte stream. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. byte_stream (bytes): shell items data. parent_path_segments (Optional[list[str]]): parent shell item path segments. codepage (Optional[str]): byte stream codepage.
def _make_cmake(config_info):
    """Initialize a CMake builder for the project described by ``config_info``."""
    # Always export compile_commands.json for tooling.
    configure_args = ["-DCMAKE_EXPORT_COMPILE_COMMANDS=ON"]
    cmake_args = {}
    options, option_fns = _make_all_options()

    def _add_value(value, key):
        # Translate an "extra" option into CMake() constructor kwargs.
        args_key, args_value = _EX_ARG_FNS[key](value)
        cmake_args[args_key] = args_value

    # Regular options extend the configure command line...
    devpipeline_core.toolsupport.args_builder(
        "cmake", config_info, options,
        lambda v, key: configure_args.extend(option_fns[key](v)),
    )
    # ...while "extra" args configure the CMake wrapper itself.
    devpipeline_core.toolsupport.args_builder(
        "cmake", config_info, _EX_ARGS, _add_value
    )
    cmake = CMake(cmake_args, config_info, configure_args)
    build_type = config_info.config.get("cmake.build_type")
    if build_type:
        cmake.set_build_type(build_type)
    return devpipeline_build.make_simple_builder(cmake, config_info)
This function initializes a CMake builder for building the project.
def add_parent(self, id, pid, relation='subClassOf'):
    """Add a new edge to the ontology graph, from parent ``pid`` to child
    ``id`` with predicate ``relation``."""
    graph = self.get_graph()
    graph.add_edge(pid, id, pred=relation)
Add a new edge to the ontology
def fail_run_group(group, session):
    """End the run_group unsuccessfully.

    Stamps the group's end time, marks it failed and commits the
    transaction.

    Args:
        group: The run_group we want to complete.
        session: The database transaction we will finish.
    """
    from datetime import datetime

    group.end = datetime.now()
    group.status = 'failed'
    session.commit()
End the run_group unsuccessfully. Args: group: The run_group we want to complete. session: The database transaction we will finish.
def _append_object(self, value, _file):
    """Write object contents (a dict) to the output file.

    Keyword arguments:
     * value - dict, content to be dumped
     * _file - FileIO, output file
    """
    _labs = ' {'
    _file.write(_labs)
    # One nesting level deeper; _vctr[level] counts values written at that
    # level so commas are emitted before every entry except the first.
    self._tctr += 1
    for (_item, _text) in value.items():
        _tabs = '\t' * self._tctr
        _cmma = ',' if self._vctr[self._tctr] else ''
        _keys = '{cmma}\n{tabs}"{item}" :'.format(cmma=_cmma, tabs=_tabs, item=_item)
        _file.write(_keys)
        self._vctr[self._tctr] += 1
        _text = self.object_hook(_text)
        _type = type(_text).__name__
        # Dispatch on the value's type name to the matching writer.
        _MAGIC_TYPES[_type](self, _text, _file)
    # Reset this level's counter and pop back out one level.
    self._vctr[self._tctr] = 0
    self._tctr -= 1
    _tabs = '\t' * self._tctr
    _labs = '\n{tabs}{}'.format('}', tabs=_tabs)
    _file.write(_labs)
Call this function to write object contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
def copy(self, existing_inputs):
    """Creates a copy of self using existing input placeholders.

    :param existing_inputs: input placeholders to reuse in the new graph
    :return: a new PPOPolicyGraph sharing this one's spaces and config
    """
    return PPOPolicyGraph(
        self.observation_space,
        self.action_space,
        self.config,
        existing_inputs=existing_inputs)
Creates a copy of self using existing input placeholders.
def _CompileProtos():
    """Compile every .proto file under THIS_DIRECTORY with grpc_tools.protoc.

    No-op when no .proto files are found.
    """
    proto_files = [
        os.path.join(dir_path, filename)
        for dir_path, _, filenames in os.walk(THIS_DIRECTORY)
        for filename in filenames
        if filename.endswith(".proto")
    ]
    if not proto_files:
        return
    protoc_command = [
        "python", "-m", "grpc_tools.protoc",
        "--python_out", THIS_DIRECTORY,
        "--grpc_python_out", THIS_DIRECTORY,
        "--proto_path", THIS_DIRECTORY,
    ] + proto_files
    subprocess.check_output(protoc_command)
Compiles all Fleetspeak protos.
def preprocess_tree(self, tree):
    """Walk the doctree with a RinohTreePreprocessor to transform internal
    refuri targets into refids and rewrite footnote rubrics so they do not
    end up in the output."""
    preprocessor = RinohTreePreprocessor(tree, self)
    tree.walkabout(preprocessor)
Transform internal refuri targets in reference nodes to refids and transform footnote rubrics so that they do not end up in the output
def force_ascii_values(data):
    """Return a copy of ``data`` where every value is reduced to ASCII,
    with non-ASCII bytes backslash-escaped (values are UTF-8 encoded
    first, so non-ASCII characters appear as their UTF-8 byte escapes)."""
    ascii_safe = {}
    for key, value in data.items():
        ascii_safe[key] = value.encode('utf8').decode('ascii', 'backslashreplace')
    return ascii_safe
Ensures each value is ascii-only
def clean_exit(signum, frame=None):
    """Exit all processes, attempting to finish uncommitted active work
    before exiting.

    Can be called on an OS signal or when losing the zookeeper connection.

    :param signum: the signal number received (also used as exit code,
        except SIGTERM which exits 0)
    :param frame: signal handler frame (unused)
    """
    global exiting
    if exiting:
        # Re-entrant call: a second signal arrived mid-shutdown; ignore it.
        LOG.debug('Exit in progress clean_exit received additional signal %s' % signum)
        return
    LOG.info('Received signal %s, beginning graceful shutdown.' % signum)
    exiting = True
    wait_for_exit = False
    for process in processors:
        try:
            if process.is_alive():
                # Ask each worker to terminate gracefully first.
                process.terminate()
                wait_for_exit = True
        except Exception:
            pass
    if wait_for_exit:
        # Grace period before force-killing stragglers below.
        time.sleep(2)
    for child in multiprocessing.active_children():
        LOG.debug('Killing pid %s' % child.pid)
        try:
            os.kill(child.pid, signal.SIGKILL)
        except Exception:
            pass
    if signum == signal.SIGTERM:
        sys.exit(0)
    sys.exit(signum)
Exit all processes attempting to finish uncommitted active work before exit. Can be called on an os signal or no zookeeper losing connection.
def get_witnesses(self, name='*'):
    """Yield a WitnessText object for each witness text file in the corpus.

    :rtype: `generator` of `WitnessText`
    """
    pattern = os.path.join(self._path, name, '*.txt')
    for filepath in glob.glob(pattern):
        if os.path.isfile(filepath):
            # Work name is the containing directory; siglum is the filename
            # without its .txt extension.
            work = os.path.split(os.path.split(filepath)[0])[1]
            siglum = os.path.splitext(os.path.basename(filepath))[0]
            yield self.get_witness(work, siglum)
Returns a generator supplying `WitnessText` objects for each work in the corpus. :rtype: `generator` of `WitnessText`
def make_python_xref_nodes(py_typestr, state, hide_namespace=False):
    """Make docutils nodes containing a cross-reference to a Python object.

    Parameters
    ----------
    py_typestr : `str`
        Name of the Python object, e.g. ``'mypackage.mymodule.MyClass'``.
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.
    hide_namespace : `bool`, optional
        If True, the namespace is hidden in the rendered cross reference
        (via the ``~`` prefix).

    Returns
    -------
    instance from ``docutils.nodes``
        Docutils node representing the cross reference.
    """
    role = ':py:obj:`~{}`\n' if hide_namespace else ':py:obj:`{}`\n'
    return parse_rst_content(role.format(py_typestr), state)
Make docutils nodes containing a cross-reference to a Python object. Parameters ---------- py_typestr : `str` Name of the Python object. For example ``'mypackage.mymodule.MyClass'``. If you have the object itself, or its type, use the `make_python_xref_nodes_for_type` function instead. state : ``docutils.statemachine.State`` Usually the directive's ``state`` attribute. hide_namespace : `bool`, optional If `True`, the namespace of the object is hidden in the rendered cross reference. Internally, this uses ``:py:obj:`~{py_obj}` (note tilde). Returns ------- instance from ``docutils.nodes`` Docutils node representing the cross reference. Examples -------- If called from within a directive: .. code-block:: python make_python_xref_nodes('numpy.sin', self.state) See also -------- `make_python_xref_nodes_for_type`
def _new_from_rft(self, base_template, rft_file):
    """Append a new-file-from-.rft entry to the journal, instructing Revit
    to create a new model from the given template.

    Args:
        base_template (str): new file journal template from rmj.templates
        rft_file (str): full path to the .rft template to be used
    """
    self._add_entry(base_template)
    entry = templates.NEW_FROM_RFT.format(
        rft_file_path=rft_file,
        rft_file_name=op.basename(rft_file),
    )
    self._add_entry(entry)
Append a new file from .rft entry to the journal. This instructs Revit to create a new model based on the provided .rft template. Args: base_template (str): new file journal template from rmj.templates rft_file (str): full path to .rft template to be used
def get_next(intersection, intersections, unused):
    """Return the "next" node along the current edge of an intersection.

    Removes the chosen node from ``unused`` if present, so callers can
    track which intersections remain to be consumed.

    :raises ValueError: if the intersection's classification is not one
        the traversal knows how to advance from.
    """
    classification = intersection.interior_curve
    if is_first(classification):
        nxt = get_next_first(intersection, intersections)
    elif is_second(classification):
        nxt = get_next_second(intersection, intersections)
    elif classification == CLASSIFICATION_T.COINCIDENT:
        nxt = get_next_coincident(intersection, intersections)
    else:
        raise ValueError(
            'Cannot get next node if not starting from "FIRST", '
            '"TANGENT_FIRST", "SECOND", "TANGENT_SECOND" or "COINCIDENT".'
        )
    if nxt in unused:
        unused.remove(nxt)
    return nxt
Gets the next node along a given edge. .. note:: This is a helper used only by :func:`basic_interior_combine`, which in turn is only used by :func:`combine_intersections`. This function does the majority of the heavy lifting for :func:`basic_interior_combine`. .. note:: This function returns :class:`.Intersection` objects even when the point isn't strictly an intersection. This is "incorrect" in some sense, but for now, we don't bother implementing a class similar to, but different from, :class:`.Intersection` to satisfy this need. Args: intersection (.Intersection): The current intersection. intersections (List[.Intersection]): List of all detected intersections, provided as a reference for potential points to arrive at. unused (List[.Intersection]): List of nodes that haven't been used yet in an intersection curved polygon Returns: .Intersection: The "next" point along a surface of intersection. This will produce the next intersection along the current edge or the end of the current edge. Raises: ValueError: If the intersection is not classified as :attr:`~.IntersectionClassification.FIRST`, :attr:`~.IntersectionClassification.TANGENT_FIRST`, :attr:`~.IntersectionClassification.SECOND`, :attr:`~.IntersectionClassification.TANGENT_SECOND` or :attr:`~.IntersectionClassification.COINCIDENT`.
def get_dos(self, partial_dos=False, npts_mu=10000, T=None):
    """Return a Dos object obtained by interpolating the bands.

    :param partial_dos: if True, also interpolate projections and attach
        partial doses (projections must be available in the loader).
    :param npts_mu: number of energy points of the Dos.
    :param T: if given, temperature used to smooth the Dos.
    """
    # Fall back to a single spin channel when the loader gives no integer.
    spin_channel = self.data.spin if isinstance(self.data.spin, int) else 1
    energies, densities, _vvdos, _cdos = BL.BTPDOS(
        self.eband, self.vvband, npts=npts_mu)
    if T is not None:
        densities = BL.smoothen_DOS(energies, densities, T)
    dos = Dos(self.efermi / units.eV, energies / units.eV,
              {Spin(spin_channel): densities})
    if partial_dos:
        dos = self.get_partial_doses(tdos=dos, npts_mu=npts_mu, T=T)
    return dos
Return a Dos object interpolating bands Args: partial_dos: if True, projections will be interpolated as well and partial doses will be return. Projections must be available in the loader. npts_mu: number of energy points of the Dos T: parameter used to smooth the Dos
def eval_field(field, asc):
    """Evaluate a sort-field definition into a sort query dict.

    :param field: field definition — dict, callable, or string.
    :param asc: True for ascending order, False for descending.
    :returns: dictionary with the sort field query.
    """
    if isinstance(field, dict):
        if asc:
            return field
        # Descending: flip the declared order on a deep copy so the
        # caller's definition is never mutated.
        flipped = copy.deepcopy(field)
        key = list(flipped.keys())[0]
        flipped[key]['order'] = reverse_order(flipped[key]['order'])
        return flipped
    if callable(field):
        return field(asc)
    # String form: parse the name/direction, then apply the requested order.
    key, key_asc = parse_sort_field(field)
    if not asc:
        key_asc = not key_asc
    return {key: {'order': 'asc' if key_asc else 'desc'}}
Evaluate a field for sorting purpose. :param field: Field definition (string, dict or callable). :param asc: ``True`` if order is ascending, ``False`` if descending. :returns: Dictionary with the sort field query.
def _ref_prop_matches(prop, target_classname, ref_classname, resultclass_names, role): assert prop.type == 'reference' if prop.reference_class.lower() == target_classname.lower(): if resultclass_names and ref_classname not in resultclass_names: return False if role and prop.name.lower() != role: return False return True return False
Test filters for a reference property Returns `True` if matches the criteria. Returns `False` if it does not match. The match criteria are: - target_classname == prop_reference_class - if result_classes are not None, ref_classname is in result_classes - If role is not None, prop name matches role
def json(self, **kwargs):
    """Decode the response body as JSON; kwargs pass through to the loader."""
    # Sniff the encoding from the leading bytes (BOM / RFC 4627 detection).
    charset = detect_encoding(self.content[:4])
    text = self.content.decode(charset)
    return simplejson.loads(text, **kwargs)
Decodes response as JSON.
def GetEstimatedYear(self):
    """Return an estimate of the year.

    Preference order: user-provided year, knowledge-base year, earliest
    year from file-entry metadata, latest year from file-entry metadata,
    finally the current year.

    Returns:
      int: estimated year.
    """
    if self._preferred_year:
        return self._preferred_year
    if self._knowledge_base.year:
        return self._knowledge_base.year
    # Falsy (None/0) results fall through to the next source.
    return (self._GetEarliestYearFromFileEntry()
            or self._GetLatestYearFromFileEntry()
            or timelib.GetCurrentYear())
Retrieves an estimate of the year. This function determines the year in the following manner: * see if the user provided a preferred year; * see if knowledge base defines a year e.g. derived from preprocessing; * determine the year based on the file entry metadata; * default to the current year; Returns: int: estimated year.
def _validate_slice(self, start, end): if start is None: start = 0 elif start < 0: start += self.len if end is None: end = self.len elif end < 0: end += self.len if not 0 <= end <= self.len: raise ValueError("end is not a valid position in the bitstring.") if not 0 <= start <= self.len: raise ValueError("start is not a valid position in the bitstring.") if end < start: raise ValueError("end must not be less than start.") return start, end
Validate start and end and return them as positive bit positions.
def getaddress(self, address: str) -> dict:
    """Fetch and return the information dict for the given address."""
    endpoint = 'getaddress/' + address
    return cast(dict, self.ext_fetch(endpoint))
Returns information for given address.
def _run(self):
    """Run the interval loop, firing ``handle_timer_tick`` once per interval."""
    def get_next_interval():
        # Yield sleep durations anchored to the loop's start time, so a
        # slow tick does not make the schedule drift over time.
        start_time = time.time()
        # Eager mode fires the first tick immediately (count starts at 0).
        start = 0 if self.eager else 1
        for count in itertools.count(start=start):
            yield max(start_time + count * self.interval - time.time(), 0)
    interval = get_next_interval()
    sleep_time = next(interval)
    while True:
        # Presumably an eventlet-style Timeout: with exception=False the
        # wait is silently unblocked after ``sleep_time`` seconds, so
        # ``break`` is only reached when ``should_stop`` was actually set
        # before the timeout fired. TODO(review): confirm Timeout semantics.
        with Timeout(sleep_time, exception=False):
            self.should_stop.wait()
            break
        self.handle_timer_tick()
        # Wait for the worker to finish this tick, then re-arm its event
        # before computing the next sleep.
        self.worker_complete.wait()
        self.worker_complete.reset()
        sleep_time = next(interval)
Runs the interval loop.
def adjust_properties(self, prop_set):
    """Add the main target's implicit include directories to *prop_set*.

    Applies to all virtual targets in the same dependency graph as self.
    """
    assert isinstance(prop_set, property_set.PropertySet)
    subvariant = self.targets()[0].creating_subvariant()
    return prop_set.add_raw(subvariant.implicit_includes('include', 'H'))
For all virtual targets for the same dependency graph as self, i.e. which belong to the same main target, add their directories to include path.
def _BuildEventData(self, record):
    """Build an FseventsdEventData container from a parsed record.

    Args:
      record (dls_record_v1|dls_record_v2): parsed record structure.

    Returns:
      FseventsdEventData: event data attribute container.
    """
    data = FseventsdEventData()
    data.path = record.path
    data.flags = record.event_flags
    data.event_identifier = record.event_identifier
    # node_identifier only exists on v2 records; default to None for v1.
    data.node_identifier = getattr(record, 'node_identifier', None)
    return data
Builds an FseventsdData object from a parsed structure. Args: record (dls_record_v1|dls_record_v2): parsed record structure. Returns: FseventsdEventData: event data attribute container.
def ListDevices(self):
    """List the object paths of all known Bluetooth devices.

    Returns:
      dbus.Array of dbus.ObjectPath for every mocked object under
      ``/org/bluez/`` whose path contains ``dev_``.
    """
    # Iterate the mapping directly (no .keys() materialization) and build
    # the list with a comprehension instead of a manual append loop.
    devices = [
        dbus.ObjectPath(path, variant_level=1)
        for path in mockobject.objects
        if path.startswith('/org/bluez/') and 'dev_' in path
    ]
    return dbus.Array(devices, variant_level=1)
List all known devices
def get_value(self, merge=True, createfunc=None, expiration_time=None, ignore_expiration=False):
    """Return the value from the cache for this query.

    :param merge: if True, merge the cached value into the session
        (without re-loading) before returning it.
    :param createfunc: optional callable producing the value on a miss;
        its result is stored back into the cache.
    :param ignore_expiration: if True, return the cached value even if
        expired (mutually exclusive with ``createfunc``).
    """
    cache, cache_key = self._get_cache_plus_key()
    assert not ignore_expiration or not createfunc, \
        "Can't ignore expiration and also provide createfunc"
    if ignore_expiration or not createfunc:
        # Pure read path: let the cache apply (or ignore) expiration.
        cached_value = cache.get(cache_key, expiration_time=expiration_time, ignore_expiration=ignore_expiration)
    else:
        # Read-through path: populate the cache on a miss.
        cached_value = cache.get(cache_key)
        if not cached_value:
            cached_value = createfunc()
            cache.set(cache_key, cached_value, timeout=expiration_time)
    if cached_value and merge:
        # load=False: attach cached objects to the session without a re-query.
        cached_value = self.merge_result(cached_value, load=False)
    return cached_value
Return the value from the cache for this query.
def getXRDExpiration(xrd_element, default=None):
    """Return the expiration datetime of this XRD element.

    :param xrd_element: ElementTree node to inspect.
    :param default: value returned when no xrd:Expires element is present.
    :raises ValueError: if the xrd:Expires contents do not follow the
        spec's ``%Y-%m-%dT%H:%M:%SZ`` format.
    """
    expires_element = xrd_element.find(expires_tag)
    if expires_element is None:
        return default
    parsed = strptime(expires_element.text, "%Y-%m-%dT%H:%M:%SZ")
    # Keep only year..second fields when building the datetime.
    return datetime(*parsed[0:6])
Return the expiration date of this XRD element, or None if no expiration was specified. @type xrd_element: ElementTree node @param default: The value to use as the expiration if no expiration was specified in the XRD. @rtype: datetime.datetime @raises ValueError: If the xrd:Expires element is present, but its contents are not formatted according to the specification.
def add_channel_pulse(dma_channel, gpio, start, width):
    """Add a pulse for a specific GPIO to a DMA channel subcycle.

    ``start`` and ``width`` are multiples of the pulse-width increment
    granularity. Thin wrapper delegating to the ``_PWM`` C extension.
    """
    return _PWM.add_channel_pulse(dma_channel, gpio, start, width)
Add a pulse for a specific GPIO to a dma channel subcycle. `start` and `width` are multiples of the pulse-width increment granularity.
def time_slices_to_layers(graphs, interslice_weight=1, slice_attr='slice', vertex_id_attr='id', edge_type_attr='type', weight_attr='weight'):
    """Convert time slices to layer graphs.

    Each graph represents one time slice. Consecutive slices are linked
    with ``interslice_weight``; the actual conversion is delegated to
    ``slices_to_layers``.
    """
    # A tree with branching factor 1 is a path: slice t connects to t+1.
    slice_graph = _ig.Graph.Tree(len(graphs), 1, mode=_ig.TREE_UNDIRECTED)
    slice_graph.es[weight_attr] = interslice_weight
    slice_graph.vs[slice_attr] = graphs
    return slices_to_layers(slice_graph, slice_attr, vertex_id_attr, edge_type_attr, weight_attr)
Convert time slices to layer graphs. Each graph is considered to represent a time slice. This function simply connects all the consecutive slices (i.e. the slice graph) with an ``interslice_weight``. The further conversion is then delegated to :func:`slices_to_layers`, which also provides further details. See Also -------- :func:`find_partition_temporal` :func:`slices_to_layers`
def impact_path(self, value):
    """Setter for the impact path; updates report/log state to match.

    :param value: the impact path, or None to clear reports and
        disable the related actions.
    :type value: str
    """
    self._impact_path = value
    if value is None:
        # No impact layer: disable actions and clear derived paths.
        self.action_show_report.setEnabled(False)
        self.action_show_log.setEnabled(False)
        self.report_path = None
        self.log_path = None
    else:
        self.action_show_report.setEnabled(True)
        self.action_show_log.setEnabled(True)
        # Derived HTML artifacts are named after the impact path.
        self.log_path = '%s.log.html' % self.impact_path
        self.report_path = '%s.report.html' % self.impact_path
        # Regenerate the HTML files, then display the report.
        self.save_report_to_html()
        self.save_log_to_html()
        self.show_report()
Setter to impact path. :param value: The impact path. :type value: str
def get_coordinate_offset(self, other_reading):
    """Return (offset_x, offset_y) between this reading's coordinate
    system and *other_reading*'s.
    """
    self_x, self_y = self.reference_source_point
    other_x, other_y = other_reading.reference_source_point
    return self_x - other_x, self_y - other_y
Calculates the offsets between readings' coordinate systems. Args: other_reading: ossos.astrom.SourceReading The reading to compare coordinate systems with. Returns: (offset_x, offset_y): The x and y offsets between this reading and the other reading's coordinate systems.
def dump(self):
    """Deprecated: dump the TDS results to the output ``dat`` file.

    :return: True on success (or when output is disabled), False otherwise.
    """
    # ``Logger.warn`` is a deprecated alias of ``Logger.warning``.
    logger.warning('This function is deprecated and replaced by `dump_np_vars`.')
    if self.system.files.no_output:
        return True
    # Both files must be written for the dump to count as a success.
    return bool(self.write_lst() and self.write_dat())
Dump the TDS results to the output `dat` file :return: succeed flag
def __parse_stream(self, stream, parse_line):
    """Parse a gitdm stream, yielding ``parse_line``'s result per valid line.

    :raises InvalidFormatError: if the stream is empty, a line does not
        match the expected format, or ``parse_line`` itself fails.
    """
    if not stream:
        raise InvalidFormatError(cause='stream cannot be empty or None')
    for nline, line in enumerate(stream.split('\n'), start=1):
        # Comments and blank lines are allowed; skip them.
        if re.match(self.LINES_TO_IGNORE_REGEX, line, re.UNICODE):
            continue
        matched = re.match(self.VALID_LINE_REGEX, line, re.UNICODE)
        if not matched:
            cause = "line %s: invalid format" % str(nline)
            raise InvalidFormatError(cause=cause)
        try:
            yield parse_line(matched.group(1), matched.group(2))
        except InvalidFormatError as e:
            # Re-raise with the offending line number attached.
            cause = "line %s: %s" % (str(nline), e)
            raise InvalidFormatError(cause=cause)
Generic method to parse gitdm streams
def _push_entry(self, key):
    """Push an access entry for *key*, invalidating any existing entry."""
    self._invalidate_entry(key)
    new_entry = AccessEntry(key)
    self.access_lookup[key] = new_entry
    # Use the lock as a context manager so it is released even if
    # appendleft raises — bare acquire/release would leak the lock.
    with self.access_log_lock:
        self.access_log.appendleft(new_entry)
Push entry onto our access log, invalidate the old entry if exists.
def delete_rows_csr(mat, indices):
    """Return *mat* with the rows listed in ``indices`` removed.

    :param mat: a CSR sparse matrix (use ``.tocsr()`` first otherwise).
    :raises ValueError: if *mat* is not in CSR format.
    """
    if not isinstance(mat, scipy.sparse.csr_matrix):
        raise ValueError("works only for CSR format -- use .tocsr() first")
    # Boolean row mask: True = keep, False = drop.
    keep = np.ones(mat.shape[0], dtype=bool)
    keep[list(indices)] = False
    return mat[keep]
Remove the rows denoted by ``indices`` form the CSR sparse matrix ``mat``.
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
    """Build pagination context, persisting the querystring in page links.

    ``exclude_vars`` is a comma-separated list of querystring variable
    names to drop from the persisted querystring; the page variable
    itself is always dropped.
    """
    querystring = context["request"].GET.copy()
    removed = [v for v in exclude_vars.split(",") if v]
    removed.append(page_var)
    for var in removed:
        if var in querystring:
            del querystring[var]
    return {
        "current_page": current_page,
        "querystring": querystring.urlencode(),
        "page_var": page_var,
    }
Include the pagination template and data for persisting querystring in pagination links. Can also contain a comma separated string of var names in the current querystring to exclude from the pagination links, via the ``exclude_vars`` arg.
def matches_input(self, optimized_str):
    """Return True if every template keyword occurs in *optimized_str*.

    Previously fell through to an implicit ``None`` on a non-match;
    now returns an explicit ``False`` (same truthiness for callers).
    """
    if all(keyword in optimized_str for keyword in self['keywords']):
        logger.debug('Matched template %s', self['template_name'])
        return True
    return False
See if string matches keywords set in template file
def model_to_objective(self, x_model):
    """Translate a model-space input vector into an objective-space vector.

    Each expanded variable consumes a contiguous chunk of ``x_model``
    (of size ``dimensionality_in_model``) and contributes its
    objective-space entries in order.
    """
    x_objective = []
    offset = 0
    for idx_obj in range(self.objective_dimensionality):
        variable = self.space_expanded[idx_obj]
        x_objective.extend(variable.model_to_objective(x_model, offset))
        offset += variable.dimensionality_in_model
    return x_objective
This function serves as interface between model input vectors and objective input vectors
def create_random_string(length=7, chars='ABCDEFGHJKMNPQRSTUVWXYZ23456789', repetitions=False):
    """Return a random string drawn from *chars*.

    Defaults to capital letters and digits with ambiguous characters
    removed. Unless ``repetitions`` is True, no character appears twice
    (so ``length`` must not exceed ``len(chars)``).
    """
    if repetitions:
        picks = (random.choice(chars) for _ in range(length))
        return ''.join(picks)
    return ''.join(random.sample(chars, length))
Returns a random string, based on the provided arguments. It returns capital letters and numbers by default. Ambiguous characters are left out, repetitions will be avoided.
def get_sid_string(principal):
    """Convert a PySID object (or resolvable principal) to a string SID.

    Args:
        principal: a PySID object; None maps to the well-known
            'NULL SID'. Non-PySID values are resolved via ``get_sid``.

    Returns:
        str: the string SID.

    Raises:
        CommandExecutionError: if the principal cannot be converted.
    """
    if principal is None:
        principal = 'NULL SID'
    try:
        return win32security.ConvertSidToStringSid(principal)
    except TypeError:
        # Not a PySID object; resolve the name/string to a SID and retry.
        principal = get_sid(principal)
    try:
        return win32security.ConvertSidToStringSid(principal)
    except pywintypes.error:
        log.exception('Invalid principal %s', principal)
        raise CommandExecutionError('Invalid principal {0}'.format(principal))
Converts a PySID object to a string SID. Args: principal(str): The principal to lookup the sid. Must be a PySID object. Returns: str: A string sid Usage: .. code-block:: python # Get a PySID object py_sid = salt.utils.win_dacl.get_sid('jsnuffy') # Get the string version of the SID salt.utils.win_dacl.get_sid_string(py_sid)
def check_cew(cls):
    """Check whether the Python C extension ``cew`` can be imported.

    Return ``True`` on failure and ``False`` on success.

    :rtype: bool
    """
    if not gf.can_run_c_extension("cew"):
        gf.print_warning(u"aeneas.cew NOT AVAILABLE")
        gf.print_info(u"  You can still run aeneas but it will be a bit slower")
        gf.print_info(u"  Please refer to the installation documentation for details")
        return True
    gf.print_success(u"aeneas.cew AVAILABLE")
    return False
Check whether Python C extension ``cew`` can be imported. Return ``True`` on failure and ``False`` on success. :rtype: bool
def check_serial_port(name):
    """Return the device path of the serial port matching *name*.

    :raises ValueError: if no port matches, listing available devices.
    """
    try:
        return next(serial.tools.list_ports.grep(name))[0]
    except StopIteration:
        ports = serial.tools.list_ports.comports()
        listing = "".join("{},".format(text_type(p)) for p in ports)
        msg = "device {} not found. available devices are: {}".format(name, listing)
        raise ValueError(msg)
Returns the device name of the first valid COM port matching ``name``; raises ValueError listing the available ports when no match is found.
def comments_for(context, obj):
    """Provide a generic context variable name for the object that
    comments are being rendered for.
    """
    form_class = import_dotted_path(settings.COMMENT_FORM_CLASS)
    fresh_form = form_class(context["request"], obj)
    # Reuse a previously posted form only if it targets this same object.
    posted = context.get("posted_comment_form", fresh_form)
    if not posted.target_object == obj:
        posted = fresh_form
    context.update({
        'posted_comment_form': posted,
        'unposted_comment_form': fresh_form,
        'comment_url': reverse("comment"),
        'object_for_comments': obj,
    })
    return context
Provides a generic context variable name for the object that comments are being rendered for.
def linearize_data_types(self):
    """Return all data types used in this namespace, parents before
    children.

    Inheritance forms a DAG, so the returned list is a linearization of
    it: composite types appear after any composite type they extend,
    which is the order generators need to emit definitions in.
    """
    ordered = []
    visited = set()

    def visit(data_type):
        # Skip types already emitted or owned by another namespace.
        if data_type in visited or data_type.namespace != self:
            return
        if is_composite_type(data_type) and data_type.parent_type:
            visit(data_type.parent_type)
        ordered.append(data_type)
        visited.add(data_type)

    for data_type in self.data_types:
        visit(data_type)
    return ordered
Returns a list of all data types used in the namespace. Because the inheritance of data types can be modeled as a DAG, the list will be a linearization of the DAG. It's ideal to generate data types in this order so that composite types that reference other composite types are defined in the correct order.