def _link_or_update_vars(self):
    """
    Creates or updates the symlink to group_vars and returns None.

    :returns: None
    """
    for d, source in self.links.items():
        target = os.path.join(self.inventory_directory, d)
        source = os.path.join(self._config.scenario.directory, source)

        if not os.path.exists(source):
            msg = "The source path '{}' does not exist.".format(source)
            util.sysexit_with_message(msg)
        msg = "Inventory {} linked to {}".format(source, target)
        LOG.info(msg)
        os.symlink(source, target)
def align(*objects, **kwargs):
    """align(*objects, join='inner', copy=True, indexes=None, exclude=frozenset())

    Given any number of Dataset and/or DataArray objects, returns new
    objects with aligned indexes and dimension sizes.

    Arrays from the aligned objects are suitable as input to mathematical
    operators, because along each dimension they have the same index and
    size.

    Missing values (if ``join != 'inner'``) are filled with NaN.

    Parameters
    ----------
    *objects : Dataset or DataArray
        Objects to align.
    join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
        Method for joining the indexes of the passed objects along each
        dimension:

        - 'outer': use the union of object indexes
        - 'inner': use the intersection of object indexes
        - 'left': use indexes from the first object with each dimension
        - 'right': use indexes from the last object with each dimension
        - 'exact': instead of aligning, raise `ValueError` when indexes to
          be aligned are not equal

    copy : bool, optional
        If ``copy=True``, data in the return values is always copied. If
        ``copy=False`` and reindexing is unnecessary, or can be performed
        with only slice operations, then the output may share memory with
        the input. In either case, new xarray objects are always returned.
    exclude : sequence of str, optional
        Dimensions that must be excluded from alignment
    indexes : dict-like, optional
        Any indexes explicitly provided with the `indexes` argument should
        be used in preference to the aligned indexes.

    Returns
    -------
    aligned : same as *objects
        Tuple of objects with aligned coordinates.

    Raises
    ------
    ValueError
        If any dimensions without labels on the arguments have different
        sizes, or a different size than the size of the aligned dimension
        labels.
    """
    join = kwargs.pop('join', 'inner')
    copy = kwargs.pop('copy', True)
    indexes = kwargs.pop('indexes', None)
    exclude = kwargs.pop('exclude', _DEFAULT_EXCLUDE)
    if indexes is None:
        indexes = {}
    if kwargs:
        raise TypeError('align() got unexpected keyword arguments: %s'
                        % list(kwargs))

    if not indexes and len(objects) == 1:
        # fast path for the trivial case
        obj, = objects
        return (obj.copy(deep=copy),)

    all_indexes = defaultdict(list)
    unlabeled_dim_sizes = defaultdict(set)
    for obj in objects:
        for dim in obj.dims:
            if dim not in exclude:
                try:
                    index = obj.indexes[dim]
                except KeyError:
                    unlabeled_dim_sizes[dim].add(obj.sizes[dim])
                else:
                    all_indexes[dim].append(index)

    # We don't reindex over dimensions with all equal indexes for two reasons:
    # - It's faster for the usual case (already aligned objects).
    # - It ensures it's possible to do operations that don't require alignment
    #   on indexes with duplicate values (which cannot be reindexed with
    #   pandas). This is useful, e.g., for overwriting such duplicate indexes.
    joiner = _get_joiner(join)
    joined_indexes = {}
    for dim, matching_indexes in all_indexes.items():
        if dim in indexes:
            index = utils.safe_cast_to_index(indexes[dim])
            if (any(not index.equals(other) for other in matching_indexes) or
                    dim in unlabeled_dim_sizes):
                joined_indexes[dim] = index
        else:
            if (any(not matching_indexes[0].equals(other)
                    for other in matching_indexes[1:]) or
                    dim in unlabeled_dim_sizes):
                if join == 'exact':
                    raise ValueError(
                        'indexes along dimension {!r} are not equal'
                        .format(dim))
                index = joiner(matching_indexes)
                joined_indexes[dim] = index
            else:
                index = matching_indexes[0]

        if dim in unlabeled_dim_sizes:
            unlabeled_sizes = unlabeled_dim_sizes[dim]
            labeled_size = index.size
            if len(unlabeled_sizes | {labeled_size}) > 1:
                raise ValueError(
                    'arguments without labels along dimension %r cannot be '
                    'aligned because they have different dimension size(s) %r '
                    'than the size of the aligned dimension labels: %r'
                    % (dim, unlabeled_sizes, labeled_size))

    for dim in unlabeled_dim_sizes:
        if dim not in all_indexes:
            sizes = unlabeled_dim_sizes[dim]
            if len(sizes) > 1:
                raise ValueError(
                    'arguments without labels along dimension %r cannot be '
                    'aligned because they have different dimension sizes: %r'
                    % (dim, sizes))

    result = []
    for obj in objects:
        valid_indexers = {k: v for k, v in joined_indexes.items()
                          if k in obj.dims}
        if not valid_indexers:
            # fast path for no reindexing necessary
            new_obj = obj.copy(deep=copy)
        else:
            new_obj = obj.reindex(copy=copy, **valid_indexers)
        new_obj.encoding = obj.encoding
        result.append(new_obj)

    return tuple(result)
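A minimal usage sketch of the aligner above, assuming xarray and NumPy are installed (the public xarray.align exposes this same behaviour):

import numpy as np
import xarray as xr

# Two DataArrays whose 'x' coordinates only partially overlap.
a = xr.DataArray(np.arange(3), dims='x', coords={'x': [0, 1, 2]})
b = xr.DataArray(np.arange(3), dims='x', coords={'x': [1, 2, 3]})

# join='inner' keeps only the shared labels {1, 2}; both results come
# back with identical indexes, so arithmetic between them is well defined.
a2, b2 = xr.align(a, b, join='inner')
print(a2['x'].values)  # [1 2]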
def bundles(ctx):
    """
    List discovered bundles.
    """
    bundles = _get_bundles(ctx.obj.data['env'])
    print_table(
        ('Name', 'Location'),
        [(bundle.name, f'{bundle.__module__}.{bundle.__class__.__name__}')
         for bundle in bundles])
def parse_array(raw_array):
    """Parse a WMIC array."""
    array_strip_brackets = raw_array.replace('{', '').replace('}', '')
    array_strip_spaces = array_strip_brackets.replace('"', '').replace(' ', '')
    return array_strip_spaces.split(',')
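A short demonstration of the parser above; the brace-and-quote layout mimics WMIC's array output, and the sample addresses are invented:

raw = '{"192.168.0.1", "192.168.0.2"}'
print(parse_array(raw))  # ['192.168.0.1', '192.168.0.2']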
def createModel(modelName, **kwargs):
    """
    Return a classification model of the appropriate type. The model could
    be any supported subclass of ClassificationModel based on modelName.

    @param modelName (str)  A supported temporal memory type

    @param kwargs   (dict)  Constructor argument for the class that will be
                            instantiated. Keyword parameters specific to each
                            model type should be passed in here.
    """
    if modelName not in TemporalMemoryTypes.getTypes():
        raise RuntimeError("Unknown model type: " + modelName)

    return getattr(TemporalMemoryTypes, modelName)(**kwargs)
def run_next(self):
    """
    Run the next item in the queue (a job waiting to run).
    """
    while 1:
        (op, obj) = self.work_queue.get()
        if op is STOP_SIGNAL:
            return
        try:
            (job_id, command_line) = obj
            try:
                os.remove(self._job_file(job_id, JOB_FILE_COMMAND_LINE))
            except Exception:
                log.exception("Running command but failed to delete - "
                              "command may rerun on Pulsar boot.")
            # _run will not do anything if job has been cancelled.
            self._run(job_id, command_line, background=False)
        except Exception:
            log.warn("Uncaught exception running job with job_id %s" % job_id)
            traceback.print_exc()
def list_sources(embedding_name=None):
    """Get valid token embedding names and their pre-trained file names.

    To load token embedding vectors from an externally hosted pre-trained
    token embedding file, such as those of GloVe and FastText, one should use
    `gluonnlp.embedding.create(embedding_name, source)`. This method returns
    all the valid names of `source` for the specified `embedding_name`. If
    `embedding_name` is set to None, this method returns all the valid names
    of `embedding_name` with their associated `source`.

    Parameters
    ----------
    embedding_name : str or None, default None
        The pre-trained token embedding name.

    Returns
    -------
    dict or list:
        A list of all the valid pre-trained token embedding file names
        (`source`) for the specified token embedding name (`embedding_name`).
        If the text embedding name is set to None, returns a dict mapping
        each valid token embedding name to a list of valid pre-trained files
        (`source`). They can be plugged into
        `gluonnlp.embedding.create(embedding_name, source)`.
    """
    text_embedding_reg = registry.get_registry(TokenEmbedding)
    if embedding_name is not None:
        embedding_name = embedding_name.lower()
        if embedding_name not in text_embedding_reg:
            raise KeyError('Cannot find `embedding_name` {}. Use '
                           '`list_sources(embedding_name=None).keys()` to get '
                           'all the valid embedding names.'
                           .format(embedding_name))
        return list(text_embedding_reg[embedding_name].source_file_hash.keys())
    else:
        return {embedding_name: list(embedding_cls.source_file_hash.keys())
                for embedding_name, embedding_cls in
                registry.get_registry(TokenEmbedding).items()}
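A hedged usage sketch, assuming gluonnlp is installed; the exact source names returned depend on the installed version:

import gluonnlp

# Valid pre-trained file names for one embedding family.
print(gluonnlp.embedding.list_sources('glove')[:3])

# With no name given: a dict mapping every embedding name to its sources.
print(sorted(gluonnlp.embedding.list_sources().keys()))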
def cumulative_sum(self):
    """
    Return the cumulative sum of the elements in the SArray.

    Returns an SArray where each element in the output corresponds to the
    sum of all the elements preceding and including it. The SArray is
    expected to be of numeric type (int, float), or a numeric vector type.

    Returns
    -------
    out : sarray[int, float, array.array]

    Notes
    -----
    - Missing values are ignored while performing the cumulative
      aggregate operation.
    - For SArray's of type array.array, all entries are expected to
      be of the same size.

    Examples
    --------
    >>> sa = SArray([1, 2, 3, 4, 5])
    >>> sa.cumulative_sum()
    dtype: int
    rows: 5
    [1, 3, 6, 10, 15]
    """
    from .. import extensions
    agg_op = "__builtin__cum_sum__"
    return SArray(_proxy=self.__proxy__.builtin_cumulative_aggregate(agg_op))
def _get_all_children(self):
    """Return a printable listing of the children (and grandchildren) of a node."""
    res = ''
    if self.child_nodes:
        for c in self.child_nodes:
            res += '   child = ' + str(c) + '\n'
            if c.child_nodes:
                for grandchild in c.child_nodes:
                    res += '       child = ' + str(grandchild) + '\n'
    else:
        res += '   child = None\n'
    return res
def get_context_file_name(pid_file):
    """Return the path of the context file stored alongside the PID file.

    When the daemon is started, the information about which port it is using
    is written out to this file.
    """
    root = os.path.dirname(pid_file)
    port_file = os.path.join(root, "context.json")
    return port_file
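A quick illustration of the path derivation (the PID file path here is made up):

import os  # used by get_context_file_name

print(get_context_file_name('/var/run/mydaemon/daemon.pid'))
# /var/run/mydaemon/context.json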
def do_info(self, arg, arguments):
    """
    ::

        Usage:
               info [--all]

        Options:
               --all -a   more extensive information

        Prints some internal information about the shell
    """
    if arguments["--all"]:
        Console.ok(70 * "-")
        Console.ok('DIR')
        Console.ok(70 * "-")
        for element in dir(self):
            Console.ok(str(element))
        Console.ok(70 * "-")
    self.print_info()
def go_to_step(self, step):
    """Set the stacked widget to the given step, set up the buttons,
    and run all operations that should start immediately after
    entering the new step.

    :param step: The step widget to be moved to.
    :type step: WizardStep
    """
    self.stackedWidget.setCurrentWidget(step)

    # Disable the Next button unless new data already entered
    self.pbnNext.setEnabled(step.is_ready_to_next_step())

    # Disable the Back button if we are on the first step
    self.pbnBack.setEnabled(
        step not in [self.step_kw_purpose, self.step_fc_functions1]
        or self.parent_step is not None)

    # Set Next button label
    if (step in [self.step_kw_summary, self.step_fc_analysis]
            and self.parent_step is None):
        self.pbnNext.setText(tr('Finish'))
    elif step == self.step_fc_summary:
        self.pbnNext.setText(tr('Run'))
    else:
        self.pbnNext.setText(tr('Next'))

    # Run analysis after switching to the new step
    if step == self.step_fc_analysis:
        self.step_fc_analysis.setup_and_run_analysis()

    # Set lblSelectCategory label if entering the kw mode
    # from the ifcw mode
    if step == self.step_kw_purpose and self.parent_step:
        if self.parent_step in [self.step_fc_hazlayer_from_canvas,
                                self.step_fc_hazlayer_from_browser]:
            text_label = category_question_hazard
        elif self.parent_step in [self.step_fc_explayer_from_canvas,
                                  self.step_fc_explayer_from_browser]:
            text_label = category_question_exposure
        else:
            text_label = category_question_aggregation
        self.step_kw_purpose.lblSelectCategory.setText(text_label)
def get_full_current_object(arn, current_model):
    """
    Utility method to fetch items from the Current table if they are too big
    for SNS/SQS.

    :param arn: The ARN of the item to fetch.
    :param current_model: The Current table model class to query against.
    :return: The current object, or None if it cannot be found.
    """
    LOG.debug(f'[-->] Item with ARN: {arn} was too big for SNS -- fetching it from the Current table...')
    item = list(current_model.query(arn))

    # If for whatever reason, the item *cannot* be found, then this record should be skipped over.
    # This will happen if this event came in and got processed right after the item was deleted
    # from the Current table. If so, then do nothing -- the deletion event will be processed later.
    if not item:
        return None

    # We need to place the real configuration data into the record so it can be deserialized into
    # the current model correctly:
    return item[0]
def connect(self):
    """
    Connects to the Deluge instance.
    """
    self._connect()
    logger.debug('Connected to Deluge, detecting daemon version')
    self._detect_deluge_version()
    logger.debug('Daemon version {} detected, logging in'
                 .format(self.deluge_version))
    if self.deluge_version == 2:
        result = self.call('daemon.login', self.username, self.password,
                           client_version='deluge-client')
    else:
        result = self.call('daemon.login', self.username, self.password)
    logger.debug('Logged in with value %r' % result)
    self.connected = True
def job_terminate(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /job-xxxx/terminate API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fjob-xxxx%2Fterminate
    """
    return DXHTTPRequest('/%s/terminate' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
def remove(self, path, recursive=True):
    """
    Remove file or directory at location `path`.
    """
    if recursive:
        cmd = ["rm", "-r", path]
    else:
        cmd = ["rm", path]

    self.remote_context.check_output(cmd)
def make_command(tasks, *args, **kwargs):
    """Create a TaskCommand with defined tasks.

    This is a helper function to avoid boilerplate when dealing with simple
    cases (e.g., all cli arguments can be handled by TaskCommand), with no
    special processing. In general, this means a command only needs to run
    established tasks.

    Arguments:
    tasks - the tasks to execute
    args - extra arguments to pass to the TaskCommand constructor
    kwargs - extra keyword arguments to pass to the TaskCommand constructor
    """
    command = TaskCommand(tasks=tasks, *args, **kwargs)
    return command
def serve_forever(self, poll_interval=0.5):
    """Handle one request at a time until shutdown.

    Polls for shutdown every poll_interval seconds. Ignores
    self.timeout. If you need to do periodic tasks, do them in
    another thread.
    """
    self._serving_event.set()
    self._shutdown_request_event.clear()
    TCPServer.serve_forever(self, poll_interval=poll_interval)
def _get_nets_other(self, *args, **kwargs):
    """
    Deprecated. This will be removed in a future release.
    """
    from warnings import warn
    warn('Whois._get_nets_other() has been deprecated and will be '
         'removed. You should now use Whois.get_nets_other().')
    return self.get_nets_other(*args, **kwargs)
def query_mongo_sort_decend(
        database_name,
        collection_name,
        query={},
        skip=0,
        limit=getattr(settings, 'MONGO_LIMIT', 200),
        return_keys=(),
        sortkey=None):
    """Return a response_dict with a list of search results in descending
    order based on a sort key.
    """
    l = []
    response_dict = {}

    try:
        mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
                                     'mongodb://localhost:27017/')
        mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
        db = mc[str(database_name)]
        collection = db[str(collection_name)]

        if return_keys:
            return_dict = {}
            for k in return_keys:
                return_dict[k] = 1
            # print "returndict=",return_dict
            mysearchresult = collection.find(
                query, return_dict).skip(skip).limit(limit).sort(
                    sortkey, DESCENDING)
        else:
            mysearchresult = collection.find(query).skip(
                skip).limit(limit).sort(sortkey, DESCENDING)

        # response_dict['num_results']=int(mysearchresult.count(with_limit_and_skip=False))
        response_dict['code'] = 200
        response_dict['type'] = "search-results"
        for d in mysearchresult:
            d['id'] = d['_id'].__str__()
            del d['_id']
            l.append(d)
        response_dict['results'] = l

    except:
        print("Error reading from Mongo")
        print(str(sys.exc_info()))
        response_dict['num_results'] = 0
        response_dict['code'] = 500
        response_dict['type'] = "Error"
        response_dict['results'] = []
        response_dict['message'] = str(sys.exc_info())

    return response_dict
def setbit(self, key, offset, value):
    """Sets or clears the bit at offset in the string value stored at key.

    :raises TypeError: if offset is not int
    :raises ValueError: if offset is less than 0 or value is not 0 or 1
    """
    if not isinstance(offset, int):
        raise TypeError("offset argument must be int")
    if offset < 0:
        raise ValueError("offset must be greater than or equal to 0")
    if value not in (0, 1):
        raise ValueError("value argument must be either 1 or 0")
    return self.execute(b'SETBIT', key, offset, value)
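A hedged usage sketch, assuming an aioredis-style client that exposes this method as a coroutine (the key name is illustrative):

# Setting bit 7 of a fresh key makes its value the single byte 0x01.
await redis.setbit('bitmap:demo', 7, 1)
assert await redis.get('bitmap:demo') == b'\x01'

# Validation happens client-side before the command is sent, e.g.
# redis.setbit('bitmap:demo', -1, 1) raises ValueError immediately.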
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
    """Return the children that link to me.

    That means that I have to be listed in their _userClasses somehow.
    If field is specified, that field in my children is used as the
    pointer to me. Use this if you have multiple fields referring to
    my class.
    """
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break  # first one found is ok :=)
    if not field:
        # Raising a bare string is illegal in modern Pythons; raise a
        # proper exception instead.
        raise ValueError("No field found, check forgetter's _userClasses")
    sqlname = forgetter._sqlFields[field]
    myID = self._getID()[0]  # assuming single-primary !
    whereList = ["%s='%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAll(whereList, orderBy=orderBy)
async def call_command(bot: NoneBot,
                       ctx: Context_T,
                       name: Union[str, CommandName_T],
                       *,
                       current_arg: str = '',
                       args: Optional[CommandArgs_T] = None,
                       check_perm: bool = True,
                       disable_interaction: bool = False) -> bool:
    """
    Call a command internally.

    This function is typically called by some other commands
    or "handle_natural_language" when handling NLPResult object.

    Note: If disable_interaction is not True, after calling this function,
    any previous command session will be overridden, even if the command
    being called here does not need further interaction (a.k.a asking
    the user for more info).

    :param bot: NoneBot instance
    :param ctx: message context
    :param name: command name
    :param current_arg: command current argument string
    :param args: command args
    :param check_perm: should check permission before running command
    :param disable_interaction: disable the command's further interaction
    :return: the command is successfully called
    """
    cmd = _find_command(name)
    if not cmd:
        return False
    session = CommandSession(bot, ctx, cmd, current_arg=current_arg, args=args)
    return await _real_run_command(session, context_id(session.ctx),
                                   check_perm=check_perm,
                                   disable_interaction=disable_interaction)
def parse_extension_arg(arg, arg_dict):
    """
    Converts argument strings in key=value or key.namespace=value form
    to dictionary entries.

    Parameters
    ----------
    arg : str
        The argument string to parse, which must be in key=value or
        key.namespace=value form.
    arg_dict : dict
        The dictionary into which the key/value pair will be added.
    """
    match = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
    if match is None:
        raise ValueError(
            "invalid extension argument '%s', must be in key=value form" % arg
        )

    name = match.group(1)
    value = match.group(4)
    arg_dict[name] = value
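A brief demonstration of the parser above (the argument strings are illustrative); `re` must be imported for the function to run:

import re

arg_dict = {}
parse_extension_arg('timeout=30', arg_dict)
parse_extension_arg('log.level=debug', arg_dict)
print(arg_dict)  # {'timeout': '30', 'log.level': 'debug'}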
def _parse_abbreviation(self, abbr):
    """
    Parse a team's abbreviation.

    Given the team's HTML name tag, parse their abbreviation.

    Parameters
    ----------
    abbr : string
        A string of a team's HTML name tag.

    Returns
    -------
    string
        Returns a ``string`` of the team's abbreviation.
    """
    abbr = re.sub(r'.*/teams/', '', str(abbr))
    abbr = re.sub(r'/.*', '', abbr)
    return abbr
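A worked example of the two substitutions; the HTML snippet is made up but mirrors a sports-reference-style team link:

import re

tag = '<a href="/teams/HOU/2018.htm">Houston</a>'
abbr = re.sub(r'.*/teams/', '', tag)  # 'HOU/2018.htm">Houston</a>'
abbr = re.sub(r'/.*', '', abbr)       # 'HOU'
print(abbr)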
def delete(self):
    'Delete this file and return the new, deleted JFSFile'
    # url = '%s?dl=true' % self.path
    r = self.jfs.post(url=self.path, params={'dl': 'true'})
    return r
def inputAnalyzeCallback(self, *args, **kwargs):
    """
    Test method for inputAnalyzeCallback

    This method loops over the passed number of files, and optionally
    "delays" in each loop to simulate some analysis. The delay length is
    specified by the '--test <delay>' flag.
    """
    b_status = False
    filesRead = 0
    filesAnalyzed = 0

    for k, v in kwargs.items():
        if k == 'filesRead':
            d_DCMRead = v
        if k == 'path':
            str_path = v

    if len(args):
        at_data = args[0]
        str_path = at_data[0]
        d_read = at_data[1]

    b_status = True
    self.dp.qprint("analyzing:\n%s" % self.pp.pformat(d_read['l_file']),
                   level=5)
    if int(self.f_sleepLength):
        self.dp.qprint("sleeping for: %f" % self.f_sleepLength, level=5)
        time.sleep(self.f_sleepLength)
    filesAnalyzed = len(d_read['l_file'])

    return {
        'status':        b_status,
        'filesAnalyzed': filesAnalyzed,
        'l_file':        d_read['l_file']
    }
def _reproduce_stages(
    G,
    stages,
    node,
    force,
    dry,
    interactive,
    ignore_build_cache,
    no_commit,
    downstream,
):
    r"""Derive the evaluation of the given node for the given graph.

    When you _reproduce a stage_, you want to _evaluate the descendants_
    to know if it makes sense to _recompute_ it. A post-ordered search
    will give us an ordered list of the nodes we want.

    For example, let's say that we have the following pipeline:

                               E
                              / \
                             D   F
                            / \   \
                           B   C   G
                            \ /
                             A

    The derived evaluation of D would be: [A, B, C, D]

    In case that `downstream` option is specified, the desired effect
    is to derive the evaluation starting from the given stage up to the
    ancestors. However, the `networkx.ancestors` returns a set, without
    any guarantee of any order, so we are going to reverse the graph and
    use a pre-ordered search using the given stage as a starting point.

        E                                   A
       / \                                 / \
      D   F                               B   C   G
     / \   \        --- reverse -->        \ /   /
    B   C   G                               D   F
     \ /                                     \ /
      A                                       E

    The derived evaluation of _downstream_ B would be: [B, D, E]
    """
    import networkx as nx

    if downstream:
        # NOTE (py3 only):
        # Python's `deepcopy` defaults to pickle/unpickle the object.
        # Stages are complex objects (with references to `repo`, `outs`,
        # and `deps`) that cause struggles when you try to serialize them.
        # We need to create a copy of the graph itself, and then reverse it,
        # instead of using graph.reverse() directly because it calls
        # `deepcopy` underneath -- unless copy=False is specified.
        pipeline = nx.dfs_preorder_nodes(G.copy().reverse(copy=False), node)
    else:
        pipeline = nx.dfs_postorder_nodes(G, node)

    result = []
    for n in pipeline:
        try:
            ret = _reproduce_stage(
                stages, n, force, dry, interactive, no_commit
            )

            if len(ret) != 0 and ignore_build_cache:
                # NOTE: we are walking our pipeline from the top to the
                # bottom. If one stage is changed, it will be reproduced,
                # which tells us that we should force reproducing all of
                # the other stages down below, even if their direct
                # dependencies didn't change.
                force = True

            result += ret
        except Exception as ex:
            raise ReproductionError(stages[n].relpath, ex)
    return result
r"""Derive the evaluation of the given node for the given graph. When you _reproduce a stage_, you want to _evaluate the descendants_ to know if it make sense to _recompute_ it. A post-ordered search will give us an order list of the nodes we want. For example, let's say that we have the following pipeline: E / \ D F / \ \ B C G \ / A The derived evaluation of D would be: [A, B, C, D] In case that `downstream` option is specifed, the desired effect is to derive the evaluation starting from the given stage up to the ancestors. However, the `networkx.ancestors` returns a set, without any guarantee of any order, so we are going to reverse the graph and use a pre-ordered search using the given stage as a starting point. E A / \ / \ D F B C G / \ \ --- reverse --> \ / / B C G D F \ / \ / A E The derived evaluation of _downstream_ B would be: [B, D, E]
def _get_data(self, url, config, send_sc=True):
    """
    Hit a given URL and return the parsed json.
    """
    # Load basic authentication configuration, if available.
    if config.username and config.password:
        auth = (config.username, config.password)
    else:
        auth = None

    # Load SSL configuration, if available.
    # ssl_verify can be a bool or a string
    # (http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification)
    if isinstance(config.ssl_verify, bool) or isinstance(config.ssl_verify, str):
        verify = config.ssl_verify
    else:
        verify = None
    if config.ssl_cert:
        if config.ssl_key:
            cert = (config.ssl_cert, config.ssl_key)
        else:
            cert = config.ssl_cert
    else:
        cert = None

    resp = None
    try:
        resp = requests.get(
            url,
            timeout=config.timeout,
            headers=headers(self.agentConfig),
            auth=auth,
            verify=verify,
            cert=cert
        )
        resp.raise_for_status()
    except Exception as e:
        # this means we've hit a particular kind of auth error that means
        # the config is broken
        if resp and resp.status_code == 400:
            raise AuthenticationError("The ElasticSearch credentials are incorrect")

        if send_sc:
            self.service_check(
                self.SERVICE_CHECK_CONNECT_NAME,
                AgentCheck.CRITICAL,
                message="Error {} when hitting {}".format(e, url),
                tags=config.service_check_tags,
            )
        raise

    self.log.debug("request to url {} returned: {}".format(url, resp))

    return resp.json()
async def restart(request):
    """
    Returns OK, then waits approximately 1 second and restarts container
    """
    def wait_and_restart():
        log.info('Restarting server')
        sleep(1)
        os.system('kill 1')

    Thread(target=wait_and_restart).start()
    return web.json_response({"message": "restarting"})
def _build_kreemer_cell(data, loc):
    '''
    Constructs the "Kreemer Cell" from the input file. The Kreemer cell is
    simply a set of five lines describing the four nodes of the square
    (closed).

    :param list data:
        Strain data as list of text lines (input from linecache.getlines)
    :param int loc:
        Pointer to location in data
    :returns:
        temp_poly - 5 by 2 numpy array of cell longitudes and latitudes
    '''
    temp_poly = np.empty([5, 2], dtype=float)
    for ival in range(1, 6):
        value = data[loc + ival].rstrip('\n')
        value = value.lstrip(' ')
        value = np.array((value.split(' ', 1))).astype(float)
        temp_poly[ival - 1, :] = value.flatten()
    return temp_poly
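A small synthetic check of the cell parser, assuming NumPy; the coordinates are invented and each line mimics the 'lon lat' layout the function expects:

import numpy as np

data = [
    '> cell header\n',
    '30.0 40.0\n',
    '30.5 40.0\n',
    '30.5 40.5\n',
    '30.0 40.5\n',
    '30.0 40.0\n',  # repeats the first node to close the square
]
poly = _build_kreemer_cell(data, 0)
print(poly.shape)  # (5, 2)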
def simulation_manager(self, thing=None, **kwargs):
    """
    Constructs a new simulation manager.

    :param thing:   Optional - What to put in the new SimulationManager's
                    active stash (either a SimState or a list of SimStates).
    :param kwargs:  Any additional keyword arguments will be passed to the
                    SimulationManager constructor
    :returns:       The new SimulationManager
    :rtype:         angr.sim_manager.SimulationManager

    Many different types can be passed to this method:

    * If nothing is passed in, the SimulationManager is seeded with a state
      initialized for the program entry point, i.e. :meth:`entry_state()`.
    * If a :class:`SimState` is passed in, the SimulationManager is seeded
      with that state.
    * If a list is passed in, the list must contain only SimStates and the
      whole list will be used to seed the SimulationManager.
    """
    if thing is None:
        thing = [self.entry_state()]
    elif isinstance(thing, (list, tuple)):
        if any(not isinstance(val, SimState) for val in thing):
            raise AngrError("Bad type to initialize SimulationManager")
    elif isinstance(thing, SimState):
        thing = [thing]
    else:
        raise AngrError("Bad type to initialize SimulationManager: %s"
                        % repr(thing))

    return SimulationManager(self.project, active_states=thing, **kwargs)
def _variant_levels(level, variant):
    """
    Gets the level for the variant.

    :param int level: the current variant level
    :param int variant: the value for this level if variant
    :returns: a level for the object and one for the function
    :rtype: int * int
    """
    return (level + variant, level + variant) \
        if variant != 0 else (variant, level)
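Two quick evaluations to make the return convention concrete:

print(_variant_levels(2, 1))  # (3, 3): variant != 0, so both levels are level + variant
print(_variant_levels(2, 0))  # (0, 2): not a variant, the object level stays 0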
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """
    Private file object read method. Classes that inherit from this base
    class must implement this method.

    The ``read()`` method that each file object inherits from this base
    class performs the processes common to all file read methods, after
    which it calls the file object's ``_read()`` (the preceding underscore
    denotes that the method is a private method).

    The purpose of the ``_read()`` method is to perform the file read
    operations that are specific to the file that the file object
    represents. This method should add any supporting SQLAlchemy objects to
    the session without committing. The common ``read()`` method handles the
    database commit for all file objects.

    The ``read()`` method processes the user input and passes on the
    information through the many parameters of the ``_read()`` method. As
    the ``_read()`` method should never be called by the user directly, the
    arguments will be defined in terms of what they offer for the developer
    of a new file object needing to implement this method.

    Args:
        directory (str): Directory containing the file to be read. Same as
            given by user in ``read()``.
        filename (str): Name of the file which will be read (e.g.:
            'example.prj'). Same as given by user in ``read()``.
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
            object bound to PostGIS enabled database. Same as given by user
            in ``read()``.
        path (str): Directory and filename combined into the path to the
            file. This is a convenience parameter.
        name (str): Name of the file without extension. This is a
            convenience parameter.
        extension (str): Extension of the file without the name. This is a
            convenience parameter.
        spatial (bool, optional): If True, spatially enabled objects will be
            read in as PostGIS spatial objects. Defaults to False. Same as
            given by user in ``read()``.
        spatialReferenceID (int, optional): Integer id of spatial reference
            system for the model. Required if spatial is True. Same as given
            by user in ``read()``.
        replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional):
            Handle the case when replacement parameters are used in place of
            normal variables. If this is not None, then the user expects
            there to be replacement variables in the file. Use the
            gsshapy.lib.parsetools.valueReadPreprocessor() to handle these.
    """
def escape(u):
    """Escape a string in an OAuth-compatible fashion.

    TODO: verify whether this can in fact be used for OAuth 2
    """
    if not isinstance(u, unicode_type):
        raise ValueError('Only unicode objects are escapable.')
    return quote(u.encode('utf-8'), safe=b'~')
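A short check of the escaping rules, assuming Python 3 where `quote` comes from `urllib.parse` and `unicode_type` is an alias for `str`:

from urllib.parse import quote

unicode_type = str  # assumption: the module's Python 3 alias

print(escape(u'ladies + gentlemen'))  # ladies%20%2B%20gentlemen
print(escape(u'~tilde-stays'))        # ~tilde-stays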
def _constexpr_transform(fn):
    """
    >>> from Redy.Opt.ConstExpr import constexpr, const, optimize, macro
    >>> import dis
    >>> a = 1; b = ""; c = object()
    >>> x = 1
    >>> @optimize
    >>> def f(y):
    >>>     val1: const[int] = a
    >>>     val2: const = b
    >>>     if constexpr[x is c]:
    >>>         return val1, y
    >>>     elif constexpr[x is 1]:
    >>>         return None, y
    >>>     else:
    >>>         return val2, y
    >>> assert f(1) == (None, 1)
    >>> dis.dis(f)

    >>> @optimize
    >>> def f(x):
    >>>     d: const = 1
    >>>     return x + d + constexpr[2]
    #
    #
    >>> dis.dis(f)
    >>> print('result:', f(1))
    #
    #
    >>> @optimize
    >>> def f(z):
    >>>     @macro
    >>>     def g(a):
    >>>         x = a + 1
    #
    >>>     g(z)
    >>>     return x
    #
    #
    >>> dis.dis(f)
    >>> print('result:', f(1))
    #
    >>> c = 10
    #
    #
    >>> @optimize
    >>> def f(x):
    >>>     if constexpr[1 + c < 10]:
    >>>         return x + 1
    >>>     else:
    >>>         return x - 1
    #
    #
    >>> print(dis.dis(f))
    >>> print(f(5))
    #
    >>> @optimize
    >>> def f(x):
    >>>     return (x + constexpr[c * 20]) if constexpr[c > 10] else constexpr[c - 2]
    #
    >>> dis.dis(f)
    >>> print(f(20))

    >>> def g(lst: list):
    >>>     k = 1
    >>>     @optimize
    >>>     def _():
    >>>         nonlocal k
    >>>         f: const = lst.append
    >>>         for i in range(1000):
    >>>             f(i)
    >>>             k += 1
    >>>         f(k)
    >>>     _()
    >>>     return lst
    >>> # dis.dis(g)
    >>> print(g([]))
    """
    code_string = inspect.getsource(fn)
    while _s.match(code_string):
        code_string = textwrap.dedent(code_string)
    module = ast.parse(code_string)
    fn_ast = module.body[0]

    fn: types.FunctionType
    fn_name = fn.__name__
    closure = fn.__closure__
    closure_dict = {v: c.cell_contents
                    for v, c in zip(fn.__code__.co_freevars,
                                    closure if closure else ())}
    ctx = CompilingTimeMapping(fn.__globals__, closure_dict)
    ce = ConstExpr(ctx, [], OrderedDict(), {}, [], fn.__code__.co_filename)
    body = fn_ast.body
    macro_def = new_transformer(ce, MazcroDef)
    macro_invoke = new_transformer(ce, MacroInvoke)
    const_def = new_transformer(ce, ConstExprConstDef)
    const_if = new_transformer(ce, ConstExprIf)
    name_fold = new_transformer(ce, ConstExprNameFold)

    body = _visit_suite(macro_def.visit, body)
    body = _visit_suite(macro_invoke.visit, body)
    body = _visit_suite(const_def.visit, body)
    body = _visit_suite(const_if.visit, body)
    body = _visit_suite(name_fold.visit, body)

    fn_ast.body = body
    module.body = [fn_ast]
    code = compile(module, "<const-optimize>", "exec")
    fn_code: types.CodeType = next(
        each for each in code.co_consts
        if isinstance(each, types.CodeType) and each.co_name == fn_name)

    fn_code = const_link(fn_code, ce.constant_symbols, ce.additional_consts,
                         fn.__code__, ce.nonlocal_names)

    new_fn = types.FunctionType(fn_code, fn.__globals__, fn.__name__,
                                fn.__defaults__, fn.__closure__)
    new_fn.__annotations__ = fn.__annotations__
    new_fn.__doc__ = fn.__doc__
    new_fn.__kwdefaults__ = fn.__kwdefaults__
    new_fn.__module__ = fn.__module__
    return new_fn
def CreateBiddingStrategy(client):
    """Creates a bidding strategy object.

    Args:
        client: AdWordsClient the client to run the example with.

    Returns:
        dict An object representing a bidding strategy.
    """
    # Initialize appropriate service.
    bidding_strategy_service = client.GetService(
        'BiddingStrategyService', version='v201809')

    # Create a shared bidding strategy.
    shared_bidding_strategy = {
        'name': 'Maximize Clicks %s' % uuid.uuid4(),
        'biddingScheme': {
            'xsi_type': 'TargetSpendBiddingScheme',
            # Optionally set additional bidding scheme parameters.
            'bidCeiling': {
                'microAmount': '2000000'
            }
        }
    }

    # Create operation.
    operation = {
        'operator': 'ADD',
        'operand': shared_bidding_strategy
    }

    response = bidding_strategy_service.mutate([operation])
    new_bidding_strategy = response['value'][0]

    print('Shared bidding strategy with name "%s" and ID "%s" of type "%s" '
          'was created.'
          % (new_bidding_strategy['name'], new_bidding_strategy['id'],
             new_bidding_strategy['biddingScheme']['BiddingScheme.Type']))

    return new_bidding_strategy
def load(filename: str, format: str = None):
    """Load a task file and get a ``Project`` back."""
    path = Path(filename).resolve()
    with path.open() as file:
        data = file.read()

    if format is None:
        loader, error_class = _load_autodetect, InvalidMofileFormat
    else:
        try:
            loader, error_class = formats[format]
        except KeyError:
            raise InvalidMofileFormat(f'Unknown file format: {format}')

    try:
        config = loader(data)
    except error_class as e:
        raise InvalidMofileFormat(f'Unable to load task file: {e}')

    return Project(config, path.parent)
def dump(self, output, close_after_write=True):
    """Write data to the output with tabular format.

    Args:
        output (file descriptor or str):
            file descriptor or path to the output file.
        close_after_write (bool, optional):
            Close the output after write.
            Defaults to |True|.
    """
    try:
        output.write
        self.stream = output
    except AttributeError:
        self.stream = io.open(output, "w", encoding="utf-8")

    try:
        self.write_table()
    finally:
        if close_after_write:
            self.stream.close()
            self.stream = sys.stdout
def divide(self, other, out=None):
    """Return ``out = self / other``.

    If ``out`` is provided, the result is written to it.

    See Also
    --------
    LinearSpace.divide
    """
    return self.space.divide(self, other, out=out)
def flat_images(images, grid=None, bfill=1.0, bsz=(1, 1)):
    """
    convert batch image to flat image with margin inserted
    [B,h,w,c] => [H,W,c]
    :param images:
    :param grid: patch grid cell size of (Row, Col)
    :param bfill: border filling value
    :param bsz: int or (int, int) border size
    :return: flatted image
    """
    if images.ndim == 4 and images.shape[-1] == 1:
        images = images.squeeze(axis=-1)
    grid = grid or grid_recommend(len(images), sorted(images[0].shape[:2]))
    if not isinstance(bsz, (tuple, list)):
        bsz = (bsz, bsz)

    imshape = list(images.shape)
    imshape[0] = grid[0] * grid[1]
    imshape[1] += bsz[0]
    imshape[2] += bsz[1]

    # data = np.empty((grid[0] * grid[1], imshape[1], imshape[2]), dtype=images.dtype)
    data = np.empty(imshape, dtype=images.dtype)
    data.fill(bfill)

    bslice0 = slice(0, -bsz[0]) if bsz[0] else slice(None, None)
    bslice1 = slice(0, -bsz[1]) if bsz[1] else slice(None, None)

    data[:len(images), bslice0, bslice1] = images

    imshape = list(grid) + imshape[1:]  # [grid[0], grid[1], H, W, [Channel]]
    data = data.reshape(imshape)
    if len(imshape) == 5:
        data = data.transpose(0, 2, 1, 3, 4)
        imshape = [imshape[0]*imshape[2], imshape[1]*imshape[3], imshape[4]]
    else:  # len == 4
        data = data.transpose(0, 2, 1, 3)
        imshape = [imshape[0]*imshape[2], imshape[1]*imshape[3]]
    data = data.reshape(imshape)

    # remove last margin
    data = data[bslice0, bslice1]

    return data
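A minimal smoke test of the tiling, assuming NumPy; the grid is given explicitly so the `grid_recommend` helper is not needed:

import numpy as np

batch = np.random.rand(6, 8, 8, 3)      # six 8x8 RGB images
flat = flat_images(batch, grid=(2, 3))  # 2 rows x 3 columns of patches
print(flat.shape)  # (17, 26, 3): 2*(8+1)-1 rows, 3*(8+1)-1 columns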
def _get_exchange_key_ntlm_v1(negotiate_flags, session_base_key,
                              server_challenge, lm_challenge_response,
                              lm_hash):
    """
    [MS-NLMP] v28.0 2016-07-14

    4.3.5.1 KXKEY
    Calculates the Key Exchange Key for NTLMv1 authentication. Used for
    signing and sealing messages.

    @param negotiate_flags:
    @param session_base_key: A session key calculated from the user password
        challenge
    @param server_challenge: A random 8-byte response generated by the
        server in the CHALLENGE_MESSAGE
    @param lm_challenge_response: The LmChallengeResponse value computed in
        ComputeResponse
    @param lm_hash: The LMOWF computed in Compute Response
    @return key_exchange_key: The Key Exchange Key (KXKEY) used to sign and
        seal messages and compute the ExportedSessionKey
    """
    if negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY:
        key_exchange_key = hmac.new(
            session_base_key,
            server_challenge + lm_challenge_response[:8]).digest()
    elif negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_LM_KEY:
        des_handler = des.DES(lm_hash[:7])
        first_des = des_handler.encrypt(lm_challenge_response[:8])
        des_handler = des.DES(lm_hash[7:8] +
                              binascii.unhexlify('bdbdbdbdbdbdbd'))
        second_des = des_handler.encrypt(lm_challenge_response[:8])
        key_exchange_key = first_des + second_des
    elif negotiate_flags & NegotiateFlags.NTLMSSP_REQUEST_NON_NT_SESSION_KEY:
        key_exchange_key = lm_hash[:8] + b'\0' * 8
    else:
        key_exchange_key = session_base_key

    return key_exchange_key
def estimate_tau_exp(chains, **kwargs):
    """ Estimate the exponential auto-correlation time for all parameters in a chain. """

    # Calculate the normalised autocorrelation function in each parameter.
    rho = np.nan * np.ones(chains.shape[1:])
    for i in range(chains.shape[2]):
        try:
            rho[:, i] = autocorr.function(np.mean(chains[:, :, i], axis=0),
                                          **kwargs)
        except:
            continue

    # Take the max rho at any step.
    rho_max = np.max(rho, axis=1)

    # Now fit the max rho with an exponential profile.
    x = np.arange(rho_max.size)
    func = lambda tau_exp: np.exp(-x/tau_exp)
    chi = lambda tau_exp: func(tau_exp[0]) - rho_max  # tau_exp is a list

    # Start with 50% of the chain length. probably OK.
    tau_exp, ier = leastsq(chi, [chains.shape[1]/2.])
    return (tau_exp, rho, func(tau_exp))
async def setup_watchdog(self, cb, timeout):
    """Trigger a reconnect after @timeout seconds of inactivity."""
    self._watchdog_timeout = timeout
    self._watchdog_cb = cb
    self._watchdog_task = self.loop.create_task(self._watchdog(timeout))
def unzoom_all(self, event=None, panel=None):
    """zoom out to the full data range"""
    if panel is None:
        panel = self.current_panel
    self.panels[panel].unzoom_all(event=event)
def RandomShuffle(a, seed):
    """
    Random shuffle op.
    """
    if seed:
        np.random.seed(seed)

    r = a.copy()
    np.random.shuffle(r)
    return r,
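A quick seeded call, assuming NumPy; it shows the op is deterministic for a given seed and returns a one-element tuple:

import numpy as np

a = np.arange(5)
(shuffled,) = RandomShuffle(a, seed=42)
print(shuffled)  # same permutation on every run with seed=42
print(a)         # [0 1 2 3 4] -- the input array is not modified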
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Write the data encoding the DeriveKey request payload to a stream.

    Args:
        output_buffer (stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: Raised if the data attribute is not defined.
    """
    local_buffer = utils.BytearrayStream()

    if self._object_type:
        self._object_type.write(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidField(
            "The DeriveKey request payload is missing the object type "
            "field."
        )

    if self._unique_identifiers:
        for unique_identifier in self._unique_identifiers:
            unique_identifier.write(
                local_buffer,
                kmip_version=kmip_version
            )
    else:
        raise exceptions.InvalidField(
            "The DeriveKey request payload is missing the unique "
            "identifiers field."
        )

    if self._derivation_method:
        self._derivation_method.write(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidField(
            "The DeriveKey request payload is missing the derivation "
            "method field."
        )

    if self._derivation_parameters:
        self._derivation_parameters.write(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidField(
            "The DeriveKey request payload is missing the derivation "
            "parameters field."
        )

    if kmip_version < enums.KMIPVersion.KMIP_2_0:
        if self._template_attribute:
            self._template_attribute.write(
                local_buffer,
                kmip_version=kmip_version
            )
        else:
            raise exceptions.InvalidField(
                "The DeriveKey request payload is missing the template "
                "attribute field."
            )
    else:
        if self._template_attribute:
            attrs = objects.convert_template_attribute_to_attributes(
                self._template_attribute
            )
            attrs.write(local_buffer, kmip_version=kmip_version)
        else:
            raise exceptions.InvalidField(
                "The DeriveKey request payload is missing the template "
                "attribute field."
            )

    self.length = local_buffer.length()
    super(DeriveKeyRequestPayload, self).write(
        output_buffer,
        kmip_version=kmip_version
    )
    output_buffer.write(local_buffer.buffer)
def _report_self(self):
    '''
    Reports the kafka monitor uuid to redis
    '''
    key = "stats:kafka-monitor:self:{m}:{u}".format(
        m=socket.gethostname(),
        u=self.my_uuid)
    self.redis_conn.set(key, time.time())
    self.redis_conn.expire(key, self.settings['HEARTBEAT_TIMEOUT'])
def _filter_pb(field_or_unary):
    """Convert a specific protobuf filter to the generic filter type.

    Args:
        field_or_unary (Union[google.cloud.proto.firestore.v1beta1.\
            query_pb2.StructuredQuery.FieldFilter, google.cloud.proto.\
            firestore.v1beta1.query_pb2.StructuredQuery.UnaryFilter]): A
            field or unary filter to convert to a generic filter.

    Returns:
        google.cloud.firestore_v1beta1.types.\
        StructuredQuery.Filter: A "generic" filter.

    Raises:
        ValueError: If ``field_or_unary`` is not a field or unary filter.
    """
    if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter):
        return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary)
    elif isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter):
        return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary)
    else:
        raise ValueError("Unexpected filter type", type(field_or_unary),
                         field_or_unary)
def charge_balance(self):
    r'''Charge imbalance of the mixture, in units of [faraday].
    Mixtures meeting the electroneutrality condition will have an
    imbalance of 0.

    Examples
    --------
    >>> Mixture(['Na+', 'Cl-', 'water'], zs=[.01, .01, .98]).charge_balance
    0.0
    '''
    return sum([zi*ci for zi, ci in zip(self.zs, self.charges)])
def read_message_from_pipe(pipe_handle): """ (coroutine) Read message from this pipe. Return text. """ data = yield From(read_message_bytes_from_pipe(pipe_handle)) assert isinstance(data, bytes) raise Return(data.decode('utf-8', 'ignore'))
(coroutine) Read message from this pipe. Return text.
def set_content_type(self): """ Set the content type based on the file extension used in the object name. """ if self.object_name and not self.content_type: # XXX nothing is currently done with the encoding... we may # need to in the future self.content_type, encoding = mimetypes.guess_type( self.object_name, strict=False)
Set the content type based on the file extension used in the object name.
def hide_busy(self):
    """Unlock buttons. A helper function to indicate processing is done."""
    self.progress_bar.hide()
    self.parent.pbnNext.setEnabled(True)
    self.parent.pbnBack.setEnabled(True)
    self.parent.pbnCancel.setEnabled(True)
    self.parent.repaint()
    disable_busy_cursor()
Unlock buttons. A helper function to indicate processing is done.
def execute_on_key_owner(self, key, task): """ Executes a task on the owner of the specified key. :param key: (object), the specified key. :param task: (Task), a task executed on the owner of the specified key. :return: (:class:`~hazelcast.future.Future`), future representing pending completion of the task. """ check_not_none(key, "key can't be None") key_data = self._to_data(key) partition_id = self._client.partition_service.get_partition_id(key_data) uuid = self._get_uuid() return self._encode_invoke_on_partition(executor_service_submit_to_partition_codec, partition_id, uuid=uuid, callable=self._to_data(task), partition_id=partition_id)
Executes a task on the owner of the specified key. :param key: (object), the specified key. :param task: (Task), a task executed on the owner of the specified key. :return: (:class:`~hazelcast.future.Future`), future representing pending completion of the task.
def truncate(sequence): """ Create a potentially shortened text display of a list. Parameters ---------- sequence : list An indexable sequence of elements. Returns ------- str The list as a formatted string. """ if len(sequence) > LIST_SLICE: return ", ".join(sequence[:LIST_SLICE] + ["..."]) else: return ", ".join(sequence)
Create a potentially shortened text display of a list. Parameters ---------- sequence : list An indexable sequence of elements. Returns ------- str The list as a formatted string.
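A self-contained sketch of the helper; LIST_SLICE is a module-level constant in the original, assumed here to be 4 purely for illustration.

LIST_SLICE = 4   # assumed value; the original defines this at module level

def truncate_sketch(sequence):
    if len(sequence) > LIST_SLICE:
        return ", ".join(sequence[:LIST_SLICE] + ["..."])
    return ", ".join(sequence)

print(truncate_sketch(["a", "b", "c"]))                  # a, b, c
print(truncate_sketch(["a", "b", "c", "d", "e", "f"]))   # a, b, c, d, ...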
def run_server(self, port):
    """Run a server binding to the given port."""
    try:
        self.server = MultiThreadedHTTPServer(('0.0.0.0', port), Handler)
    except socket.error as e:
        # failed to bind port
        logger.error(str(e))
        sys.exit(1)
    logger.info("HTTP server at http://0.0.0.0:%d (ctrl-c to stop) ..." % port)
    try:
        self.server.serve_forever()
    except KeyboardInterrupt:
        logger.info("^C received, shutting down server")
        self.shutdown_server()
Run a server binding to the given port.
def filter_taxa(fasta_path: 'path to fasta input', taxids: 'comma delimited list of taxon IDs', unclassified: 'pass sequences unclassified at superkingdom level >(0)' = False, discard: 'discard specified taxa' = False, warnings: 'show warnings' = False): ''' Customisable filtering of tictax flavoured fasta files ''' configure_warnings(warnings) records = SeqIO.parse(fasta_path, 'fasta') filtered_records = tictax.filter_taxa(records, map(int, taxids.split(',')), unclassified, discard) SeqIO.write(filtered_records, sys.stdout, 'fasta')
Customisable filtering of tictax flavoured fasta files
def register_id(self, id, module): """Associate the given id with the given project module.""" assert isinstance(id, basestring) assert isinstance(module, basestring) self.id2module[id] = module
Associate the given id with the given project module.
def delete(self, key): '''Removes the object named by `key`. Args: key: Key naming the object to remove. ''' path = self.object_path(key) if os.path.exists(path): os.remove(path)
Removes the object named by `key`. Args: key: Key naming the object to remove.
def clear(self, *args): """ Set default values to **self.fields_to_clear**. In addition, it is possible to pass extra fields to clear. :param args: extra fields to clear. """ for field in self.fields_to_clear + list(args): setattr(self, field, None)
Set default values to **self.fields_to_clear**. In addition, it is possible to pass extra fields to clear. :param args: extra fields to clear.
def exists(self, symbol):
    """Checks if a symbol exists, by name.

    Parameters
    ----------
    symbol : str or Symbol

    Returns
    -------
    bool
    """
    if isinstance(symbol, str):
        sym = symbol
    elif isinstance(symbol, Symbol):
        sym = symbol.name

    syms = self.ses.query(Symbol).filter(Symbol.name == sym).all()
    return len(syms) > 0
Checks if a symbol exists, by name.

Parameters
----------
symbol : str or Symbol

Returns
-------
bool
def ParseFileObject(self, parser_mediator, file_object): """Parses a bencoded file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed. """ file_object.seek(0, os.SEEK_SET) header = file_object.read(2) if not self.BENCODE_RE.match(header): raise errors.UnableToParseFile('Not a valid Bencoded file.') file_object.seek(0, os.SEEK_SET) try: data_object = bencode.bdecode(file_object.read()) except (IOError, bencode.BTFailure) as exception: raise errors.UnableToParseFile( '[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format( self.NAME, parser_mediator.GetDisplayName(), exception)) if not data_object: raise errors.UnableToParseFile( '[{0:s}] missing decoded data for file: {1:s}'.format( self.NAME, parser_mediator.GetDisplayName())) for plugin in self._plugins: try: plugin.UpdateChainAndProcess(parser_mediator, data=data_object) except errors.WrongBencodePlugin as exception: logger.debug('[{0:s}] wrong plugin: {1!s}'.format( self.NAME, exception))
Parses a bencoded file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def LDREX(cpu, dest, src, offset=None): """ LDREX loads data from memory. * If the physical address has the shared TLB attribute, LDREX tags the physical address as exclusive access for the current processor, and clears any exclusive access tag for this processor for any other physical address. * Otherwise, it tags the fact that the executing processor has an outstanding tagged physical address. :param Armv7Operand dest: the destination register; register :param Armv7Operand src: the source operand: register """ # TODO: add lock mechanism to underlying memory --GR, 2017-06-06 cpu._LDR(dest, src, 32, False, offset)
LDREX loads data from memory. * If the physical address has the shared TLB attribute, LDREX tags the physical address as exclusive access for the current processor, and clears any exclusive access tag for this processor for any other physical address. * Otherwise, it tags the fact that the executing processor has an outstanding tagged physical address. :param Armv7Operand dest: the destination register; register :param Armv7Operand src: the source operand: register
def _create_field_vectors(self): """Builds a vector space model of every document using lunr.Vector.""" field_vectors = {} term_idf_cache = {} for field_ref, term_frequencies in self.field_term_frequencies.items(): _field_ref = FieldRef.from_string(field_ref) field_name = _field_ref.field_name field_length = self.field_lengths[field_ref] field_vector = Vector() field_boost = self._fields[field_name].boost doc_boost = self._documents[_field_ref.doc_ref].get("boost", 1) for term, tf in term_frequencies.items(): term_index = self.inverted_index[term]["_index"] if term not in term_idf_cache: idf = Idf(self.inverted_index[term], self.document_count) term_idf_cache[term] = idf else: idf = term_idf_cache[term] score = ( idf * ((self._k1 + 1) * tf) / ( self._k1 * ( 1 - self._b + self._b * (field_length / self.average_field_length[field_name]) ) + tf ) ) score *= field_boost score *= doc_boost score_with_precision = round(score, 3) field_vector.insert(term_index, score_with_precision) field_vectors[field_ref] = field_vector self.field_vectors = field_vectors
Builds a vector space model of every document using lunr.Vector.
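The per-term score computed above is the classic BM25 weighting. A minimal sketch of just that formula; the defaults k1=1.2 and b=0.75 are assumptions for illustration, not values read from the source.

def bm25_term_score(idf, tf, field_length, avg_field_length, k1=1.2, b=0.75):
    # Term frequency is saturated by k1 and normalized by relative field
    # length via b, then scaled by the term's inverse document frequency.
    norm = k1 * (1 - b + b * (field_length / avg_field_length))
    return idf * ((k1 + 1) * tf) / (norm + tf)

# A term appearing twice in a field slightly longer than average:
print(round(bm25_term_score(idf=1.5, tf=2, field_length=12,
                            avg_field_length=10), 3))   # 1.953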
def p_systemcall_signed(self, p): # for $signed system task 'systemcall : DOLLER SIGNED LPAREN sysargs RPAREN' p[0] = SystemCall(p[2], p[4], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
systemcall : DOLLER SIGNED LPAREN sysargs RPAREN
def setup_conf(conf_globals): """ Setup function that is called from within the project's docs/conf.py module that takes the conf module's globals() and assigns the values that can be automatically determined from the current project, such as project name, package name, version and author. """ project_path = abspath(join(dirname(conf_globals["__file__"]), "..")) chdir(project_path) sys.path.insert(0, project_path) authors_file = "AUTHORS" version = None author = None setup = "setup.py" setup_path = join(project_path, setup) ignore = (setup,) # First try and get the author and version from setup.py if exists(setup_path): try: import setuptools except ImportError: pass else: version = get_setup_attribute("version", setup_path) if version == "0.0.0": version = None author = get_setup_attribute("author", setup_path) if author == "UNKNOWN": author = None # Iterate through each of the files in the project's directory, # looking for an AUTHORS file for the project's author, or # importable packages/modules for the version. for name in listdir(project_path): path = join(project_path, name) if name.upper() == authors_file: with open(path, "r") as f: for line in f.readlines(): line = line.strip("*- \n\r\t") if line: author = decode_utf8(line) break elif name not in ignore and (isdir(path) or splitext(name)[1] == ".py"): try: module = __import__(name) except (ImportError, ValueError): continue if not version: version = get_version(module) if version and not author: try: author = decode_utf8(getattr(module, "__author__")) except AttributeError: pass # Ask for any values that couldn't be found. if not version: version = input("No version number found, please enter one: ") if not author: author = input("No author found, please enter one: ") author = decode_utf8(author) with open(join(project_path, authors_file), "wb") as f: f.write(author.encode('utf-8')) # Inject the minimum required names into the conf module. settings = { "version": version, "release": version, "project": project_path.rstrip(sep).split(sep)[-1], "master_doc": "index", "copyright": "%s, %s" % (datetime.now().year, author), } pad = max([len(k) for k in settings.keys()]) + 3 print() print("sphinx-me using the following values:") print() print("\n".join([(k + ":").ljust(pad) + v for k, v in settings.items()])) print() conf_globals.update(settings)
Setup function that is called from within the project's docs/conf.py module that takes the conf module's globals() and assigns the values that can be automatically determined from the current project, such as project name, package name, version and author.
def lookup_rdap(self, hr=True, show_name=False, colorize=True, **kwargs): """ The function for wrapping IPWhois.lookup_rdap() and generating formatted CLI output. Args: hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. kwargs: Arguments to pass to IPWhois.lookup_rdap(). Returns: str: The generated output. """ # Perform the RDAP lookup ret = self.obj.lookup_rdap(**kwargs) if script_args.json: output = json.dumps(ret) else: # Header output = self.generate_output_header(query_type='RDAP') # ASN output += self.generate_output_asn( json_data=ret, hr=hr, show_name=show_name, colorize=colorize ) output += self.generate_output_newline(colorize=colorize) # Entities output += self.generate_output_entities( json_data=ret, hr=hr, show_name=show_name, colorize=colorize ) output += self.generate_output_newline(colorize=colorize) # Network output += self.generate_output_network( json_data=ret, hr=hr, show_name=show_name, colorize=colorize ) output += self.generate_output_newline(colorize=colorize) # Objects output += self.generate_output_objects( json_data=ret, hr=hr, show_name=show_name, colorize=colorize ) output += self.generate_output_newline(colorize=colorize) if 'nir' in ret: # NIR output += self.generate_output_nir( json_data=ret, hr=hr, show_name=show_name, colorize=colorize ) output += self.generate_output_newline(colorize=colorize) return output
The function for wrapping IPWhois.lookup_rdap() and generating formatted CLI output. Args: hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. kwargs: Arguments to pass to IPWhois.lookup_rdap(). Returns: str: The generated output.
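A hedged sketch of the library call this CLI helper wraps, assuming the ipwhois package is installed and network access is available; the address is illustrative.

from ipwhois import IPWhois

obj = IPWhois('74.125.225.229')      # illustrative address
results = obj.lookup_rdap(depth=1)   # the lookup wrapped by the method above
print(results['asn'], results['asn_description'])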
def update(self):
    """Update the host/system info using the input method.

    Return the stats (dict)
    """
    # Init new stats
    stats = self.get_init_value()

    if self.input_method == 'local':
        # Update stats using the standard system lib
        stats['os_name'] = platform.system()
        stats['hostname'] = platform.node()
        stats['platform'] = platform.architecture()[0]
        if stats['os_name'] == "Linux":
            try:
                linux_distro = platform.linux_distribution()
            except AttributeError:
                stats['linux_distro'] = _linux_os_release()
            else:
                if linux_distro[0] == '':
                    stats['linux_distro'] = _linux_os_release()
                else:
                    stats['linux_distro'] = ' '.join(linux_distro[:2])
            stats['os_version'] = platform.release()
        elif (stats['os_name'].endswith('BSD') or
              stats['os_name'] == 'SunOS'):
            stats['os_version'] = platform.release()
        elif stats['os_name'] == "Darwin":
            stats['os_version'] = platform.mac_ver()[0]
        elif stats['os_name'] == "Windows":
            os_version = platform.win32_ver()
            stats['os_version'] = ' '.join(os_version[::2])
            # If the Python interpreter is 32-bit, the Windows operating
            # system may still be 64-bit
            if stats['platform'] == '32bit' and 'PROCESSOR_ARCHITEW6432' in os.environ:
                stats['platform'] = '64bit'
        else:
            stats['os_version'] = ""
        # Add human readable name
        if stats['os_name'] == "Linux":
            stats['hr_name'] = stats['linux_distro']
        else:
            stats['hr_name'] = '{} {}'.format(
                stats['os_name'], stats['os_version'])
        stats['hr_name'] += ' {}'.format(stats['platform'])
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        try:
            stats = self.get_stats_snmp(
                snmp_oid=snmp_oid[self.short_system_name])
        except KeyError:
            stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])
        # Default behavior: display all the information
        stats['os_name'] = stats['system_name']
        # Windows OS tips
        if self.short_system_name == 'windows':
            for r, v in iteritems(snmp_to_human['windows']):
                if re.search(r, stats['system_name']):
                    stats['os_name'] = v
                    break
        # Add human readable name
        stats['hr_name'] = stats['os_name']

    # Update the stats
    self.stats = stats

    return self.stats
Update the host/system info using the input method. Return the stats (dict)
def to_bytesize(value, default_unit=None, base=DEFAULT_BASE): """Convert `value` to bytes, accepts notations such as "4k" to mean 4096 bytes Args: value (str | unicode | int | None): Number of bytes optionally suffixed by a char from UNITS default_unit (str | unicode | None): Default unit to use for unqualified values base (int): Base to use (usually 1024) Returns: (int | None): Deduced bytesize value, if possible """ if isinstance(value, (int, float)): return unitized(value, default_unit, base) if value is None: return None try: if value[-1].lower() == "b": # Accept notations such as "1mb", as they get used out of habit value = value[:-1] unit = value[-1:].lower() if unit.isdigit(): unit = default_unit else: value = value[:-1] return unitized(to_number(float, value), unit, base) except (IndexError, TypeError, ValueError): return None
Convert `value` to bytes, accepts notations such as "4k" to mean 4096 bytes Args: value (str | unicode | int | None): Number of bytes optionally suffixed by a char from UNITS default_unit (str | unicode | None): Default unit to use for unqualified values base (int): Base to use (usually 1024) Returns: (int | None): Deduced bytesize value, if possible
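A self-contained sketch of the same parsing logic; the unit table mirrors the usual k/m/g/t suffixes and is an assumption, since UNITS, unitized, and to_number are defined elsewhere in the original module.

UNITS = {"": 0, "k": 1, "m": 2, "g": 3, "t": 4}   # assumed suffix table

def to_bytesize_sketch(value, default_unit="", base=1024):
    if isinstance(value, (int, float)):
        return int(round(value * base ** UNITS[default_unit]))
    try:
        if value[-1].lower() == "b":   # tolerate "1mb" as well as "1m"
            value = value[:-1]
        unit = value[-1:].lower()
        if unit.isdigit():
            unit = default_unit
        else:
            value = value[:-1]
        return int(round(float(value) * base ** UNITS[unit]))
    except (IndexError, KeyError, TypeError, ValueError):
        return None

print(to_bytesize_sketch("4k"))    # 4096
print(to_bytesize_sketch("1mb"))   # 1048576
print(to_bytesize_sketch("10"))    # 10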
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'available') and self.available is not None: _dict['available'] = self.available if hasattr(self, 'processing') and self.processing is not None: _dict['processing'] = self.processing if hasattr(self, 'failed') and self.failed is not None: _dict['failed'] = self.failed if hasattr(self, 'pending') and self.pending is not None: _dict['pending'] = self.pending return _dict
Return a json dictionary representing this model.
def dist_eudex(src, tar, weights='exponential', max_length=8):
    """Return normalized Hamming distance between Eudex hashes of two terms.

    This is a wrapper for :py:meth:`Eudex.dist`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    weights : str, iterable, or generator function
        The weights or weights generator function
    max_length : int
        The number of characters to encode as a eudex hash

    Returns
    -------
    float
        The normalized Eudex Hamming distance

    Examples
    --------
    >>> round(dist_eudex('cat', 'hat'), 12)
    0.062745098039
    >>> round(dist_eudex('Niall', 'Neil'), 12)
    0.000980392157
    >>> round(dist_eudex('Colin', 'Cuilen'), 12)
    0.004901960784
    >>> round(dist_eudex('ATCG', 'TAGC'), 12)
    0.197549019608

    """
    return Eudex().dist(src, tar, weights, max_length)
Return normalized Hamming distance between Eudex hashes of two terms.

This is a wrapper for :py:meth:`Eudex.dist`.

Parameters
----------
src : str
    Source string for comparison
tar : str
    Target string for comparison
weights : str, iterable, or generator function
    The weights or weights generator function
max_length : int
    The number of characters to encode as a eudex hash

Returns
-------
float
    The normalized Eudex Hamming distance

Examples
--------
>>> round(dist_eudex('cat', 'hat'), 12)
0.062745098039
>>> round(dist_eudex('Niall', 'Neil'), 12)
0.000980392157
>>> round(dist_eudex('Colin', 'Cuilen'), 12)
0.004901960784
>>> round(dist_eudex('ATCG', 'TAGC'), 12)
0.197549019608
def loadfile(args): '''load a log file (path given by arg)''' mestate.console.write("Loading %s...\n" % args) t0 = time.time() mlog = mavutil.mavlink_connection(args, notimestamps=False, zero_time_base=False, progress_callback=progress_bar) mestate.filename = args mestate.mlog = mlog mestate.status.msgs = mlog.messages t1 = time.time() mestate.console.write("\ndone (%u messages in %.1fs)\n" % (mestate.mlog._count, t1-t0)) global flightmodes flightmodes = mlog.flightmode_list() load_graphs() setup_menus()
load a log file (path given by arg)
def fastcc_consistent_subset(model, epsilon, solver): """Return consistent subset of model. The largest consistent subset is returned as a set of reaction names. Args: model: :class:`MetabolicModel` to solve. epsilon: Flux threshold value. solver: LP solver instance to use. Returns: Set of reaction IDs in the consistent reaction subset. """ reaction_set = set(model.reactions) return reaction_set.difference(fastcc(model, epsilon, solver))
Return consistent subset of model. The largest consistent subset is returned as a set of reaction names. Args: model: :class:`MetabolicModel` to solve. epsilon: Flux threshold value. solver: LP solver instance to use. Returns: Set of reaction IDs in the consistent reaction subset.
def update(self): """Update |TTM| based on :math:`TTM = TT+DTTM`. >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> nmbzones(1) >>> zonetype(FIELD) >>> tt(1.0) >>> dttm(-2.0) >>> derived.ttm.update() >>> derived.ttm ttm(-1.0) """ con = self.subpars.pars.control self(con.tt+con.dttm)
Update |TTM| based on :math:`TTM = TT+DTTM`. >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> nmbzones(1) >>> zonetype(FIELD) >>> tt(1.0) >>> dttm(-2.0) >>> derived.ttm.update() >>> derived.ttm ttm(-1.0)
def sync(self): """ Fetch the list of apps from Marathon, find the domains that require certificates, and issue certificates for any domains that don't already have a certificate. """ self.log.info('Starting a sync...') def log_success(result): self.log.info('Sync completed successfully') return result def log_failure(failure): self.log.failure('Sync failed', failure, LogLevel.error) return failure return (self.marathon_client.get_apps() .addCallback(self._apps_acme_domains) .addCallback(self._filter_new_domains) .addCallback(self._issue_certs) .addCallbacks(log_success, log_failure))
Fetch the list of apps from Marathon, find the domains that require certificates, and issue certificates for any domains that don't already have a certificate.
def _from_any_pb(pb_type, any_pb): """Converts an Any protobuf to the specified message type Args: pb_type (type): the type of the message that any_pb stores an instance of. any_pb (google.protobuf.any_pb2.Any): the object to be converted. Returns: pb_type: An instance of the pb_type message. Raises: TypeError: if the message could not be converted. """ msg = pb_type() if not any_pb.Unpack(msg): raise TypeError( "Could not convert {} to {}".format( any_pb.__class__.__name__, pb_type.__name__ ) ) return msg
Converts an Any protobuf to the specified message type Args: pb_type (type): the type of the message that any_pb stores an instance of. any_pb (google.protobuf.any_pb2.Any): the object to be converted. Returns: pb_type: An instance of the pb_type message. Raises: TypeError: if the message could not be converted.
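A minimal round-trip demo of the conversion, assuming the protobuf package is installed and _from_any_pb is importable; Timestamp is just a convenient well-known message type.

from google.protobuf import any_pb2
from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=1)
any_msg = any_pb2.Any()
any_msg.Pack(ts)   # wrap the message in an Any envelope

restored = _from_any_pb(timestamp_pb2.Timestamp, any_msg)
assert restored.seconds == 1   # unpacked back into the requested type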
def replace_all(expression: Expression, rules: Iterable[ReplacementRule],
                max_count: int=math.inf) \
        -> Union[Expression, Sequence[Expression]]:
    """Replace all occurrences of the patterns according to the replacement rules.

    A replacement rule consists of a *pattern*, that is matched against any subexpression
    of the expression. If a match is found, the *replacement* callback of the rule is called with
    the variables from the match substitution. Whatever the callback returns is used as a replacement for the
    matched subexpression. This can either be a single expression or a sequence of expressions, which is then
    integrated into the surrounding operation in place of the subexpression.

    Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions
    will be matched.

    Args:
        expression:
            The expression to which the replacement rules are applied.
        rules:
            A collection of replacement rules that are applied to the expression.
        max_count:
            If given, at most *max_count* applications of the rules are performed. Otherwise, the rules
            are applied until there is no more match. If the set of replacement rules is not confluent,
            the replacement might not terminate without a *max_count* set.

    Returns:
        The resulting expression after the application of the replacement rules. This can also be a sequence of
        expressions, if the root expression is replaced with a sequence of expressions by a rule.
    """
    rules = [ReplacementRule(pattern, replacement) for pattern, replacement in rules]
    replaced = True
    replace_count = 0
    while replaced and replace_count < max_count:
        replaced = False
        for subexpr, pos in preorder_iter_with_position(expression):
            for pattern, replacement in rules:
                try:
                    subst = next(match(subexpr, pattern))
                    result = replacement(**subst)
                    expression = replace(expression, pos, result)
                    replaced = True
                    break
                except StopIteration:
                    pass
            if replaced:
                break
        replace_count += 1

    return expression
Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern*, that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression. Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched. Args: expression: The expression to which the replacement rules are applied. rules: A collection of replacement rules that are applied to the expression. max_count: If given, at most *max_count* applications of the rules are performed. Otherwise, the rules are applied until there is no more match. If the set of replacement rules is not confluent, the replacement might not terminate without a *max_count* set. Returns: The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule.
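A hedged usage sketch based on the matchpy README; the operation f and the rewrite f(x, x) -> x are illustrative choices, not taken from the source.

from matchpy import (Arity, Operation, Pattern, ReplacementRule, Symbol,
                     Wildcard, replace_all)

f = Operation.new('f', Arity.binary)
x_ = Wildcard.dot('x')

# One rule: collapse f applied to two identical arguments.
rule = ReplacementRule(Pattern(f(x_, x_)), lambda x: x)
print(replace_all(f(Symbol('a'), Symbol('a')), [rule]))   # a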
def _get_py_dictionary(self, var, names=None, used___dict__=False): ''' :return tuple(names, used___dict__), where used___dict__ means we have to access using obj.__dict__[name] instead of getattr(obj, name) ''' # TODO: Those should be options (would fix https://github.com/Microsoft/ptvsd/issues/66). filter_private = False filter_special = True filter_function = True filter_builtin = True if not names: names, used___dict__ = self.get_names(var) d = {} # Be aware that the order in which the filters are applied attempts to # optimize the operation by removing as many items as possible in the # first filters, leaving fewer items for later filters if filter_builtin or filter_function: for name in names: try: name_as_str = name if name_as_str.__class__ != str: name_as_str = '%r' % (name_as_str,) if filter_special: if name_as_str.startswith('__') and name_as_str.endswith('__'): continue if filter_private: if name_as_str.startswith('_') or name_as_str.endswith('__'): continue if not used___dict__: attr = getattr(var, name) else: attr = var.__dict__[name] # filter builtins? if filter_builtin: if inspect.isbuiltin(attr): continue # filter functions? if filter_function: if inspect.isroutine(attr) or isinstance(attr, MethodWrapperType): continue except: # if some error occurs getting it, let's put it to the user. strIO = StringIO.StringIO() traceback.print_exc(file=strIO) attr = strIO.getvalue() d[name_as_str] = attr return d, used___dict__
:return tuple(names, used___dict__), where used___dict__ means we have to access using obj.__dict__[name] instead of getattr(obj, name)
def batch_to_ids(batch: List[List[str]]) -> torch.Tensor: """ Converts a batch of tokenized sentences to a tensor representing the sentences with encoded characters (len(batch), max sentence length, max word length). Parameters ---------- batch : ``List[List[str]]``, required A list of tokenized sentences. Returns ------- A tensor of padded character ids. """ instances = [] indexer = ELMoTokenCharactersIndexer() for sentence in batch: tokens = [Token(token) for token in sentence] field = TextField(tokens, {'character_ids': indexer}) instance = Instance({"elmo": field}) instances.append(instance) dataset = Batch(instances) vocab = Vocabulary() dataset.index_instances(vocab) return dataset.as_tensor_dict()['elmo']['character_ids']
Converts a batch of tokenized sentences to a tensor representing the sentences with encoded characters (len(batch), max sentence length, max word length). Parameters ---------- batch : ``List[List[str]]``, required A list of tokenized sentences. Returns ------- A tensor of padded character ids.
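A usage sketch, assuming allennlp and torch are installed; the padded shape follows from ELMo's fixed 50 character ids per token.

sentences = [['First', 'sentence', '.'], ['Another', '.']]
character_ids = batch_to_ids(sentences)
print(character_ids.shape)   # torch.Size([2, 3, 50]): batch, tokens, chars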
def show_tree(model=None): """Display the model tree window. Args: model: :class:`Model <modelx.core.model.Model>` object. Defaults to the current model. Warnings: For this function to work with Spyder, *Graphics backend* option of Spyder must be set to *inline*. """ if model is None: model = mx.cur_model() view = get_modeltree(model) app = QApplication.instance() if not app: raise RuntimeError("QApplication does not exist.") view.show() app.exec_()
Display the model tree window. Args: model: :class:`Model <modelx.core.model.Model>` object. Defaults to the current model. Warnings: For this function to work with Spyder, *Graphics backend* option of Spyder must be set to *inline*.
def parse_phones(self):
    """Parse TextGrid phone intervals.

    This method parses the phone intervals in a TextGrid to extract each
    phone and each phone's start and end times in the audio recording. For
    each phone, it instantiates the class Phone(), with the phone and its
    start and end times as attributes of that class instance, and returns
    the list of Phone instances.
    """
    phones = []
    for i in self.phone_intervals:
        start = float(i[i.index('xmin = ')+7:
                        i.index('xmin = ')+12].strip('\t').strip('\n'))
        end = float(i[i.index('xmax = ')+7:
                      i.index('xmax = ')+12].strip('\t').strip('\n'))
        phone = i[i.index('\"')+1:i.index("$")]
        phones.append(Phone(phone, start, end))
    return phones
Parse TextGrid phone intervals.

This method parses the phone intervals in a TextGrid to extract each
phone and each phone's start and end times in the audio recording. For
each phone, it instantiates the class Phone(), with the phone and its
start and end times as attributes of that class instance, and returns
the list of Phone instances.
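The fixed-width slicing above assumes every time stamp occupies exactly five characters. A more robust regex-based sketch of the same extraction; the sample interval text is an illustrative assumption about the TextGrid layout.

import re

INTERVAL = re.compile(r'xmin = (?P<start>[\d.]+)\s+'
                      r'xmax = (?P<end>[\d.]+)\s+'
                      r'text = "(?P<label>[^"]*)"')

sample = 'xmin = 0.37\nxmax = 0.52\ntext = "ah"'
m = INTERVAL.search(sample)
print(float(m.group('start')), float(m.group('end')), m.group('label'))
# 0.37 0.52 ah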
def resize(self, width: int, height: int): """ Replacement for Qt's resizeGL method. """ self.width = width // self.widget.devicePixelRatio() self.height = height // self.widget.devicePixelRatio() self.buffer_width = width self.buffer_height = height if self.ctx: self.set_default_viewport() # Make sure we notify the example about the resize super().resize(self.buffer_width, self.buffer_height)
Replacement for Qt's resizeGL method.
def get_url(access_token, endpoint=ams_rest_endpoint, flag=True):
    '''Get Media Services Final Endpoint URL.

    Args:
        access_token (str): A valid Azure authentication token.
        endpoint (str): Azure Media Services Initial Endpoint.
        flag (bool): Flag passed through to do_ams_get_url.

    Returns:
        HTTP response. JSON body.
    '''
    return do_ams_get_url(endpoint, access_token, flag)
Get Media Services Final Endpoint URL.

Args:
    access_token (str): A valid Azure authentication token.
    endpoint (str): Azure Media Services Initial Endpoint.
    flag (bool): Flag passed through to do_ams_get_url.

Returns:
    HTTP response. JSON body.
def CA_code_header(fname_out, Nca): """ Write 1023 bit CA (Gold) Code Header Files Mark Wickert February 2015 """ dir_path = os.path.dirname(os.path.realpath(__file__)) ca = loadtxt(dir_path + '/ca1thru37.txt', dtype=int16, usecols=(Nca - 1,), unpack=True) M = 1023 # code period N = 23 # code bits per line Sca = 'ca' + str(Nca) f = open(fname_out, 'wt') f.write('//define a CA code\n\n') f.write('#include <stdint.h>\n\n') f.write('#ifndef N_CA\n') f.write('#define N_CA %d\n' % M) f.write('#endif\n') f.write('/*******************************************************************/\n'); f.write('/* 1023 Bit CA Gold Code %2d */\n' \ % Nca); f.write('int8_t ca%d[N_CA] = {' % Nca) kk = 0; for k in range(M): # k_mod = k % M if (kk < N - 1) and (k < M - 1): f.write('%d,' % ca[k]) kk += 1 elif (kk == N - 1) & (k < M - 1): f.write('%d,\n' % ca[k]) if k < M: if Nca < 10: f.write(' ') else: f.write(' ') kk = 0 else: f.write('%d' % ca[k]) f.write('};\n') f.write('/*******************************************************************/\n') f.close()
Write 1023 bit CA (Gold) Code Header Files Mark Wickert February 2015
def environment_as(**kwargs): """Update the environment to the supplied values, for example: with environment_as(PYTHONPATH='foo:bar:baz', PYTHON='/usr/bin/python2.7'): subprocess.Popen(foo).wait() """ new_environment = kwargs old_environment = {} def setenv(key, val): if val is not None: os.environ[key] = val if PY3 else _os_encode(val) else: if key in os.environ: del os.environ[key] for key, val in new_environment.items(): old_environment[key] = os.environ.get(key) setenv(key, val) try: yield finally: for key, val in old_environment.items(): setenv(key, val)
Update the environment to the supplied values, for example: with environment_as(PYTHONPATH='foo:bar:baz', PYTHON='/usr/bin/python2.7'): subprocess.Popen(foo).wait()
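Because the function yields, in its home module it is presumably wrapped with contextlib.contextmanager (the decorator is not shown above). A simplified, self-contained Python 3 sketch:

import os
from contextlib import contextmanager

@contextmanager
def environment_as_sketch(**kwargs):
    # Save the old values, apply the new ones, and restore on exit.
    old = {key: os.environ.get(key) for key in kwargs}
    try:
        for key, val in kwargs.items():
            if val is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = val
        yield
    finally:
        for key, val in old.items():
            if val is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = val

with environment_as_sketch(PYTHONPATH='foo:bar:baz'):
    print(os.environ['PYTHONPATH'])   # foo:bar:baz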
def shout(self, group, msg_p): """ Send message to a named group Destroys message after sending """ return lib.zyre_shout(self._as_parameter_, group, byref(czmq.zmsg_p.from_param(msg_p)))
Send message to a named group Destroys message after sending
def action(callback=None, name=None, path=None, methods=Method.GET, resource=None, tags=None, summary=None, middleware=None): # type: (Callable, Path, Path, Methods, Type[Resource], Tags, str, List[Any]) -> Operation """ Decorator to apply an action to a resource. An action is applied to a `detail` operation. """ # Generate action path path = path or '{key_field}' if name: path += name def inner(c): return Operation(c, path, methods, resource, tags, summary, middleware) return inner(callback) if callback else inner
Decorator to apply an action to a resource. An action is applied to a `detail` operation.
def fully_correlated_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False): """ This function handles conditioning of multi-output GPs in the case where the conditioning points are all fully correlated, in both the prior and posterior. :param Kmn: LM x N x P :param Kmm: LM x LM :param Knn: N x P or N x P x N x P :param f: data matrix, LM x 1 :param q_sqrt: 1 x LM x LM or 1 x ML :param full_cov: calculate covariance between inputs :param full_output_cov: calculate covariance between outputs :param white: use whitened representation :return: - mean: N x P - variance: N x P, N x P x P, P x N x N, N x P x N x P """ m, v = fully_correlated_conditional_repeat(Kmn, Kmm, Knn, f, full_cov=full_cov, full_output_cov=full_output_cov, q_sqrt=q_sqrt, white=white) return m[0, ...], v[0, ...]
This function handles conditioning of multi-output GPs in the case where the conditioning points are all fully correlated, in both the prior and posterior. :param Kmn: LM x N x P :param Kmm: LM x LM :param Knn: N x P or N x P x N x P :param f: data matrix, LM x 1 :param q_sqrt: 1 x LM x LM or 1 x ML :param full_cov: calculate covariance between inputs :param full_output_cov: calculate covariance between outputs :param white: use whitened representation :return: - mean: N x P - variance: N x P, N x P x P, P x N x N, N x P x N x P
def encode_basestring(s): """Return a JSON representation of a Python string """ if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): return ESCAPE_DCT[match.group(0)] return u'"' + ESCAPE.sub(replace, s) + u'"'
Return a JSON representation of a Python string
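A Python 3 sketch of the same escaping; ESCAPE and ESCAPE_DCT are module-level constants in the original, reconstructed here with the standard JSON escapes.

import re

ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_DCT = {'\\': '\\\\', '"': '\\"', '\b': '\\b', '\f': '\\f',
              '\n': '\\n', '\r': '\\r', '\t': '\\t'}
for i in range(0x20):
    # Any remaining control character becomes a \uXXXX escape.
    ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))

def encode_basestring_sketch(s):
    return '"' + ESCAPE.sub(lambda m: ESCAPE_DCT[m.group(0)], s) + '"'

print(encode_basestring_sketch('say "hi"\n'))   # "say \"hi\"\n"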
def _construct_axes_dict(self, axes=None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} d.update(kwargs) return d
Return an axes dictionary for myself.
def recipe_weinreb17(adata, log=True, mean_threshold=0.01, cv_threshold=2,
                     n_pcs=50, svd_solver='randomized', random_state=0,
                     copy=False):
    """Normalization and filtering as of [Weinreb17]_.

    Expects non-logarithmized data. If using logarithmized data, pass `log=False`.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    copy : bool (default: False)
        Return a copy if true.
    """
    from scipy.sparse import issparse
    if issparse(adata.X):
        raise ValueError('`recipe_weinreb17` does not support sparse matrices.')
    if copy:
        adata = adata.copy()
    if log:
        pp.log1p(adata)
    adata.X = pp.normalize_per_cell_weinreb16_deprecated(adata.X,
                                                         max_fraction=0.05,
                                                         mult_with_mean=True)
    gene_subset = filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)
    adata._inplace_subset_var(gene_subset)  # this modifies the object itself
    X_pca = pp.pca(pp.zscore_deprecated(adata.X),
                   n_comps=n_pcs, svd_solver=svd_solver,
                   random_state=random_state)
    # update adata
    adata.obsm['X_pca'] = X_pca
    return adata if copy else None
Normalization and filtering as of [Weinreb17]_. Expects non-logarithmized data. If using logarithmized data, pass `log=False`. Parameters ---------- adata : :class:`~anndata.AnnData` Annotated data matrix. copy : bool (default: False) Return a copy if true.
def create_mssql_pymssql(username, password, host, port, database, **kwargs): # pragma: no cover """ create an engine connected to a mssql database using pymssql. """ return create_engine( _create_mssql_pymssql(username, password, host, port, database), **kwargs )
create an engine connected to a mssql database using pymssql.
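The wrapped helper presumably builds a SQLAlchemy URL in the mssql+pymssql dialect; a hedged sketch with placeholder credentials:

def _create_mssql_pymssql_sketch(username, password, host, port, database):
    # Likely shape of the URL builder wrapped above; values are placeholders.
    return "mssql+pymssql://{}:{}@{}:{}/{}".format(
        username, password, host, port, database)

print(_create_mssql_pymssql_sketch("user", "secret", "db.example.com",
                                   1433, "mydb"))
# mssql+pymssql://user:secret@db.example.com:1433/mydb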
def _init_read_gz(self): """Initialize for reading a gzip compressed fileobj. """ self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) self.dbuf = b"" # taken from gzip.GzipFile with some alterations if self.__read(2) != b"\037\213": raise ReadError("not a gzip file") if self.__read(1) != b"\010": raise CompressionError("unsupported compression method") flag = ord(self.__read(1)) self.__read(6) if flag & 4: xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) self.read(xlen) if flag & 8: while True: s = self.__read(1) if not s or s == NUL: break if flag & 16: while True: s = self.__read(1) if not s or s == NUL: break if flag & 2: self.__read(2)
Initialize for reading a gzip compressed fileobj.
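A quick demo of the header fields being checked above: the gzip magic bytes, the deflate method byte, and the flag byte that gates the optional extra/name/comment sections.

import gzip
import io

raw = io.BytesIO()
with gzip.GzipFile(fileobj=raw, mode='wb', filename='demo.txt') as gz:
    gz.write(b'payload')

header = raw.getvalue()[:4]
print(header[:2] == b'\x1f\x8b')   # True: gzip magic number
print(header[2] == 8)              # True: deflate compression method
print(header[3] & 8 == 8)          # True: FNAME flag set (filename stored)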
def save_parameters(path, params=None): """Save all parameters into a file with the specified format. Currently hdf5 and protobuf formats are supported. Args: path : path or file object params (dict, optional): Parameters to be saved. Dictionary is of a parameter name (:obj:`str`) to :obj:`~nnabla.Variable`. """ _, ext = os.path.splitext(path) params = get_parameters(grad_only=False) if params is None else params if ext == '.h5': # TODO temporary work around to suppress FutureWarning message. import warnings warnings.simplefilter('ignore', category=FutureWarning) import h5py with h5py.File(path, 'w') as hd: for i, (k, v) in enumerate(iteritems(params)): hd[k] = v.d hd[k].attrs['need_grad'] = v.need_grad # To preserve order of parameters hd[k].attrs['index'] = i elif ext == '.protobuf': proto = nnabla_pb2.NNablaProtoBuf() for variable_name, variable in params.items(): parameter = proto.parameter.add() parameter.variable_name = variable_name parameter.shape.dim.extend(variable.shape) parameter.data.extend(numpy.array(variable.d).flatten().tolist()) parameter.need_grad = variable.need_grad with open(path, "wb") as f: f.write(proto.SerializeToString()) else: logger.critical('Only supported hdf5 or protobuf.') assert False logger.info("Parameter save ({}): {}".format(ext, path))
Save all parameters into a file with the specified format. Currently hdf5 and protobuf formats are supported. Args: path : path or file object params (dict, optional): Parameters to be saved. Dictionary is of a parameter name (:obj:`str`) to :obj:`~nnabla.Variable`.
def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"): """ Join two datasets to approximately find all pairs of rows whose distance are smaller than the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows caching of the transformed data when necessary. :param datasetA: One of the datasets to join. :param datasetB: Another dataset to join. :param threshold: The threshold for the distance of row pairs. :param distCol: Output column for storing the distance between each pair of rows. Use "distCol" as default value if it's not specified. :return: A joined dataset containing pairs of rows. The original rows are in columns "datasetA" and "datasetB", and a column "distCol" is added to show the distance between each pair. """ threshold = TypeConverters.toFloat(threshold) return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol)
Join two datasets to approximately find all pairs of rows whose distance are smaller than the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows caching of the transformed data when necessary. :param datasetA: One of the datasets to join. :param datasetB: Another dataset to join. :param threshold: The threshold for the distance of row pairs. :param distCol: Output column for storing the distance between each pair of rows. Use "distCol" as default value if it's not specified. :return: A joined dataset containing pairs of rows. The original rows are in columns "datasetA" and "datasetB", and a column "distCol" is added to show the distance between each pair.
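A hedged usage sketch adapted from the Spark ML MinHashLSH docs; it assumes an active SparkSession bound to the name spark, and the vectors are illustrative.

from pyspark.ml.feature import MinHashLSH
from pyspark.ml.linalg import Vectors
from pyspark.sql.functions import col

dfA = spark.createDataFrame(
    [(0, Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0])),
     (1, Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]))], ["id", "features"])
dfB = spark.createDataFrame(
    [(3, Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]))], ["id", "features"])

mh = MinHashLSH(inputCol="features", outputCol="hashes", numHashTables=5)
model = mh.fit(dfA)

# Keep only pairs whose Jaccard distance is below 0.9.
model.approxSimilarityJoin(dfA, dfB, 0.9, distCol="JaccardDistance") \
     .select(col("datasetA.id").alias("idA"),
             col("datasetB.id").alias("idB"),
             col("JaccardDistance")).show()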
def reboot(name, **kwargs):
    '''
    Reboot a domain via ACPI request

    :param name: domain name
    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.reboot <domain>
    '''
    conn = __get_conn(**kwargs)
    ret = _get_domain(conn, name).reboot(libvirt.VIR_DOMAIN_REBOOT_DEFAULT) == 0
    conn.close()
    return ret
Reboot a domain via ACPI request

:param name: domain name
:param connection: libvirt connection URI, overriding defaults

    .. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults

    .. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults

    .. versionadded:: 2019.2.0

CLI Example:

.. code-block:: bash

    salt '*' virt.reboot <domain>
def setup(name, path='log', enable_debug=False):
    """
    Prepare a NestedSetup.

    :param name: the channel name
    :param path: the path where the logs will be written
    :param enable_debug: do we want to save the message at the DEBUG level
    :return: a nested Setup
    """
    path_tmpl = os.path.join(path, '{name}_{level}.log')
    info = path_tmpl.format(name=name, level='info')
    warn = path_tmpl.format(name=name, level='warn')
    err = path_tmpl.format(name=name, level='err')
    crit = path_tmpl.format(name=name, level='crit')
    # a nested handler setup can be used to configure more complex setups
    setup = [
        # make sure we never bubble up to the stderr handler
        # if we run out of setup handling
        NullHandler(),
        # then write messages that are at least info to a logfile
        TimedRotatingFileHandler(info, level='INFO', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        # then write messages that are at least warnings to a logfile
        TimedRotatingFileHandler(warn, level='WARNING', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        # then write messages that are at least errors to a logfile
        TimedRotatingFileHandler(err, level='ERROR', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        # then write messages that are at least critical errors to a logfile
        TimedRotatingFileHandler(crit, level='CRITICAL', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
    ]
    if enable_debug:
        debug = path_tmpl.format(name=name, level='debug')
        setup.insert(1, TimedRotatingFileHandler(debug, level='DEBUG',
                                                 encoding='utf-8',
                                                 date_format='%Y-%m-%d'))
    if src_server is not None and smtp_server is not None \
            and smtp_port != 0 and len(dest_mails) != 0:
        mail_tmpl = '{name}_error@{src}'
        from_mail = mail_tmpl.format(name=name, src=src_server)
        subject = 'Error in {}'.format(name)
        # errors should then be delivered by mail and also be kept
        # in the application log, so we let them bubble up.
        setup.append(MailHandler(from_mail, dest_mails, subject,
                                 level='ERROR', bubble=True,
                                 server_addr=(smtp_server, smtp_port)))
    return NestedSetup(setup)
Prepare a NestedSetup.

:param name: the channel name
:param path: the path where the logs will be written
:param enable_debug: do we want to save the message at the DEBUG level
:return: a nested Setup
def get(self, request, bot_id, id, format=None): """ Get KikBot by id --- serializer: KikBotSerializer responseMessages: - code: 401 message: Not authenticated """ return super(KikBotDetail, self).get(request, bot_id, id, format)
Get KikBot by id --- serializer: KikBotSerializer responseMessages: - code: 401 message: Not authenticated
def extract_python_dict_from_x509(x509): """ Extract a python dictionary similar to the return value of :meth:`ssl.SSLSocket.getpeercert` from the given :class:`OpenSSL.crypto.X509` `x509` object. Note that by far not all attributes are included; only those required to use :func:`ssl.match_hostname` are extracted and put in the result. In the future, more attributes may be added. """ result = { "subject": ( (("commonName", x509.get_subject().commonName),), ) } for ext_idx in range(x509.get_extension_count()): ext = x509.get_extension(ext_idx) sn = ext.get_short_name() if sn != b"subjectAltName": continue data = pyasn1.codec.der.decoder.decode( ext.get_data(), asn1Spec=pyasn1_modules.rfc2459.SubjectAltName())[0] for name in data: dNSName = name.getComponentByPosition(2) if dNSName is None: continue if hasattr(dNSName, "isValue") and not dNSName.isValue: continue result.setdefault("subjectAltName", []).append( ("DNS", str(dNSName)) ) return result
Extract a python dictionary similar to the return value of :meth:`ssl.SSLSocket.getpeercert` from the given :class:`OpenSSL.crypto.X509` `x509` object. Note that by far not all attributes are included; only those required to use :func:`ssl.match_hostname` are extracted and put in the result. In the future, more attributes may be added.
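A hedged usage sketch: load a PEM certificate (the path is a placeholder) and check a hostname against the extracted dictionary using the standard library.

import ssl

import OpenSSL.crypto

with open("server.pem", "rb") as f:   # placeholder path
    x509 = OpenSSL.crypto.load_certificate(
        OpenSSL.crypto.FILETYPE_PEM, f.read())

cert_dict = extract_python_dict_from_x509(x509)
ssl.match_hostname(cert_dict, "example.com")   # raises CertificateError on mismatch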
def ax(self): """ The matplotlib axes that the visualizer draws upon (can also be a grid of multiple axes objects). The visualizer automatically creates an axes for the user if one has not been specified. """ if not hasattr(self, "_ax") or self._ax is None: self._ax = plt.gca() return self._ax
The matplotlib axes that the visualizer draws upon (can also be a grid of multiple axes objects). The visualizer automatically creates an axes for the user if one has not been specified.