def greater_than_obs_constraints(self):
    """get the names of the observations that
    are listed as greater than inequality constraints.
    Zero-weighted obs are skipped

    Returns
    -------
    pandas.Series : obsnme of observations that are non-zero weighted
        greater than constraints
    """
    obs = self.observation_data
    gt_obs = obs.loc[obs.apply(lambda x: self._is_greater_const(x.obgnme)
                               and x.weight != 0.0, axis=1), "obsnme"]
    return gt_obs
def knn_impute_with_argpartition(
        X,
        missing_mask,
        k,
        verbose=False,
        print_interval=100):
    """
    Fill in the given incomplete matrix using k-nearest neighbor imputation.

    This version is a simpler algorithm meant primarily for testing but
    surprisingly it's faster for many (but not all) dataset sizes,
    particularly when most of the columns are missing in any given row. The
    crucial bottleneck is the call to numpy.argpartition for every missing
    element in the array.

    Parameters
    ----------
    X : np.ndarray
        Matrix to fill of shape (n_samples, n_features)

    missing_mask : np.ndarray
        Boolean array of same shape as X

    k : int

    verbose : bool

    Returns a row-major copy of X with imputed values.
    """
    start_t = time.time()
    n_rows, n_cols = X.shape
    # put the missing mask in column major order since it's accessed
    # one column at a time
    missing_mask_column_major = np.asarray(missing_mask, order="F")
    X_row_major, D, effective_infinity = \
        knn_initialize(X, missing_mask, verbose=verbose)
    D_reciprocal = 1.0 / D
    dot = np.dot
    array = np.array
    argpartition = np.argpartition

    for i in range(n_rows):
        missing_indices = np.where(missing_mask[i])[0]

        if verbose and i % print_interval == 0:
            print(
                "Imputing row %d/%d with %d missing, elapsed time: %0.3f" % (
                    i + 1,
                    n_rows,
                    len(missing_indices),
                    time.time() - start_t))
        d = D[i, :]
        inv_d = D_reciprocal[i, :]
        for j in missing_indices:
            # move rows which lack this feature to be infinitely far away
            d_copy = d.copy()
            d_copy[missing_mask_column_major[:, j]] = effective_infinity

            neighbor_indices = argpartition(d_copy, k)[:k]
            if d_copy[neighbor_indices].max() >= effective_infinity:
                # if there aren't k rows with the feature of interest then
                # we need to filter out indices of points at infinite distance
                neighbor_indices = array([
                    neighbor_index
                    for neighbor_index in neighbor_indices
                    if d_copy[neighbor_index] < effective_infinity
                ])
            n_current_neighbors = len(neighbor_indices)

            if n_current_neighbors > 0:
                neighbor_weights = inv_d[neighbor_indices]
                X_row_major[i, j] = (
                    dot(X[:, j][neighbor_indices], neighbor_weights) /
                    neighbor_weights.sum()
                )
    return X_row_major
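A minimal, self-contained sketch (not from the source) of the numpy.argpartition call the docstring singles out as the bottleneck: it returns the indices of the k smallest distances without fully sorting the array.

import numpy as np

distances = np.array([0.9, 0.1, 0.5, 0.3, 0.7])
k = 2
# indices of the k smallest entries, in no particular order
nearest = np.argpartition(distances, k)[:k]
print(sorted(nearest.tolist()))  # [1, 3] -> distances 0.1 and 0.3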
def IsEquivalent(self, other):
    """Determines if 2 operating system artifacts are equivalent.

    This function compares the operating systems in order of:
    * name derived from product
    * family and version
    * family

    Args:
      other (OperatingSystemArtifact): operating system artifact attribute
          container to compare with.

    Returns:
      bool: True if the operating systems are considered equivalent, False if
          the most specific criteria do not match, or no criteria are
          available.
    """
    if self.name and other.name:
        return self.name == other.name

    if self.name:
        self_family, self_version_tuple = self._FAMILY_AND_VERSION_PER_NAME.get(
            self.name, self._DEFAULT_FAMILY_AND_VERSION)
        return (
            self_family == other.family and
            self_version_tuple == other.version_tuple)

    if self.family and self.version:
        if other.name:
            other_family, other_version_tuple = (
                self._FAMILY_AND_VERSION_PER_NAME.get(
                    other.name, self._DEFAULT_FAMILY_AND_VERSION))
        else:
            other_family = other.family
            other_version_tuple = other.version_tuple

        return (
            self.family == other_family and
            self.version_tuple == other_version_tuple)

    if self.family:
        if other.name:
            other_family, _ = self._FAMILY_AND_VERSION_PER_NAME.get(
                other.name, self._DEFAULT_FAMILY_AND_VERSION)
        else:
            other_family = other.family

        return self.family == other_family

    return False
def _get_group_dataframes(self):
    """
    Get group dataframes

    Returns
    -------
    out : tuple or generator
        Group dataframes
    """
    if isinstance(self.data, GroupedDataFrame):
        grouper = self.data.groupby()
        # groupby on categorical columns uses the categories
        # even if they are not present in the data. This
        # leads to empty groups. We exclude them.
        return (gdf for _, gdf in grouper if not gdf.empty)
    else:
        return (self.data, )
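A hedged illustration of the behavior the inline comment describes; the column names are made up and `observed=False` mirrors the historical pandas default. Grouping on a categorical column yields a group for every category, including unused ones, so the empty groups must be filtered out:

import pandas as pd

df = pd.DataFrame({
    'grade': pd.Categorical(['a', 'a'], categories=['a', 'b']),
    'score': [1, 2],
})
grouper = df.groupby('grade', observed=False)
print(sum(1 for _, gdf in grouper))                   # 2, includes empty 'b'
print(sum(1 for _, gdf in grouper if not gdf.empty))  # 1 after filtering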
def matches():
    """This resource returns a list of the currently running WvW matches,
    with the participating worlds included in the result.

    Further details about a match can be requested using the
    ``match_details`` function.

    The response is a list of match objects, each of which contains the
    following properties:

    wvw_match_id (string):
        The WvW match id.

    red_world_id (number):
        The world id of the red world.

    blue_world_id (number):
        The world id of the blue world.

    green_world_id (number):
        The world id of the green world.

    start_time (datetime):
        A timestamp of when the match started.

    end_time (datetime):
        A timestamp of when the match ends.
    """
    wvw_matches = get_cached("wvw/matches.json", False).get("wvw_matches")
    for match in wvw_matches:
        match["start_time"] = parse_datetime(match["start_time"])
        match["end_time"] = parse_datetime(match["end_time"])
    return wvw_matches
def create(self, container, instances=None, map_name=None, **kwargs):
    """
    Creates container instances for a container configuration.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance name to create. If not specified, will create
      all instances as specified in the configuration (or just one default
      instance).
    :type instances: tuple | list
    :param map_name: Container map name. Optional - if not provided the
      default map is used.
    :type map_name: unicode | str
    :param kwargs: Additional kwargs. If multiple actions are resulting from
      this, they will only be applied to the main container creation.
    :return: Return values of created containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    return self.run_actions('create', container, instances=instances,
                            map_name=map_name, **kwargs)
def images_grouped_by_type(self):
    """
    :return: A generator yielding 2-tuples of (type, [images]) where
        adjacent images that share the same type are grouped together.
    """
    type = -1
    images = []
    for wc in self:
        if wc.type != type:
            if images:
                yield (type, images)
            # start a new group for the new type
            type = wc.type
            images = []
        images.append(wc.image)
    if images:
        yield (type, images)
def process(self):
    '''Run all tag processors.'''
    for tag_proc in self.tag_procs:
        before_count = self.entry_count
        self.run_tag_processor(tag_proc)
        after_count = self.entry_count
        if self.verbose:
            print('Inserted %d entries for "%s" tag processor' % (
                after_count - before_count, tag_proc), file=sys.stderr)
    if self.verbose:
        print('Inserted %d entries overall' % self.entry_count,
              file=sys.stderr)
def replace_first_key_in_makefile(buf, key, replacement, outfile=None):
    '''
    Replaces first line in 'buf' matching 'key' with 'replacement'.
    Optionally, writes out this new buffer into 'outfile'.

    Returns: Buffer after replacement has been done
    '''
    regexp = re.compile(r'''
    \n\s*                # there might be some leading spaces
    (                    # start group to return
    (?:{0}\s*)           # placeholder for tags to detect '\S+' == all
    \s*:*=\s*            # optional spaces, optional colon, = , optional spaces
    .*                   # the value
    )                    # end group to return
    '''.format(key), re.VERBOSE)
    matches = regexp.findall(buf)
    # re.findall returns an empty list (never None) when nothing matches
    if not matches:
        msg = "Could not find key = {0} in the provided buffer. "\
              "Pattern used = {1}".format(key, regexp.pattern)
        raise ValueError(msg)

    # Only replace the first occurrence
    newbuf = regexp.sub(replacement, buf, count=1)
    if outfile is not None:
        write_text_file(outfile, newbuf)

    return newbuf
def otp(password, seed, sequence):
    """
    Calculates a one-time password hash using the given password, seed, and
    sequence number and returns it.
    Uses the MD4/sixword algorithm as supported by TACACS+ servers.

    :type password: str
    :param password: A password.
    :type seed: str
    :param seed: A cryptographic seed.
    :type sequence: int
    :param sequence: A sequence number.
    :rtype: string
    :return: A hash.
    """
    if len(password) not in list(range(4, 64)):
        raise ValueError('passphrase length')
    if len(seed) not in list(range(1, 17)):
        raise ValueError('seed length')
    for x in seed:
        if x not in _VALIDSEEDCHARACTERS:
            raise ValueError('seed composition')
    if sequence < 0:
        raise ValueError('sequence')

    # Pycryptodome only supports byte strings.
    seed = seed.encode('utf-8')
    password = password.encode('utf-8')

    # Discard the first <sequence> keys
    thehash = MD4.new(seed + password).digest()
    thehash = _fold_md4_or_md5(thehash)
    for i in range(0, sequence):
        thehash = _fold_md4_or_md5(MD4.new(thehash).digest())

    # Generate the result
    return _sixword_from_raw(thehash)
def not_unless(*desired_flags):
    """
    Assert that the decorated function can only be called if the
    desired_flags are active.

    Note that, unlike :func:`when`, this does **not** trigger the decorated
    function if the flags match.  It **only** raises an exception if the
    function is called when the flags do not match.

    This is primarily for informational purposes and as a guard clause.
    """
    def _decorator(func):
        action_id = _action_id(func)
        short_action_id = _short_action_id(func)

        @wraps(func)
        def _wrapped(*args, **kwargs):
            active_flags = get_flags()
            missing_flags = [flag for flag in desired_flags
                             if flag not in active_flags]
            if missing_flags:
                hookenv.log('%s called before flag%s: %s' % (
                    short_action_id,
                    's' if len(missing_flags) > 1 else '',
                    ', '.join(missing_flags)), hookenv.WARNING)
            return func(*args, **kwargs)
        _wrapped._action_id = action_id
        _wrapped._short_action_id = short_action_id
        return _wrapped
    return _decorator
def _convert(self, desired_type: Type[T], obj: S, logger: Logger,
             options: Dict[str, Dict[str, Any]]) -> T:
    """
    Apply the converters of the chain in order to produce the desired result.
    Only the last converter will see the 'desired type', the others will be
    asked to produce their declared to_type.

    :param desired_type:
    :param obj:
    :param logger:
    :param options:
    :return:
    """
    for converter in self._converters_list[:-1]:
        # convert into each converter's destination type
        obj = converter.convert(converter.to_type, obj, logger, options)

    # the last converter in the chain should convert to desired type
    return self._converters_list[-1].convert(desired_type, obj, logger,
                                             options)
def default_setup():
    """The default API setup for lxc4u

    This is the API that you access globally from lxc4u.
    """
    service = LXCService
    lxc_types = dict(LXC=LXC, LXCWithOverlays=LXCWithOverlays,
                     __default__=UnmanagedLXC)
    loader = LXCLoader(lxc_types, service)
    manager = LXCManager(loader, service)
    return LXCAPI(manager=manager, service=service)
def itemcounts(self, **kwargs):
    '''Returns a dict where the keys are the keys of the container.
    The values are the *lengths* of the value sequences stored
    in this container.
    '''
    return {k: len(v) for k, v in self._dict.items()}
def handle_message(self, msg):
    """Handle messages of different types in the context of a path."""
    self.messages.append(
        {
            "type": msg.category,
            "module": msg.module,
            "obj": msg.obj,
            "line": msg.line,
            "column": msg.column,
            "path": msg.path,
            "symbol": msg.symbol,
            "message": html.escape(msg.msg or "", quote=False),
            "message-id": msg.msg_id,
        }
    )
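A quick check (illustrative, not from the source) of the html.escape(..., quote=False) call above: angle brackets and ampersands are escaped while quotes are left alone.

import html

print(html.escape('a < "b" & c', quote=False))  # a &lt; "b" &amp; c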
def checker(location, receiver):
    """Construct a function that checks a directory for process configuration

    The function checks for additions or removals of JSON process
    configuration files and calls the appropriate receiver methods.

    :param location: string, the directory to monitor
    :param receiver: IEventReceiver
    :returns: a function with no parameters
    """
    path = filepath.FilePath(location)
    files = set()
    filesContents = {}

    def _check(path):
        currentFiles = set(fname for fname in os.listdir(location)
                           if not fname.endswith('.new'))
        removed = files - currentFiles
        added = currentFiles - files
        for fname in added:
            contents = path.child(fname).getContent()
            filesContents[fname] = contents
            receiver.add(fname, contents)
        for fname in removed:
            receiver.remove(fname)
        same = currentFiles & files
        for fname in same:
            newContents = path.child(fname).getContent()
            oldContents = filesContents[fname]
            if newContents == oldContents:
                continue
            receiver.remove(fname)
            filesContents[fname] = newContents
            receiver.add(fname, newContents)
        files.clear()
        files.update(currentFiles)
    return functools.partial(_check, path)
def _mainthread_poll_readable(self):
    """Searches for readable client sockets. These sockets are then put in a
    subthread to be handled by _handle_readable
    """
    events = self._recv_selector.select(self.block_time)
    for key, mask in events:
        if mask == selectors.EVENT_READ:
            self._recv_selector.unregister(key.fileobj)
            self._threads_limiter.start_thread(
                target=self._subthread_handle_readable,
                args=(key.fileobj,))
def monitor(args):
    ''' Retrieve status of jobs submitted from a given workspace, as a list
    of TSV lines sorted by descending order of job submission date'''

    r = fapi.list_submissions(args.project, args.workspace)
    fapi._check_response_code(r, 200)

    statuses = sorted(r.json(), key=lambda k: k['submissionDate'],
                      reverse=True)
    header = '\t'.join(list(statuses[0].keys()))

    def expander(thing):
        if isinstance(thing, dict):
            entityType = thing.get("entityType", None)
            if entityType:
                return "{0}:{1}".format(entityType, thing['entityName'])
        return "{0}".format(thing)

    # FIXME: this will generally return different column order between
    # Python 2/3
    return [header] + ['\t'.join(map(expander, v.values()))
                       for v in statuses]
def unicode2auto(unicode_text, encode_text):
    """
    This function will convert unicode (first argument) text into another
    encoding by automatically finding the encoding (from the available
    encodings) using the sample encoded text in the second argument of this
    function.

    unicode_text : Pass the unicode string which has to be converted into
                   another encoding.

    encode_text : Pass a sample encoded string to identify a suitable
                  encoding for it.

    This function tries to identify the encoding among the available
    encodings. If it finds one, then it will convert unicode_text into that
    encoding's string.

    Author : Arulalan.T

    08.08.2014

    """
    _all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes()
    # get unique word which falls under any one of available encodes from
    # user passed text lines
    unique_chars = _get_unique_ch(encode_text, _all_common_encodes_)
    # count common encode chars
    clen = len(_all_common_encodes_)
    msg = "Sorry, couldn't find encode :-(\n"
    msg += 'Need more words to find unique encode out side of %d ' % clen
    msg += 'common compound characters'
    if not unique_chars:
        print(msg)
        return ''
    # end of if not unique_chars:

    for encode_name, encode_keys in _all_unique_encodes_:
        if not len(encode_keys):
            continue
        for ch in encode_keys:
            # check whether the encode char is present in word
            if ch in unique_chars:
                # found encode
                print(("Found encode : ", encode_name))
                encode = _all_encodes_[encode_name]
                return unicode2encode(unicode_text, encode)
        # end of for ch in encode_keys:
    else:
        print(msg)
        return ''
def _append_utc_datetime(self, tag, format, ts, precision, header):
    """(Internal) Append formatted datetime."""
    if ts is None:
        t = datetime.datetime.utcnow()
    elif type(ts) is float:
        t = datetime.datetime.utcfromtimestamp(ts)
    else:
        t = ts

    s = t.strftime(format)
    if precision == 3:
        s += ".%03d" % (t.microsecond / 1000)
    elif precision == 6:
        s += ".%06d" % t.microsecond
    elif precision != 0:
        raise ValueError("Precision should be one of 0, 3 or 6 digits")

    return self.append_pair(tag, s, header=header)
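A small sketch of the precision handling above: strftime has no millisecond directive, so the 3-digit case derives milliseconds from the microsecond field. The timestamp and format here are illustrative only.

import datetime

t = datetime.datetime(2020, 1, 2, 3, 4, 5, 678901)
s = t.strftime("%Y%m%d-%H:%M:%S")
s += ".%03d" % (t.microsecond / 1000)  # %d truncates toward zero
print(s)  # 20200102-03:04:05.678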
def savecsv(filename, datadict, mode="w"):
    """Save a dictionary of data to CSV."""
    if mode == "a":
        header = False
    else:
        header = True
    with open(filename, mode) as f:
        _pd.DataFrame(datadict).to_csv(f, index=False, header=header)
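Hypothetical usage of savecsv (assuming pandas is imported as _pd in its module and "results.csv" is just a placeholder name): appending with mode="a" suppresses the header, so a file can be built up in batches.

savecsv("results.csv", {"x": [1, 2], "y": [3, 4]})      # new file, with header
savecsv("results.csv", {"x": [5], "y": [6]}, mode="a")  # appended, no header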
def put(self, resource_id):
    """Return the JSON representation of a new resource created or updated
    through an HTTP PUT call.

    If resource_id is not provided, it is assumed the primary key field is
    included and a totally new resource is created. Otherwise, the existing
    resource referred to by *resource_id* is updated with the provided JSON
    data. This method is idempotent.

    :returns: ``HTTP 201`` if a new resource is created
    :returns: ``HTTP 200`` if a resource is updated
    :returns: ``HTTP 400`` if the request is malformed or missing data
    """
    resource = self.__model__.query.get(resource_id)
    if resource:
        error_message = is_valid_method(self.__model__, resource)
        if error_message:
            raise BadRequestException(error_message)
        resource.update(request.json)
        db.session().merge(resource)
        db.session().commit()
        return jsonify(resource)

    resource = self.__model__(**request.json)  # pylint: disable=not-callable
    error_message = is_valid_method(self.__model__, resource)
    if error_message:
        raise BadRequestException(error_message)
    db.session().add(resource)
    db.session().commit()
    return self._created_response(resource)
def setup_menu_actions(self):
    """Setup and update the menu actions."""
    self.recent_notebook_menu.clear()
    self.recent_notebooks_actions = []
    if self.recent_notebooks:
        for notebook in self.recent_notebooks:
            name = notebook
            action = \
                create_action(self,
                              name,
                              icon=ima.icon('filenew'),
                              triggered=lambda v, path=notebook:
                                  self.create_new_client(filename=path))
            self.recent_notebooks_actions.append(action)
        self.recent_notebooks_actions += \
            [None, self.clear_recent_notebooks_action]
    else:
        self.recent_notebooks_actions = \
            [self.clear_recent_notebooks_action]
    add_actions(self.recent_notebook_menu,
                self.recent_notebooks_actions)
    self.update_notebook_actions()
def _MergeDifferentId(self):
    """Tries to merge all possible combinations of entities.

    This tries to merge every entity in the old schedule with every entity in
    the new schedule. Unlike _MergeSameId, the ids do not need to match.
    However, _MergeDifferentId is much slower than _MergeSameId.

    This method makes use of various methods like _Merge and _Migrate which
    are not implemented in the abstract DataSetMerger class. These methods
    should be overwritten in a subclass to allow _MergeSameId to work with
    different entity types.

    Returns:
      The number of merged entities.
    """
    # TODO: The same entity from A could merge with multiple from B.
    # This should either generate an error or should be prevented from
    # happening.
    for a in self._GetIter(self.feed_merger.a_schedule):
        for b in self._GetIter(self.feed_merger.b_schedule):
            try:
                self._Add(a, b, self._MergeEntities(a, b))
                self._num_merged += 1
            except MergeError:
                continue

    for a in self._GetIter(self.feed_merger.a_schedule):
        if a not in self.feed_merger.a_merge_map:
            self._num_not_merged_a += 1
            newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))
            self._Add(a, None,
                      self._Migrate(a, self.feed_merger.a_schedule, newid))

    for b in self._GetIter(self.feed_merger.b_schedule):
        if b not in self.feed_merger.b_merge_map:
            self._num_not_merged_b += 1
            newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))
            self._Add(None, b,
                      self._Migrate(b, self.feed_merger.b_schedule, newid))

    return self._num_merged
def cudnnSetTensor4dDescriptor(tensorDesc, format, dataType, n, c, h, w):
    """
    Initialize a previously created Tensor 4D object.

    This function initializes a previously created Tensor4D descriptor
    object. The strides of the four dimensions are inferred from the format
    parameter and set in such a way that the data is contiguous in memory
    with no padding between dimensions.

    Parameters
    ----------
    tensorDesc : cudnnTensorDescriptor
        Handle to a previously created tensor descriptor.
    format : cudnnTensorFormat
        Type of format.
    dataType : cudnnDataType
        Data type.
    n : int
        Number of images.
    c : int
        Number of feature maps per image.
    h : int
        Height of each feature map.
    w : int
        Width of each feature map.
    """
    status = _libcudnn.cudnnSetTensor4dDescriptor(tensorDesc, format,
                                                  dataType, n, c, h, w)
    cudnnCheckStatus(status)
def _group_matching(tlist, cls):
    """Groups Tokens that have beginning and end."""
    opens = []
    tidx_offset = 0
    for idx, token in enumerate(list(tlist)):
        tidx = idx - tidx_offset

        if token.is_whitespace:
            # ~50% of tokens will be whitespace. Checking early for them
            # will avoid 3 comparisons, but then add 1 more comparison
            # for the other ~50% of tokens...
            continue

        if token.is_group and not isinstance(token, cls):
            # Check inside previously grouped (i.e. parenthesis) if group
            # of different type is inside (i.e., case). Though ideally it
            # should check for all open/close tokens at once to avoid
            # recursion
            _group_matching(token, cls)
            continue

        if token.match(*cls.M_OPEN):
            opens.append(tidx)

        elif token.match(*cls.M_CLOSE):
            try:
                open_idx = opens.pop()
            except IndexError:
                # this indicates invalid sql and unbalanced tokens.
                # instead of break, continue in case other "valid" groups
                # exist
                continue
            close_idx = tidx
            tlist.group_tokens(cls, open_idx, close_idx)
            tidx_offset += close_idx - open_idx
def all_address_target_pairs(cls, address_families):
    """Implementation of `address_target_pairs_from_address_families()`
    which does no filtering."""
    addr_tgt_pairs = []
    for af in address_families:
        addr_tgt_pairs.extend(af.addressables.items())
    return addr_tgt_pairs
def load_json_file(filepath):
    """
    <Purpose>
      Deserialize a JSON object from a file containing the object.

    <Arguments>
      filepath:
        Absolute path of JSON file.

    <Exceptions>
      securesystemslib.exceptions.FormatError: If 'filepath' is improperly
      formatted.

      securesystemslib.exceptions.Error: If 'filepath' cannot be deserialized
      to a Python object.

      IOError in case of runtime IO exceptions.

    <Side Effects>
      None.

    <Return>
      Deserialized object.  For example, a dictionary.
    """
    # Making sure that the format of 'filepath' is a path string.
    # securesystemslib.exceptions.FormatError is raised on incorrect format.
    securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

    deserialized_object = None

    # The file is most likely gzipped.
    if filepath.endswith('.gz'):
        logger.debug('gzip.open(' + str(filepath) + ')')
        fileobject = six.StringIO(gzip.open(filepath).read().decode('utf-8'))
    else:
        logger.debug('open(' + str(filepath) + ')')
        fileobject = open(filepath)

    try:
        deserialized_object = json.load(fileobject)
    except (ValueError, TypeError) as e:
        raise securesystemslib.exceptions.Error('Cannot deserialize to a'
                                                ' Python object: ' +
                                                repr(filepath))
    else:
        fileobject.close()
        return deserialized_object
    finally:
        fileobject.close()
def _create_ring(self, nodes):
    """Generate a ketama compatible continuum/ring.
    """
    _weight_sum = 0
    for node_conf in self._nodes.values():
        _weight_sum += node_conf['weight']
    self._weight_sum = _weight_sum

    _distribution = Counter()
    _keys = []
    _ring = {}
    for node_name, node_conf in self._nodes.items():
        for h in self._hashi_weight_generator(node_name, node_conf):
            _ring[h] = node_name
            insort(_keys, h)
            _distribution[node_name] += 1
    self._distribution = _distribution
    self._keys = _keys
    self._ring = _ring
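A minimal sketch of the insort bookkeeping above: bisect.insort keeps the ring's hash keys sorted as they are added, so a later lookup can binary-search for the first key clockwise of an item's hash. The hash values here are illustrative only.

from bisect import bisect, insort

keys = []
for h in [42, 7, 99, 23]:
    insort(keys, h)
print(keys)  # [7, 23, 42, 99]
# first ring key clockwise of hash 50, wrapping around the ring
print(keys[bisect(keys, 50) % len(keys)])  # 99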
def _cmdline(argv=None):
    """ Parse command line arguments.

    By default, sys.argv is parsed.
    """
    parser = ArgumentParser()
    parser.add_argument("--checkout", default="HEAD",
                        help="branch, tag, or commit to use [HEAD]")
    parser.add_argument("--name", default=_NAME,
                        help="application name [{:s}]".format(_NAME))
    parser.add_argument("--repo", default=_REPO,
                        help="source repo [{:s}]".format(_REPO))
    parser.add_argument("--test", action="store_true",
                        help="run test suite after installation")
    parser.add_argument("root", help="installation root")
    return parser.parse_args(argv)
def get_fpath(self, cachedir=None, cfgstr=None, ext=None):
    """
    Ignore:
        fname = _fname
        cfgstr = _cfgstr
    """
    _dpath = self.get_cachedir(cachedir)
    _fname = self.get_prefix()
    _cfgstr = self.get_cfgstr() if cfgstr is None else cfgstr
    _ext = self.ext if ext is None else ext
    fpath = _args2_fpath(_dpath, _fname, _cfgstr, _ext)
    return fpath
async def build_get_revoc_reg_request(submitter_did: Optional[str],
                                      revoc_reg_def_id: str,
                                      timestamp: int) -> str:
    """
    Builds a GET_REVOC_REG request. Request to get the accumulated state of
    the Revocation Registry by ID. The state is defined by the given
    timestamp.

    :param submitter_did: (Optional) DID of the read request sender (if not
        provided then default Libindy DID will be used).
    :param revoc_reg_def_id: ID of the corresponding Revocation Registry
        Definition in ledger.
    :param timestamp: Requested time represented as a total number of seconds
        from Unix Epoch
    :return: Request result as json.
    """

    logger = logging.getLogger(__name__)
    logger.debug("build_get_revoc_reg_request: >>> submitter_did: %r, "
                 "revoc_reg_def_id: %r, timestamp: %r",
                 submitter_did,
                 revoc_reg_def_id,
                 timestamp)

    if not hasattr(build_get_revoc_reg_request, "cb"):
        logger.debug("build_get_revoc_reg_request: Creating callback")
        build_get_revoc_reg_request.cb = create_cb(
            CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    c_submitter_did = c_char_p(submitter_did.encode('utf-8')) \
        if submitter_did is not None else None
    c_revoc_reg_def_id = c_char_p(revoc_reg_def_id.encode('utf-8'))
    c_timestamp = c_int64(timestamp)

    request_json = await do_call('indy_build_get_revoc_reg_request',
                                 c_submitter_did,
                                 c_revoc_reg_def_id,
                                 c_timestamp,
                                 build_get_revoc_reg_request.cb)

    res = request_json.decode()
    logger.debug("build_get_revoc_reg_request: <<< res: %r", res)
    return res
def get_device_name_list():
    """Returns a list of device names installed."""
    dev_names = ctypes.create_string_buffer(1024)
    pydaq.DAQmxGetSysDevNames(dev_names, len(dev_names))
    return dev_names.value.split(', ')
def register_schemas_dir(self, directory):
    """Recursively register all json-schemas in a directory.

    :param directory: directory path.
    """
    for root, dirs, files in os.walk(directory):
        dir_path = os.path.relpath(root, directory)
        if dir_path == '.':
            dir_path = ''
        for file_ in files:
            if file_.lower().endswith(('.json')):
                schema_name = os.path.join(dir_path, file_)
                if schema_name in self.schemas:
                    raise JSONSchemaDuplicate(
                        schema_name,
                        self.schemas[schema_name],
                        directory
                    )
                self.schemas[schema_name] = os.path.abspath(directory)
def copy(self, source, destination, recursive=False, use_sudo=False):
    """
    Copy a file or directory
    """
    func = use_sudo and run_as_root or self.run
    options = '-r ' if recursive else ''
    func('/bin/cp {0}{1} {2}'.format(options, quote(source),
                                     quote(destination)))
def load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ):
    ''' Loads rules that can be used to convert from Filosoft's mrf format to
        syntactic analyzer's format. Returns a dict containing rules.

        Expects that each line in the input file contains a single rule, and
        that different parts of the rule are separated by @ symbols, e.g.

            1@_S_ ?@Substantiiv apellatiiv@_S_ com @Noun common@Nc@NCSX@kesk-
            32@_H_ ?@Substantiiv prooprium@_S_ prop @Noun proper@Np@NPCSX@Kesk-
            313@_A_@Adjektiiv positiiv@_A_ pos@Adjective positive@A-p@ASX@salkus

        Only the 2nd element and the 4th element are extracted from each
        line; the 2nd element will be the key of the dict entry, and the 4th
        element will be added to the value of the dict entry (the value is a
        list of strings, because one Filosoft's analysis could be mapped to
        multiple syntactic analyzer's analyses);

        Lines that have ¤ in the beginning of the line will be skipped;
    '''
    rules = {}
    in_f = codecs.open(rulesFile, mode='r', encoding='utf-8')
    for line in in_f:
        line = line.rstrip()
        if line.startswith('¤'):
            continue
        parts = line.split('@')
        if len(parts) < 4:
            raise Exception(' Unexpected format of the line: ', line)
        if parts[1] not in rules:
            rules[parts[1]] = []
        rules[parts[1]].append( parts[3] )
    in_f.close()
    return rules
def pip_install_package(source_name, pip_version=None, python_version=None,
                        mode=InstallMode.min_deps, release=False):
    """Install a pip-compatible python package as a rez package.
    Args:
        source_name (str): Name of package or archive/url containing the pip
            package source. This is the same as the arg you would pass to
            the 'pip install' command.
        pip_version (str or `Version`): Version of pip to use to perform the
            install, uses latest if None.
        python_version (str or `Version`): Python version to use to perform
            the install, and subsequently have the resulting rez package
            depend on.
        mode (`InstallMode`): Installation mode, determines how dependencies
            are managed.
        release (bool): If True, install as a released package; otherwise, it
            will be installed as a local package.

    Returns:
        2-tuple:
            List of `Variant`: Installed variants;
            List of `Variant`: Skipped variants (already installed).
    """
    installed_variants = []
    skipped_variants = []

    pip_exe, context = find_pip(pip_version, python_version)

    # TODO: should check if packages_path is writable before continuing
    # with pip
    #
    packages_path = (config.release_packages_path if release
                     else config.local_packages_path)

    tmpdir = mkdtemp(suffix="-rez", prefix="pip-")
    stagingdir = os.path.join(tmpdir, "rez_staging")
    stagingsep = "".join([os.path.sep, "rez_staging", os.path.sep])

    destpath = os.path.join(stagingdir, "python")
    binpath = os.path.join(stagingdir, "bin")
    incpath = os.path.join(stagingdir, "include")
    datapath = stagingdir

    if context and config.debug("package_release"):
        buf = StringIO()
        print >> buf, "\n\npackage download environment:"
        context.print_info(buf)
        _log(buf.getvalue())

    # Build pip commandline
    cmd = [pip_exe, "install",
           "--install-option=--install-lib=%s" % destpath,
           "--install-option=--install-scripts=%s" % binpath,
           "--install-option=--install-headers=%s" % incpath,
           "--install-option=--install-data=%s" % datapath]

    if mode == InstallMode.no_deps:
        cmd.append("--no-deps")
    cmd.append(source_name)

    _cmd(context=context, command=cmd)
    _system = System()

    # Collect resulting python packages using distlib
    distribution_path = DistributionPath([destpath], include_egg=True)
    distributions = [d for d in distribution_path.get_distributions()]

    for distribution in distribution_path.get_distributions():
        requirements = []
        if distribution.metadata.run_requires:
            # Handle requirements. Currently handles conditional environment
            # based requirements and normal requirements
            # TODO: Handle optional requirements?
            for requirement in distribution.metadata.run_requires:
                if "environment" in requirement:
                    if interpret(requirement["environment"]):
                        requirements.extend(
                            _get_dependencies(requirement, distributions))
                elif "extra" in requirement:
                    # Currently ignoring optional requirements
                    pass
                else:
                    requirements.extend(
                        _get_dependencies(requirement, distributions))

        tools = []
        src_dst_lut = {}

        for installed_file in distribution.list_installed_files(
                allow_fail=True):
            source_file = os.path.normpath(
                os.path.join(destpath, installed_file[0]))

            if os.path.exists(source_file):
                destination_file = installed_file[0].split(stagingsep)[1]
                exe = False

                if is_exe(source_file) and \
                        destination_file.startswith(
                            "%s%s" % ("bin", os.path.sep)):
                    _, _file = os.path.split(destination_file)
                    tools.append(_file)
                    exe = True

                data = [destination_file, exe]
                src_dst_lut[source_file] = data
            else:
                _log("Source file does not exist: " + source_file + "!")

        def make_root(variant, path):
            """Using distlib to iterate over all installed files of the
            current distribution to copy files to the target directory of the
            rez package variant
            """
            for source_file, data in src_dst_lut.items():
                destination_file, exe = data
                destination_file = os.path.normpath(
                    os.path.join(path, destination_file))

                if not os.path.exists(os.path.dirname(destination_file)):
                    os.makedirs(os.path.dirname(destination_file))

                shutil.copyfile(source_file, destination_file)
                if exe:
                    shutil.copystat(source_file, destination_file)

        # determine variant requirements
        # TODO detect if platform/arch/os necessary, no if pure python
        variant_reqs = []
        variant_reqs.append("platform-%s" % _system.platform)
        variant_reqs.append("arch-%s" % _system.arch)
        variant_reqs.append("os-%s" % _system.os)

        if context is None:
            # since we had to use system pip, we have to assume system
            # python version
            py_ver = '.'.join(map(str, sys.version_info[:2]))
        else:
            python_variant = context.get_resolved_package("python")
            py_ver = python_variant.version.trim(2)

        variant_reqs.append("python-%s" % py_ver)

        name, _ = parse_name_and_version(distribution.name_and_version)
        name = distribution.name[0:len(name)].replace("-", "_")

        with make_package(name, packages_path, make_root=make_root) as pkg:
            pkg.version = distribution.version
            if distribution.metadata.summary:
                pkg.description = distribution.metadata.summary

            pkg.variants = [variant_reqs]
            if requirements:
                pkg.requires = requirements

            commands = []
            commands.append("env.PYTHONPATH.append('{root}/python')")

            if tools:
                pkg.tools = tools
                commands.append("env.PATH.append('{root}/bin')")

            pkg.commands = '\n'.join(commands)

        installed_variants.extend(pkg.installed_variants or [])
        skipped_variants.extend(pkg.skipped_variants or [])

    # cleanup
    shutil.rmtree(tmpdir)

    return installed_variants, skipped_variants
def extents(triangles, areas=None):
    """
    Return the 2D bounding box size of each triangle.

    Parameters
    ----------
    triangles : (n, 3, 3) float
      Triangles in space
    areas : (n,) float
      Optional area of input triangles

    Returns
    ----------
    box : (n, 2) float
      The size of each triangle's 2D oriented bounding box
    """
    triangles = np.asanyarray(triangles, dtype=np.float64)
    if not util.is_shape(triangles, (-1, 3, 3)):
        raise ValueError('Triangles must be (n,3,3)!')

    if areas is None:
        areas = area(triangles=triangles, sum=False)

    # the edge vectors which define the triangle
    a = triangles[:, 1] - triangles[:, 0]
    b = triangles[:, 2] - triangles[:, 0]

    # length of the edge vectors
    length_a = (a**2).sum(axis=1)**.5
    length_b = (b**2).sum(axis=1)**.5

    # which edges are acceptable length
    nonzero_a = length_a > tol.merge
    nonzero_b = length_b > tol.merge

    # find the two heights of the triangle
    # essentially this is the side length of an
    # oriented bounding box, per triangle
    box = np.zeros((len(triangles), 2), dtype=np.float64)
    box[:, 0][nonzero_a] = (areas[nonzero_a] * 2) / length_a[nonzero_a]
    box[:, 1][nonzero_b] = (areas[nonzero_b] * 2) / length_b[nonzero_b]

    return box
def pset(self, n):
    """
    Convert the nodes nsprefixes into a set.
    @param n: A node.
    @type n: L{Element}
    @return: A set of namespaces.
    @rtype: set
    """
    s = set()
    for ns in n.nsprefixes.items():
        if self.permit(ns):
            s.add(ns[1])
    return s
def run_blast_commands(ncbicommandline_method, **keywords):
    """Runs a blastplus/tblastn search, collects the result and passes it on
    as a temporary XML file.
    """
    # temporary file for output
    blast_out_tmp = tempfile.NamedTemporaryFile(mode="w+", delete=False)
    keywords['out'] = blast_out_tmp.name

    # unpack query temp file object
    query_file_object_tmp = keywords['query']
    keywords['query'] = query_file_object_tmp.name

    stderr = ''
    error_string = ''
    try:
        # formatting blastplus command
        blastplusx_cline = ncbicommandline_method(**keywords)
        stdout, stderr = blastplusx_cline()
    except ApplicationError as e:
        error_string = "Runtime error: " + stderr + "\n" + e.cmd

    # remove query temp file
    os.unlink(query_file_object_tmp.name)
    # os.remove(query_file_object_tmp.name)

    return blast_out_tmp, error_string
def get_resolve_diff(self, other):
    """Get the difference between the resolve in this context and another.

    The difference is described from the point of view of the current context
    - a newer package means that the package in `other` is newer than the
    package in `self`.

    Diffs can only be compared if their package search paths match, an error
    is raised otherwise.

    The diff is expressed in packages, not variants - the specific variant of
    a package is ignored.

    Returns:
        A dict containing:
        - 'newer_packages': A dict containing items:
          - package name (str);
          - List of `Package` objects. These are the packages up to and
            including the newer package in `self`, in ascending order.
        - 'older_packages': A dict containing:
          - package name (str);
          - List of `Package` objects. These are the packages down to and
            including the older package in `self`, in descending order.
        - 'added_packages': Set of `Package` objects present in `self` but
           not in `other`;
        - 'removed_packages': Set of `Package` objects present in `other`,
           but not in `self`.

        If any item ('added_packages' etc) is empty, it is not added to the
        resulting dict. Thus, an empty dict is returned if there is no
        difference between contexts.
    """
    if self.package_paths != other.package_paths:
        from difflib import ndiff
        diff = ndiff(self.package_paths, other.package_paths)
        raise ResolvedContextError("Cannot diff resolves, package search "
                                   "paths differ:\n%s" % '\n'.join(diff))

    d = {}
    self_pkgs_ = set(x.parent for x in self._resolved_packages)
    other_pkgs_ = set(x.parent for x in other._resolved_packages)
    self_pkgs = self_pkgs_ - other_pkgs_
    other_pkgs = other_pkgs_ - self_pkgs_

    if not (self_pkgs or other_pkgs):
        return d

    self_fams = dict((x.name, x) for x in self_pkgs)
    other_fams = dict((x.name, x) for x in other_pkgs)

    newer_packages = {}
    older_packages = {}
    added_packages = set()
    removed_packages = set()

    for pkg in self_pkgs:
        if pkg.name not in other_fams:
            removed_packages.add(pkg)
        else:
            other_pkg = other_fams[pkg.name]
            if other_pkg.version > pkg.version:
                r = VersionRange.as_span(lower_version=pkg.version,
                                         upper_version=other_pkg.version)
                it = iter_packages(pkg.name, range_=r)
                pkgs = sorted(it, key=lambda x: x.version)
                newer_packages[pkg.name] = pkgs
            elif other_pkg.version < pkg.version:
                r = VersionRange.as_span(lower_version=other_pkg.version,
                                         upper_version=pkg.version)
                it = iter_packages(pkg.name, range_=r)
                pkgs = sorted(it, key=lambda x: x.version, reverse=True)
                older_packages[pkg.name] = pkgs

    for pkg in other_pkgs:
        if pkg.name not in self_fams:
            added_packages.add(pkg)

    if newer_packages:
        d["newer_packages"] = newer_packages
    if older_packages:
        d["older_packages"] = older_packages
    if added_packages:
        d["added_packages"] = added_packages
    if removed_packages:
        d["removed_packages"] = removed_packages

    return d
def register_xml_mapping(self, clsdict):
    """
    Add XML mappings to the enumeration class state for this member.
    """
    member_to_xml = self._get_or_add_member_to_xml(clsdict)
    member_to_xml[self.value] = self.xml_value

    xml_to_member = self._get_or_add_xml_to_member(clsdict)
    xml_to_member[self.xml_value] = self.value
def random(self, length=22):
    """
    Generate and return a cryptographically-secure short random string
    of the specified length.
    """
    random_num = int(binascii.b2a_hex(os.urandom(length)), 16)
    return self._num_to_string(random_num, pad_to_length=length)[:length]
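Illustrative only: the core of the method above turns `length` random bytes from os.urandom into one large integer via their hex representation, before base-conversion pads and truncates it.

import binascii
import os

length = 22
random_num = int(binascii.b2a_hex(os.urandom(length)), 16)
# each byte contributes at most 8 bits to the integer
print(random_num.bit_length() <= length * 8)  # True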
def initialize(self):
    """initialize in base class"""
    # can be done more efficiently?
    self._lb = [b[0] for b in self.bounds]
    self._ub = [b[1] for b in self.bounds]
def cross_lists(*sets):
    """Return the cross product of the arguments"""
    wheels = [iter(_) for _ in sets]
    digits = [next(it) for it in wheels]
    while True:
        yield digits[:]
        for i in range(len(digits)-1, -1, -1):
            try:
                digits[i] = next(wheels[i])
                break
            except StopIteration:
                wheels[i] = iter(sets[i])
                digits[i] = next(wheels[i])
        else:
            break
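A quick sanity check, assuming cross_lists from above is in scope: it should enumerate the same combinations in the same order as itertools.product, yielding lists instead of tuples.

import itertools

got = [tuple(d) for d in cross_lists([1, 2], "ab")]
expected = list(itertools.product([1, 2], "ab"))
assert got == expected  # [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]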
def read(self, vals):
    """Read values.

    Args:
        vals (list): list of strings representing values
    """
    # every field follows the same pattern: an empty string means None,
    # otherwise the raw string value is stored; iterate over the field
    # names in order instead of repeating the same if/else block 35 times
    fields = [
        'year', 'month', 'day', 'hour', 'minute',
        'data_source_and_uncertainty_flags', 'dry_bulb_temperature',
        'dew_point_temperature', 'relative_humidity',
        'atmospheric_station_pressure',
        'extraterrestrial_horizontal_radiation',
        'extraterrestrial_direct_normal_radiation',
        'horizontal_infrared_radiation_intensity',
        'global_horizontal_radiation', 'direct_normal_radiation',
        'diffuse_horizontal_radiation', 'global_horizontal_illuminance',
        'direct_normal_illuminance', 'diffuse_horizontal_illuminance',
        'zenith_luminance', 'wind_direction', 'wind_speed',
        'total_sky_cover', 'opaque_sky_cover', 'visibility',
        'ceiling_height', 'present_weather_observation',
        'present_weather_codes', 'precipitable_water',
        'aerosol_optical_depth', 'snow_depth',
        'days_since_last_snowfall', 'albedo',
        'liquid_precipitation_depth', 'liquid_precipitation_quantity',
    ]
    for i, field in enumerate(fields):
        setattr(self, field, vals[i] if len(vals[i]) != 0 else None)
def resolve_parameter_refs(self, input):
    """
    Resolves references to parameters within the given dictionary
    recursively. Other intrinsic functions such as !GetAtt, !Sub or !Ref to
    non-parameters will be left untouched.

    Result is a dictionary where parameter values are inlined. Don't pass
    this dictionary directly into transform's output because it changes the
    template structure by inlining parameter values.

    :param input: Any primitive type (dict, array, string etc) whose values
        might contain intrinsic functions
    :return: A copy of a dictionary with parameter references replaced by
        actual value.
    """
    return self._traverse(input, self.parameters,
                          self._try_resolve_parameter_refs)
def _setup_log_prefix(self, plugin_id=''):
    """Setup custom warning notification."""
    self._logger_console_fmtter.prefix = '%s: ' % plugin_id
    self._logger_console_fmtter.plugin_id = plugin_id
    self._logger_file_fmtter.prefix = '*'
    self._logger_file_fmtter.plugin_id = '%s: ' % plugin_id
def fit_delta_ts(data, livetime, fit_background=True):
    """Fits gaussians to delta t for each PMT pair.

    Parameters
    ----------
    data: 2d np.array: x = PMT combinations (465), y = time, entry = frequency
    livetime: length of data taking in seconds
    fit_background: if True: fits gaussian with offset, else without offset

    Returns
    -------
    numpy arrays with rates and means for all PMT combinations
    """
    data = data / livetime
    start = -(data.shape[1] - 1) / 2
    end = -start + 1
    xs = np.arange(start, end)

    rates = []
    sigmas = []
    means = []
    popts = []
    pcovs = []
    for combination in data:
        mean0 = np.argmax(combination) + start
        try:
            if fit_background:
                popt, pcov = optimize.curve_fit(
                    gaussian,
                    xs,
                    combination,
                    p0=[mean0, 4., 5., 0.1],
                    bounds=([start, 0, 0, 0], [end, 10, 10, 1]))
            else:
                popt, pcov = optimize.curve_fit(
                    gaussian_wo_offset,
                    xs,
                    combination,
                    p0=[mean0, 4., 5.],
                    bounds=([start, 0, 0], [end, 10, 10]))
        except RuntimeError:
            popt = (0, 0, 0, 0)
            # without this, pcov would be undefined (or stale from a
            # previous iteration) when the fit fails
            pcov = None
        rates.append(popt[2])
        means.append(popt[0])
        sigmas.append(popt[1])
        popts.append(popt)
        pcovs.append(pcov)
    return (
        np.array(rates), np.array(means), np.array(sigmas),
        np.array(popts), np.array(pcovs)
    )
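A self-contained sketch of the optimize.curve_fit pattern used above, fitting a simple gaussian to noiseless data; the model function and values here are illustrative, not the module's own gaussian.

import numpy as np
from scipy import optimize

def gaussian(x, mu, sigma, a):
    return a * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

xs = np.linspace(-10, 10, 101)
ys = gaussian(xs, 1.0, 2.0, 5.0)
popt, pcov = optimize.curve_fit(gaussian, xs, ys, p0=[0.0, 1.0, 1.0])
print(np.round(popt, 3))  # approximately [1. 2. 5.]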
def _save_account(self, account, username):
    """ Called when account is created/updated. With username override. """
    # retrieve default project, or use null project if none
    default_project_name = self._null_project
    if account.default_project is not None:
        default_project_name = account.default_project.pid

    ds_user = self.get_user(username)
    if account.date_deleted is None:
        # date_deleted is not set, user should exist
        logger.debug("account is active")

        if ds_user is None:
            # create user if doesn't exist
            self._call([
                "add", "user",
                "accounts=%s" % default_project_name,
                "defaultaccount=%s" % default_project_name,
                "name=%s" % username])
        else:
            # or just set default project
            self._call([
                "modify", "user", "set",
                "defaultaccount=%s" % default_project_name,
                "where", "name=%s" % username])

        # update user meta information:
        # add rest of projects user belongs to
        slurm_projects = self.get_projects_in_user(username)
        slurm_projects = [project.lower() for project in slurm_projects]
        slurm_projects = set(slurm_projects)
        for project in account.person.projects.all():
            if project.pid.lower() not in slurm_projects:
                self._call([
                    "add", "user",
                    "name=%s" % username,
                    "accounts=%s" % project.pid])
    else:
        # date_deleted is set, user should not exist
        logger.debug("account is not active")
        self._delete_account(username)

    return
Called when account is created/updated. With username override.
def output(id, url):
    """
    View the files from a job.
    """
    try:
        experiment = ExperimentClient().get(normalize_job_name(id))
    except FloydException:
        experiment = ExperimentClient().get(id)

    output_dir_url = "%s/%s/files" % (floyd.floyd_web_host, experiment.name)
    if url:
        floyd_logger.info(output_dir_url)
    else:
        floyd_logger.info("Opening output path in your browser ...")
        webbrowser.open(output_dir_url)
View the files from a job.
def fill_zeros(result, x, y, name, fill):
    """
    If this is a reversed op, then flip x,y

    If we have an integer value (or array in y)
    and we have 0's, fill them with the fill,
    return the result.

    Mask the nan's from x.
    """
    if fill is None or is_float_dtype(result):
        return result

    if name.startswith(('r', '__r')):
        x, y = y, x

    is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type'))
    is_scalar_type = is_scalar(y)

    if not is_variable_type and not is_scalar_type:
        return result

    if is_scalar_type:
        y = np.array(y)

    if is_integer_dtype(y):
        if (y == 0).any():
            # GH 7325, mask and nans must be broadcastable (also: PR 9308)
            # Raveling and then reshaping makes np.putmask faster
            mask = ((y == 0) & ~np.isnan(result)).ravel()

            shape = result.shape
            result = result.astype('float64', copy=False).ravel()

            np.putmask(result, mask, fill)

            # if we have a fill of inf, then sign it correctly
            # (GH 6178 and PR 9308)
            if np.isinf(fill):
                signs = y if name.startswith(('r', '__r')) else x
                signs = np.sign(signs.astype('float', copy=False))
                negative_inf_mask = (signs.ravel() < 0) & mask
                np.putmask(result, negative_inf_mask, -fill)

            if "floordiv" in name:  # (PR 9308)
                nan_mask = ((y == 0) & (x == 0)).ravel()
                np.putmask(result, nan_mask, np.nan)

            result = result.reshape(shape)

    return result
If this is a reversed op, then flip x,y If we have an integer value (or array in y) and we have 0's, fill them with the fill, return the result. Mask the nan's from x.
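The masking trick at the heart of this helper can be shown standalone. This is an illustrative analogue only, not a call into the pandas internals above:

import numpy as np

# Illustrative analogue of the zero-fill masking, not the pandas helper itself.
x = np.array([1, 2, 3])
y = np.array([1, 0, 3])
with np.errstate(divide='ignore'):
    result = x / y                       # float result, inf where y == 0
mask = ((y == 0) & ~np.isnan(result)).ravel()
np.putmask(result, mask, np.inf)         # fill the zero-division slots
print(result)                            # [1. inf 1.]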
def set_itunes_subtitle(self):
    """Parses subtitle from itunes tags and sets value"""
    try:
        self.itunes_subtitle = self.soup.find('itunes:subtitle').string
    except AttributeError:
        self.itunes_subtitle = None
Parses subtitle from itunes tags and sets value
def stisObsCount(input):
    """
    Input: A stis multiextension file
    Output: Number of stis science extensions in input
    """
    count = 0
    toclose = False
    if isinstance(input, str):
        input = fits.open(input)
        toclose = True
    for ext in input:
        if 'extname' in ext.header:
            if ext.header['extname'].upper() == 'SCI':
                count += 1
    if toclose:
        input.close()
    return count
Input: A stis multiextension file Output: Number of stis science extensions in input
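A brief usage sketch; the filename below is a placeholder, and the function equally accepts an already-open HDUList (which it leaves open):

from astropy.io import fits

# Hypothetical usage; 'stis_obs_raw.fits' is a placeholder filename.
n_sci = stisObsCount('stis_obs_raw.fits')
with fits.open('stis_obs_raw.fits') as hdul:
    n_sci = stisObsCount(hdul)  # open HDUList works too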
def validate_email(email):
    """
    Validates an email address
    Source: Himanshu Shankar (https://github.com/iamhssingh)
    Parameters
    ----------
    email: str

    Returns
    -------
    bool
    """
    from django.core.validators import validate_email
    from django.core.exceptions import ValidationError

    try:
        validate_email(email)
        return True
    except ValidationError:
        return False
Validates an email address Source: Himanshu Shankar (https://github.com/iamhssingh) Parameters ---------- email: str Returns ------- bool
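Usage is straightforward; note that Django settings must be configured for the imports inside the function to work:

# Hypothetical usage within a configured Django project.
print(validate_email('user@example.com'))  # True
print(validate_email('not-an-address'))    # False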
def fix_journal_name(journal, knowledge_base):
    """Convert journal name to Inspire's short form."""
    if not journal:
        return '', ''
    if not knowledge_base:
        return journal, ''
    if len(journal) < 2:
        return journal, ''
    volume = ''
    if ('A' <= journal[-1] <= 'Z'
            and (journal[-2] == '.' or journal[-2] == ' ')):
        volume += journal[-1]
        journal = journal[:-1]
    journal = journal.strip()
    if journal.upper() in knowledge_base:
        journal = knowledge_base[journal.upper()].strip()
    elif journal in knowledge_base:
        journal = knowledge_base[journal].strip()
    elif '.' in journal:
        journalnodots = journal.replace('. ', ' ')
        journalnodots = journalnodots.replace('.', ' ').strip().upper()
        if journalnodots in knowledge_base:
            journal = knowledge_base[journalnodots].strip()
    journal = journal.replace('. ', '.')
    return journal, volume
Convert journal name to Inspire's short form.
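A small worked example, with a hypothetical knowledge base mapping upper-cased long names to short forms; the trailing capital letter is peeled off as the series/volume letter:

# Hypothetical knowledge base; the real one is much larger.
kb = {'PHYSICAL REVIEW': 'Phys.Rev.'}
print(fix_journal_name('Physical Review D', kb))  # ('Phys.Rev.', 'D')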
def guest_live_resize_cpus(self, userid, cpu_cnt):
    """Live resize virtual cpus of guests.

    :param userid: (str) the userid of the guest to be live resized
    :param cpu_cnt: (int) The number of virtual cpus that the guest should
        have in active state after live resize. The value should be an
        integer between 1 and 64.
    """
    action = "live resize guest '%s' to have '%i' virtual cpus" % (
        userid, cpu_cnt)
    LOG.info("Begin to %s" % action)
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.live_resize_cpus(userid, cpu_cnt)
    LOG.info("%s successfully." % action)
Live resize virtual cpus of guests. :param userid: (str) the userid of the guest to be live resized :param cpu_cnt: (int) The number of virtual cpus that the guest should have in active state after live resize. The value should be an integer between 1 and 64.
def seek(self, offset, whence=os.SEEK_SET):
    """Seeks to an offset within the file-like object.

    Args:
      offset (int): offset to seek to.
      whence (Optional(int)): value that indicates whether offset is an
          absolute or relative position within the file.

    Raises:
      IOError: if the seek failed.
      OSError: if the seek failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))

    if whence == os.SEEK_CUR:
        offset += self._current_offset
    elif whence == os.SEEK_END:
        offset += self._range_size
    elif whence != os.SEEK_SET:
        raise IOError('Unsupported whence.')
    if offset < 0:
        raise IOError('Invalid offset value less than zero.')
    self._current_offset = offset
Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed. OSError: if the seek failed.
def form_invalid(self, form, context=None, **kwargs):
    """This will return the request with form errors as well as any
    additional context.
    """
    if not context:
        context = {}
    context['errors'] = form.errors
    return super(ApiFormView, self).render_to_response(
        context=context, status=400)
This will return the request with form errors as well as any additional context.
def restore(self, key, ttl, value):
    """Creates a key associated with a value that is obtained via DUMP."""
    return self.execute(b'RESTORE', key, ttl, value)
Creates a key associated with a value that is obtained via DUMP.
def get_node_by_name(self, nodename):
    """Return the node corresponding with name `nodename`

    :params nodename: Name of the node
    :type nodename: str
    """
    nodes = dict((n.name, n) for n in self.get_all_nodes())
    try:
        return nodes[nodename]
    except KeyError:
        raise NodeNotFound("Node %s not found" % nodename)
Return the node corresponding with name `nodename` :params nodename: Name of the node :type nodename: str
def __split_nonleaf_node(self, node):
    """!
    @brief Performs splitting of the specified non-leaf node.

    @param[in] node (non_leaf_node): Non-leaf node that should be splitted.

    @return (list) New pair of non-leaf nodes [non_leaf_node1, non_leaf_node2].
    """
    [farthest_node1, farthest_node2] = node.get_farthest_successors(
        self.__type_measurement)

    # create new non-leaf nodes
    new_node1 = non_leaf_node(
        farthest_node1.feature, node.parent, [farthest_node1], None)
    new_node2 = non_leaf_node(
        farthest_node2.feature, node.parent, [farthest_node2], None)

    farthest_node1.parent = new_node1
    farthest_node2.parent = new_node2

    # re-insert other successors
    for successor in node.successors:
        if (successor is not farthest_node1) and (successor is not farthest_node2):
            distance1 = new_node1.get_distance(successor, self.__type_measurement)
            distance2 = new_node2.get_distance(successor, self.__type_measurement)

            if distance1 < distance2:
                new_node1.insert_successor(successor)
            else:
                new_node2.insert_successor(successor)

    return [new_node1, new_node2]
! @brief Performs splitting of the specified non-leaf node. @param[in] node (non_leaf_node): Non-leaf node that should be splitted. @return (list) New pair of non-leaf nodes [non_leaf_node1, non_leaf_node2].
def make_blastcmd_builder(
    mode, outdir, format_exe=None, blast_exe=None, prefix="ANIBLAST"
):
    """Returns BLASTcmds object for construction of BLAST commands."""
    if mode == "ANIb":  # BLAST/formatting executable depends on mode
        blastcmds = BLASTcmds(
            BLASTfunctions(construct_makeblastdb_cmd, construct_blastn_cmdline),
            BLASTexes(
                format_exe or pyani_config.MAKEBLASTDB_DEFAULT,
                blast_exe or pyani_config.BLASTN_DEFAULT,
            ),
            prefix,
            outdir,
        )
    else:
        blastcmds = BLASTcmds(
            BLASTfunctions(construct_formatdb_cmd, construct_blastall_cmdline),
            BLASTexes(
                format_exe or pyani_config.FORMATDB_DEFAULT,
                blast_exe or pyani_config.BLASTALL_DEFAULT,
            ),
            prefix,
            outdir,
        )
    return blastcmds
Returns BLASTcmds object for construction of BLAST commands.
def annot_boxplot(ax, dmetrics, xoffwithin=0.85, xoff=1.6,
                  yoff=0, annotby='xs', test=False):
    """
    :param dmetrics: hue in index, x in columns

    #todos
    #x|y off in %
    xmin,xmax=ax.get_xlim()
    (xmax-xmin)+(xmax-xmin)*0.35+xmin
    """
    xlabel = ax.get_xlabel()
    ylabel = ax.get_ylabel()
    if test:
        dmetrics.index.name = 'index'
        dmetrics.columns.name = 'columns'
        dm = dmetrics.melt()
        dm['value'] = 1
        ax = sns.boxplot(data=dm, x='columns', y='value')
    for huei, hue in enumerate(dmetrics.index):
        for xi, x in enumerate(dmetrics.columns):
            if not pd.isnull(dmetrics.loc[hue, x]):
                xco = xi + (huei * xoffwithin / len(dmetrics.index)
                            + (xoff / len(dmetrics.index)))
                yco = ax.get_ylim()[1] + yoff
                if annotby == 'ys':
                    xco, yco = yco, xco
                ax.text(xco, yco, dmetrics.loc[hue, x], ha='center')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
:param dmetrics: hue in index, x in columns #todos #x|y off in % xmin,xmax=ax.get_xlim() (xmax-xmin)+(xmax-xmin)*0.35+xmin
def gatherInput(**Config):
    r"""Helps to interactively get user input.
    """
    _type = Config.get('type')
    while True:
        try:
            got = raw_input('%s: ' % getLabel(Config))
        except EOFError:
            got = None
        if not got and 'default' in Config:
            return Config['default']
        try:
            return _type(got) if _type else got
        except (ValueError, TypeError) as e:
            # the original kept TypeError in a separate handler that referenced
            # an undefined name `e`; both cases are reported identically
            err(str(e) or '<invalid value>')
r"""Helps to interactively get user input.
def preamble(self, lenient=False):
    """
    Extract the image metadata by reading the initial part of
    the PNG file up to the start of the ``IDAT`` chunk.  All the
    chunks that precede the ``IDAT`` chunk are read and either
    processed for metadata or discarded.

    If the optional `lenient` argument evaluates to `True`,
    checksum failures will raise warnings rather than exceptions.
    """
    self.validate_signature()

    while True:
        if not self.atchunk:
            self.atchunk = self._chunk_len_type()
            if self.atchunk is None:
                raise FormatError('This PNG file has no IDAT chunks.')
        if self.atchunk[1] == b'IDAT':
            return
        self.process_chunk(lenient=lenient)
Extract the image metadata by reading the initial part of the PNG file up to the start of the ``IDAT`` chunk. All the chunks that precede the ``IDAT`` chunk are read and either processed for metadata or discarded. If the optional `lenient` argument evaluates to `True`, checksum failures will raise warnings rather than exceptions.
def _handle_fetch_response(self, responses):
    """The callback handling the successful response from the fetch request

    Delivers the message list to the processor, handles per-message errors
    (ConsumerFetchSizeTooSmall), triggers another fetch request

    If the processor is still processing the last batch of messages, we
    defer this processing until it's done.  Otherwise, we start another
    fetch request and submit the messages to the processor
    """
    # Successful fetch, reset our retry delay
    self.retry_delay = self.retry_init_delay
    self._fetch_attempt_count = 1

    # Check to see if we are still processing the last block we fetched...
    if self._msg_block_d:
        # We are still working through the last block of messages...
        # We have to wait until it's done, then process this response
        self._msg_block_d.addCallback(
            lambda _: self._handle_fetch_response(responses))
        return

    # No ongoing processing, great, let's get some started.
    # Request no longer outstanding, clear the deferred tracker so we
    # can refetch
    self._request_d = None
    messages = []
    try:
        for resp in responses:  # We should really only ever get one...
            if resp.partition != self.partition:
                log.warning(
                    "%r: Got response with partition: %r not our own: %r",
                    self, resp.partition, self.partition)
                continue
            # resp.messages is a KafkaCodec._decode_message_set_iter
            # Note that 'message' here is really an OffsetAndMessage
            for message in resp.messages:
                # Check for messages included which are from prior to our
                # desired offset: can happen due to compressed message sets
                if message.offset < self._fetch_offset:
                    log.debug(
                        'Skipping message at offset: %d, because its '
                        'offset is less that our fetch offset: %d.',
                        message.offset, self._fetch_offset)
                    continue
                # Create a 'SourcedMessage' and add it to the messages list
                messages.append(
                    SourcedMessage(
                        message=message.message,
                        offset=message.offset, topic=self.topic,
                        partition=self.partition))
                # Update our notion of from where to fetch.
                self._fetch_offset = message.offset + 1
    except ConsumerFetchSizeTooSmall:
        # A message was too large for us to receive, given our current
        # buffer size. Grow it until it works, or we hit our max
        # Grow by 16x up to 1MB (could result in 16MB buf), then by 2x
        factor = 2
        if self.buffer_size <= 2**20:
            factor = 16
        if self.max_buffer_size is None:
            # No limit, increase until we succeed or fail to alloc RAM
            self.buffer_size *= factor
        elif (self.max_buffer_size is not None and
                self.buffer_size < self.max_buffer_size):
            # Limited, but currently below it.
            self.buffer_size = min(
                self.buffer_size * factor, self.max_buffer_size)
        else:
            # We failed, and are already at our max. Nothing we can do but
            # create a Failure and errback() our start() deferred
            log.error("Max fetch size %d too small", self.max_buffer_size)
            failure = Failure(
                ConsumerFetchSizeTooSmall(
                    "Max buffer size:%d too small for message",
                    self.max_buffer_size))
            self._start_d.errback(failure)
            return

        log.debug(
            "Next message larger than fetch size, increasing "
            "to %d (~2x) and retrying", self.buffer_size)
    finally:
        # If we were able to extract any messages, deliver them to the
        # processor now.
        if messages:
            self._msg_block_d = Deferred()
            self._process_messages(messages)

    # start another fetch, if needed, but use callLater to avoid recursion
    self._retry_fetch(0)
The callback handling the successful response from the fetch request Delivers the message list to the processor, handles per-message errors (ConsumerFetchSizeTooSmall), triggers another fetch request If the processor is still processing the last batch of messages, we defer this processing until it's done. Otherwise, we start another fetch request and submit the messages to the processor
def resolve_ports(self, ports):
    """Resolve NICs not yet bound to bridge(s)

    If hwaddress provided then returns resolved hwaddress otherwise NIC.
    """
    if not ports:
        return None

    hwaddr_to_nic = {}
    hwaddr_to_ip = {}
    for nic in list_nics():
        # Ignore virtual interfaces (bond masters will be identified from
        # their slaves)
        if not is_phy_iface(nic):
            continue

        _nic = get_bond_master(nic)
        if _nic:
            log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
                level=DEBUG)
            nic = _nic

        hwaddr = get_nic_hwaddr(nic)
        hwaddr_to_nic[hwaddr] = nic
        addresses = get_ipv4_addr(nic, fatal=False)
        addresses += get_ipv6_addr(iface=nic, fatal=False)
        hwaddr_to_ip[hwaddr] = addresses

    resolved = []
    mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
    for entry in ports:
        if re.match(mac_regex, entry):
            # NIC is in known NICs and does NOT have an IP address
            if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                # If the nic is part of a bridge then don't use it
                if is_bridge_member(hwaddr_to_nic[entry]):
                    continue

                # Entry is a MAC address for a valid interface that doesn't
                # have an IP address assigned yet.
                resolved.append(hwaddr_to_nic[entry])
        else:
            # If the passed entry is not a MAC address, assume it's a valid
            # interface, and that the user put it there on purpose (we can
            # trust it to be the real external network).
            resolved.append(entry)

    # Ensure no duplicates
    return list(set(resolved))
Resolve NICs not yet bound to bridge(s) If hwaddress provided then returns resolved hwaddress otherwise NIC.
def decrypt(self):
    """Decrypt decrypts the secret and returns the plaintext.

    Calling decrypt() may incur side effects such as a call to a remote
    service for decryption.
    """
    if not self._crypter:
        return b''
    try:
        plaintext = self._crypter.decrypt(
            self._ciphertext, **self._decrypt_params)
        return plaintext
    except Exception as e:
        exc_info = sys.exc_info()
        six.reraise(
            ValueError(
                'Invalid ciphertext "%s", error: %s' % (self._ciphertext, e)),
            None,
            exc_info[2])
Decrypt decrypts the secret and returns the plaintext. Calling decrypt() may incur side effects such as a call to a remote service for decryption.
def get_portchannel_info_by_intf_output_lacp_actor_max_deskew(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
    config = get_portchannel_info_by_intf
    output = ET.SubElement(get_portchannel_info_by_intf, "output")
    lacp = ET.SubElement(output, "lacp")
    actor_max_deskew = ET.SubElement(lacp, "actor-max-deskew")
    actor_max_deskew.text = kwargs.pop('actor_max_deskew')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def run_console_rules(self, options_bootstrapper, goals, target_roots):
    """Runs @console_rules sequentially and interactively by requesting their implicit Goal products.

    For retryable failures, raises scheduler.ExecutionError.

    :param list goals: The list of requested goal names as passed on the commandline.
    :param TargetRoots target_roots: The targets root of the request.

    :returns: An exit code.
    """
    subject = target_roots.specs
    console = Console()

    for goal in goals:
        goal_product = self.goal_map[goal]
        params = Params(subject, options_bootstrapper, console)
        logger.debug('requesting {} to satisfy execution of `{}` goal'.format(goal_product, goal))
        try:
            exit_code = self.scheduler_session.run_console_rule(goal_product, params)
        finally:
            console.flush()

        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code

    return PANTS_SUCCEEDED_EXIT_CODE
Runs @console_rules sequentially and interactively by requesting their implicit Goal products. For retryable failures, raises scheduler.ExecutionError. :param list goals: The list of requested goal names as passed on the commandline. :param TargetRoots target_roots: The targets root of the request. :returns: An exit code.
def smsc(self, smscNumber):
    """ Set the default SMSC number to use when sending SMS messages """
    if smscNumber != self._smscNumber:
        if self.alive:
            self.write('AT+CSCA="{0}"'.format(smscNumber))
        self._smscNumber = smscNumber
Set the default SMSC number to use when sending SMS messages
def write(self, out):
    """Write ICC Profile to the file."""
    if not self.rawtagtable:
        self.rawtagtable = self.rawtagdict.items()
    tags = tagblock(self.rawtagtable)
    self.writeHeader(out, 128 + len(tags))
    out.write(tags)
    out.flush()
    return self
Write ICC Profile to the file.
def list_xattrs(self, path, **kwargs):
    """Get all of the xattr names for a file or directory.

    :rtype: list
    """
    return simplejson.loads(
        _json(self._get(path, 'LISTXATTRS', **kwargs))['XAttrNames'])
Get all of the xattr names for a file or directory. :rtype: list
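A quick usage sketch; `client`, the path, and the returned names are all hypothetical stand-ins for a real WebHDFS client session:

# Hypothetical usage against a WebHDFS client instance named `client`.
names = client.list_xattrs('/user/alice/data.csv')
print(names)  # e.g. ['user.checksum']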
def parse_logs(log_list, date, machine_name, log_type):
    """ Parse log file lines in log_type format. """
    output = []
    count = fail = skip = updated = 0

    # Check things are setup correctly
    try:
        machine = Machine.objects.get(name=machine_name)
    except Machine.DoesNotExist:
        return "ERROR: Couldn't find machine named: %s" % machine_name

    if log_type == "alogger":
        parser = AloggerParser()
    else:
        parser = get_parser(log_type)

    # Process each line
    for line_no, line in enumerate(log_list):
        try:
            data = parser.line_to_dict(line)
        except ValueError:
            output.append("%d: Error reading line" % line_no)
            continue

        # if parser returns None, nothing to do, continue to next line
        if data is None:
            skip += 1
            continue

        # check for required fields
        required_fields = [
            'user', 'project', 'jobid', 'jobname', 'cpu_usage', 'cores',
            'act_wall_time', 'est_wall_time', 'mem', 'vmem', 'list_pmem',
            'list_mem', 'list_pvmem', 'ctime', 'qtime', 'etime', 'start',
        ]
        for field in required_fields:
            if field not in data:
                output.append(
                    "line %d: %s field not given." % (line_no, field))
                fail = fail + 1
                continue

        # Process user --> account
        try:
            account = Account.objects.get(
                username=data['user'], date_deleted__isnull=True)
        except Account.DoesNotExist:
            # Couldn't find user account - Assign to user None
            output.append(
                "line %d: Couldn't find user account for username=%s."
                % (line_no, data['user']))
            fail += 1
            continue
        except Account.MultipleObjectsReturned:
            output.append(
                "line %d: Username %s has multiple active accounts."
                % (line_no, data['user']))
            fail += 1
            continue

        # Process project
        if data['project'] is None:
            output.append(
                "line %d: Project was not supplied." % (line_no))
            fail += 1
            continue

        try:
            project = Project.objects.get(pid=data['project'])
        except Project.DoesNotExist:
            output.append(
                "line %d: Couldn't find specified project %s"
                % (line_no, data['project']))
            fail += 1
            continue

        # memory calculations
        if machine.mem_per_core:
            avail_mem_per_core = machine.mem_per_core * 1024
            avail_mem_for_job = avail_mem_per_core * data['cores']

            if data['list_pmem'] * data['cores'] > data['list_mem']:
                memory_used_per_core = data['list_pmem']
                memory_used_for_job = data['list_pmem'] * data['cores']
            else:
                memory_used_per_core = data['list_mem'] / data['cores']
                memory_used_for_job = data['list_mem']

            if memory_used_for_job > avail_mem_for_job:
                data['cpu_usage'] = ceil(
                    memory_used_per_core / avail_mem_per_core
                    * data['act_wall_time'] * data['cores'])

        # apply scaling factor to cpu_usage
        data['cpu_usage'] = data['cpu_usage'] * machine.scaling_factor

        # Everything is good so add entry
        queue, created = Queue.objects.get_or_create(name=data['queue'])

        try:
            cpujob, created = CPUJob.objects.get_or_create(jobid=data['jobid'])
            cpujob.account = account
            cpujob.username = data['user']
            cpujob.project = project
            cpujob.machine = machine
            cpujob.date = date
            cpujob.queue = queue
            cpujob.cpu_usage = data['cpu_usage']
            cpujob.est_wall_time = data['est_wall_time']
            cpujob.act_wall_time = data['act_wall_time']
            cpujob.mem = data['mem']
            cpujob.vmem = data['vmem']
            cpujob.ctime = data['ctime']
            cpujob.qtime = data['qtime']
            cpujob.etime = data['etime']
            cpujob.start = data['start']
            cpujob.cores = data['cores']
            cpujob.exit_status = data['exit_status']
            cpujob.jobname = data['jobname']
            cpujob.list_mem = data['list_mem']
            cpujob.list_vmem = data['list_vmem']
            cpujob.list_pmem = data['list_pmem']
            cpujob.list_pvmem = data['list_pvmem']
            cpujob.save()
        except Exception as e:
            output.append(
                "line %d: Failed to insert a line - %s" % (line_no, e))
            fail += 1
            continue

        if created:
            count += 1
        else:
            updated += 1

    summary = (
        'Inserted : %i\nUpdated : %i\nFailed : %i\nSkipped : %i'
        % (count, updated, fail, skip))

    logger.debug('Inserted : %i' % count)
    logger.debug('Updated : %i' % updated)
    logger.debug('Failed : %i' % fail)
    logger.debug('Skipped : %i' % skip)

    return summary, output
Parse log file lines in log_type format.
def vector(x, y=None, z=0.0):
    """Return a 3D numpy array representing a vector (of type `numpy.float64`).

    If `y` is ``None``, assume input is already in the form `[x,y,z]`.
    """
    if y is None:
        # assume x is already [x,y,z]
        return np.array(x, dtype=np.float64)
    return np.array([x, y, z], dtype=np.float64)
Return a 3D numpy array representing a vector (of type `numpy.float64`). If `y` is ``None``, assume input is already in the form `[x,y,z]`.
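Both calling conventions produce the same array, as this small check shows:

import numpy as np

v1 = vector(1, 2, 3)
v2 = vector([1, 2, 3])
assert np.array_equal(v1, v2) and v1.dtype == np.float64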
def parse(cls, url_path):
    # type: (str) -> UrlPath
    """
    Parse a string into a URL path (simple eg does not support typing of URL parameters)
    """
    if not url_path:
        return cls()

    nodes = []
    for node in url_path.rstrip('/').split('/'):
        # Identifies a PathNode
        if '{' in node or '}' in node:
            m = PATH_NODE_RE.match(node)
            if not m:
                raise ValueError("Invalid path param: {}".format(node))

            # Parse out name and type
            name, param_type, param_arg = m.groups()
            try:
                type_ = Type[param_type]
            except KeyError:
                if param_type is not None:
                    raise ValueError("Unknown param type `{}` in: {}".format(param_type, node))
                type_ = Type.Integer

            nodes.append(PathParam(name, type_, param_arg))
        else:
            nodes.append(node)

    return cls(*nodes)
Parse a string into a URL path (simple eg does not support typing of URL parameters)
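A rough usage sketch; the exact node values depend on how PATH_NODE_RE splits the parameter, so take the commented result as approximate:

# Hypothetical usage; untyped params default to Type.Integer.
path = UrlPath.parse('/users/{user_id}/posts')
# Roughly: cls('', 'users', PathParam('user_id', Type.Integer, ...), 'posts')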
def get_agent_requirement_line(check, version):
    """
    Compose a text line to be used in a requirements.txt file to install a check
    pinned to a specific version.
    """
    package_name = get_package_name(check)

    # no manifest
    if check in ('datadog_checks_base', 'datadog_checks_downloader'):
        return '{}=={}'.format(package_name, version)

    m = load_manifest(check)
    platforms = sorted(m.get('supported_os', []))

    # all platforms
    if platforms == ALL_PLATFORMS:
        return '{}=={}'.format(package_name, version)
    # one specific platform
    elif len(platforms) == 1:
        return "{}=={}; sys_platform == '{}'".format(
            package_name, version, PLATFORMS_TO_PY.get(platforms[0]))
    elif platforms:
        if 'windows' not in platforms:
            return "{}=={}; sys_platform != 'win32'".format(package_name, version)
        elif 'mac_os' not in platforms:
            return "{}=={}; sys_platform != 'darwin'".format(package_name, version)
        elif 'linux' not in platforms:
            return "{}=={}; sys_platform != 'linux2'".format(package_name, version)

    raise ManifestError(
        "Can't parse the `supported_os` list for the check {}: {}".format(check, platforms))
Compose a text line to be used in a requirements.txt file to install a check pinned to a specific version.
def start_list(self):
    """Start a list."""
    self._ordered = False
    self.start_container(List)
    self.set_next_paragraph_style(
        'list-paragraph' if self._item_level <= 0 else 'sublist-paragraph')
Start a list.
def call_env_doctree_read(cls, kb_app, sphinx_app: Sphinx, doctree: doctree):
    """ On doctree-read, do callbacks"""
    for callback in EventAction.get_callbacks(kb_app, SphinxEvent.DREAD):
        callback(kb_app, sphinx_app, doctree)
On doctree-read, do callbacks
def get_comparable_values(self):
    """Return a tuple of values representing the unicity of the object
    """
    return (str(self.name), str(self.description), str(self.type),
            bool(self.optional),
            str(self.constraints) if isinstance(self, Constraintable) else "")
Return a tuple of values representing the unicity of the object
def get_thumbnail_of_file(image_name, width):
    """Return the file contents of the thumbnail of the given file."""
    hdr = {'User-Agent': 'Python urllib2'}
    url = make_thumb_url(image_name, width)
    req = urllib2.Request(url, headers=hdr)
    try:
        logging.debug("Retrieving %s", url)
        opened = urllib2.urlopen(req)
        extension = opened.headers.subtype
        return opened.read(), make_thumbnail_name(image_name, extension)
    except urllib2.HTTPError as e:
        message = e.fp.read()
        raise get_exception_based_on_api_message(message, image_name)
Return the file contents of the thumbnail of the given file.
def _handle_actionpush(self, length):
    """Handle the ActionPush action."""
    init_pos = self._src.tell()
    while self._src.tell() < init_pos + length:
        obj = _make_object("ActionPush")
        obj.Type = unpack_ui8(self._src)

        # name and how to read each type
        push_types = {
            0: ("String", self._get_struct_string),
            1: ("Float", lambda: unpack_float(self._src)),
            2: ("Null", lambda: None),
            4: ("RegisterNumber", lambda: unpack_ui8(self._src)),
            5: ("Boolean", lambda: unpack_ui8(self._src)),
            6: ("Double", lambda: unpack_double(self._src)),
            7: ("Integer", lambda: unpack_ui32(self._src)),
            8: ("Constant8", lambda: unpack_ui8(self._src)),
            9: ("Constant16", lambda: unpack_ui16(self._src)),
        }
        name, func = push_types[obj.Type]
        setattr(obj, name, func())
        yield obj
Handle the ActionPush action.
def enable_root_user(self):
    """
    Enables login from any host for the root user and provides
    the user with a generated root password.
    """
    uri = "/instances/%s/root" % self.id
    resp, body = self.manager.api.method_post(uri)
    return body["user"]["password"]
Enables login from any host for the root user and provides the user with a generated root password.
def package_version():
    """Get the package version via Git Tag."""
    version_path = os.path.join(os.path.dirname(__file__), 'version.py')
    version = read_version(version_path)
    write_version(version_path, version)
    return version
Get the package version via Git Tag.
def add_bucket_key_data(self, bucket, key, data, bucket_type=None):
    """
    Adds a bucket/key/keydata triple to the inputs.

    :param bucket: the bucket
    :type bucket: string
    :param key: the key or list of keys
    :type key: string
    :param data: the key-specific data
    :type data: string, list, dict, None
    :param bucket_type: Optional name of a bucket type
    :type bucket_type: string, None
    :rtype: :class:`RiakMapReduce`
    """
    if self._input_mode == 'bucket':
        raise ValueError('Already added a bucket, can\'t add an object.')
    elif self._input_mode == 'query':
        raise ValueError('Already added a query, can\'t add an object.')
    else:
        if isinstance(key, Iterable) and \
                not isinstance(key, string_types):
            if bucket_type is not None:
                for k in key:
                    self._inputs.append([bucket, k, data, bucket_type])
            else:
                for k in key:
                    self._inputs.append([bucket, k, data])
        else:
            if bucket_type is not None:
                self._inputs.append([bucket, key, data, bucket_type])
            else:
                self._inputs.append([bucket, key, data])
        return self
Adds a bucket/key/keydata triple to the inputs. :param bucket: the bucket :type bucket: string :param key: the key or list of keys :type key: string :param data: the key-specific data :type data: string, list, dict, None :param bucket_type: Optional name of a bucket type :type bucket_type: string, None :rtype: :class:`RiakMapReduce`
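A short usage sketch; `mr`, the bucket, and the keys are hypothetical, and note that a list of keys fans out to one input per key:

# Hypothetical usage on a RiakMapReduce instance `mr`; returns self, so chains.
mr.add_bucket_key_data('logs', 'key1', {'weight': 2})
mr.add_bucket_key_data('logs', ['key2', 'key3'], None)  # one input per key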
def restore_package_version_from_recycle_bin(self, package_version_details, feed_id, package_name, package_version):
    """RestorePackageVersionFromRecycleBin.
    [Preview API] Restore a package version from the recycle bin to its associated feed.
    :param :class:`<PyPiRecycleBinPackageVersionDetails> <azure.devops.v5_0.py_pi_api.models.PyPiRecycleBinPackageVersionDetails>` package_version_details: Set the 'Deleted' state to 'false' to restore the package to its feed.
    :param str feed_id: Name or ID of the feed.
    :param str package_name: Name of the package.
    :param str package_version: Version of the package.
    """
    route_values = {}
    if feed_id is not None:
        route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
    if package_name is not None:
        route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
    if package_version is not None:
        route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
    content = self._serialize.body(package_version_details, 'PyPiRecycleBinPackageVersionDetails')
    self._send(http_method='PATCH',
               location_id='07143752-3d94-45fd-86c2-0c77ed87847b',
               version='5.0-preview.1',
               route_values=route_values,
               content=content)
RestorePackageVersionFromRecycleBin. [Preview API] Restore a package version from the recycle bin to its associated feed. :param :class:`<PyPiRecycleBinPackageVersionDetails> <azure.devops.v5_0.py_pi_api.models.PyPiRecycleBinPackageVersionDetails>` package_version_details: Set the 'Deleted' state to 'false' to restore the package to its feed. :param str feed_id: Name or ID of the feed. :param str package_name: Name of the package. :param str package_version: Version of the package.
def create(tournament, name, **params):
    """Add a participant to a tournament."""
    params.update({"name": name})
    return api.fetch_and_parse(
        "POST",
        "tournaments/%s/participants" % tournament,
        "participant",
        **params)
Add a participant to a tournament.
def make_step_rcont(transition):
    """Return a ufunc-like step function that is right-continuous.

    Returns 1 if x >= transition, 0 otherwise.
    """
    if not np.isfinite(transition):
        raise ValueError(
            '"transition" argument must be finite number; got %r' % transition)

    def step_rcont(x):
        x = np.asarray(x)
        x1 = np.atleast_1d(x)
        r = (x1 >= transition).astype(x.dtype)
        if x.ndim == 0:
            return np.asscalar(r)
        return r

    step_rcont.__doc__ = ('Right-continuous step function. Returns 1 if x >= '
                          '%g, 0 otherwise.') % (transition,)
    return step_rcont
Return a ufunc-like step function that is right-continuous. Returns 1 if x >= transition, 0 otherwise.
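Behaviour on arrays versus scalars is worth a quick demonstration:

import numpy as np

step = make_step_rcont(2.0)
print(step(np.array([1.0, 2.0, 3.0])))  # [0. 1. 1.]
print(step(1.5))                        # 0.0 (scalar in, scalar out)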
def boolean(cls, true_code, false_code=None):
    """Callback to validate a response code.

    The returned callback checks whether a given response has a
    ``status_code`` that is considered good (``true_code``) and raise an
    appropriate error if not.

    The optional ``false_code`` allows for a non-successful status code to
    return False instead of throwing an error. This is used, for example in
    relationship mutation to indicate that the relationship was not modified.

    Args:
        true_code(int): The http status code to consider as a success

    Keyword Args:
        false_code(int): The http status code to consider a failure

    Returns:
        A function that given a response returns ``True`` if the response's
        status code matches the given code. Raises a :class:`HeliumError` if
        the response code does not match.
    """
    def func(response):
        if response is not None:
            status_code = response.status
            if status_code == true_code:
                return True
            if false_code is not None and status_code == false_code:
                return False
        raise error_for(response)
    return func
Callback to validate a response code. The returned callback checks whether a given response has a ``status_code`` that is considered good (``true_code``) and raise an appropriate error if not. The optional ``false_code`` allows for a non-successful status code to return False instead of throwing an error. This is used, for example in relationship mutation to indicate that the relationship was not modified. Args: true_code(int): The http status code to consider as a success Keyword Args: false_code(int): The http status code to consider a failure Returns: A function that given a response returns ``True`` if the response's status code matches the given code. Raises a :class:`HeliumError` if the response code does not match.
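A hedged usage sketch; `Client` stands for whatever class exposes this classmethod, and `response` is any object with a `.status` attribute:

# Hypothetical usage.
check = Client.boolean(204, false_code=404)
# check(response) -> True on 204, False on 404, raises a HeliumError otherwise.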
def delete_namespaced_daemon_set(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_daemon_set  # noqa: E501

    delete a DaemonSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_namespaced_daemon_set(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the DaemonSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :param V1DeleteOptions body:
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
    else:
        (data) = self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
        return data
delete_namespaced_daemon_set # noqa: E501 delete a DaemonSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_daemon_set(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the DaemonSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param V1DeleteOptions body: :return: V1Status If the method is called asynchronously, returns the request thread.
def set_maxdays(name, days):
    '''
    Set the maximum age of the password in days

    :param str name: The username of the account

    :param int days: The maximum age of the account in days

    :return: True if successful, False if not
    :rtype: bool

    :raises: CommandExecutionError on user not found or any other unknown error

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_maxdays admin 90
    '''
    minutes = days * 24 * 60
    _set_account_policy(
        name, 'maxMinutesUntilChangePassword={0}'.format(minutes))
    return get_maxdays(name) == days
Set the maximum age of the password in days :param str name: The username of the account :param int days: The maximum age of the account in days :return: True if successful, False if not :rtype: bool :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.set_maxdays admin 90
def extract_assets(dstore, what):
    """
    Extract an array of assets, optionally filtered by tag.
    Use it as /extract/assets?taxonomy=RC&taxonomy=MSBC&occupancy=RES
    """
    qdict = parse(what)
    dic = {}
    dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
    dic.update(dic1)
    dic.update(dic2)
    arr = dstore['assetcol/array'].value
    for tag, vals in qdict.items():
        cond = numpy.zeros(len(arr), bool)
        for val in vals:
            tagidx, = numpy.where(dic[tag] == val)
            cond |= arr[tag] == tagidx
        arr = arr[cond]
    return ArrayWrapper(arr, dic)
Extract an array of assets, optionally filtered by tag. Use it as /extract/assets?taxonomy=RC&taxonomy=MSBC&occupancy=RES
def loop_misc(self):
    """Process miscellaneous network events. Use in place of calling loop() if you
    wish to call select() or equivalent on.

    Do not use if you are using the threaded interface loop_start()."""
    if self._sock is None and self._ssl is None:
        return MQTT_ERR_NO_CONN

    now = time.time()
    self._check_keepalive()
    if self._last_retry_check + 1 < now:
        # Only check once a second at most
        self._message_retry_check()
        self._last_retry_check = now

    if self._ping_t > 0 and now - self._ping_t >= self._keepalive:
        # client->ping_t != 0 means we are waiting for a pingresp.
        # This hasn't happened in the keepalive time so we should disconnect.
        if self._ssl:
            self._ssl.close()
            self._ssl = None
        elif self._sock:
            self._sock.close()
            self._sock = None

        self._callback_mutex.acquire()
        if self._state == mqtt_cs_disconnecting:
            rc = MQTT_ERR_SUCCESS
        else:
            rc = 1
        if self.on_disconnect:
            self._in_callback = True
            self.on_disconnect(self, self._userdata, rc)
            self._in_callback = False
        self._callback_mutex.release()
        return MQTT_ERR_CONN_LOST

    return MQTT_ERR_SUCCESS
Process miscellaneous network events. Use in place of calling loop() if you wish to call select() or equivalent on. Do not use if you are using the threaded interface loop_start().
def _find_short_paths(self, paths):
    """
    Find short paths of given paths.

    E.g. if both `/home` and `/home/aoik` exist, only keep `/home`.

    :param paths: Paths.

    :return: Set of short paths.
    """
    # Split each path to parts.
    # E.g. '/home/aoik' to ['', 'home', 'aoik']
    path_parts_s = [path.split(os.path.sep) for path in paths]

    # Root node
    root_node = {}

    # Sort these path parts by length, with the longest being the first.
    #
    # Longer paths appear first so that their extra parts are discarded
    # when a shorter path is found at 5TQ8L.
    #
    # Then for each path's parts.
    for parts in sorted(path_parts_s, key=len, reverse=True):
        # Start from the root node
        node = root_node

        # For each part of the path
        for part in parts:
            # Create node of the path
            node = node.setdefault(part, {})

        # 5TQ8L
        # Clear the last path part's node's child nodes.
        #
        # This aims to keep only the shortest path that needs be watched.
        node.clear()

    # Short paths
    short_path_s = set()

    # Collect leaf paths
    self._collect_leaf_paths(
        node=root_node,
        path_parts=(),
        leaf_paths=short_path_s,
    )

    # Return short paths
    return short_path_s
Find short paths of given paths. E.g. if both `/home` and `/home/aoik` exist, only keep `/home`. :param paths: Paths. :return: Set of short paths.
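A sketch of the intended behaviour; `watcher` is a hypothetical instance, and the exact string form of the result depends on `_collect_leaf_paths`:

# Nested paths collapse to the shortest ancestor that still needs watching.
paths = {'/home', '/home/aoik', '/var/log'}
short = watcher._find_short_paths(paths)  # expected: {'/home', '/var/log'}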
def changeLane(self, vehID, laneIndex, duration):
    """changeLane(string, int, int) -> None

    Forces a lane change to the lane with the given index; if successful,
    the lane will be chosen for the given amount of time (in ms).
    """
    self._connection._beginMessage(
        tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE, vehID,
        1 + 4 + 1 + 1 + 1 + 4)
    self._connection._string += struct.pack(
        "!BiBBBi", tc.TYPE_COMPOUND, 2, tc.TYPE_BYTE, laneIndex,
        tc.TYPE_INTEGER, duration)
    self._connection._sendExact()
changeLane(string, int, int) -> None Forces a lane change to the lane with the given index; if successful, the lane will be chosen for the given amount of time (in ms).
def element_css_attribute_should_be(self, locator, prop, expected):
    """Verifies the element identified by `locator` has the expected
    value for the targeted `prop`.

    | *Argument* | *Description*              | *Example*          |
    | locator    | Selenium 2 element locator | id=my_id           |
    | prop       | targeted css attribute     | background-color   |
    | expected   | expected value             | rgba(0, 128, 0, 1) |
    """
    self._info("Verifying element '%s' has css attribute '%s' with a value of '%s'"
               % (locator, prop, expected))
    self._check_element_css_value(locator, prop, expected)
Verifies the element identified by `locator` has the expected value for the targeted `prop`. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | prop | targeted css attribute | background-color | | expected | expected value | rgba(0, 128, 0, 1) |
def set_logger(name=None,
               filename=None,
               mode='a',
               level='NOTSET:NOTSET',
               fmt='%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
               # fmt='[%(levelname)s] %(asctime)s %(message)s',
               backup_count=5,
               limit=20480,
               when=None,
               with_filehandler=True):
    """Configure the global logger."""
    level = level.split(':')

    if len(level) == 1:  # Both set to the same level
        s_level = f_level = level[0]
    else:
        s_level = level[0]  # StreamHandler log level
        f_level = level[1]  # FileHandler log level

    init_logger(name=name)
    add_streamhandler(s_level, fmt)
    if with_filehandler:
        add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)

    # Import the common log functions for convenient use
    import_log_funcs()
Configure the global logger.
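A usage sketch; the name, filename, and levels below are hypothetical, with the colon-separated level string setting console and file handlers independently:

# Hypothetical usage: console handler at INFO, file handler at DEBUG.
set_logger(name='myapp', filename='myapp.log', level='INFO:DEBUG')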
def createSimulate(netParams=None, simConfig=None, output=False):
    ''' Sequence of commands create, simulate and analyse network '''
    from .. import sim
    (pops, cells, conns, stims, rxd, simData) = sim.create(
        netParams, simConfig, output=True)
    sim.simulate()

    if output:
        return (pops, cells, conns, stims, simData)
Sequence of commands create, simulate and analyse network
def set_emissions(self, scenario):
    """Set emissions from Pandas DataFrame."""
    for section in emissions:
        for source in emissions[section]:
            if source not in scenario.columns:
                continue
            self._set_timed_array(
                section,
                source,
                list(scenario.index),
                list(scenario[source])
            )
Set emissions from Pandas DataFrame.