code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def select_mask(cls, dataset, selection):
    """
    Given a Dataset object and a dictionary with dimension keys and
    selection keys (i.e. tuple ranges, slices, sets, lists or literals)
    return a boolean mask over the rows in the Dataset object that
    have been selected.
    """
    # Start with every row selected; each dimension's criterion ANDs in.
    # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement.
    mask = np.ones(len(dataset), dtype=bool)
    for dim, k in selection.items():
        if isinstance(k, tuple):
            # Tuple ranges are shorthand for slices.
            k = slice(*k)
        arr = cls.values(dataset, dim)
        if isinstance(k, slice):
            with warnings.catch_warnings():
                # NaN comparisons emit RuntimeWarnings; they correctly
                # evaluate to False so the warning is noise here.
                warnings.filterwarnings('ignore', r'invalid value encountered')
                if k.start is not None:
                    mask &= k.start <= arr
                if k.stop is not None:
                    mask &= arr < k.stop
        elif isinstance(k, (set, list)):
            # Membership selection: OR together equality masks per item.
            iter_slcs = []
            for ik in k:
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', r'invalid value encountered')
                    iter_slcs.append(arr == ik)
            mask &= np.logical_or.reduce(iter_slcs)
        elif callable(k):
            # Predicate selection: the callable produces the mask itself.
            mask &= k(arr)
        else:
            index_mask = arr == k
            if dataset.ndims == 1 and np.sum(index_mask) == 0:
                # Scalar selection missed on a 1D dataset: snap to the
                # single nearest sample instead of selecting nothing.
                data_index = np.argmin(np.abs(arr - k))
                mask = np.zeros(len(dataset), dtype=bool)
                mask[data_index] = True
            else:
                mask &= index_mask
    return mask
Given a Dataset object and a dictionary with dimension keys and selection keys (i.e tuple ranges, slices, sets, lists or literals) return a boolean mask over the rows in the Dataset object that have been selected.
Below is the the instruction that describes the task: ### Input: Given a Dataset object and a dictionary with dimension keys and selection keys (i.e tuple ranges, slices, sets, lists or literals) return a boolean mask over the rows in the Dataset object that have been selected. ### Response: def select_mask(cls, dataset, selection): """ Given a Dataset object and a dictionary with dimension keys and selection keys (i.e tuple ranges, slices, sets, lists or literals) return a boolean mask over the rows in the Dataset object that have been selected. """ mask = np.ones(len(dataset), dtype=np.bool) for dim, k in selection.items(): if isinstance(k, tuple): k = slice(*k) arr = cls.values(dataset, dim) if isinstance(k, slice): with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'invalid value encountered') if k.start is not None: mask &= k.start <= arr if k.stop is not None: mask &= arr < k.stop elif isinstance(k, (set, list)): iter_slcs = [] for ik in k: with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'invalid value encountered') iter_slcs.append(arr == ik) mask &= np.logical_or.reduce(iter_slcs) elif callable(k): mask &= k(arr) else: index_mask = arr == k if dataset.ndims == 1 and np.sum(index_mask) == 0: data_index = np.argmin(np.abs(arr - k)) mask = np.zeros(len(dataset), dtype=np.bool) mask[data_index] = True else: mask &= index_mask return mask
def reorder_nodes_before_add_or_move(self, pos, newpos, newdepth, target,
                                     siblings, oldpath=None,
                                     movebranch=False):
    """
    Handles the reordering of nodes and branches when adding/moving nodes.

    :returns: A tuple containing the old path and the new path.
    """
    # NOTE(review): ``oldpath`` appears to be required whenever
    # ``movebranch`` is True — confirm against callers.
    if (
        (pos == 'last-sibling') or
        (pos == 'right' and target == target.get_last_sibling())
    ):
        # easy, the last node
        last = target.get_last_sibling()
        newpath = last._inc_path()
        if movebranch:
            self.stmts.append(
                self.get_sql_newpath_in_branches(oldpath, newpath))
    else:
        # do the UPDATE dance
        if newpos is None:
            # Derive the sibling set and numeric position from ``pos``.
            siblings = target.get_siblings()
            siblings = {'left': siblings.filter(path__gte=target.path),
                        'right': siblings.filter(path__gt=target.path),
                        'first-sibling': siblings}[pos]
            basenum = target._get_lastpos_in_path()
            newpos = {'first-sibling': 1,
                      'left': basenum,
                      'right': basenum + 1}[pos]
        newpath = self.node_cls._get_path(target.path, newdepth, newpos)

        # If the move is amongst siblings and is to the left and there
        # are siblings to the right of its new position then to be on
        # the safe side we temporarily dump it on the end of the list
        tempnewpath = None
        if movebranch and len(oldpath) == len(newpath):
            parentoldpath = self.node_cls._get_basepath(
                oldpath,
                int(len(oldpath) / self.node_cls.steplen) - 1
            )
            parentnewpath = self.node_cls._get_basepath(
                newpath, newdepth - 1)
            if (
                parentoldpath == parentnewpath and
                siblings and
                newpath < oldpath
            ):
                last = target.get_last_sibling()
                basenum = last._get_lastpos_in_path()
                tempnewpath = self.node_cls._get_path(
                    newpath, newdepth, basenum + 2)
                self.stmts.append(
                    self.get_sql_newpath_in_branches(
                        oldpath, tempnewpath))

        # Optimisation to only move siblings which need moving
        # (i.e. if we've got holes, allow them to compress)
        movesiblings = []
        priorpath = newpath
        for node in siblings:
            # If the path of the node is already greater than the path
            # of the previous node it doesn't need shifting
            if node.path > priorpath:
                break
            # It does need shifting, so add to the list
            movesiblings.append(node)
            # Calculate the path that it would be moved to, as that's
            # the next "priorpath"
            priorpath = node._inc_path()
        # Shift right-to-left so paths never collide mid-update.
        movesiblings.reverse()

        for node in movesiblings:
            # moving the siblings (and their branches) at the right of the
            # related position one step to the right
            sql, vals = self.get_sql_newpath_in_branches(
                node.path, node._inc_path())
            self.stmts.append((sql, vals))

            if movebranch:
                if oldpath.startswith(node.path):
                    # if moving to a parent, update oldpath since we just
                    # increased the path of the entire branch
                    oldpath = vals[0] + oldpath[len(vals[0]):]
                if target.path.startswith(node.path):
                    # and if we moved the target, update the object
                    # django made for us, since the update won't do it
                    # maybe useful in loops
                    target.path = vals[0] + target.path[len(vals[0]):]
        if movebranch:
            # node to move
            if tempnewpath:
                self.stmts.append(
                    self.get_sql_newpath_in_branches(
                        tempnewpath, newpath))
            else:
                self.stmts.append(
                    self.get_sql_newpath_in_branches(
                        oldpath, newpath))
    return oldpath, newpath
Handles the reordering of nodes and branches when adding/moving nodes. :returns: A tuple containing the old path and the new path.
Below is the the instruction that describes the task: ### Input: Handles the reordering of nodes and branches when adding/moving nodes. :returns: A tuple containing the old path and the new path. ### Response: def reorder_nodes_before_add_or_move(self, pos, newpos, newdepth, target, siblings, oldpath=None, movebranch=False): """ Handles the reordering of nodes and branches when adding/moving nodes. :returns: A tuple containing the old path and the new path. """ if ( (pos == 'last-sibling') or (pos == 'right' and target == target.get_last_sibling()) ): # easy, the last node last = target.get_last_sibling() newpath = last._inc_path() if movebranch: self.stmts.append( self.get_sql_newpath_in_branches(oldpath, newpath)) else: # do the UPDATE dance if newpos is None: siblings = target.get_siblings() siblings = {'left': siblings.filter(path__gte=target.path), 'right': siblings.filter(path__gt=target.path), 'first-sibling': siblings}[pos] basenum = target._get_lastpos_in_path() newpos = {'first-sibling': 1, 'left': basenum, 'right': basenum + 1}[pos] newpath = self.node_cls._get_path(target.path, newdepth, newpos) # If the move is amongst siblings and is to the left and there # are siblings to the right of its new position then to be on # the safe side we temporarily dump it on the end of the list tempnewpath = None if movebranch and len(oldpath) == len(newpath): parentoldpath = self.node_cls._get_basepath( oldpath, int(len(oldpath) / self.node_cls.steplen) - 1 ) parentnewpath = self.node_cls._get_basepath( newpath, newdepth - 1) if ( parentoldpath == parentnewpath and siblings and newpath < oldpath ): last = target.get_last_sibling() basenum = last._get_lastpos_in_path() tempnewpath = self.node_cls._get_path( newpath, newdepth, basenum + 2) self.stmts.append( self.get_sql_newpath_in_branches( oldpath, tempnewpath)) # Optimisation to only move siblings which need moving # (i.e. 
if we've got holes, allow them to compress) movesiblings = [] priorpath = newpath for node in siblings: # If the path of the node is already greater than the path # of the previous node it doesn't need shifting if node.path > priorpath: break # It does need shifting, so add to the list movesiblings.append(node) # Calculate the path that it would be moved to, as that's # the next "priorpath" priorpath = node._inc_path() movesiblings.reverse() for node in movesiblings: # moving the siblings (and their branches) at the right of the # related position one step to the right sql, vals = self.get_sql_newpath_in_branches( node.path, node._inc_path()) self.stmts.append((sql, vals)) if movebranch: if oldpath.startswith(node.path): # if moving to a parent, update oldpath since we just # increased the path of the entire branch oldpath = vals[0] + oldpath[len(vals[0]):] if target.path.startswith(node.path): # and if we moved the target, update the object # django made for us, since the update won't do it # maybe useful in loops target.path = vals[0] + target.path[len(vals[0]):] if movebranch: # node to move if tempnewpath: self.stmts.append( self.get_sql_newpath_in_branches( tempnewpath, newpath)) else: self.stmts.append( self.get_sql_newpath_in_branches( oldpath, newpath)) return oldpath, newpath
def apply_T6(word):
    '''If a VVV-sequence contains a long vowel, there is a syllable boundary
    between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
    [mää.yt.te].'''
    rule = ''
    syllables = word.split('.')
    for idx, syll in enumerate(syllables):
        if not contains_VVV(syll):
            continue
        # Positions of any long vowel occurring past the first character.
        positions = [syll.find(lv) for lv in LONG_VOWELS if syll.find(lv) > 0]
        if not positions:
            continue
        start = positions[0]
        rule = ' T6'
        if start + 2 == len(syll) or is_vowel(syll[start + 2]):
            # Boundary after the long vowel.
            syllables[idx] = syll[:start + 2] + '.' + syll[start + 2:]  # TODO
        else:
            # Boundary before the long vowel.
            syllables[idx] = syll[:start] + '.' + syll[start:]
    word = '.'.join(syllables).strip('.')  # TODO
    return word, rule
If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].
Below is the the instruction that describes the task: ### Input: If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te]. ### Response: def apply_T6(word): '''If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].''' T6 = '' WORD = word.split('.') for i, v in enumerate(WORD): if contains_VVV(v): VV = [v.find(j) for j in LONG_VOWELS if v.find(j) > 0] if VV: I = VV[0] T6 = ' T6' if I + 2 == len(v) or is_vowel(v[I + 2]): WORD[i] = v[:I + 2] + '.' + v[I + 2:] # TODO else: WORD[i] = v[:I] + '.' + v[I:] word = '.'.join(WORD) word = word.strip('.') # TODO return word, T6
def trigger_modified(self, filepath):
    """Triggers modified event if the given filepath mod time is newer."""
    current = self._get_modified_time(filepath)
    # Unseen files default to 0, so the first sighting always triggers.
    if current > self._watched_files.get(filepath, 0):
        self._trigger('modified', filepath)
        self._watched_files[filepath] = current
Triggers modified event if the given filepath mod time is newer.
Below is the the instruction that describes the task: ### Input: Triggers modified event if the given filepath mod time is newer. ### Response: def trigger_modified(self, filepath): """Triggers modified event if the given filepath mod time is newer.""" mod_time = self._get_modified_time(filepath) if mod_time > self._watched_files.get(filepath, 0): self._trigger('modified', filepath) self._watched_files[filepath] = mod_time
def update_field_names(self, data, matching):
    """
    This method updates the names of the fields according to matching

    :param data: original Pandas dataframe
    :param matching: dictionary of matchings between old and new values
    :type data: pandas.DataFrame
    :type matching: dictionary
    :returns: Pandas dataframe with updated names
    :rtype: pandas.DataFrame
    """
    # Bug fix: DataFrame.rename returns a *new* frame (it is not in-place
    # by default); the original code discarded the result, so no column
    # was ever renamed. Collect the applicable mappings and rename once.
    applicable = {old: new for old, new in matching.items()
                  if old in data.columns}
    if applicable:
        data = data.rename(columns=applicable)
    return data
This method updates the names of the fields according to matching :param data: original Pandas dataframe :param matching: dictionary of matchings between old and new values :type data: pandas.DataFrame :type matching: dictionary :returns: Pandas dataframe with updated names :rtype: pandas.DataFrame
Below is the the instruction that describes the task: ### Input: This method updates the names of the fields according to matching :param data: original Pandas dataframe :param matching: dictionary of matchings between old and new values :type data: pandas.DataFrame :type matching: dictionary :returns: Pandas dataframe with updated names :rtype: pandas.DataFrame ### Response: def update_field_names(self, data, matching): """ This method updates the names of the fields according to matching :param data: original Pandas dataframe :param matching: dictionary of matchings between old and new values :type data: pandas.DataFrame :type matching: dictionary :returns: Pandas dataframe with updated names :rtype: pandas.DataFrame """ for key in matching.keys(): if key in data.columns: data.rename(columns={key:matching[key]}) return data
def run(self, resources):
    """Runs the flash step

    Args:
        resources (dict): A dictionary containing the required resources that
            we needed access to in order to perform this step.

    Raises:
        ArgumentError: If the connection is not made through a jlink port.
    """
    port = resources['connection']._port
    if not port.startswith('jlink'):
        # Bug fix: the original passed ``invalid_port=args['port']`` where
        # ``args`` is undefined, so this path raised NameError instead of
        # the intended ArgumentError. Report the actual offending port.
        raise ArgumentError("FlashBoardStep is currently only possible through jlink",
                            invalid_port=port)

    hwman = resources['connection']
    debug = hwman.hwman.debug(self._debug_string)
    debug.flash(self._file)
Runs the flash step Args: resources (dict): A dictionary containing the required resources that we needed access to in order to perform this step.
Below is the the instruction that describes the task: ### Input: Runs the flash step Args: resources (dict): A dictionary containing the required resources that we needed access to in order to perform this step. ### Response: def run(self, resources): """Runs the flash step Args: resources (dict): A dictionary containing the required resources that we needed access to in order to perform this step. """ if not resources['connection']._port.startswith('jlink'): raise ArgumentError("FlashBoardStep is currently only possible through jlink", invalid_port=args['port']) hwman = resources['connection'] debug = hwman.hwman.debug(self._debug_string) debug.flash(self._file)
def _get_function_name_from_arn(function_arn):
    """
    Given the integration ARN, extract the Lambda function name from the ARN.
    If there are stage variables, or other unsupported formats, this function
    will return None.

    Parameters
    ----------
    function_arn : basestring or None
        Function ARN from the swagger document

    Returns
    -------
    basestring or None
        Function name of this integration. None if the ARN is not parsable
    """
    if not function_arn:
        return None

    match = re.match(LambdaUri._REGEX_GET_FUNCTION_NAME, function_arn)
    if not match or not match.groups():
        LOG.debug("No Lambda function ARN defined for integration containing ARN %s", function_arn)
        return None

    # The extraction regex has exactly one capture group.
    candidate = match.groups()[0]

    # The captured text may be a real name, a stage variable, or garbage.
    if re.match(LambdaUri._REGEX_STAGE_VARIABLE, candidate):
        # Stage variables cannot be resolved locally.
        LOG.debug("Stage variables are not supported. Ignoring integration with function ARN %s", function_arn)
        return None
    if re.match(LambdaUri._REGEX_VALID_FUNCTION_NAME, candidate):
        return candidate

    # Some unknown format
    LOG.debug("Ignoring integration ARN. Unable to parse Function Name from function arn %s", function_arn)
    return None
Given the integration ARN, extract the Lambda function name from the ARN. If there are stage variables, or other unsupported formats, this function will return None. Parameters ---------- function_arn : basestring or None Function ARN from the swagger document Returns ------- basestring or None Function name of this integration. None if the ARN is not parsable
Below is the the instruction that describes the task: ### Input: Given the integration ARN, extract the Lambda function name from the ARN. If there are stage variables, or other unsupported formats, this function will return None. Parameters ---------- function_arn : basestring or None Function ARN from the swagger document Returns ------- basestring or None Function name of this integration. None if the ARN is not parsable ### Response: def _get_function_name_from_arn(function_arn): """ Given the integration ARN, extract the Lambda function name from the ARN. If there are stage variables, or other unsupported formats, this function will return None. Parameters ---------- function_arn : basestring or None Function ARN from the swagger document Returns ------- basestring or None Function name of this integration. None if the ARN is not parsable """ if not function_arn: return None matches = re.match(LambdaUri._REGEX_GET_FUNCTION_NAME, function_arn) if not matches or not matches.groups(): LOG.debug("No Lambda function ARN defined for integration containing ARN %s", function_arn) return None groups = matches.groups() maybe_function_name = groups[0] # This regex has only one group match # Function name could be a real name or a stage variable or some unknown format if re.match(LambdaUri._REGEX_STAGE_VARIABLE, maybe_function_name): # yes, this is a stage variable LOG.debug("Stage variables are not supported. Ignoring integration with function ARN %s", function_arn) return None elif re.match(LambdaUri._REGEX_VALID_FUNCTION_NAME, maybe_function_name): # Yes, this is a real function name return maybe_function_name # Some unknown format LOG.debug("Ignoring integration ARN. Unable to parse Function Name from function arn %s", function_arn)
def get_distributions(self):
    """
    Returns a dictionary of name and its distribution. Distribution is a
    ndarray. The ndarray is stored in the standard way such that the
    rightmost variable changes most often.

    Consider a CPD of variable 'd' which has parents 'b' and 'c'
    (distribution['CONDSET'] = ['b', 'c'])

                 |  d_0     d_1
        ---------------------------
        b_0, c_0 |  0.8     0.2
        b_0, c_1 |  0.9     0.1
        b_1, c_0 |  0.7     0.3
        b_1, c_1 |  0.05    0.95

    The value of distribution['d']['DPIS'] for the above example will be:
    array([[ 0.8 ,  0.2 ], [ 0.9 ,  0.1 ], [ 0.7 ,  0.3 ], [ 0.05,  0.95]])

    Examples
    --------
    >>> reader = XBNReader('xbn_test.xml')
    >>> reader.get_distributions()
    {'a': {'TYPE': 'discrete', 'DPIS': array([[ 0.2,  0.8]])},
     'e': {'TYPE': 'discrete', 'DPIS': array([[ 0.8,  0.2],
            [ 0.6,  0.4]]), 'CONDSET': ['c'], 'CARDINALITY': [2]},
     'b': {'TYPE': 'discrete', 'DPIS': array([[ 0.8,  0.2],
            [ 0.2,  0.8]]), 'CONDSET': ['a'], 'CARDINALITY': [2]},
     'c': {'TYPE': 'discrete', 'DPIS': array([[ 0.2 ,  0.8 ],
            [ 0.05,  0.95]]), 'CONDSET': ['a'], 'CARDINALITY': [2]},
     'd': {'TYPE': 'discrete', 'DPIS': array([[ 0.8 ,  0.2 ],
            [ 0.9 ,  0.1 ],
            [ 0.7 ,  0.3 ],
            [ 0.05,  0.95]]), 'CONDSET': ['b', 'c']}, 'CARDINALITY': [2, 2]}
    """
    distribution = {}
    for dist in self.bnmodel.find('DISTRIBUTIONS'):
        # Each <DIST> element describes one variable's CPD.
        variable_name = dist.find('PRIVATE').get('NAME')
        distribution[variable_name] = {'TYPE': dist.get('TYPE')}
        if dist.find('CONDSET') is not None:
            # Names of the conditioning (parent) variables.
            distribution[variable_name]['CONDSET'] = [
                var.get('NAME')
                for var in dist.find('CONDSET').findall('CONDELEM')]
            # Cardinality of each parent, inferred as the number of
            # distinct values appearing in that column of the DPI
            # 'INDEXES' attributes across all rows.
            distribution[variable_name]['CARDINALITY'] = np.array(
                [len(set(np.array(
                    [list(map(int, dpi.get('INDEXES').split()))
                     for dpi in dist.find('DPIS')])[:, i]))
                 for i in range(len(distribution[variable_name]['CONDSET']))])
        # The probability table itself: one row per parent configuration.
        distribution[variable_name]['DPIS'] = np.array(
            [list(map(float, dpi.text.split())) for dpi in dist.find('DPIS')])
    return distribution
Returns a dictionary of name and its distribution. Distribution is a ndarray. The ndarray is stored in the standard way such that the rightmost variable changes most often. Consider a CPD of variable 'd' which has parents 'b' and 'c' (distribution['CONDSET'] = ['b', 'c']) | d_0 d_1 --------------------------- b_0, c_0 | 0.8 0.2 b_0, c_1 | 0.9 0.1 b_1, c_0 | 0.7 0.3 b_1, c_1 | 0.05 0.95 The value of distribution['d']['DPIS'] for the above example will be: array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]]) Examples -------- >>> reader = XBNReader('xbn_test.xml') >>> reader.get_distributions() {'a': {'TYPE': 'discrete', 'DPIS': array([[ 0.2, 0.8]])}, 'e': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2], [ 0.6, 0.4]]), 'CONDSET': ['c'], 'CARDINALITY': [2]}, 'b': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2], [ 0.2, 0.8]]), 'CONDSET': ['a'], 'CARDINALITY': [2]}, 'c': {'TYPE': 'discrete', 'DPIS': array([[ 0.2 , 0.8 ], [ 0.05, 0.95]]), 'CONDSET': ['a'], 'CARDINALITY': [2]}, 'd': {'TYPE': 'discrete', 'DPIS': array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]]), 'CONDSET': ['b', 'c']}, 'CARDINALITY': [2, 2]}
Below is the the instruction that describes the task: ### Input: Returns a dictionary of name and its distribution. Distribution is a ndarray. The ndarray is stored in the standard way such that the rightmost variable changes most often. Consider a CPD of variable 'd' which has parents 'b' and 'c' (distribution['CONDSET'] = ['b', 'c']) | d_0 d_1 --------------------------- b_0, c_0 | 0.8 0.2 b_0, c_1 | 0.9 0.1 b_1, c_0 | 0.7 0.3 b_1, c_1 | 0.05 0.95 The value of distribution['d']['DPIS'] for the above example will be: array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]]) Examples -------- >>> reader = XBNReader('xbn_test.xml') >>> reader.get_distributions() {'a': {'TYPE': 'discrete', 'DPIS': array([[ 0.2, 0.8]])}, 'e': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2], [ 0.6, 0.4]]), 'CONDSET': ['c'], 'CARDINALITY': [2]}, 'b': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2], [ 0.2, 0.8]]), 'CONDSET': ['a'], 'CARDINALITY': [2]}, 'c': {'TYPE': 'discrete', 'DPIS': array([[ 0.2 , 0.8 ], [ 0.05, 0.95]]), 'CONDSET': ['a'], 'CARDINALITY': [2]}, 'd': {'TYPE': 'discrete', 'DPIS': array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]]), 'CONDSET': ['b', 'c']}, 'CARDINALITY': [2, 2]} ### Response: def get_distributions(self): """ Returns a dictionary of name and its distribution. Distribution is a ndarray. The ndarray is stored in the standard way such that the rightmost variable changes most often. 
Consider a CPD of variable 'd' which has parents 'b' and 'c' (distribution['CONDSET'] = ['b', 'c']) | d_0 d_1 --------------------------- b_0, c_0 | 0.8 0.2 b_0, c_1 | 0.9 0.1 b_1, c_0 | 0.7 0.3 b_1, c_1 | 0.05 0.95 The value of distribution['d']['DPIS'] for the above example will be: array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]]) Examples -------- >>> reader = XBNReader('xbn_test.xml') >>> reader.get_distributions() {'a': {'TYPE': 'discrete', 'DPIS': array([[ 0.2, 0.8]])}, 'e': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2], [ 0.6, 0.4]]), 'CONDSET': ['c'], 'CARDINALITY': [2]}, 'b': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2], [ 0.2, 0.8]]), 'CONDSET': ['a'], 'CARDINALITY': [2]}, 'c': {'TYPE': 'discrete', 'DPIS': array([[ 0.2 , 0.8 ], [ 0.05, 0.95]]), 'CONDSET': ['a'], 'CARDINALITY': [2]}, 'd': {'TYPE': 'discrete', 'DPIS': array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]]), 'CONDSET': ['b', 'c']}, 'CARDINALITY': [2, 2]} """ distribution = {} for dist in self.bnmodel.find('DISTRIBUTIONS'): variable_name = dist.find('PRIVATE').get('NAME') distribution[variable_name] = {'TYPE': dist.get('TYPE')} if dist.find('CONDSET') is not None: distribution[variable_name]['CONDSET'] = [var.get('NAME') for var in dist.find('CONDSET').findall('CONDELEM')] distribution[variable_name]['CARDINALITY'] = np.array( [len(set(np.array([list(map(int, dpi.get('INDEXES').split())) for dpi in dist.find('DPIS')])[:, i])) for i in range(len(distribution[variable_name]['CONDSET']))]) distribution[variable_name]['DPIS'] = np.array( [list(map(float, dpi.text.split())) for dpi in dist.find('DPIS')]) return distribution
def may_contain_matches(self, path):
    """Tests whether it's possible for paths under the given one to match.

    Returns False when no path under the given one can match the
    pattern, True otherwise.
    """
    prepared = self._prepare_path(path)
    return self.int_regex.search(prepared) is not None
Tests whether it's possible for paths under the given one to match. If this method returns None, no path under the given one will match the pattern.
Below is the the instruction that describes the task: ### Input: Tests whether it's possible for paths under the given one to match. If this method returns None, no path under the given one will match the pattern. ### Response: def may_contain_matches(self, path): """Tests whether it's possible for paths under the given one to match. If this method returns None, no path under the given one will match the pattern. """ path = self._prepare_path(path) return self.int_regex.search(path) is not None
def vector_analysis(vector, coordinates, elements_vdw, increment=1.0): """Analyse a sampling vector's path for window analysis purpose.""" # Calculate number of chunks if vector length is divided by increment. chunks = int(np.linalg.norm(vector) // increment) # Create a single chunk. chunk = vector / chunks # Calculate set of points on vector's path every increment. vector_pathway = np.array([chunk * i for i in range(chunks + 1)]) analysed_vector = np.array([ np.amin( euclidean_distances(coordinates, i.reshape(1, -1)) - elements_vdw) for i in vector_pathway ]) if all(i > 0 for i in analysed_vector): pos = np.argmin(analysed_vector) # As first argument we need to give the distance from the origin. dist = np.linalg.norm(chunk * pos) return np.array( [dist, analysed_vector[pos] * 2, *chunk * pos, *vector])
Analyse a sampling vector's path for window analysis purpose.
Below is the the instruction that describes the task: ### Input: Analyse a sampling vector's path for window analysis purpose. ### Response: def vector_analysis(vector, coordinates, elements_vdw, increment=1.0): """Analyse a sampling vector's path for window analysis purpose.""" # Calculate number of chunks if vector length is divided by increment. chunks = int(np.linalg.norm(vector) // increment) # Create a single chunk. chunk = vector / chunks # Calculate set of points on vector's path every increment. vector_pathway = np.array([chunk * i for i in range(chunks + 1)]) analysed_vector = np.array([ np.amin( euclidean_distances(coordinates, i.reshape(1, -1)) - elements_vdw) for i in vector_pathway ]) if all(i > 0 for i in analysed_vector): pos = np.argmin(analysed_vector) # As first argument we need to give the distance from the origin. dist = np.linalg.norm(chunk * pos) return np.array( [dist, analysed_vector[pos] * 2, *chunk * pos, *vector])
def increase_posts_count(sender, instance, **kwargs):
    """
    Increases the member's post count after a post save.

    This receiver handles the update of the profile related to the user
    who is the poster of the forum post being created or updated.
    """
    if instance.poster is None:
        # An anonymous post is considered. No profile can be updated in
        # that case.
        return

    profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)

    # Renamed local (was ``increase_posts_count``) to stop shadowing the
    # enclosing function's own name.
    should_increment = False
    if instance.pk:
        try:
            old_instance = instance.__class__._default_manager.get(pk=instance.pk)
        except ObjectDoesNotExist:  # pragma: no cover
            # This should never happen (except with django loaddata command)
            should_increment = True
            old_instance = None
        if old_instance and old_instance.approved is False and instance.approved is True:
            # Transition from unapproved to approved counts as a new post.
            should_increment = True
    elif instance.approved:
        # Brand-new approved post.
        should_increment = True

    if should_increment:
        profile.posts_count = F('posts_count') + 1
        profile.save()
Increases the member's post count after a post save. This receiver handles the update of the profile related to the user who is the poster of the forum post being created or updated.
Below is the the instruction that describes the task: ### Input: Increases the member's post count after a post save. This receiver handles the update of the profile related to the user who is the poster of the forum post being created or updated. ### Response: def increase_posts_count(sender, instance, **kwargs): """ Increases the member's post count after a post save. This receiver handles the update of the profile related to the user who is the poster of the forum post being created or updated. """ if instance.poster is None: # An anonymous post is considered. No profile can be updated in # that case. return profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster) increase_posts_count = False if instance.pk: try: old_instance = instance.__class__._default_manager.get(pk=instance.pk) except ObjectDoesNotExist: # pragma: no cover # This should never happen (except with django loaddata command) increase_posts_count = True old_instance = None if old_instance and old_instance.approved is False and instance.approved is True: increase_posts_count = True elif instance.approved: increase_posts_count = True if increase_posts_count: profile.posts_count = F('posts_count') + 1 profile.save()
def rename(self, old_host, new_host):
    """
    Renames a host configuration.

    Parameters
    ----------
    old_host : the host to rename.
    new_host : the new host value
    """
    if new_host in self.hosts_:
        raise ValueError("Host %s: already exists." % new_host)

    # Rewrite every parsed line belonging to the old host entry.
    for entry in self.lines_:
        if entry.host != old_host:
            continue
        entry.host = new_host
        if entry.key.lower() == "host":
            entry.value = new_host
            entry.line = "Host %s" % new_host

    # Keep the host cache in sync with the rewritten lines.
    self.hosts_.remove(old_host)
    self.hosts_.add(new_host)
Renames a host configuration. Parameters ---------- old_host : the host to rename. new_host : the new host value
Below is the the instruction that describes the task: ### Input: Renames a host configuration. Parameters ---------- old_host : the host to rename. new_host : the new host value ### Response: def rename(self, old_host, new_host): """ Renames a host configuration. Parameters ---------- old_host : the host to rename. new_host : the new host value """ if new_host in self.hosts_: raise ValueError("Host %s: already exists." % new_host) for line in self.lines_: # update lines if line.host == old_host: line.host = new_host if line.key.lower() == "host": line.value = new_host line.line = "Host %s" % new_host self.hosts_.remove(old_host) # update host cache self.hosts_.add(new_host)
def get_skill_by_name(nme, character):
    """ returns the skill by name in a character """
    # Falls back to index 0 when no skill matches (original behaviour).
    skills = character["skills"]
    return next(
        (pos for pos, skill in enumerate(skills) if skill["name"] == nme), 0)
returns the skill by name in a character
Below is the the instruction that describes the task: ### Input: returns the skill by name in a character ### Response: def get_skill_by_name(nme, character): """ returns the skill by name in a character """ for ndx, sk in enumerate(character["skills"]): if sk["name"] == nme: return ndx return 0
def clone(self, transformed=True):
    """
    Clone a ``Actor(vtkActor)`` and make an exact copy of it.

    :param transformed: if `False` ignore any previous transformation
        applied to the mesh.

    .. hint:: |carcrash| |carcrash.py|_
    """
    # Deep-copy the polydata so the clone's geometry is independent.
    source_poly = self.polydata(transformed=transformed)
    copied_poly = vtk.vtkPolyData()
    copied_poly.DeepCopy(source_poly)

    twin = Actor()
    twin.poly = copied_poly
    twin.mapper.SetInputData(copied_poly)
    twin.mapper.SetScalarVisibility(self.mapper.GetScalarVisibility())

    # Copy rendering properties (colour, opacity, ...) as well.
    copied_props = vtk.vtkProperty()
    copied_props.DeepCopy(self.GetProperty())
    twin.SetProperty(copied_props)
    return twin
Clone a ``Actor(vtkActor)`` and make an exact copy of it. :param transformed: if `False` ignore any previous trasformation applied to the mesh. .. hint:: |carcrash| |carcrash.py|_
Below is the the instruction that describes the task: ### Input: Clone a ``Actor(vtkActor)`` and make an exact copy of it. :param transformed: if `False` ignore any previous trasformation applied to the mesh. .. hint:: |carcrash| |carcrash.py|_ ### Response: def clone(self, transformed=True): """ Clone a ``Actor(vtkActor)`` and make an exact copy of it. :param transformed: if `False` ignore any previous trasformation applied to the mesh. .. hint:: |carcrash| |carcrash.py|_ """ poly = self.polydata(transformed=transformed) polyCopy = vtk.vtkPolyData() polyCopy.DeepCopy(poly) cloned = Actor() cloned.poly = polyCopy cloned.mapper.SetInputData(polyCopy) cloned.mapper.SetScalarVisibility(self.mapper.GetScalarVisibility()) pr = vtk.vtkProperty() pr.DeepCopy(self.GetProperty()) cloned.SetProperty(pr) return cloned
def _options_error(cls, opt, objtype, backend, valid_options): """ Generates an error message for an invalid option suggesting similar options through fuzzy matching. """ current_backend = Store.current_backend loaded_backends = Store.loaded_backends() kws = Keywords(values=valid_options) matches = sorted(kws.fuzzy_match(opt)) if backend is not None: if matches: raise ValueError('Unexpected option %r for %s type ' 'when using the %r extension. Similar ' 'options are: %s.' % (opt, objtype, backend, matches)) else: raise ValueError('Unexpected option %r for %s type ' 'when using the %r extension. No ' 'similar options founds.' % (opt, objtype, backend)) # Check option is invalid for all backends found = [] for lb in [b for b in loaded_backends if b != backend]: lb_options = Store.options(backend=lb).get(objtype) if lb_options is None: continue for g, group_opts in lb_options.groups.items(): if opt in group_opts.allowed_keywords: found.append(lb) if found: param.main.param.warning( 'Option %r for %s type not valid for selected ' 'backend (%r). Option only applies to following ' 'backends: %r' % (opt, objtype, current_backend, found)) return if matches: raise ValueError('Unexpected option %r for %s type ' 'across all extensions. Similar options ' 'for current extension (%r) are: %s.' % (opt, objtype, current_backend, matches)) else: raise ValueError('Unexpected option %r for %s type ' 'across all extensions. No similar options ' 'found.' % (opt, objtype))
Generates an error message for an invalid option suggesting similar options through fuzzy matching.
Below is the the instruction that describes the task: ### Input: Generates an error message for an invalid option suggesting similar options through fuzzy matching. ### Response: def _options_error(cls, opt, objtype, backend, valid_options): """ Generates an error message for an invalid option suggesting similar options through fuzzy matching. """ current_backend = Store.current_backend loaded_backends = Store.loaded_backends() kws = Keywords(values=valid_options) matches = sorted(kws.fuzzy_match(opt)) if backend is not None: if matches: raise ValueError('Unexpected option %r for %s type ' 'when using the %r extension. Similar ' 'options are: %s.' % (opt, objtype, backend, matches)) else: raise ValueError('Unexpected option %r for %s type ' 'when using the %r extension. No ' 'similar options founds.' % (opt, objtype, backend)) # Check option is invalid for all backends found = [] for lb in [b for b in loaded_backends if b != backend]: lb_options = Store.options(backend=lb).get(objtype) if lb_options is None: continue for g, group_opts in lb_options.groups.items(): if opt in group_opts.allowed_keywords: found.append(lb) if found: param.main.param.warning( 'Option %r for %s type not valid for selected ' 'backend (%r). Option only applies to following ' 'backends: %r' % (opt, objtype, current_backend, found)) return if matches: raise ValueError('Unexpected option %r for %s type ' 'across all extensions. Similar options ' 'for current extension (%r) are: %s.' % (opt, objtype, current_backend, matches)) else: raise ValueError('Unexpected option %r for %s type ' 'across all extensions. No similar options ' 'found.' % (opt, objtype))
def contains(self, key, value, findAll=False, exclude=False, includeMissing=False): '''Return entries that: * have the key * key points to a list, and * value is found in the list. If value is also a list itself, then the list entry is selected if any of the values match. If findAll is set to True, then all the entries must be found. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": [9, 12] }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).contains("wigs", [1, 12]).returnString() [ {age: 18, income: 93000, name: 'Jim', wigs: [9, 12] }, {age: 20, income: 15000, name: 'Joe', wigs: [1, 2, 3]} ] .. versionadded:: 0.1.3b :param key: The dictionary key (or cascading list of keys) that should point to a list. :param value: The value to locate in the list. This argument can be an immutable value such as a string, tuple, or number. If this argument is a list of values instead, then this method will search for any of the values in that list. If the optional 'findAll' parameter is set to True, then all of the values in that list must be found. Optional named arguments: :param finalAll: If True, then all the values in the 'value' parameter must be found. :param exclude: If 'exclude' is True, then the entries that do NOT match the above conditions are returned. :param includeMissing: If 'includeMissing' is True, then if the key is missing then that entry is included in the results. However, it does not include entries that have the key but its value is for a non-list or empty list. 
:returns: self ''' result = [] result_index = [] for counter, row in enumerate(self.table): (target, tkey, target_list) = internal.dict_crawl(row, key) if target: if findAll: success = internal.list_match_all(target_list, value) else: success = internal.list_match_any(target_list, value) if exclude: success = not success if success: result.append(row) result_index.append(self.index_track[counter]) else: # item missing from list, so skip over pass else: if includeMissing: result.append(row) result_index.append(self.index_track[counter]) else: pass self.table = result self.index_track = result_index return self
Return entries that: * have the key * key points to a list, and * value is found in the list. If value is also a list itself, then the list entry is selected if any of the values match. If findAll is set to True, then all the entries must be found. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": [9, 12] }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).contains("wigs", [1, 12]).returnString() [ {age: 18, income: 93000, name: 'Jim', wigs: [9, 12] }, {age: 20, income: 15000, name: 'Joe', wigs: [1, 2, 3]} ] .. versionadded:: 0.1.3b :param key: The dictionary key (or cascading list of keys) that should point to a list. :param value: The value to locate in the list. This argument can be an immutable value such as a string, tuple, or number. If this argument is a list of values instead, then this method will search for any of the values in that list. If the optional 'findAll' parameter is set to True, then all of the values in that list must be found. Optional named arguments: :param finalAll: If True, then all the values in the 'value' parameter must be found. :param exclude: If 'exclude' is True, then the entries that do NOT match the above conditions are returned. :param includeMissing: If 'includeMissing' is True, then if the key is missing then that entry is included in the results. However, it does not include entries that have the key but its value is for a non-list or empty list. :returns: self
Below is the the instruction that describes the task: ### Input: Return entries that: * have the key * key points to a list, and * value is found in the list. If value is also a list itself, then the list entry is selected if any of the values match. If findAll is set to True, then all the entries must be found. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": [9, 12] }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).contains("wigs", [1, 12]).returnString() [ {age: 18, income: 93000, name: 'Jim', wigs: [9, 12] }, {age: 20, income: 15000, name: 'Joe', wigs: [1, 2, 3]} ] .. versionadded:: 0.1.3b :param key: The dictionary key (or cascading list of keys) that should point to a list. :param value: The value to locate in the list. This argument can be an immutable value such as a string, tuple, or number. If this argument is a list of values instead, then this method will search for any of the values in that list. If the optional 'findAll' parameter is set to True, then all of the values in that list must be found. Optional named arguments: :param finalAll: If True, then all the values in the 'value' parameter must be found. :param exclude: If 'exclude' is True, then the entries that do NOT match the above conditions are returned. :param includeMissing: If 'includeMissing' is True, then if the key is missing then that entry is included in the results. However, it does not include entries that have the key but its value is for a non-list or empty list. :returns: self ### Response: def contains(self, key, value, findAll=False, exclude=False, includeMissing=False): '''Return entries that: * have the key * key points to a list, and * value is found in the list. If value is also a list itself, then the list entry is selected if any of the values match. 
If findAll is set to True, then all the entries must be found. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": [9, 12] }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).contains("wigs", [1, 12]).returnString() [ {age: 18, income: 93000, name: 'Jim', wigs: [9, 12] }, {age: 20, income: 15000, name: 'Joe', wigs: [1, 2, 3]} ] .. versionadded:: 0.1.3b :param key: The dictionary key (or cascading list of keys) that should point to a list. :param value: The value to locate in the list. This argument can be an immutable value such as a string, tuple, or number. If this argument is a list of values instead, then this method will search for any of the values in that list. If the optional 'findAll' parameter is set to True, then all of the values in that list must be found. Optional named arguments: :param finalAll: If True, then all the values in the 'value' parameter must be found. :param exclude: If 'exclude' is True, then the entries that do NOT match the above conditions are returned. :param includeMissing: If 'includeMissing' is True, then if the key is missing then that entry is included in the results. However, it does not include entries that have the key but its value is for a non-list or empty list. 
:returns: self ''' result = [] result_index = [] for counter, row in enumerate(self.table): (target, tkey, target_list) = internal.dict_crawl(row, key) if target: if findAll: success = internal.list_match_all(target_list, value) else: success = internal.list_match_any(target_list, value) if exclude: success = not success if success: result.append(row) result_index.append(self.index_track[counter]) else: # item missing from list, so skip over pass else: if includeMissing: result.append(row) result_index.append(self.index_track[counter]) else: pass self.table = result self.index_track = result_index return self
def phi_a(mass1, mass2, spin1x, spin1y, spin2x, spin2y): """ Returns the angle between the in-plane perpendicular spins.""" phi1 = phi_from_spinx_spiny(primary_spin(mass1, mass2, spin1x, spin2x), primary_spin(mass1, mass2, spin1y, spin2y)) phi2 = phi_from_spinx_spiny(secondary_spin(mass1, mass2, spin1x, spin2x), secondary_spin(mass1, mass2, spin1y, spin2y)) return (phi1 - phi2) % (2 * numpy.pi)
Returns the angle between the in-plane perpendicular spins.
Below is the the instruction that describes the task: ### Input: Returns the angle between the in-plane perpendicular spins. ### Response: def phi_a(mass1, mass2, spin1x, spin1y, spin2x, spin2y): """ Returns the angle between the in-plane perpendicular spins.""" phi1 = phi_from_spinx_spiny(primary_spin(mass1, mass2, spin1x, spin2x), primary_spin(mass1, mass2, spin1y, spin2y)) phi2 = phi_from_spinx_spiny(secondary_spin(mass1, mass2, spin1x, spin2x), secondary_spin(mass1, mass2, spin1y, spin2y)) return (phi1 - phi2) % (2 * numpy.pi)
def get_row_from_id(self, experiment_id): """ Returns row in matching the given experiment_id. """ row = [row for row in self if row.experiment_id == experiment_id] if len(row) > 1: raise ValueError("duplicate ids in experiment table") if len(row) == 0: raise ValueError("id '%s' not found in table" % experiment_id) return row[0]
Returns row in matching the given experiment_id.
Below is the the instruction that describes the task: ### Input: Returns row in matching the given experiment_id. ### Response: def get_row_from_id(self, experiment_id): """ Returns row in matching the given experiment_id. """ row = [row for row in self if row.experiment_id == experiment_id] if len(row) > 1: raise ValueError("duplicate ids in experiment table") if len(row) == 0: raise ValueError("id '%s' not found in table" % experiment_id) return row[0]
def send_video(self, video: str, reply: Message=None, on_success: callable=None, reply_markup: botapi.ReplyMarkup=None): """ Send video to this peer. :param video: File path to video to send. :param reply: Message object. :param on_success: Callback to call when call is complete. :type reply: int or Message """ self.twx.send_video(peer=self, video=video, reply_to_message_id=reply, on_success=on_success, reply_markup=reply_markup)
Send video to this peer. :param video: File path to video to send. :param reply: Message object. :param on_success: Callback to call when call is complete. :type reply: int or Message
Below is the the instruction that describes the task: ### Input: Send video to this peer. :param video: File path to video to send. :param reply: Message object. :param on_success: Callback to call when call is complete. :type reply: int or Message ### Response: def send_video(self, video: str, reply: Message=None, on_success: callable=None, reply_markup: botapi.ReplyMarkup=None): """ Send video to this peer. :param video: File path to video to send. :param reply: Message object. :param on_success: Callback to call when call is complete. :type reply: int or Message """ self.twx.send_video(peer=self, video=video, reply_to_message_id=reply, on_success=on_success, reply_markup=reply_markup)
def _release_waiter(self) -> None: """ Iterates over all waiters till found one that is not finsihed and belongs to a host that has available connections. """ if not self._waiters: return # Having the dict keys ordered this avoids to iterate # at the same order at each call. queues = list(self._waiters.keys()) random.shuffle(queues) for key in queues: if self._available_connections(key) < 1: continue waiters = self._waiters[key] while waiters: waiter = waiters.popleft() if not waiter.done(): waiter.set_result(None) return
Iterates over all waiters till found one that is not finsihed and belongs to a host that has available connections.
Below is the the instruction that describes the task: ### Input: Iterates over all waiters till found one that is not finsihed and belongs to a host that has available connections. ### Response: def _release_waiter(self) -> None: """ Iterates over all waiters till found one that is not finsihed and belongs to a host that has available connections. """ if not self._waiters: return # Having the dict keys ordered this avoids to iterate # at the same order at each call. queues = list(self._waiters.keys()) random.shuffle(queues) for key in queues: if self._available_connections(key) < 1: continue waiters = self._waiters[key] while waiters: waiter = waiters.popleft() if not waiter.done(): waiter.set_result(None) return
def yield_pair_gradients(self, index1, index2): """Yields pairs ((s'(r_ij), grad_i v(bar{r}_ij))""" A = self.As[index1, index2] B = self.Bs[index1, index2] distance = self.distances[index1, index2] yield -B*A*np.exp(-B*distance), np.zeros(3)
Yields pairs ((s'(r_ij), grad_i v(bar{r}_ij))
Below is the the instruction that describes the task: ### Input: Yields pairs ((s'(r_ij), grad_i v(bar{r}_ij)) ### Response: def yield_pair_gradients(self, index1, index2): """Yields pairs ((s'(r_ij), grad_i v(bar{r}_ij))""" A = self.As[index1, index2] B = self.Bs[index1, index2] distance = self.distances[index1, index2] yield -B*A*np.exp(-B*distance), np.zeros(3)
def get_public_members(): """Get public members in :mod:`bezier` package. Also validates the contents of ``bezier.__all__``. Returns: list: List of all public members **defined** in the main package (i.e. in ``__init__.py``). Raises: ValueError: If ``__all__`` has repeated elements. ValueError: If the "public" members in ``__init__.py`` don't match the members described in ``__all__``. """ if bezier is None: return [] local_members = [] all_members = set() for name in dir(bezier): # Filter out non-public. if name.startswith("_") and name not in SPECIAL_MEMBERS: continue value = getattr(bezier, name) # Filter out imported modules. if isinstance(value, types.ModuleType): continue all_members.add(name) # Only keep values defined in the base package. home = getattr(value, "__module__", "bezier") if home == "bezier": local_members.append(name) size_all = len(bezier.__all__) all_exports = set(bezier.__all__) if len(all_exports) != size_all: raise ValueError("__all__ has repeated elements") if all_exports != all_members: raise ValueError( "__all__ does not agree with the publicly imported members", all_exports, all_members, ) local_members = [ member for member in local_members if member not in UNDOCUMENTED_SPECIAL_MEMBERS ] return local_members
Get public members in :mod:`bezier` package. Also validates the contents of ``bezier.__all__``. Returns: list: List of all public members **defined** in the main package (i.e. in ``__init__.py``). Raises: ValueError: If ``__all__`` has repeated elements. ValueError: If the "public" members in ``__init__.py`` don't match the members described in ``__all__``.
Below is the the instruction that describes the task: ### Input: Get public members in :mod:`bezier` package. Also validates the contents of ``bezier.__all__``. Returns: list: List of all public members **defined** in the main package (i.e. in ``__init__.py``). Raises: ValueError: If ``__all__`` has repeated elements. ValueError: If the "public" members in ``__init__.py`` don't match the members described in ``__all__``. ### Response: def get_public_members(): """Get public members in :mod:`bezier` package. Also validates the contents of ``bezier.__all__``. Returns: list: List of all public members **defined** in the main package (i.e. in ``__init__.py``). Raises: ValueError: If ``__all__`` has repeated elements. ValueError: If the "public" members in ``__init__.py`` don't match the members described in ``__all__``. """ if bezier is None: return [] local_members = [] all_members = set() for name in dir(bezier): # Filter out non-public. if name.startswith("_") and name not in SPECIAL_MEMBERS: continue value = getattr(bezier, name) # Filter out imported modules. if isinstance(value, types.ModuleType): continue all_members.add(name) # Only keep values defined in the base package. home = getattr(value, "__module__", "bezier") if home == "bezier": local_members.append(name) size_all = len(bezier.__all__) all_exports = set(bezier.__all__) if len(all_exports) != size_all: raise ValueError("__all__ has repeated elements") if all_exports != all_members: raise ValueError( "__all__ does not agree with the publicly imported members", all_exports, all_members, ) local_members = [ member for member in local_members if member not in UNDOCUMENTED_SPECIAL_MEMBERS ] return local_members
def or_where(self, key, operator, value): """Make or_where clause :@param key :@param operator :@param value :@type key, operator, value: string :@return self """ if len(self._queries) > 0: self._current_query_index += 1 self.__store_query({"key": key, "operator": operator, "value": value}) return self
Make or_where clause :@param key :@param operator :@param value :@type key, operator, value: string :@return self
Below is the the instruction that describes the task: ### Input: Make or_where clause :@param key :@param operator :@param value :@type key, operator, value: string :@return self ### Response: def or_where(self, key, operator, value): """Make or_where clause :@param key :@param operator :@param value :@type key, operator, value: string :@return self """ if len(self._queries) > 0: self._current_query_index += 1 self.__store_query({"key": key, "operator": operator, "value": value}) return self
def obfn_gvar(self): """Variable to be evaluated in computing regularisation term, depending on 'gEvalY' option value. """ if self.opt['gEvalY']: return self.Y else: return self.cnst_A(self.X) - self.cnst_c()
Variable to be evaluated in computing regularisation term, depending on 'gEvalY' option value.
Below is the the instruction that describes the task: ### Input: Variable to be evaluated in computing regularisation term, depending on 'gEvalY' option value. ### Response: def obfn_gvar(self): """Variable to be evaluated in computing regularisation term, depending on 'gEvalY' option value. """ if self.opt['gEvalY']: return self.Y else: return self.cnst_A(self.X) - self.cnst_c()
def _get_effect_statement(self, effect, methods): """ This function loops over an array of objects containing a resourceArn and conditions statement and generates the array of statements for the policy. """ statements = [] if len(methods) > 0: statement = self._get_empty_statement(effect) for method in methods: if (method['conditions'] is None or len(method['conditions']) == 0): statement['Resource'].append(method['resource_arn']) else: cond_statement = self._get_empty_statement(effect) cond_statement['Resource'].append(method['resource_arn']) cond_statement['Condition'] = method['conditions'] statements.append(cond_statement) statements.append(statement) return statements
This function loops over an array of objects containing a resourceArn and conditions statement and generates the array of statements for the policy.
Below is the the instruction that describes the task: ### Input: This function loops over an array of objects containing a resourceArn and conditions statement and generates the array of statements for the policy. ### Response: def _get_effect_statement(self, effect, methods): """ This function loops over an array of objects containing a resourceArn and conditions statement and generates the array of statements for the policy. """ statements = [] if len(methods) > 0: statement = self._get_empty_statement(effect) for method in methods: if (method['conditions'] is None or len(method['conditions']) == 0): statement['Resource'].append(method['resource_arn']) else: cond_statement = self._get_empty_statement(effect) cond_statement['Resource'].append(method['resource_arn']) cond_statement['Condition'] = method['conditions'] statements.append(cond_statement) statements.append(statement) return statements
def _embedding_tsne(matrix, dimensions=3, early_exaggeration=12.0, method='barnes_hut', perplexity=30, learning_rate=200, n_iter=1000): """ Private method to perform tSNE embedding :param matrix: treeCl Distance Matrix :param dimensions: Number of dimensions in which to embed points :return: treeCl CoordinateMatrix """ tsne = sklearn.manifold.TSNE(n_components=dimensions, metric="precomputed", early_exaggeration=early_exaggeration, method=method, perplexity=perplexity, learning_rate=learning_rate, n_iter=1000) return tsne.fit_transform(matrix)
Private method to perform tSNE embedding :param matrix: treeCl Distance Matrix :param dimensions: Number of dimensions in which to embed points :return: treeCl CoordinateMatrix
Below is the the instruction that describes the task: ### Input: Private method to perform tSNE embedding :param matrix: treeCl Distance Matrix :param dimensions: Number of dimensions in which to embed points :return: treeCl CoordinateMatrix ### Response: def _embedding_tsne(matrix, dimensions=3, early_exaggeration=12.0, method='barnes_hut', perplexity=30, learning_rate=200, n_iter=1000): """ Private method to perform tSNE embedding :param matrix: treeCl Distance Matrix :param dimensions: Number of dimensions in which to embed points :return: treeCl CoordinateMatrix """ tsne = sklearn.manifold.TSNE(n_components=dimensions, metric="precomputed", early_exaggeration=early_exaggeration, method=method, perplexity=perplexity, learning_rate=learning_rate, n_iter=1000) return tsne.fit_transform(matrix)
def compute_checksum(line): """Compute the TLE checksum for the given line.""" return sum((int(c) if c.isdigit() else c == '-') for c in line[0:68]) % 10
Compute the TLE checksum for the given line.
Below is the the instruction that describes the task: ### Input: Compute the TLE checksum for the given line. ### Response: def compute_checksum(line): """Compute the TLE checksum for the given line.""" return sum((int(c) if c.isdigit() else c == '-') for c in line[0:68]) % 10
def cyk(grammar, parse_sequence): # type: (Grammar, Iterable[Any]) -> Nonterminal """ Perform CYK algorithm. :param grammar: Grammar to use in Chomsky Normal Form. :param parse_sequence: Input sequence to parse. :return: Instance of root Nonterminal in parsed tree. """ # check start symbol if grammar.start is None: raise StartSymbolNotSetException() # create variables parse_sequence = list(parse_sequence) input_length = len(parse_sequence) index = input_length - 1 f = Field(input_length) # creating mapping for speedup rules searching (termmap, rulemap) = _create_mapping(grammar) # fill first line with rules directly rewritable to terminal f.fill(termmap, parse_sequence) # fill rest of fields for y in range(1, input_length): for x in range(input_length - y): positions = f.positions(x, y) pairs_of_rules = [(f.rules(pos[0].x, pos[0].y), f.rules(pos[1].x, pos[1].y)) for pos in positions] rules = set() for pair_of_rule in pairs_of_rules: for (first_rule, second_rule) in _all_combinations(pair_of_rule): h = hash((first_rule.fromSymbol, second_rule.fromSymbol)) if h in rulemap: for r in rulemap[h]: # list of rules rules.add(PlaceItem(r, first_rule, second_rule)) f.put(x, y, list(rules)) # Check if is start symol on the bottom of field if grammar.start not in [r.fromSymbol for r in f.rules(0, input_length - 1)]: raise NotParsedException() # Find init symbol and rule start = grammar.start() # type: Nonterminal start_rule = [r for r in f.rules(0, input_length - 1) if grammar.start == r.fromSymbol][0] # Prepare buffer for proccess to_process = list() to_process.append({'n': start, 'r': start_rule}) # Prepare tree while len(to_process) > 0: working = to_process.pop() rule_class = working['r'] working_nonterm = working['n'] # type: Nonterminal # its middle rule - not rewritable to nonterminal if isinstance(rule_class, PlaceItem): created_rule = rule_class.rule() # type: Rule working_nonterm._set_to_rule(created_rule) created_rule._from_symbols.append(working_nonterm) for side 
in rule_class.to_rule: symbol = side.fromSymbol() # type: Nonterminal symbol._set_from_rule(created_rule) created_rule._to_symbols.append(symbol) to_process.append({'n': symbol, 'r': side}) # it is rule rewritable to nonterminal else: created_rule = rule_class() # type: Rule working_nonterm._set_to_rule(created_rule) created_rule._from_symbols.append(working_nonterm) t = Terminal(parse_sequence[index]) index -= 1 created_rule._to_symbols.append(t) t._set_from_rule(created_rule) return start
Perform CYK algorithm. :param grammar: Grammar to use in Chomsky Normal Form. :param parse_sequence: Input sequence to parse. :return: Instance of root Nonterminal in parsed tree.
Below is the the instruction that describes the task: ### Input: Perform CYK algorithm. :param grammar: Grammar to use in Chomsky Normal Form. :param parse_sequence: Input sequence to parse. :return: Instance of root Nonterminal in parsed tree. ### Response: def cyk(grammar, parse_sequence): # type: (Grammar, Iterable[Any]) -> Nonterminal """ Perform CYK algorithm. :param grammar: Grammar to use in Chomsky Normal Form. :param parse_sequence: Input sequence to parse. :return: Instance of root Nonterminal in parsed tree. """ # check start symbol if grammar.start is None: raise StartSymbolNotSetException() # create variables parse_sequence = list(parse_sequence) input_length = len(parse_sequence) index = input_length - 1 f = Field(input_length) # creating mapping for speedup rules searching (termmap, rulemap) = _create_mapping(grammar) # fill first line with rules directly rewritable to terminal f.fill(termmap, parse_sequence) # fill rest of fields for y in range(1, input_length): for x in range(input_length - y): positions = f.positions(x, y) pairs_of_rules = [(f.rules(pos[0].x, pos[0].y), f.rules(pos[1].x, pos[1].y)) for pos in positions] rules = set() for pair_of_rule in pairs_of_rules: for (first_rule, second_rule) in _all_combinations(pair_of_rule): h = hash((first_rule.fromSymbol, second_rule.fromSymbol)) if h in rulemap: for r in rulemap[h]: # list of rules rules.add(PlaceItem(r, first_rule, second_rule)) f.put(x, y, list(rules)) # Check if is start symol on the bottom of field if grammar.start not in [r.fromSymbol for r in f.rules(0, input_length - 1)]: raise NotParsedException() # Find init symbol and rule start = grammar.start() # type: Nonterminal start_rule = [r for r in f.rules(0, input_length - 1) if grammar.start == r.fromSymbol][0] # Prepare buffer for proccess to_process = list() to_process.append({'n': start, 'r': start_rule}) # Prepare tree while len(to_process) > 0: working = to_process.pop() rule_class = working['r'] working_nonterm = working['n'] 
# type: Nonterminal # its middle rule - not rewritable to nonterminal if isinstance(rule_class, PlaceItem): created_rule = rule_class.rule() # type: Rule working_nonterm._set_to_rule(created_rule) created_rule._from_symbols.append(working_nonterm) for side in rule_class.to_rule: symbol = side.fromSymbol() # type: Nonterminal symbol._set_from_rule(created_rule) created_rule._to_symbols.append(symbol) to_process.append({'n': symbol, 'r': side}) # it is rule rewritable to nonterminal else: created_rule = rule_class() # type: Rule working_nonterm._set_to_rule(created_rule) created_rule._from_symbols.append(working_nonterm) t = Terminal(parse_sequence[index]) index -= 1 created_rule._to_symbols.append(t) t._set_from_rule(created_rule) return start
def retrieveVals(self): """Retrieve values for graphs.""" if self._stats is None: serverInfo = MemcachedInfo(self._host, self._port, self._socket_file) stats = serverInfo.getStats() else: stats = self._stats if stats is None: raise Exception("Undetermined error accesing stats.") stats['set_hits'] = stats.get('total_items') if stats.has_key('cmd_set') and stats.has_key('total_items'): stats['set_misses'] = stats['cmd_set'] - stats['total_items'] self.saveState(stats) if self.hasGraph('memcached_connections'): self.setGraphVal('memcached_connections', 'conn', stats.get('curr_connections')) if self.hasGraph('memcached_items'): self.setGraphVal('memcached_items', 'items', stats.get('curr_items')) if self.hasGraph('memcached_memory'): self.setGraphVal('memcached_memory', 'bytes', stats.get('bytes')) if self.hasGraph('memcached_connrate'): self.setGraphVal('memcached_connrate', 'conn', stats.get('total_connections')) if self.hasGraph('memcached_traffic'): self.setGraphVal('memcached_traffic', 'rxbytes', stats.get('bytes_read')) self.setGraphVal('memcached_traffic', 'txbytes', stats.get('bytes_written')) if self.hasGraph('memcached_reqrate'): self.setGraphVal('memcached_reqrate', 'set', stats.get('cmd_set')) self.setGraphVal('memcached_reqrate', 'get', stats.get('cmd_get')) if self.graphHasField('memcached_reqrate', 'del'): self.setGraphVal('memcached_reqrate', 'del', safe_sum([stats.get('delete_hits'), stats.get('delete_misses')])) if self.graphHasField('memcached_reqrate', 'cas'): self.setGraphVal('memcached_reqrate', 'cas', safe_sum([stats.get('cas_hits'), stats.get('cas_misses'), stats.get('cas_badval')])) if self.graphHasField('memcached_reqrate', 'incr'): self.setGraphVal('memcached_reqrate', 'incr', safe_sum([stats.get('incr_hits'), stats.get('incr_misses')])) if self.graphHasField('memcached_reqrate', 'decr'): self.setGraphVal('memcached_reqrate', 'decr', safe_sum([stats.get('decr_hits'), stats.get('decr_misses')])) if self.hasGraph('memcached_statget'): 
self.setGraphVal('memcached_statget', 'hit', stats.get('get_hits')) self.setGraphVal('memcached_statget', 'miss', stats.get('get_misses')) self.setGraphVal('memcached_statget', 'total', safe_sum([stats.get('get_hits'), stats.get('get_misses')])) if self.hasGraph('memcached_statset'): self.setGraphVal('memcached_statset', 'hit', stats.get('set_hits')) self.setGraphVal('memcached_statset', 'miss', stats.get('set_misses')) self.setGraphVal('memcached_statset', 'total', safe_sum([stats.get('set_hits'), stats.get('set_misses')])) if self.hasGraph('memcached_statdel'): self.setGraphVal('memcached_statdel', 'hit', stats.get('delete_hits')) self.setGraphVal('memcached_statdel', 'miss', stats.get('delete_misses')) self.setGraphVal('memcached_statdel', 'total', safe_sum([stats.get('delete_hits'), stats.get('delete_misses')])) if self.hasGraph('memcached_statcas'): self.setGraphVal('memcached_statcas', 'hit', stats.get('cas_hits')) self.setGraphVal('memcached_statcas', 'miss', stats.get('cas_misses')) self.setGraphVal('memcached_statcas', 'badval', stats.get('cas_badval')) self.setGraphVal('memcached_statcas', 'total', safe_sum([stats.get('cas_hits'), stats.get('cas_misses'), stats.get('cas_badval')])) if self.hasGraph('memcached_statincrdecr'): self.setGraphVal('memcached_statincrdecr', 'incr_hit', stats.get('incr_hits')) self.setGraphVal('memcached_statincrdecr', 'decr_hit', stats.get('decr_hits')) self.setGraphVal('memcached_statincrdecr', 'incr_miss', stats.get('incr_misses')) self.setGraphVal('memcached_statincrdecr', 'decr_miss', stats.get('decr_misses')) self.setGraphVal('memcached_statincrdecr', 'total', safe_sum([stats.get('incr_hits'), stats.get('decr_hits'), stats.get('incr_misses'), stats.get('decr_misses')])) if self.hasGraph('memcached_statevict'): self.setGraphVal('memcached_statevict', 'evict', stats.get('evictions')) if self.graphHasField('memcached_statevict', 'reclaim'): self.setGraphVal('memcached_statevict', 'reclaim', stats.get('reclaimed')) if 
self.hasGraph('memcached_statauth'): self.setGraphVal('memcached_statauth', 'reqs', stats.get('auth_cmds')) self.setGraphVal('memcached_statauth', 'errors', stats.get('auth_errors')) if self.hasGraph('memcached_hitpct'): prev_stats = self._prev_stats for (field_name, field_hits, field_misses) in ( ('set', 'set_hits', 'set_misses'), ('get', 'get_hits', 'get_misses'), ('del', 'delete_hits', 'delete_misses'), ('cas', 'cas_hits', 'cas_misses'), ('incr', 'incr_hits', 'incr_misses'), ('decr', 'decr_hits', 'decr_misses') ): if prev_stats: if (stats.has_key(field_hits) and prev_stats.has_key(field_hits) and stats.has_key(field_misses) and prev_stats.has_key(field_misses)): hits = stats[field_hits] - prev_stats[field_hits] misses = stats[field_misses] - prev_stats[field_misses] total = hits + misses if total > 0: val = 100.0 * hits / total else: val = 0 self.setGraphVal('memcached_hitpct', field_name, round(val, 2))
Retrieve values for graphs.
Below is the the instruction that describes the task: ### Input: Retrieve values for graphs. ### Response: def retrieveVals(self): """Retrieve values for graphs.""" if self._stats is None: serverInfo = MemcachedInfo(self._host, self._port, self._socket_file) stats = serverInfo.getStats() else: stats = self._stats if stats is None: raise Exception("Undetermined error accesing stats.") stats['set_hits'] = stats.get('total_items') if stats.has_key('cmd_set') and stats.has_key('total_items'): stats['set_misses'] = stats['cmd_set'] - stats['total_items'] self.saveState(stats) if self.hasGraph('memcached_connections'): self.setGraphVal('memcached_connections', 'conn', stats.get('curr_connections')) if self.hasGraph('memcached_items'): self.setGraphVal('memcached_items', 'items', stats.get('curr_items')) if self.hasGraph('memcached_memory'): self.setGraphVal('memcached_memory', 'bytes', stats.get('bytes')) if self.hasGraph('memcached_connrate'): self.setGraphVal('memcached_connrate', 'conn', stats.get('total_connections')) if self.hasGraph('memcached_traffic'): self.setGraphVal('memcached_traffic', 'rxbytes', stats.get('bytes_read')) self.setGraphVal('memcached_traffic', 'txbytes', stats.get('bytes_written')) if self.hasGraph('memcached_reqrate'): self.setGraphVal('memcached_reqrate', 'set', stats.get('cmd_set')) self.setGraphVal('memcached_reqrate', 'get', stats.get('cmd_get')) if self.graphHasField('memcached_reqrate', 'del'): self.setGraphVal('memcached_reqrate', 'del', safe_sum([stats.get('delete_hits'), stats.get('delete_misses')])) if self.graphHasField('memcached_reqrate', 'cas'): self.setGraphVal('memcached_reqrate', 'cas', safe_sum([stats.get('cas_hits'), stats.get('cas_misses'), stats.get('cas_badval')])) if self.graphHasField('memcached_reqrate', 'incr'): self.setGraphVal('memcached_reqrate', 'incr', safe_sum([stats.get('incr_hits'), stats.get('incr_misses')])) if self.graphHasField('memcached_reqrate', 'decr'): self.setGraphVal('memcached_reqrate', 'decr', 
safe_sum([stats.get('decr_hits'), stats.get('decr_misses')])) if self.hasGraph('memcached_statget'): self.setGraphVal('memcached_statget', 'hit', stats.get('get_hits')) self.setGraphVal('memcached_statget', 'miss', stats.get('get_misses')) self.setGraphVal('memcached_statget', 'total', safe_sum([stats.get('get_hits'), stats.get('get_misses')])) if self.hasGraph('memcached_statset'): self.setGraphVal('memcached_statset', 'hit', stats.get('set_hits')) self.setGraphVal('memcached_statset', 'miss', stats.get('set_misses')) self.setGraphVal('memcached_statset', 'total', safe_sum([stats.get('set_hits'), stats.get('set_misses')])) if self.hasGraph('memcached_statdel'): self.setGraphVal('memcached_statdel', 'hit', stats.get('delete_hits')) self.setGraphVal('memcached_statdel', 'miss', stats.get('delete_misses')) self.setGraphVal('memcached_statdel', 'total', safe_sum([stats.get('delete_hits'), stats.get('delete_misses')])) if self.hasGraph('memcached_statcas'): self.setGraphVal('memcached_statcas', 'hit', stats.get('cas_hits')) self.setGraphVal('memcached_statcas', 'miss', stats.get('cas_misses')) self.setGraphVal('memcached_statcas', 'badval', stats.get('cas_badval')) self.setGraphVal('memcached_statcas', 'total', safe_sum([stats.get('cas_hits'), stats.get('cas_misses'), stats.get('cas_badval')])) if self.hasGraph('memcached_statincrdecr'): self.setGraphVal('memcached_statincrdecr', 'incr_hit', stats.get('incr_hits')) self.setGraphVal('memcached_statincrdecr', 'decr_hit', stats.get('decr_hits')) self.setGraphVal('memcached_statincrdecr', 'incr_miss', stats.get('incr_misses')) self.setGraphVal('memcached_statincrdecr', 'decr_miss', stats.get('decr_misses')) self.setGraphVal('memcached_statincrdecr', 'total', safe_sum([stats.get('incr_hits'), stats.get('decr_hits'), stats.get('incr_misses'), stats.get('decr_misses')])) if self.hasGraph('memcached_statevict'): self.setGraphVal('memcached_statevict', 'evict', stats.get('evictions')) if 
self.graphHasField('memcached_statevict', 'reclaim'): self.setGraphVal('memcached_statevict', 'reclaim', stats.get('reclaimed')) if self.hasGraph('memcached_statauth'): self.setGraphVal('memcached_statauth', 'reqs', stats.get('auth_cmds')) self.setGraphVal('memcached_statauth', 'errors', stats.get('auth_errors')) if self.hasGraph('memcached_hitpct'): prev_stats = self._prev_stats for (field_name, field_hits, field_misses) in ( ('set', 'set_hits', 'set_misses'), ('get', 'get_hits', 'get_misses'), ('del', 'delete_hits', 'delete_misses'), ('cas', 'cas_hits', 'cas_misses'), ('incr', 'incr_hits', 'incr_misses'), ('decr', 'decr_hits', 'decr_misses') ): if prev_stats: if (stats.has_key(field_hits) and prev_stats.has_key(field_hits) and stats.has_key(field_misses) and prev_stats.has_key(field_misses)): hits = stats[field_hits] - prev_stats[field_hits] misses = stats[field_misses] - prev_stats[field_misses] total = hits + misses if total > 0: val = 100.0 * hits / total else: val = 0 self.setGraphVal('memcached_hitpct', field_name, round(val, 2))
def close_links(self): """ Close all open links """ for uri, cf in self._cfs.items(): cf.close_link() self._is_open = False
Close all open links
Below is the the instruction that describes the task: ### Input: Close all open links ### Response: def close_links(self): """ Close all open links """ for uri, cf in self._cfs.items(): cf.close_link() self._is_open = False
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None, allow_fill=True): """ Specialized Cython take which sets NaN values in one pass """ if indexer is None or (indexer[0] is None and indexer[1] is None): row_idx = np.arange(arr.shape[0], dtype=np.int64) col_idx = np.arange(arr.shape[1], dtype=np.int64) indexer = row_idx, col_idx dtype, fill_value = arr.dtype, arr.dtype.type() else: row_idx, col_idx = indexer if row_idx is None: row_idx = np.arange(arr.shape[0], dtype=np.int64) else: row_idx = ensure_int64(row_idx) if col_idx is None: col_idx = np.arange(arr.shape[1], dtype=np.int64) else: col_idx = ensure_int64(col_idx) indexer = row_idx, col_idx if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() mask_info = None, False else: # check for promotion based on types only (do this first because # it's faster than computing a mask) dtype, fill_value = maybe_promote(arr.dtype, fill_value) if dtype != arr.dtype and (out is None or out.dtype != dtype): # check if promotion is actually required based on indexer if mask_info is not None: (row_mask, col_mask), (row_needs, col_needs) = mask_info else: row_mask = row_idx == -1 col_mask = col_idx == -1 row_needs = row_mask.any() col_needs = col_mask.any() mask_info = (row_mask, col_mask), (row_needs, col_needs) if row_needs or col_needs: if out is not None and out.dtype != dtype: raise TypeError('Incompatible type for fill_value') else: # if not, then depromote, set fill_value to dummy # (it won't be used but we don't want the cython code # to crash when trying to cast it to dtype) dtype, fill_value = arr.dtype, arr.dtype.type() # at this point, it's guaranteed that dtype can hold both the arr values # and the fill_value if out is None: out_shape = len(row_idx), len(col_idx) out = np.empty(out_shape, dtype=dtype) func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None) if func is None and arr.dtype != out.dtype: func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), 
None) if func is not None: func = _convert_wrapper(func, out.dtype) if func is None: def func(arr, indexer, out, fill_value=np.nan): _take_2d_multi_object(arr, indexer, out, fill_value=fill_value, mask_info=mask_info) func(arr, indexer, out=out, fill_value=fill_value) return out
Specialized Cython take which sets NaN values in one pass
Below is the the instruction that describes the task: ### Input: Specialized Cython take which sets NaN values in one pass ### Response: def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None, allow_fill=True): """ Specialized Cython take which sets NaN values in one pass """ if indexer is None or (indexer[0] is None and indexer[1] is None): row_idx = np.arange(arr.shape[0], dtype=np.int64) col_idx = np.arange(arr.shape[1], dtype=np.int64) indexer = row_idx, col_idx dtype, fill_value = arr.dtype, arr.dtype.type() else: row_idx, col_idx = indexer if row_idx is None: row_idx = np.arange(arr.shape[0], dtype=np.int64) else: row_idx = ensure_int64(row_idx) if col_idx is None: col_idx = np.arange(arr.shape[1], dtype=np.int64) else: col_idx = ensure_int64(col_idx) indexer = row_idx, col_idx if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() mask_info = None, False else: # check for promotion based on types only (do this first because # it's faster than computing a mask) dtype, fill_value = maybe_promote(arr.dtype, fill_value) if dtype != arr.dtype and (out is None or out.dtype != dtype): # check if promotion is actually required based on indexer if mask_info is not None: (row_mask, col_mask), (row_needs, col_needs) = mask_info else: row_mask = row_idx == -1 col_mask = col_idx == -1 row_needs = row_mask.any() col_needs = col_mask.any() mask_info = (row_mask, col_mask), (row_needs, col_needs) if row_needs or col_needs: if out is not None and out.dtype != dtype: raise TypeError('Incompatible type for fill_value') else: # if not, then depromote, set fill_value to dummy # (it won't be used but we don't want the cython code # to crash when trying to cast it to dtype) dtype, fill_value = arr.dtype, arr.dtype.type() # at this point, it's guaranteed that dtype can hold both the arr values # and the fill_value if out is None: out_shape = len(row_idx), len(col_idx) out = np.empty(out_shape, dtype=dtype) func = 
_take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None) if func is None and arr.dtype != out.dtype: func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None) if func is not None: func = _convert_wrapper(func, out.dtype) if func is None: def func(arr, indexer, out, fill_value=np.nan): _take_2d_multi_object(arr, indexer, out, fill_value=fill_value, mask_info=mask_info) func(arr, indexer, out=out, fill_value=fill_value) return out
def sg_print(tensor_list): r"""Simple tensor printing function for debugging. Prints the value, shape, and data type of each tensor in the list. Args: tensor_list: A list/tuple of tensors or a single tensor. Returns: The value of the tensors. For example, ```python import sugartensor as tf a = tf.constant([1.]) b = tf.constant([2.]) out = tf.sg_print([a, b]) # Should print [ 1.] (1,) float32 # [ 2.] (1,) float32 print(out) # Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)] ``` """ # to list if type(tensor_list) is not list and type(tensor_list) is not tuple: tensor_list = [tensor_list] # evaluate tensor list with queue runner with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: sg_init(sess) with tf.sg_queue_context(): res = sess.run(tensor_list) for r in res: print(r, r.shape, r.dtype) if len(res) == 1: return res[0] else: return res
r"""Simple tensor printing function for debugging. Prints the value, shape, and data type of each tensor in the list. Args: tensor_list: A list/tuple of tensors or a single tensor. Returns: The value of the tensors. For example, ```python import sugartensor as tf a = tf.constant([1.]) b = tf.constant([2.]) out = tf.sg_print([a, b]) # Should print [ 1.] (1,) float32 # [ 2.] (1,) float32 print(out) # Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)] ```
Below is the the instruction that describes the task: ### Input: r"""Simple tensor printing function for debugging. Prints the value, shape, and data type of each tensor in the list. Args: tensor_list: A list/tuple of tensors or a single tensor. Returns: The value of the tensors. For example, ```python import sugartensor as tf a = tf.constant([1.]) b = tf.constant([2.]) out = tf.sg_print([a, b]) # Should print [ 1.] (1,) float32 # [ 2.] (1,) float32 print(out) # Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)] ``` ### Response: def sg_print(tensor_list): r"""Simple tensor printing function for debugging. Prints the value, shape, and data type of each tensor in the list. Args: tensor_list: A list/tuple of tensors or a single tensor. Returns: The value of the tensors. For example, ```python import sugartensor as tf a = tf.constant([1.]) b = tf.constant([2.]) out = tf.sg_print([a, b]) # Should print [ 1.] (1,) float32 # [ 2.] (1,) float32 print(out) # Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)] ``` """ # to list if type(tensor_list) is not list and type(tensor_list) is not tuple: tensor_list = [tensor_list] # evaluate tensor list with queue runner with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: sg_init(sess) with tf.sg_queue_context(): res = sess.run(tensor_list) for r in res: print(r, r.shape, r.dtype) if len(res) == 1: return res[0] else: return res
def corpus_token_counts( text_filepattern, corpus_max_lines, split_on_newlines=True): """Read the corpus and compute a dictionary of token counts. Args: text_filepattern: A pattern matching one or more files. corpus_max_lines: An integer; maximum total lines to read. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Returns: a dictionary mapping token to count. """ counts = collections.Counter() for doc in _read_filepattern( text_filepattern, max_lines=corpus_max_lines, split_on_newlines=split_on_newlines): counts.update(encode(_native_to_unicode(doc))) mlperf_log.transformer_print( key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(counts)) return counts
Read the corpus and compute a dictionary of token counts. Args: text_filepattern: A pattern matching one or more files. corpus_max_lines: An integer; maximum total lines to read. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Returns: a dictionary mapping token to count.
Below is the the instruction that describes the task: ### Input: Read the corpus and compute a dictionary of token counts. Args: text_filepattern: A pattern matching one or more files. corpus_max_lines: An integer; maximum total lines to read. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Returns: a dictionary mapping token to count. ### Response: def corpus_token_counts( text_filepattern, corpus_max_lines, split_on_newlines=True): """Read the corpus and compute a dictionary of token counts. Args: text_filepattern: A pattern matching one or more files. corpus_max_lines: An integer; maximum total lines to read. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Returns: a dictionary mapping token to count. """ counts = collections.Counter() for doc in _read_filepattern( text_filepattern, max_lines=corpus_max_lines, split_on_newlines=split_on_newlines): counts.update(encode(_native_to_unicode(doc))) mlperf_log.transformer_print( key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(counts)) return counts
def flushInput(self): '''flush any pending input''' self.buf = '' saved_timeout = self.timeout self.timeout = 0.5 self._recv() self.timeout = saved_timeout self.buf = '' self.debug("flushInput")
flush any pending input
Below is the the instruction that describes the task: ### Input: flush any pending input ### Response: def flushInput(self): '''flush any pending input''' self.buf = '' saved_timeout = self.timeout self.timeout = 0.5 self._recv() self.timeout = saved_timeout self.buf = '' self.debug("flushInput")
def array_scanlines_interlace(self, pixels): """ Generator for interlaced scanlines from an array. `pixels` is the full source image in flat row flat pixel format. The generator yields each scanline of the reduced passes in turn, in boxed row flat pixel format. """ # http://www.w3.org/TR/PNG/#8InterlaceMethods # Array type. fmt = 'BH'[self.bitdepth > 8] # Value per row vpr = self.width * self.planes for xstart, ystart, xstep, ystep in _adam7: if xstart >= self.width: continue # Pixels per row (of reduced image) ppr = int(math.ceil((self.width-xstart)/float(xstep))) # number of values in reduced image row. row_len = ppr*self.planes for y in range(ystart, self.height, ystep): if xstep == 1: offset = y * vpr yield pixels[offset:offset+vpr] else: row = array(fmt) # There's no easier way to set the length of an array row.extend(pixels[0:row_len]) offset = y * vpr + xstart * self.planes end_offset = (y+1) * vpr skip = self.planes * xstep for i in range(self.planes): row[i::self.planes] = \ pixels[offset+i:end_offset:skip] yield row
Generator for interlaced scanlines from an array. `pixels` is the full source image in flat row flat pixel format. The generator yields each scanline of the reduced passes in turn, in boxed row flat pixel format.
Below is the the instruction that describes the task: ### Input: Generator for interlaced scanlines from an array. `pixels` is the full source image in flat row flat pixel format. The generator yields each scanline of the reduced passes in turn, in boxed row flat pixel format. ### Response: def array_scanlines_interlace(self, pixels): """ Generator for interlaced scanlines from an array. `pixels` is the full source image in flat row flat pixel format. The generator yields each scanline of the reduced passes in turn, in boxed row flat pixel format. """ # http://www.w3.org/TR/PNG/#8InterlaceMethods # Array type. fmt = 'BH'[self.bitdepth > 8] # Value per row vpr = self.width * self.planes for xstart, ystart, xstep, ystep in _adam7: if xstart >= self.width: continue # Pixels per row (of reduced image) ppr = int(math.ceil((self.width-xstart)/float(xstep))) # number of values in reduced image row. row_len = ppr*self.planes for y in range(ystart, self.height, ystep): if xstep == 1: offset = y * vpr yield pixels[offset:offset+vpr] else: row = array(fmt) # There's no easier way to set the length of an array row.extend(pixels[0:row_len]) offset = y * vpr + xstart * self.planes end_offset = (y+1) * vpr skip = self.planes * xstep for i in range(self.planes): row[i::self.planes] = \ pixels[offset+i:end_offset:skip] yield row
def clone(self, **data): '''Utility method for cloning the instance as a new object. :parameter data: additional which override field data. :rtype: a new instance of this class. ''' meta = self._meta session = self.session pkname = meta.pkname() pkvalue = data.pop(pkname, None) fields = self.todict(exclude_cache=True) fields.update(data) fields.pop('__dbdata__', None) obj = self._meta.make_object((pkvalue, None, fields)) obj.session = session return obj
Utility method for cloning the instance as a new object. :parameter data: additional which override field data. :rtype: a new instance of this class.
Below is the the instruction that describes the task: ### Input: Utility method for cloning the instance as a new object. :parameter data: additional which override field data. :rtype: a new instance of this class. ### Response: def clone(self, **data): '''Utility method for cloning the instance as a new object. :parameter data: additional which override field data. :rtype: a new instance of this class. ''' meta = self._meta session = self.session pkname = meta.pkname() pkvalue = data.pop(pkname, None) fields = self.todict(exclude_cache=True) fields.update(data) fields.pop('__dbdata__', None) obj = self._meta.make_object((pkvalue, None, fields)) obj.session = session return obj
def plot_bargraph(
    self,
    rank="auto",
    normalize="auto",
    top_n="auto",
    threshold="auto",
    title=None,
    xlabel=None,
    ylabel=None,
    tooltip=None,
    return_chart=False,
    haxis=None,
    legend="auto",
    label=None,
):
    """Plot a bargraph of relative abundance of taxa for multiple samples.

    Parameters
    ----------
    rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
        Analysis will be restricted to abundances of taxa at the specified level.
    normalize : 'auto' or `bool`, optional
        Convert read counts to relative abundances such that each sample sums to 1.0. Setting
        'auto' will choose automatically based on the data.
    return_chart : `bool`, optional
        When True, return an `altair.Chart` object instead of displaying the resulting plot in
        the current notebook.
    top_n : `int`, optional
        Display the top N most abundant taxa in the entire cohort of samples.
    threshold : `float`
        Display only taxa that are more abundant that this threshold in one or more samples.
    title : `string`, optional
        Text label at the top of the plot.
    xlabel : `string`, optional
        Text label along the horizontal axis.
    ylabel : `string`, optional
        Text label along the vertical axis.
    tooltip : `string` or `list`, optional
        A string or list containing strings representing metadata fields. When a point in the
        plot is hovered over, the value of the metadata associated with that sample will be
        displayed in a modal.
    haxis : `string`, optional
        The metadata field (or tuple containing multiple categorical fields) used to group
        samples together.
    legend: `string`, optional
        Title for color scale. Defaults to the field used to generate the plot, e.g.
        readcount_w_children or abundance.
    label : `string` or `callable`, optional
        A metadata field (or function) used to label each analysis. If passing a function, a
        dict containing the metadata for each analysis is passed as the first and only
        positional argument. The callable function must return a string.

    Examples
    --------
    Plot a bargraph of the top 10 most abundant genera

    >>> plot_bargraph(rank='genus', top_n=10)
    """
    if rank is None:
        raise OneCodexException("Please specify a rank or 'auto' to choose automatically")

    if not (threshold or top_n):
        raise OneCodexException("Please specify at least one of: threshold, top_n")

    # Resolve the 'auto' defaults: prefer top_n=10 when neither is given.
    if top_n == "auto" and threshold == "auto":
        top_n = 10
        threshold = None
    elif top_n == "auto" and threshold != "auto":
        top_n = None
    elif top_n != "auto" and threshold == "auto":
        threshold = None

    if legend == "auto":
        legend = self._field

    df = self.to_df(
        rank=rank, normalize=normalize, top_n=top_n, threshold=threshold, table_format="long"
    )

    if tooltip:
        if not isinstance(tooltip, list):
            tooltip = [tooltip]
        else:
            # Copy so the append/insert calls below never mutate the
            # caller's list (previously this leaked "Label"/haxis entries
            # back into the argument across calls).
            tooltip = list(tooltip)
    else:
        tooltip = []

    if haxis:
        tooltip.append(haxis)

    tooltip.insert(0, "Label")

    # takes metadata columns and returns a dataframe with just those columns
    # renames columns in the case where columns are taxids
    magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)

    # add sort order to long-format df
    if haxis:
        sort_order = magic_metadata.sort_values(magic_fields[haxis]).index.tolist()

        for sort_num, sort_class_id in enumerate(sort_order):
            magic_metadata.loc[sort_class_id, "sort_order"] = sort_num

        df["sort_order"] = magic_metadata["sort_order"][df["classification_id"]].tolist()

        sort_order = alt.EncodingSortField(field="sort_order", op="mean")
    else:
        sort_order = None

    # transfer metadata from wide-format df (magic_metadata) to long-format df
    for f in tooltip:
        df[magic_fields[f]] = magic_metadata[magic_fields[f]][df["classification_id"]].tolist()

    # add taxa names
    df["tax_name"] = [
        "{} ({})".format(self.taxonomy["name"][t], t) if t in self.taxonomy["name"] else t
        for t in df["tax_id"]
    ]

    #
    # TODO: how to sort bars in bargraph
    # - abundance (mean across all samples)
    # - parent taxon (this will require that we make a few assumptions
    # about taxonomic ranks but as all taxonomic data will be coming from
    # OCX this should be okay)
    #

    ylabel = self._field if ylabel is None else ylabel
    xlabel = "" if xlabel is None else xlabel

    # should ultimately be Label, tax_name, readcount_w_children, then custom fields
    tooltip_for_altair = [magic_fields[f] for f in tooltip]
    tooltip_for_altair.insert(1, "tax_name")
    tooltip_for_altair.insert(2, "{}:Q".format(self._field))

    # generate dataframes to plot, one per facet
    dfs_to_plot = []

    if haxis:
        # if using facets, first facet is just the vertical axis
        blank_df = df.iloc[:1].copy()
        blank_df[self._field] = 0

        dfs_to_plot.append(blank_df)

        for md_val in magic_metadata[magic_fields[haxis]].unique():
            plot_df = df.where(df[magic_fields[haxis]] == md_val).dropna()

            # preserve booleans (dropna coerces bool columns to object/float)
            if magic_metadata[magic_fields[haxis]].dtype == "bool":
                plot_df[magic_fields[haxis]] = plot_df[magic_fields[haxis]].astype(bool)

            dfs_to_plot.append(plot_df)
    else:
        dfs_to_plot.append(df)

    charts = []

    for plot_num, plot_df in enumerate(dfs_to_plot):
        chart = (
            alt.Chart(plot_df)
            .mark_bar()
            .encode(
                x=alt.X("Label", axis=alt.Axis(title=xlabel), sort=sort_order),
                y=alt.Y(
                    self._field,
                    axis=alt.Axis(title=ylabel),
                    scale=alt.Scale(domain=[0, 1], zero=True, nice=False),
                ),
                color=alt.Color("tax_name", legend=alt.Legend(title=legend)),
                tooltip=tooltip_for_altair,
                href="url:N",
            )
        )

        if haxis:
            if plot_num == 0:
                # first plot (blank_df) has vert axis but no horiz axis
                chart.encoding.x.axis = None
            elif plot_num > 0:
                # strip vertical axis from subsequent facets
                chart.encoding.y.axis = None

                # facet's title set to value of metadata in this group
                chart.title = str(plot_df[magic_fields[haxis]].tolist()[0])

        charts.append(chart)

    # add all the facets together
    final_chart = charts[0]

    if len(charts) > 1:
        for chart in charts[1:]:
            final_chart |= chart

    # add title to chart
    # (cannot specify None or False for no title)
    final_chart = final_chart.properties(title=title) if title else final_chart

    return final_chart if return_chart else final_chart.display()
Plot a bargraph of relative abundance of taxa for multiple samples. Parameters ---------- rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. normalize : 'auto' or `bool`, optional Convert read counts to relative abundances such that each sample sums to 1.0. Setting 'auto' will choose automatically based on the data. return_chart : `bool`, optional When True, return an `altair.Chart` object instead of displaying the resulting plot in the current notebook. top_n : `int`, optional Display the top N most abundant taxa in the entire cohort of samples. threshold : `float` Display only taxa that are more abundant that this threshold in one or more samples. title : `string`, optional Text label at the top of the plot. xlabel : `string`, optional Text label along the horizontal axis. ylabel : `string`, optional Text label along the vertical axis. tooltip : `string` or `list`, optional A string or list containing strings representing metadata fields. When a point in the plot is hovered over, the value of the metadata associated with that sample will be displayed in a modal. haxis : `string`, optional The metadata field (or tuple containing multiple categorical fields) used to group samples together. legend: `string`, optional Title for color scale. Defaults to the field used to generate the plot, e.g. readcount_w_children or abundance. label : `string` or `callable`, optional A metadata field (or function) used to label each analysis. If passing a function, a dict containing the metadata for each analysis is passed as the first and only positional argument. The callable function must return a string. Examples -------- Plot a bargraph of the top 10 most abundant genera >>> plot_bargraph(rank='genus', top_n=10)
Below is the the instruction that describes the task: ### Input: Plot a bargraph of relative abundance of taxa for multiple samples. Parameters ---------- rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. normalize : 'auto' or `bool`, optional Convert read counts to relative abundances such that each sample sums to 1.0. Setting 'auto' will choose automatically based on the data. return_chart : `bool`, optional When True, return an `altair.Chart` object instead of displaying the resulting plot in the current notebook. top_n : `int`, optional Display the top N most abundant taxa in the entire cohort of samples. threshold : `float` Display only taxa that are more abundant that this threshold in one or more samples. title : `string`, optional Text label at the top of the plot. xlabel : `string`, optional Text label along the horizontal axis. ylabel : `string`, optional Text label along the vertical axis. tooltip : `string` or `list`, optional A string or list containing strings representing metadata fields. When a point in the plot is hovered over, the value of the metadata associated with that sample will be displayed in a modal. haxis : `string`, optional The metadata field (or tuple containing multiple categorical fields) used to group samples together. legend: `string`, optional Title for color scale. Defaults to the field used to generate the plot, e.g. readcount_w_children or abundance. label : `string` or `callable`, optional A metadata field (or function) used to label each analysis. If passing a function, a dict containing the metadata for each analysis is passed as the first and only positional argument. The callable function must return a string. 
Examples -------- Plot a bargraph of the top 10 most abundant genera >>> plot_bargraph(rank='genus', top_n=10) ### Response: def plot_bargraph( self, rank="auto", normalize="auto", top_n="auto", threshold="auto", title=None, xlabel=None, ylabel=None, tooltip=None, return_chart=False, haxis=None, legend="auto", label=None, ): """Plot a bargraph of relative abundance of taxa for multiple samples. Parameters ---------- rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. normalize : 'auto' or `bool`, optional Convert read counts to relative abundances such that each sample sums to 1.0. Setting 'auto' will choose automatically based on the data. return_chart : `bool`, optional When True, return an `altair.Chart` object instead of displaying the resulting plot in the current notebook. top_n : `int`, optional Display the top N most abundant taxa in the entire cohort of samples. threshold : `float` Display only taxa that are more abundant that this threshold in one or more samples. title : `string`, optional Text label at the top of the plot. xlabel : `string`, optional Text label along the horizontal axis. ylabel : `string`, optional Text label along the vertical axis. tooltip : `string` or `list`, optional A string or list containing strings representing metadata fields. When a point in the plot is hovered over, the value of the metadata associated with that sample will be displayed in a modal. haxis : `string`, optional The metadata field (or tuple containing multiple categorical fields) used to group samples together. legend: `string`, optional Title for color scale. Defaults to the field used to generate the plot, e.g. readcount_w_children or abundance. label : `string` or `callable`, optional A metadata field (or function) used to label each analysis. 
If passing a function, a dict containing the metadata for each analysis is passed as the first and only positional argument. The callable function must return a string. Examples -------- Plot a bargraph of the top 10 most abundant genera >>> plot_bargraph(rank='genus', top_n=10) """ if rank is None: raise OneCodexException("Please specify a rank or 'auto' to choose automatically") if not (threshold or top_n): raise OneCodexException("Please specify at least one of: threshold, top_n") if top_n == "auto" and threshold == "auto": top_n = 10 threshold = None elif top_n == "auto" and threshold != "auto": top_n = None elif top_n != "auto" and threshold == "auto": threshold = None if legend == "auto": legend = self._field df = self.to_df( rank=rank, normalize=normalize, top_n=top_n, threshold=threshold, table_format="long" ) if tooltip: if not isinstance(tooltip, list): tooltip = [tooltip] else: tooltip = [] if haxis: tooltip.append(haxis) tooltip.insert(0, "Label") # takes metadata columns and returns a dataframe with just those columns # renames columns in the case where columns are taxids magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label) # add sort order to long-format df if haxis: sort_order = magic_metadata.sort_values(magic_fields[haxis]).index.tolist() for sort_num, sort_class_id in enumerate(sort_order): magic_metadata.loc[sort_class_id, "sort_order"] = sort_num df["sort_order"] = magic_metadata["sort_order"][df["classification_id"]].tolist() sort_order = alt.EncodingSortField(field="sort_order", op="mean") else: sort_order = None # transfer metadata from wide-format df (magic_metadata) to long-format df for f in tooltip: df[magic_fields[f]] = magic_metadata[magic_fields[f]][df["classification_id"]].tolist() # add taxa names df["tax_name"] = [ "{} ({})".format(self.taxonomy["name"][t], t) if t in self.taxonomy["name"] else t for t in df["tax_id"] ] # # TODO: how to sort bars in bargraph # - abundance (mean across all samples) # - parent 
taxon (this will require that we make a few assumptions # about taxonomic ranks but as all taxonomic data will be coming from # OCX this should be okay) # ylabel = self._field if ylabel is None else ylabel xlabel = "" if xlabel is None else xlabel # should ultimately be Label, tax_name, readcount_w_children, then custom fields tooltip_for_altair = [magic_fields[f] for f in tooltip] tooltip_for_altair.insert(1, "tax_name") tooltip_for_altair.insert(2, "{}:Q".format(self._field)) # generate dataframes to plot, one per facet dfs_to_plot = [] if haxis: # if using facets, first facet is just the vertical axis blank_df = df.iloc[:1].copy() blank_df[self._field] = 0 dfs_to_plot.append(blank_df) for md_val in magic_metadata[magic_fields[haxis]].unique(): plot_df = df.where(df[magic_fields[haxis]] == md_val).dropna() # preserve booleans if magic_metadata[magic_fields[haxis]].dtype == "bool": plot_df[magic_fields[haxis]] = plot_df[magic_fields[haxis]].astype(bool) dfs_to_plot.append(plot_df) else: dfs_to_plot.append(df) charts = [] for plot_num, plot_df in enumerate(dfs_to_plot): chart = ( alt.Chart(plot_df) .mark_bar() .encode( x=alt.X("Label", axis=alt.Axis(title=xlabel), sort=sort_order), y=alt.Y( self._field, axis=alt.Axis(title=ylabel), scale=alt.Scale(domain=[0, 1], zero=True, nice=False), ), color=alt.Color("tax_name", legend=alt.Legend(title=legend)), tooltip=tooltip_for_altair, href="url:N", ) ) if haxis: if plot_num == 0: # first plot (blank_df) has vert axis but no horiz axis chart.encoding.x.axis = None elif plot_num > 0: # strip vertical axis from subsequent facets chart.encoding.y.axis = None # facet's title set to value of metadata in this group chart.title = str(plot_df[magic_fields[haxis]].tolist()[0]) charts.append(chart) # add all the facets together final_chart = charts[0] if len(charts) > 1: for chart in charts[1:]: final_chart |= chart # add title to chart # (cannot specify None or False for no title) final_chart = final_chart.properties(title=title) if 
title else final_chart return final_chart if return_chart else final_chart.display()
def read_block(self, block):
    """Read an 8-byte data block at address (block * 8)."""
    if not 0 <= block <= 255:
        raise ValueError("invalid block number")
    log.debug("read block {0}".format(block))
    # READ8 command frame: 0x02, block number, eight padding zero bytes,
    # then the tag UID.
    cmd = "\x02" + chr(block) + 8 * chr(0) + self.uid
    response = self.transceive(cmd)
    # First response byte is status; the 8 data bytes follow.
    return response[1:9]
Read an 8-byte data block at address (block * 8).
Below is the the instruction that describes the task: ### Input: Read an 8-byte data block at address (block * 8). ### Response: def read_block(self, block): """Read an 8-byte data block at address (block * 8). """ if block < 0 or block > 255: raise ValueError("invalid block number") log.debug("read block {0}".format(block)) cmd = "\x02" + chr(block) + 8 * chr(0) + self.uid return self.transceive(cmd)[1:9]
def _is_packed_binary(self, data): ''' Check if data is hexadecimal packed :param data: :return: ''' packed = False if isinstance(data, bytes) and len(data) == 16 and b':' not in data: try: packed = bool(int(binascii.hexlify(data), 16)) except (ValueError, TypeError): pass return packed
Check whether ``data`` is a packed 16-byte binary value: a ``bytes`` object of length 16 containing no ``b':'`` separator and with a non-zero hexadecimal value. :param data: value to inspect :return: True if ``data`` looks packed, False otherwise
Below is the the instruction that describes the task: ### Input: Check if data is hexadecimal packed :param data: :return: ### Response: def _is_packed_binary(self, data): ''' Check if data is hexadecimal packed :param data: :return: ''' packed = False if isinstance(data, bytes) and len(data) == 16 and b':' not in data: try: packed = bool(int(binascii.hexlify(data), 16)) except (ValueError, TypeError): pass return packed
def get_block_by_height(self, height: int, is_full: bool = False) -> dict:
    """
    Get the block information at the given block height on the current network.

    :param height: block height to query.
    :param is_full: when True, return the whole JSON-RPC response instead
        of just its ``result`` field.
    :return: the block information as a dict.
    """
    request = self.generate_json_rpc_payload(RpcMethod.GET_BLOCK, [height, 1])
    reply = self.__post(self.__url, request)
    return reply if is_full else reply['result']
This interface is used to get the block information by block height in current network. Return: the block information at the specified height as a dict, or the full JSON-RPC response when ``is_full`` is True.
Below is the the instruction that describes the task: ### Input: This interface is used to get the block information by block height in current network. Return: the decimal total number of blocks in current network. ### Response: def get_block_by_height(self, height: int, is_full: bool = False) -> dict: """ This interface is used to get the block information by block height in current network. Return: the decimal total number of blocks in current network. """ payload = self.generate_json_rpc_payload(RpcMethod.GET_BLOCK, [height, 1]) response = self.__post(self.__url, payload) if is_full: return response return response['result']
def fetch_extra_data(resource):
    """Return a dict with extra data retrieved from cern oauth."""
    def first_of(key):
        # Each attribute arrives as a list (possibly absent); only its
        # first entry is kept, defaulting to None.
        return resource.get(key, [None])[0]

    return {
        'person_id': first_of('PersonID'),
        'identity_class': first_of('IdentityClass'),
        'department': first_of('Department'),
    }
Return a dict with extra data retrieved from cern oauth.
Below is the the instruction that describes the task: ### Input: Return a dict with extra data retrieved from cern oauth. ### Response: def fetch_extra_data(resource): """Return a dict with extra data retrieved from cern oauth.""" person_id = resource.get('PersonID', [None])[0] identity_class = resource.get('IdentityClass', [None])[0] department = resource.get('Department', [None])[0] return dict( person_id=person_id, identity_class=identity_class, department=department )
def prefix_to_addrmask(value, sep=" "):
    """
    Converts a CIDR formatted prefix into an address netmask representation.

    Argument sep specifies the separator between the address and netmask
    parts. By default it's a single space.

    Examples:

    >>> "{{ '192.168.0.1/24|prefix_to_addrmask }}" -> "192.168.0.1 255.255.255.0"
    >>> "{{ '192.168.0.1/24|prefix_to_addrmask('/') }}" -> "192.168.0.1/255.255.255.0"
    """
    network = netaddr.IPNetwork(value)
    # Join the host address and the dotted netmask with the requested separator.
    return sep.join((str(network.ip), str(network.netmask)))
Converts a CIDR formatted prefix into an address netmask representation. Argument sep specifies the separator between the address and netmask parts. By default it's a single space. Examples: >>> "{{ '192.168.0.1/24|prefix_to_addrmask }}" -> "192.168.0.1 255.255.255.0" >>> "{{ '192.168.0.1/24|prefix_to_addrmask('/') }}" -> "192.168.0.1/255.255.255.0"
Below is the the instruction that describes the task: ### Input: Converts a CIDR formatted prefix into an address netmask representation. Argument sep specifies the separator between the address and netmask parts. By default it's a single space. Examples: >>> "{{ '192.168.0.1/24|prefix_to_addrmask }}" -> "192.168.0.1 255.255.255.0" >>> "{{ '192.168.0.1/24|prefix_to_addrmask('/') }}" -> "192.168.0.1/255.255.255.0" ### Response: def prefix_to_addrmask(value, sep=" "): """ Converts a CIDR formatted prefix into an address netmask representation. Argument sep specifies the separator between the address and netmask parts. By default it's a single space. Examples: >>> "{{ '192.168.0.1/24|prefix_to_addrmask }}" -> "192.168.0.1 255.255.255.0" >>> "{{ '192.168.0.1/24|prefix_to_addrmask('/') }}" -> "192.168.0.1/255.255.255.0" """ prefix = netaddr.IPNetwork(value) return "{}{}{}".format(prefix.ip, sep, prefix.netmask)
def Alt(cls, key):
    """Fire an ALT+key combination on the currently located element.

    @note: key event -> alt + key
    @param key: the character to combine with ALT, e.g. 'X'
    """
    target = cls._element()
    target.send_keys(Keys.ALT, key)
在指定元素上执行alt组合事件 @note: key event -> alt + key @param key: 如'X'
Below is the the instruction that describes the task: ### Input: 在指定元素上执行alt组合事件 @note: key event -> alt + key @param key: 如'X' ### Response: def Alt(cls, key): """ 在指定元素上执行alt组合事件 @note: key event -> alt + key @param key: 如'X' """ element = cls._element() element.send_keys(Keys.ALT, key)
def findContours(*args, **kwargs):
    """Version-agnostic wrapper around ``cv2.findContours``.

    OpenCV 4 returns ``(contours, hierarchy)`` while OpenCV 3 returns
    ``(image, contours, hierarchy)``; this normalises both.

    Returns:
        contours, hierarchy
    """
    version = cv2.__version__
    if version.startswith('4'):
        contours, hierarchy = cv2.findContours(*args, **kwargs)
    elif version.startswith('3'):
        _, contours, hierarchy = cv2.findContours(*args, **kwargs)
    else:
        # Other major versions are unsupported; refuse before calling.
        raise AssertionError(
            'cv2 must be either version 3 or 4 to call this method')
    return contours, hierarchy
Wraps cv2.findContours to maintain compatibility between versions 3 and 4 Returns: contours, hierarchy
Below is the the instruction that describes the task: ### Input: Wraps cv2.findContours to maintain compatiblity between versions 3 and 4 Returns: contours, hierarchy ### Response: def findContours(*args, **kwargs): """ Wraps cv2.findContours to maintain compatiblity between versions 3 and 4 Returns: contours, hierarchy """ if cv2.__version__.startswith('4'): contours, hierarchy = cv2.findContours(*args, **kwargs) elif cv2.__version__.startswith('3'): _, contours, hierarchy = cv2.findContours(*args, **kwargs) else: raise AssertionError( 'cv2 must be either version 3 or 4 to call this method') return contours, hierarchy
def restrict_to_routes(feed: "Feed", route_ids: List[str]) -> "Feed":
    """
    Build a new feed by restricting this one to only the stops, trips,
    shapes, etc. used by the routes with the given list of route IDs.
    Return the resulting feed.
    """
    # Work on a copy so the caller's feed is left untouched; the tables
    # below are then narrowed in dependency order.
    feed = feed.copy()

    # Routes and their trips.
    feed.routes = feed.routes[feed.routes["route_id"].isin(route_ids)].copy()
    feed.trips = feed.trips[feed.trips["route_id"].isin(route_ids)].copy()

    # Stop times of the remaining trips.
    trip_ids = feed.trips["trip_id"]
    feed.stop_times = feed.stop_times[
        feed.stop_times["trip_id"].isin(trip_ids)
    ].copy()

    # Stops referenced by the remaining stop times.
    stop_ids = feed.stop_times["stop_id"].unique()
    feed.stops = feed.stops[feed.stops["stop_id"].isin(stop_ids)].copy()

    # Calendar entries for the remaining services.
    service_ids = feed.trips["service_id"]
    if feed.calendar is not None:
        feed.calendar = feed.calendar[
            feed.calendar["service_id"].isin(service_ids)
        ].copy()

    # Agencies of the remaining routes (the column itself is optional).
    if "agency_id" in feed.routes.columns:
        agency_ids = feed.routes["agency_id"]
        if len(agency_ids):
            feed.agency = feed.agency[
                feed.agency["agency_id"].isin(agency_ids)
            ].copy()

    # Optional files below.
    if feed.calendar_dates is not None:
        feed.calendar_dates = feed.calendar_dates[
            feed.calendar_dates["service_id"].isin(service_ids)
        ].copy()

    if feed.frequencies is not None:
        feed.frequencies = feed.frequencies[
            feed.frequencies["trip_id"].isin(trip_ids)
        ].copy()

    if feed.shapes is not None:
        shape_ids = feed.trips["shape_id"]
        feed.shapes = feed.shapes[
            feed.shapes["shape_id"].isin(shape_ids)
        ].copy()

    if feed.transfers is not None:
        # Keep a transfer when either endpoint is a surviving stop.
        touches_kept_stop = feed.transfers["from_stop_id"].isin(
            stop_ids
        ) | feed.transfers["to_stop_id"].isin(stop_ids)
        feed.transfers = feed.transfers[touches_kept_stop].copy()

    return feed
Build a new feed by restricting this one to only the stops, trips, shapes, etc. used by the routes with the given list of route IDs. Return the resulting feed.
Below is the the instruction that describes the task: ### Input: Build a new feed by restricting this one to only the stops, trips, shapes, etc. used by the routes with the given list of route IDs. Return the resulting feed. ### Response: def restrict_to_routes(feed: "Feed", route_ids: List[str]) -> "Feed": """ Build a new feed by restricting this one to only the stops, trips, shapes, etc. used by the routes with the given list of route IDs. Return the resulting feed. """ # Initialize the new feed as the old feed. # Restrict its DataFrames below. feed = feed.copy() # Slice routes feed.routes = feed.routes[feed.routes["route_id"].isin(route_ids)].copy() # Slice trips feed.trips = feed.trips[feed.trips["route_id"].isin(route_ids)].copy() # Slice stop times trip_ids = feed.trips["trip_id"] feed.stop_times = feed.stop_times[ feed.stop_times["trip_id"].isin(trip_ids) ].copy() # Slice stops stop_ids = feed.stop_times["stop_id"].unique() feed.stops = feed.stops[feed.stops["stop_id"].isin(stop_ids)].copy() # Slice calendar service_ids = feed.trips["service_id"] if feed.calendar is not None: feed.calendar = feed.calendar[ feed.calendar["service_id"].isin(service_ids) ].copy() # Get agency for trips if "agency_id" in feed.routes.columns: agency_ids = feed.routes["agency_id"] if len(agency_ids): feed.agency = feed.agency[ feed.agency["agency_id"].isin(agency_ids) ].copy() # Now for the optional files. # Get calendar dates for trips. 
if feed.calendar_dates is not None: feed.calendar_dates = feed.calendar_dates[ feed.calendar_dates["service_id"].isin(service_ids) ].copy() # Get frequencies for trips if feed.frequencies is not None: feed.frequencies = feed.frequencies[ feed.frequencies["trip_id"].isin(trip_ids) ].copy() # Get shapes for trips if feed.shapes is not None: shape_ids = feed.trips["shape_id"] feed.shapes = feed.shapes[ feed.shapes["shape_id"].isin(shape_ids) ].copy() # Get transfers for stops if feed.transfers is not None: feed.transfers = feed.transfers[ feed.transfers["from_stop_id"].isin(stop_ids) | feed.transfers["to_stop_id"].isin(stop_ids) ].copy() return feed
def check_trademark_symbol(text):
    """Use the trademark symbol instead of (TM)."""
    err = "typography.symbols.trademark"
    msg = u"(TM) is a goofy alphabetic approximation, use the symbol ™."
    # Raw string so the backslashes reach the regex engine literally;
    # the previous non-raw "\(TM\)" relied on invalid string escapes,
    # which emit a DeprecationWarning/SyntaxWarning on modern Python.
    regex = r"\(TM\)"
    return existence_check(
        text, [regex], err, msg, max_errors=3, require_padding=False)
Use the trademark symbol instead of (TM).
Below is the the instruction that describes the task: ### Input: Use the trademark symbol instead of (TM). ### Response: def check_trademark_symbol(text): """Use the trademark symbol instead of (TM).""" err = "typography.symbols.trademark" msg = u"(TM) is a goofy alphabetic approximation, use the symbol ™." regex = "\(TM\)" return existence_check( text, [regex], err, msg, max_errors=3, require_padding=False)
def from_networkx(cls, graph, weight='weight'):
    r"""Import a graph from NetworkX.

    Edge weights are retrieved as an edge attribute,
    under the name specified by the ``weight`` parameter.

    Signals are retrieved from node attributes,
    and stored in the :attr:`signals` dictionary under the attribute name.
    `N`-dimensional signals that were broken during export are joined.

    Parameters
    ----------
    graph : :class:`networkx.Graph`
        A NetworkX graph object.
    weight : string or None, optional
        The edge attribute that holds the numerical values used as the edge
        weights. All edge weights are set to 1 if None, or not found.

    Returns
    -------
    graph : :class:`~pygsp.graphs.Graph`
        A PyGSP graph object.

    Notes
    -----
    The nodes are ordered according to :meth:`networkx.Graph.nodes`.

    In NetworkX, node attributes need not be set for every node.
    If a node attribute is not set for a node, a NaN is assigned to the
    corresponding signal for that node.

    If the graph is a :class:`networkx.MultiGraph`, multiedges are
    aggregated by summation.

    See Also
    --------
    from_graphtool : import from graph-tool
    load : load from a file

    Examples
    --------
    >>> import networkx as nx
    >>> graph = nx.Graph()
    >>> graph.add_edge(1, 2, weight=0.2)
    >>> graph.add_edge(2, 3, weight=0.9)
    >>> graph.add_node(4, sig=3.1416)
    >>> graph.nodes()
    NodeView((1, 2, 3, 4))
    >>> graph = graphs.Graph.from_networkx(graph)
    >>> graph.W.toarray()
    array([[0. , 0.2, 0. , 0. ],
           [0.2, 0. , 0.9, 0. ],
           [0. , 0.9, 0. , 0. ],
           [0. , 0. , 0. , 0. ]])
    >>> graph.signals
    {'sig': array([   nan,    nan,    nan, 3.1416])}

    """
    nx = _import_networkx()
    from .graph import Graph

    adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight)
    graph_pg = Graph(adjacency)

    for vertex, node in enumerate(graph.nodes()):
        attributes = graph.nodes[node]
        for name in attributes:
            try:
                signal = graph_pg.signals[name]
            except KeyError:
                # First time this attribute is seen: create a NaN-filled
                # signal so nodes lacking the attribute read as NaN.
                signal = np.full(graph_pg.n_vertices, np.nan)
                graph_pg.set_signal(signal, name)
            try:
                signal[vertex] = attributes[name]
            except KeyError:
                pass  # attribute not set for this node

    graph_pg._join_signals()
    return graph_pg
r"""Import a graph from NetworkX. Edge weights are retrieved as an edge attribute, under the name specified by the ``weight`` parameter. Signals are retrieved from node attributes, and stored in the :attr:`signals` dictionary under the attribute name. `N`-dimensional signals that were broken during export are joined. Parameters ---------- graph : :class:`networkx.Graph` A NetworkX graph object. weight : string or None, optional The edge attribute that holds the numerical values used as the edge weights. All edge weights are set to 1 if None, or not found. Returns ------- graph : :class:`~pygsp.graphs.Graph` A PyGSP graph object. Notes ----- The nodes are ordered according to :meth:`networkx.Graph.nodes`. In NetworkX, node attributes need not be set for every node. If a node attribute is not set for a node, a NaN is assigned to the corresponding signal for that node. If the graph is a :class:`networkx.MultiGraph`, multiedges are aggregated by summation. See Also -------- from_graphtool : import from graph-tool load : load from a file Examples -------- >>> import networkx as nx >>> graph = nx.Graph() >>> graph.add_edge(1, 2, weight=0.2) >>> graph.add_edge(2, 3, weight=0.9) >>> graph.add_node(4, sig=3.1416) >>> graph.nodes() NodeView((1, 2, 3, 4)) >>> graph = graphs.Graph.from_networkx(graph) >>> graph.W.toarray() array([[0. , 0.2, 0. , 0. ], [0.2, 0. , 0.9, 0. ], [0. , 0.9, 0. , 0. ], [0. , 0. , 0. , 0. ]]) >>> graph.signals {'sig': array([ nan, nan, nan, 3.1416])}
Below is the the instruction that describes the task: ### Input: r"""Import a graph from NetworkX. Edge weights are retrieved as an edge attribute, under the name specified by the ``weight`` parameter. Signals are retrieved from node attributes, and stored in the :attr:`signals` dictionary under the attribute name. `N`-dimensional signals that were broken during export are joined. Parameters ---------- graph : :class:`networkx.Graph` A NetworkX graph object. weight : string or None, optional The edge attribute that holds the numerical values used as the edge weights. All edge weights are set to 1 if None, or not found. Returns ------- graph : :class:`~pygsp.graphs.Graph` A PyGSP graph object. Notes ----- The nodes are ordered according to :meth:`networkx.Graph.nodes`. In NetworkX, node attributes need not be set for every node. If a node attribute is not set for a node, a NaN is assigned to the corresponding signal for that node. If the graph is a :class:`networkx.MultiGraph`, multiedges are aggregated by summation. See Also -------- from_graphtool : import from graph-tool load : load from a file Examples -------- >>> import networkx as nx >>> graph = nx.Graph() >>> graph.add_edge(1, 2, weight=0.2) >>> graph.add_edge(2, 3, weight=0.9) >>> graph.add_node(4, sig=3.1416) >>> graph.nodes() NodeView((1, 2, 3, 4)) >>> graph = graphs.Graph.from_networkx(graph) >>> graph.W.toarray() array([[0. , 0.2, 0. , 0. ], [0.2, 0. , 0.9, 0. ], [0. , 0.9, 0. , 0. ], [0. , 0. , 0. , 0. ]]) >>> graph.signals {'sig': array([ nan, nan, nan, 3.1416])} ### Response: def from_networkx(cls, graph, weight='weight'): r"""Import a graph from NetworkX. Edge weights are retrieved as an edge attribute, under the name specified by the ``weight`` parameter. Signals are retrieved from node attributes, and stored in the :attr:`signals` dictionary under the attribute name. `N`-dimensional signals that were broken during export are joined. 
Parameters ---------- graph : :class:`networkx.Graph` A NetworkX graph object. weight : string or None, optional The edge attribute that holds the numerical values used as the edge weights. All edge weights are set to 1 if None, or not found. Returns ------- graph : :class:`~pygsp.graphs.Graph` A PyGSP graph object. Notes ----- The nodes are ordered according to :meth:`networkx.Graph.nodes`. In NetworkX, node attributes need not be set for every node. If a node attribute is not set for a node, a NaN is assigned to the corresponding signal for that node. If the graph is a :class:`networkx.MultiGraph`, multiedges are aggregated by summation. See Also -------- from_graphtool : import from graph-tool load : load from a file Examples -------- >>> import networkx as nx >>> graph = nx.Graph() >>> graph.add_edge(1, 2, weight=0.2) >>> graph.add_edge(2, 3, weight=0.9) >>> graph.add_node(4, sig=3.1416) >>> graph.nodes() NodeView((1, 2, 3, 4)) >>> graph = graphs.Graph.from_networkx(graph) >>> graph.W.toarray() array([[0. , 0.2, 0. , 0. ], [0.2, 0. , 0.9, 0. ], [0. , 0.9, 0. , 0. ], [0. , 0. , 0. , 0. ]]) >>> graph.signals {'sig': array([ nan, nan, nan, 3.1416])} """ nx = _import_networkx() from .graph import Graph adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight) graph_pg = Graph(adjacency) for i, node in enumerate(graph.nodes()): for name in graph.nodes[node].keys(): try: signal = graph_pg.signals[name] except KeyError: signal = np.full(graph_pg.n_vertices, np.nan) graph_pg.set_signal(signal, name) try: signal[i] = graph.nodes[node][name] except KeyError: pass # attribute not set for node graph_pg._join_signals() return graph_pg
def get_distance(self, node1_name, node2_name): """ Returns a length of an edge / path, if exists, from the current tree :param node1_name: a first node name in current tree :param node2_name: a second node name in current tree :return: a length of specified by a pair of vertices edge / path :rtype: `Number` :raises: ValueError, if requested a length of an edge, that is not present in current tree """ return self.__root.get_distance(target=node1_name, target2=node2_name)
Returns a length of an edge / path, if exists, from the current tree :param node1_name: a first node name in current tree :param node2_name: a second node name in current tree :return: a length of specified by a pair of vertices edge / path :rtype: `Number` :raises: ValueError, if requested a length of an edge, that is not present in current tree
Below is the the instruction that describes the task: ### Input: Returns a length of an edge / path, if exists, from the current tree :param node1_name: a first node name in current tree :param node2_name: a second node name in current tree :return: a length of specified by a pair of vertices edge / path :rtype: `Number` :raises: ValueError, if requested a length of an edge, that is not present in current tree ### Response: def get_distance(self, node1_name, node2_name): """ Returns a length of an edge / path, if exists, from the current tree :param node1_name: a first node name in current tree :param node2_name: a second node name in current tree :return: a length of specified by a pair of vertices edge / path :rtype: `Number` :raises: ValueError, if requested a length of an edge, that is not present in current tree """ return self.__root.get_distance(target=node1_name, target2=node2_name)
def security_label_pivot(self, security_label_resource): """Pivot point on security labels for this resource. This method will return all *resources* (group, indicators, task, victims, etc) for this resource that have the provided security label applied. **Example Endpoints URI's** +--------------+----------------------------------------------------------------------+ | HTTP Method | API Endpoint URI's | +==============+======================================================================+ | GET | /v2/securityLabels/{resourceId}/groups/{resourceType} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/groups/{resourceType}/{uniqueId} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/indicators/{resourceType} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/indicators/{resourceType}/{uniqueId} | +--------------+----------------------------------------------------------------------+ Args: resource_id (string): The resource pivot id (security label name). """ resource = self.copy() resource._request_uri = '{}/{}'.format( security_label_resource.request_uri, resource._request_uri ) return resource
Pivot point on security labels for this resource. This method will return all *resources* (group, indicators, task, victims, etc) for this resource that have the provided security label applied. **Example Endpoints URI's** +--------------+----------------------------------------------------------------------+ | HTTP Method | API Endpoint URI's | +==============+======================================================================+ | GET | /v2/securityLabels/{resourceId}/groups/{resourceType} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/groups/{resourceType}/{uniqueId} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/indicators/{resourceType} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/indicators/{resourceType}/{uniqueId} | +--------------+----------------------------------------------------------------------+ Args: resource_id (string): The resource pivot id (security label name).
Below is the the instruction that describes the task: ### Input: Pivot point on security labels for this resource. This method will return all *resources* (group, indicators, task, victims, etc) for this resource that have the provided security label applied. **Example Endpoints URI's** +--------------+----------------------------------------------------------------------+ | HTTP Method | API Endpoint URI's | +==============+======================================================================+ | GET | /v2/securityLabels/{resourceId}/groups/{resourceType} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/groups/{resourceType}/{uniqueId} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/indicators/{resourceType} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/indicators/{resourceType}/{uniqueId} | +--------------+----------------------------------------------------------------------+ Args: resource_id (string): The resource pivot id (security label name). ### Response: def security_label_pivot(self, security_label_resource): """Pivot point on security labels for this resource. This method will return all *resources* (group, indicators, task, victims, etc) for this resource that have the provided security label applied. 
**Example Endpoints URI's** +--------------+----------------------------------------------------------------------+ | HTTP Method | API Endpoint URI's | +==============+======================================================================+ | GET | /v2/securityLabels/{resourceId}/groups/{resourceType} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/groups/{resourceType}/{uniqueId} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/indicators/{resourceType} | +--------------+----------------------------------------------------------------------+ | GET | /v2/securityLabels/{resourceId}/indicators/{resourceType}/{uniqueId} | +--------------+----------------------------------------------------------------------+ Args: resource_id (string): The resource pivot id (security label name). """ resource = self.copy() resource._request_uri = '{}/{}'.format( security_label_resource.request_uri, resource._request_uri ) return resource
def ShowInfo(self): """Shows information about available hashers, parsers, plugins, etc.""" self._output_writer.Write( '{0:=^80s}\n'.format(' log2timeline/plaso information ')) plugin_list = self._GetPluginData() for header, data in plugin_list.items(): table_view = views.ViewsFactory.GetTableView( self._views_format_type, column_names=['Name', 'Description'], title=header) for entry_header, entry_data in sorted(data): table_view.AddRow([entry_header, entry_data]) table_view.Write(self._output_writer)
Shows information about available hashers, parsers, plugins, etc.
Below is the the instruction that describes the task: ### Input: Shows information about available hashers, parsers, plugins, etc. ### Response: def ShowInfo(self): """Shows information about available hashers, parsers, plugins, etc.""" self._output_writer.Write( '{0:=^80s}\n'.format(' log2timeline/plaso information ')) plugin_list = self._GetPluginData() for header, data in plugin_list.items(): table_view = views.ViewsFactory.GetTableView( self._views_format_type, column_names=['Name', 'Description'], title=header) for entry_header, entry_data in sorted(data): table_view.AddRow([entry_header, entry_data]) table_view.Write(self._output_writer)
def _nack(self, message_id, subscription_id, **kwargs): """Reject receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message_id: ID of the message to be rejected :param subscription: ID of the relevant subscriptiong :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if rejection should be part of a transaction """ self._conn.nack(message_id, subscription_id, **kwargs)
Reject receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message_id: ID of the message to be rejected :param subscription: ID of the relevant subscriptiong :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if rejection should be part of a transaction
Below is the the instruction that describes the task: ### Input: Reject receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message_id: ID of the message to be rejected :param subscription: ID of the relevant subscriptiong :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if rejection should be part of a transaction ### Response: def _nack(self, message_id, subscription_id, **kwargs): """Reject receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message_id: ID of the message to be rejected :param subscription: ID of the relevant subscriptiong :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if rejection should be part of a transaction """ self._conn.nack(message_id, subscription_id, **kwargs)
def should_reuse_driver(self, scope, test_passed, context=None): """Check if the driver should be reused :param scope: execution scope (function, module, class or session) :param test_passed: True if the test has passed :param context: behave context :returns: True if the driver should be reused """ reuse_driver = self.config.getboolean_optional('Driver', 'reuse_driver') reuse_driver_session = self.config.getboolean_optional('Driver', 'reuse_driver_session') restart_driver_after_failure = (self.config.getboolean_optional('Driver', 'restart_driver_after_failure') or self.config.getboolean_optional('Driver', 'restart_driver_fail')) if context and scope == 'function': reuse_driver = reuse_driver or (hasattr(context, 'reuse_driver_from_tags') and context.reuse_driver_from_tags) return (((reuse_driver and scope == 'function') or (reuse_driver_session and scope != 'session')) and (test_passed or not restart_driver_after_failure))
Check if the driver should be reused :param scope: execution scope (function, module, class or session) :param test_passed: True if the test has passed :param context: behave context :returns: True if the driver should be reused
Below is the the instruction that describes the task: ### Input: Check if the driver should be reused :param scope: execution scope (function, module, class or session) :param test_passed: True if the test has passed :param context: behave context :returns: True if the driver should be reused ### Response: def should_reuse_driver(self, scope, test_passed, context=None): """Check if the driver should be reused :param scope: execution scope (function, module, class or session) :param test_passed: True if the test has passed :param context: behave context :returns: True if the driver should be reused """ reuse_driver = self.config.getboolean_optional('Driver', 'reuse_driver') reuse_driver_session = self.config.getboolean_optional('Driver', 'reuse_driver_session') restart_driver_after_failure = (self.config.getboolean_optional('Driver', 'restart_driver_after_failure') or self.config.getboolean_optional('Driver', 'restart_driver_fail')) if context and scope == 'function': reuse_driver = reuse_driver or (hasattr(context, 'reuse_driver_from_tags') and context.reuse_driver_from_tags) return (((reuse_driver and scope == 'function') or (reuse_driver_session and scope != 'session')) and (test_passed or not restart_driver_after_failure))
def viterbi(self,observations): """ The probability of occurence of the observation sequence **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: Returns a list of hidden states. :rtype: list of states **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.viterbi(observations)) """ # Find total states,observations total_stages = len(observations) num_states = len(self.states) # initialize data # Path stores the state sequence giving maximum probability old_path = np.zeros( (total_stages, num_states) ) new_path = np.zeros( (total_stages, num_states) ) # Find initial delta # Map observation to an index # delta[s] stores the probability of most probable path ending in state 's' ob_ind = self.obs_map[ observations[0] ] delta = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob ) # Scale delta delta = delta /np.sum(delta) # initialize path old_path[0,:] = [i for i in range(num_states) ] # Find delta[t][x] for each state 'x' at the iteration 't' # delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path for curr_t in range(1,total_stages): # Map observation to an index ob_ind = self.obs_map[ observations[curr_t] ] # Find temp and take max along each row to get delta temp = np.multiply (np.multiply(delta , self.trans_prob.transpose()) , self.em_prob[:, ob_ind] ) # Update delta and scale it delta = temp.max(axis 
= 1).transpose() delta = delta /np.sum(delta) # Find state which is most probable using argax # Convert to a list for easier processing max_temp = temp.argmax(axis=1).transpose() max_temp = np.ravel(max_temp).tolist() # Update path for s in range(num_states): new_path[:curr_t,s] = old_path[0:curr_t, max_temp[s] ] new_path[curr_t,:] = [i for i in range(num_states) ] old_path = new_path.copy() # Find the state in last stage, giving maximum probability final_max = np.argmax(np.ravel(delta)) best_path = old_path[:,final_max].tolist() best_path_map = [ self.state_map[i] for i in best_path] return best_path_map
The probability of occurence of the observation sequence **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: Returns a list of hidden states. :rtype: list of states **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.viterbi(observations))
Below is the the instruction that describes the task: ### Input: The probability of occurence of the observation sequence **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: Returns a list of hidden states. :rtype: list of states **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.viterbi(observations)) ### Response: def viterbi(self,observations): """ The probability of occurence of the observation sequence **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: Returns a list of hidden states. :rtype: list of states **Features**: Scaling applied here. This ensures that no underflow error occurs. 
**Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.viterbi(observations)) """ # Find total states,observations total_stages = len(observations) num_states = len(self.states) # initialize data # Path stores the state sequence giving maximum probability old_path = np.zeros( (total_stages, num_states) ) new_path = np.zeros( (total_stages, num_states) ) # Find initial delta # Map observation to an index # delta[s] stores the probability of most probable path ending in state 's' ob_ind = self.obs_map[ observations[0] ] delta = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob ) # Scale delta delta = delta /np.sum(delta) # initialize path old_path[0,:] = [i for i in range(num_states) ] # Find delta[t][x] for each state 'x' at the iteration 't' # delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path for curr_t in range(1,total_stages): # Map observation to an index ob_ind = self.obs_map[ observations[curr_t] ] # Find temp and take max along each row to get delta temp = np.multiply (np.multiply(delta , self.trans_prob.transpose()) , self.em_prob[:, ob_ind] ) # Update delta and scale it delta = temp.max(axis = 1).transpose() delta = delta /np.sum(delta) # Find state which is most probable using argax # Convert to a list for easier processing max_temp = temp.argmax(axis=1).transpose() max_temp = np.ravel(max_temp).tolist() # Update path for s in range(num_states): new_path[:curr_t,s] = old_path[0:curr_t, max_temp[s] ] new_path[curr_t,:] = [i for i in range(num_states) ] old_path = new_path.copy() # Find the state in last 
stage, giving maximum probability final_max = np.argmax(np.ravel(delta)) best_path = old_path[:,final_max].tolist() best_path_map = [ self.state_map[i] for i in best_path] return best_path_map
def convertbase(number, base=10): """ Convert a number in base 10 to another base :type number: number :param number: The number to convert :type base: integer :param base: The base to convert to. """ integer = number if not integer: return '0' sign = 1 if integer > 0 else -1 alphanum = string.digits + string.ascii_lowercase nums = alphanum[:base] res = '' integer *= sign while integer: integer, mod = divmod(integer, base) res += nums[mod] return ('' if sign == 1 else '-') + res[::-1]
Convert a number in base 10 to another base :type number: number :param number: The number to convert :type base: integer :param base: The base to convert to.
Below is the the instruction that describes the task: ### Input: Convert a number in base 10 to another base :type number: number :param number: The number to convert :type base: integer :param base: The base to convert to. ### Response: def convertbase(number, base=10): """ Convert a number in base 10 to another base :type number: number :param number: The number to convert :type base: integer :param base: The base to convert to. """ integer = number if not integer: return '0' sign = 1 if integer > 0 else -1 alphanum = string.digits + string.ascii_lowercase nums = alphanum[:base] res = '' integer *= sign while integer: integer, mod = divmod(integer, base) res += nums[mod] return ('' if sign == 1 else '-') + res[::-1]
def cpp_spec(): """C++ specification, provided for example, and java compatible.""" return { INDENTATION : '\t', BEG_BLOCK : '{', END_BLOCK : '}', BEG_LINE : '', END_LINE : '\n', BEG_ACTION : '', END_ACTION : ';', BEG_CONDITION : 'if(', END_CONDITION : ')', LOGICAL_AND : ' && ', LOGICAL_OR : ' || ' }
C++ specification, provided for example, and java compatible.
Below is the the instruction that describes the task: ### Input: C++ specification, provided for example, and java compatible. ### Response: def cpp_spec(): """C++ specification, provided for example, and java compatible.""" return { INDENTATION : '\t', BEG_BLOCK : '{', END_BLOCK : '}', BEG_LINE : '', END_LINE : '\n', BEG_ACTION : '', END_ACTION : ';', BEG_CONDITION : 'if(', END_CONDITION : ')', LOGICAL_AND : ' && ', LOGICAL_OR : ' || ' }
def disconnect(self, callback): """ Disconnects the signal from the given function. :type callback: object :param callback: The callback function. """ if self.weak_subscribers is not None: with self.lock: index = self._weakly_connected_index(callback) if index is not None: self.weak_subscribers.pop(index)[0] if self.hard_subscribers is not None: try: index = self._hard_callbacks().index(callback) except ValueError: pass else: self.hard_subscribers.pop(index)
Disconnects the signal from the given function. :type callback: object :param callback: The callback function.
Below is the the instruction that describes the task: ### Input: Disconnects the signal from the given function. :type callback: object :param callback: The callback function. ### Response: def disconnect(self, callback): """ Disconnects the signal from the given function. :type callback: object :param callback: The callback function. """ if self.weak_subscribers is not None: with self.lock: index = self._weakly_connected_index(callback) if index is not None: self.weak_subscribers.pop(index)[0] if self.hard_subscribers is not None: try: index = self._hard_callbacks().index(callback) except ValueError: pass else: self.hard_subscribers.pop(index)
def exit_config_mode(self, exit_config="end", pattern="#"): """Exit from configuration mode.""" return super(CiscoBaseConnection, self).exit_config_mode( exit_config=exit_config, pattern=pattern )
Exit from configuration mode.
Below is the the instruction that describes the task: ### Input: Exit from configuration mode. ### Response: def exit_config_mode(self, exit_config="end", pattern="#"): """Exit from configuration mode.""" return super(CiscoBaseConnection, self).exit_config_mode( exit_config=exit_config, pattern=pattern )
def cloud_cover_to_ghi_linear(self, cloud_cover, ghi_clear, offset=35, **kwargs): """ Convert cloud cover to GHI using a linear relationship. 0% cloud cover returns ghi_clear. 100% cloud cover returns offset*ghi_clear. Parameters ---------- cloud_cover: numeric Cloud cover in %. ghi_clear: numeric GHI under clear sky conditions. offset: numeric, default 35 Determines the minimum GHI. kwargs Not used. Returns ------- ghi: numeric Estimated GHI. References ---------- Larson et. al. "Day-ahead forecasting of solar power output from photovoltaic plants in the American Southwest" Renewable Energy 91, 11-20 (2016). """ offset = offset / 100. cloud_cover = cloud_cover / 100. ghi = (offset + (1 - offset) * (1 - cloud_cover)) * ghi_clear return ghi
Convert cloud cover to GHI using a linear relationship. 0% cloud cover returns ghi_clear. 100% cloud cover returns offset*ghi_clear. Parameters ---------- cloud_cover: numeric Cloud cover in %. ghi_clear: numeric GHI under clear sky conditions. offset: numeric, default 35 Determines the minimum GHI. kwargs Not used. Returns ------- ghi: numeric Estimated GHI. References ---------- Larson et. al. "Day-ahead forecasting of solar power output from photovoltaic plants in the American Southwest" Renewable Energy 91, 11-20 (2016).
Below is the the instruction that describes the task: ### Input: Convert cloud cover to GHI using a linear relationship. 0% cloud cover returns ghi_clear. 100% cloud cover returns offset*ghi_clear. Parameters ---------- cloud_cover: numeric Cloud cover in %. ghi_clear: numeric GHI under clear sky conditions. offset: numeric, default 35 Determines the minimum GHI. kwargs Not used. Returns ------- ghi: numeric Estimated GHI. References ---------- Larson et. al. "Day-ahead forecasting of solar power output from photovoltaic plants in the American Southwest" Renewable Energy 91, 11-20 (2016). ### Response: def cloud_cover_to_ghi_linear(self, cloud_cover, ghi_clear, offset=35, **kwargs): """ Convert cloud cover to GHI using a linear relationship. 0% cloud cover returns ghi_clear. 100% cloud cover returns offset*ghi_clear. Parameters ---------- cloud_cover: numeric Cloud cover in %. ghi_clear: numeric GHI under clear sky conditions. offset: numeric, default 35 Determines the minimum GHI. kwargs Not used. Returns ------- ghi: numeric Estimated GHI. References ---------- Larson et. al. "Day-ahead forecasting of solar power output from photovoltaic plants in the American Southwest" Renewable Energy 91, 11-20 (2016). """ offset = offset / 100. cloud_cover = cloud_cover / 100. ghi = (offset + (1 - offset) * (1 - cloud_cover)) * ghi_clear return ghi
def bgp_table_parser(bgp_table): """Generator that parses a line of bgp summary table and returns a dict compatible with NAPALM Example line: 10.2.1.14 4 10 472516 472238 361 0 0 3w1d 9 """ bgp_table = bgp_table.strip() for bgp_entry in bgp_table.splitlines(): bgp_table_fields = bgp_entry.split() try: if re.search(r"Shut.*Admin", bgp_entry): ( peer_ip, bgp_version, remote_as, msg_rcvd, msg_sent, _, _, _, uptime, state_1, state_2, ) = bgp_table_fields state_pfxrcd = "{} {}".format(state_1, state_2) else: ( peer_ip, bgp_version, remote_as, msg_rcvd, msg_sent, _, _, _, uptime, state_pfxrcd, ) = bgp_table_fields except ValueError: raise ValueError( "Unexpected entry ({}) in BGP summary table".format(bgp_table_fields) ) is_enabled = True try: received_prefixes = int(state_pfxrcd) is_up = True except ValueError: received_prefixes = -1 is_up = False if re.search(r"Shut.*Admin", state_pfxrcd): is_enabled = False if not is_up: uptime = -1 if uptime != -1: uptime = bgp_time_conversion(uptime) yield { peer_ip: { "is_enabled": is_enabled, "uptime": uptime, "remote_as": helpers.as_number(remote_as), "is_up": is_up, "description": "", "received_prefixes": received_prefixes, } }
Generator that parses a line of bgp summary table and returns a dict compatible with NAPALM Example line: 10.2.1.14 4 10 472516 472238 361 0 0 3w1d 9
Below is the the instruction that describes the task: ### Input: Generator that parses a line of bgp summary table and returns a dict compatible with NAPALM Example line: 10.2.1.14 4 10 472516 472238 361 0 0 3w1d 9 ### Response: def bgp_table_parser(bgp_table): """Generator that parses a line of bgp summary table and returns a dict compatible with NAPALM Example line: 10.2.1.14 4 10 472516 472238 361 0 0 3w1d 9 """ bgp_table = bgp_table.strip() for bgp_entry in bgp_table.splitlines(): bgp_table_fields = bgp_entry.split() try: if re.search(r"Shut.*Admin", bgp_entry): ( peer_ip, bgp_version, remote_as, msg_rcvd, msg_sent, _, _, _, uptime, state_1, state_2, ) = bgp_table_fields state_pfxrcd = "{} {}".format(state_1, state_2) else: ( peer_ip, bgp_version, remote_as, msg_rcvd, msg_sent, _, _, _, uptime, state_pfxrcd, ) = bgp_table_fields except ValueError: raise ValueError( "Unexpected entry ({}) in BGP summary table".format(bgp_table_fields) ) is_enabled = True try: received_prefixes = int(state_pfxrcd) is_up = True except ValueError: received_prefixes = -1 is_up = False if re.search(r"Shut.*Admin", state_pfxrcd): is_enabled = False if not is_up: uptime = -1 if uptime != -1: uptime = bgp_time_conversion(uptime) yield { peer_ip: { "is_enabled": is_enabled, "uptime": uptime, "remote_as": helpers.as_number(remote_as), "is_up": is_up, "description": "", "received_prefixes": received_prefixes, } }
def setup_cluster(self, cluster, extra_args=tuple()): """ Configure the cluster by running an Ansible playbook. The ElastiCluster configuration attribute `<kind>_groups` determines, for each node kind, what Ansible groups nodes of that kind are assigned to. :param cluster: cluster to configure :type cluster: :py:class:`elasticluster.cluster.Cluster` :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: ``True`` on success, ``False`` otherwise. Please note, if nothing has to be configured, then ``True`` is returned. :raises: `ConfigurationError` if the playbook can not be found or is corrupt. """ inventory_path = self._build_inventory(cluster) if inventory_path is None: # No inventory file has been created, maybe an # invalid class has been specified in config file? Or none? # assume it is fine. elasticluster.log.info("No setup required for this cluster.") return True assert os.path.exists(inventory_path), ( "inventory file `{inventory_path}` does not exist" .format(inventory_path=inventory_path)) # build list of directories to search for roles/include files ansible_roles_dirs = [ # include Ansible default first ... '/etc/ansible/roles', ] for root_path in [ # ... then ElastiCluster's built-in defaults resource_filename('elasticluster', 'share/playbooks'), # ... then wherever the playbook is os.path.dirname(self._playbook_path), ]: for path in [ root_path, os.path.join(root_path, 'roles'), ]: if path not in ansible_roles_dirs and os.path.exists(path): ansible_roles_dirs.append(path) # Use env vars to configure Ansible; # see all values in https://github.com/ansible/ansible/blob/devel/lib/ansible/constants.py # # Ansible does not merge keys in configuration files: rather # it uses the first configuration file found. 
However, # environment variables can be used to selectively override # parts of the config; according to [1]: "they are mostly # considered to be a legacy system as compared to the config # file, but are equally valid." # # [1]: http://docs.ansible.com/ansible/intro_configuration.html#environmental-configuration # # Provide default values for important configuration variables... ansible_env = { 'ANSIBLE_FORKS': '10', 'ANSIBLE_HOST_KEY_CHECKING': 'no', 'ANSIBLE_PRIVATE_KEY_FILE': cluster.user_key_private, 'ANSIBLE_ROLES_PATH': ':'.join(reversed(ansible_roles_dirs)), 'ANSIBLE_SSH_PIPELINING': 'yes', 'ANSIBLE_TIMEOUT': '120', } # ...override them with key/values set in the config file(s) for k, v in self.extra_conf.items(): if k.startswith('ansible_'): ansible_env[k.upper()] = str(v) # ...finally allow the environment have the final word ansible_env.update(os.environ) if __debug__: elasticluster.log.debug( "Calling `ansible-playbook` with the following environment:") for var, value in sorted(ansible_env.items()): elasticluster.log.debug("- %s=%r", var, value) elasticluster.log.debug("Using playbook file %s.", self._playbook_path) # build `ansible-playbook` command-line cmd = shlex.split(self.extra_conf.get('ansible_command', 'ansible-playbook')) cmd += [ os.path.realpath(self._playbook_path), ('--inventory=' + inventory_path), ] + list(extra_args) if self._sudo: cmd.extend([ # force all plays to use `sudo` (even if not marked as such) '--become', # desired sudo-to user ('--become-user=' + self._sudo_user), ]) # determine Ansible verbosity as a function of ElastiCluster's # log level (we cannot read `ElastiCluster().params.verbose` # here, still we can access the log configuration since it's # global). 
verbosity = (logging.WARNING - elasticluster.log.getEffectiveLevel()) / 10 if verbosity > 0: cmd.append('-' + ('v' * verbosity)) # e.g., `-vv` # append any additional arguments provided by users ansible_extra_args = self.extra_conf.get('ansible_extra_args', None) if ansible_extra_args: cmd += shlex.split(ansible_extra_args) cmdline = ' '.join(cmd) elasticluster.log.debug("Running Ansible command `%s` ...", cmdline) rc = call(cmd, env=ansible_env, bufsize=1, close_fds=True) if rc == 0: elasticluster.log.info("Cluster correctly configured.") return True else: elasticluster.log.error( "Command `%s` failed with exit code %d.", cmdline, rc) elasticluster.log.error( "Check the output lines above for additional information on this error.") elasticluster.log.error( "The cluster has likely *not* been configured correctly." " You may need to re-run `elasticluster setup` or fix the playbooks.") return False
Configure the cluster by running an Ansible playbook. The ElastiCluster configuration attribute `<kind>_groups` determines, for each node kind, what Ansible groups nodes of that kind are assigned to. :param cluster: cluster to configure :type cluster: :py:class:`elasticluster.cluster.Cluster` :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: ``True`` on success, ``False`` otherwise. Please note, if nothing has to be configured, then ``True`` is returned. :raises: `ConfigurationError` if the playbook can not be found or is corrupt.
Below is the the instruction that describes the task: ### Input: Configure the cluster by running an Ansible playbook. The ElastiCluster configuration attribute `<kind>_groups` determines, for each node kind, what Ansible groups nodes of that kind are assigned to. :param cluster: cluster to configure :type cluster: :py:class:`elasticluster.cluster.Cluster` :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: ``True`` on success, ``False`` otherwise. Please note, if nothing has to be configured, then ``True`` is returned. :raises: `ConfigurationError` if the playbook can not be found or is corrupt. ### Response: def setup_cluster(self, cluster, extra_args=tuple()): """ Configure the cluster by running an Ansible playbook. The ElastiCluster configuration attribute `<kind>_groups` determines, for each node kind, what Ansible groups nodes of that kind are assigned to. :param cluster: cluster to configure :type cluster: :py:class:`elasticluster.cluster.Cluster` :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: ``True`` on success, ``False`` otherwise. Please note, if nothing has to be configured, then ``True`` is returned. :raises: `ConfigurationError` if the playbook can not be found or is corrupt. """ inventory_path = self._build_inventory(cluster) if inventory_path is None: # No inventory file has been created, maybe an # invalid class has been specified in config file? Or none? # assume it is fine. elasticluster.log.info("No setup required for this cluster.") return True assert os.path.exists(inventory_path), ( "inventory file `{inventory_path}` does not exist" .format(inventory_path=inventory_path)) # build list of directories to search for roles/include files ansible_roles_dirs = [ # include Ansible default first ... '/etc/ansible/roles', ] for root_path in [ # ... 
then ElastiCluster's built-in defaults resource_filename('elasticluster', 'share/playbooks'), # ... then wherever the playbook is os.path.dirname(self._playbook_path), ]: for path in [ root_path, os.path.join(root_path, 'roles'), ]: if path not in ansible_roles_dirs and os.path.exists(path): ansible_roles_dirs.append(path) # Use env vars to configure Ansible; # see all values in https://github.com/ansible/ansible/blob/devel/lib/ansible/constants.py # # Ansible does not merge keys in configuration files: rather # it uses the first configuration file found. However, # environment variables can be used to selectively override # parts of the config; according to [1]: "they are mostly # considered to be a legacy system as compared to the config # file, but are equally valid." # # [1]: http://docs.ansible.com/ansible/intro_configuration.html#environmental-configuration # # Provide default values for important configuration variables... ansible_env = { 'ANSIBLE_FORKS': '10', 'ANSIBLE_HOST_KEY_CHECKING': 'no', 'ANSIBLE_PRIVATE_KEY_FILE': cluster.user_key_private, 'ANSIBLE_ROLES_PATH': ':'.join(reversed(ansible_roles_dirs)), 'ANSIBLE_SSH_PIPELINING': 'yes', 'ANSIBLE_TIMEOUT': '120', } # ...override them with key/values set in the config file(s) for k, v in self.extra_conf.items(): if k.startswith('ansible_'): ansible_env[k.upper()] = str(v) # ...finally allow the environment have the final word ansible_env.update(os.environ) if __debug__: elasticluster.log.debug( "Calling `ansible-playbook` with the following environment:") for var, value in sorted(ansible_env.items()): elasticluster.log.debug("- %s=%r", var, value) elasticluster.log.debug("Using playbook file %s.", self._playbook_path) # build `ansible-playbook` command-line cmd = shlex.split(self.extra_conf.get('ansible_command', 'ansible-playbook')) cmd += [ os.path.realpath(self._playbook_path), ('--inventory=' + inventory_path), ] + list(extra_args) if self._sudo: cmd.extend([ # force all plays to use `sudo` (even if 
not marked as such) '--become', # desired sudo-to user ('--become-user=' + self._sudo_user), ]) # determine Ansible verbosity as a function of ElastiCluster's # log level (we cannot read `ElastiCluster().params.verbose` # here, still we can access the log configuration since it's # global). verbosity = (logging.WARNING - elasticluster.log.getEffectiveLevel()) / 10 if verbosity > 0: cmd.append('-' + ('v' * verbosity)) # e.g., `-vv` # append any additional arguments provided by users ansible_extra_args = self.extra_conf.get('ansible_extra_args', None) if ansible_extra_args: cmd += shlex.split(ansible_extra_args) cmdline = ' '.join(cmd) elasticluster.log.debug("Running Ansible command `%s` ...", cmdline) rc = call(cmd, env=ansible_env, bufsize=1, close_fds=True) if rc == 0: elasticluster.log.info("Cluster correctly configured.") return True else: elasticluster.log.error( "Command `%s` failed with exit code %d.", cmdline, rc) elasticluster.log.error( "Check the output lines above for additional information on this error.") elasticluster.log.error( "The cluster has likely *not* been configured correctly." " You may need to re-run `elasticluster setup` or fix the playbooks.") return False
def xarray_to_ndarray(data, *, var_names=None, combined=True): """Take xarray data and unpacks into variables and data into list and numpy array respectively. Assumes that chain and draw are in coordinates Parameters ---------- data: xarray.DataSet Data in an xarray from an InferenceData object. Examples include posterior or sample_stats var_names: iter Should be a subset of data.data_vars not including chain and draws. Defaults to all of them combined: bool Whether to combine chain into one array Returns ------- var_names: list List of variable names data: np.array Data values """ unpacked_data, unpacked_var_names, = [], [] # Merge chains and variables for var_name, selection, data_array in xarray_var_iter( data, var_names=var_names, combined=combined ): unpacked_data.append(data_array.flatten()) unpacked_var_names.append(make_label(var_name, selection)) return unpacked_var_names, np.array(unpacked_data)
Take xarray data and unpacks into variables and data into list and numpy array respectively. Assumes that chain and draw are in coordinates Parameters ---------- data: xarray.DataSet Data in an xarray from an InferenceData object. Examples include posterior or sample_stats var_names: iter Should be a subset of data.data_vars not including chain and draws. Defaults to all of them combined: bool Whether to combine chain into one array Returns ------- var_names: list List of variable names data: np.array Data values
Below is the the instruction that describes the task: ### Input: Take xarray data and unpacks into variables and data into list and numpy array respectively. Assumes that chain and draw are in coordinates Parameters ---------- data: xarray.DataSet Data in an xarray from an InferenceData object. Examples include posterior or sample_stats var_names: iter Should be a subset of data.data_vars not including chain and draws. Defaults to all of them combined: bool Whether to combine chain into one array Returns ------- var_names: list List of variable names data: np.array Data values ### Response: def xarray_to_ndarray(data, *, var_names=None, combined=True): """Take xarray data and unpacks into variables and data into list and numpy array respectively. Assumes that chain and draw are in coordinates Parameters ---------- data: xarray.DataSet Data in an xarray from an InferenceData object. Examples include posterior or sample_stats var_names: iter Should be a subset of data.data_vars not including chain and draws. Defaults to all of them combined: bool Whether to combine chain into one array Returns ------- var_names: list List of variable names data: np.array Data values """ unpacked_data, unpacked_var_names, = [], [] # Merge chains and variables for var_name, selection, data_array in xarray_var_iter( data, var_names=var_names, combined=combined ): unpacked_data.append(data_array.flatten()) unpacked_var_names.append(make_label(var_name, selection)) return unpacked_var_names, np.array(unpacked_data)
def clear(self): """ Reset the config object to its initial state """ with self._lock: self._config = { CacheConfig.Morlist: {'last': defaultdict(float), 'intl': {}}, CacheConfig.Metadata: {'last': defaultdict(float), 'intl': {}}, }
Reset the config object to its initial state
Below is the the instruction that describes the task: ### Input: Reset the config object to its initial state ### Response: def clear(self): """ Reset the config object to its initial state """ with self._lock: self._config = { CacheConfig.Morlist: {'last': defaultdict(float), 'intl': {}}, CacheConfig.Metadata: {'last': defaultdict(float), 'intl': {}}, }
def del_node(self, node): """ Remove a node from the graph. @type node: node @param node: Node identifier. """ for each in list(self.incidents(node)): # Delete all the edges incident on this node self.del_edge((each, node)) for each in list(self.neighbors(node)): # Delete all the edges pointing to this node. self.del_edge((node, each)) # Remove this node from the neighbors and incidents tables del(self.node_neighbors[node]) del(self.node_incidence[node]) # Remove any labeling which may exist. self.del_node_labeling( node )
Remove a node from the graph. @type node: node @param node: Node identifier.
Below is the the instruction that describes the task: ### Input: Remove a node from the graph. @type node: node @param node: Node identifier. ### Response: def del_node(self, node): """ Remove a node from the graph. @type node: node @param node: Node identifier. """ for each in list(self.incidents(node)): # Delete all the edges incident on this node self.del_edge((each, node)) for each in list(self.neighbors(node)): # Delete all the edges pointing to this node. self.del_edge((node, each)) # Remove this node from the neighbors and incidents tables del(self.node_neighbors[node]) del(self.node_incidence[node]) # Remove any labeling which may exist. self.del_node_labeling( node )
def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' 
ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret
Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
Below is the the instruction that describes the task: ### Input: Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ### Response: def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. 
The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret
def missing_vars(template_vars, parsed_content, obj): """If we find missing variables when rendering a template we want to give the user a friendly error""" missing = [] default_vars = grok_vars(parsed_content) for var in template_vars: if var not in default_vars and var not in obj: missing.append(var) if missing: e_msg = "Missing required variables %s" % \ ','.join(missing) raise aomi_excep.AomiData(e_msg)
If we find missing variables when rendering a template we want to give the user a friendly error
Below is the the instruction that describes the task: ### Input: If we find missing variables when rendering a template we want to give the user a friendly error ### Response: def missing_vars(template_vars, parsed_content, obj): """If we find missing variables when rendering a template we want to give the user a friendly error""" missing = [] default_vars = grok_vars(parsed_content) for var in template_vars: if var not in default_vars and var not in obj: missing.append(var) if missing: e_msg = "Missing required variables %s" % \ ','.join(missing) raise aomi_excep.AomiData(e_msg)
def delete_document( self, name, current_document=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a document. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') >>> >>> client.delete_document(name) Args: name (str): The resource name of the Document to delete. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. current_document (Union[dict, ~google.cloud.firestore_v1beta1.types.Precondition]): An optional precondition on the document. The request will fail if this is set and not met by the target document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Precondition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "delete_document" not in self._inner_api_calls: self._inner_api_calls[ "delete_document" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_document, default_retry=self._method_configs["DeleteDocument"].retry, default_timeout=self._method_configs["DeleteDocument"].timeout, client_info=self._client_info, ) request = firestore_pb2.DeleteDocumentRequest( name=name, current_document=current_document ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) self._inner_api_calls["delete_document"]( request, retry=retry, timeout=timeout, metadata=metadata )
Deletes a document. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') >>> >>> client.delete_document(name) Args: name (str): The resource name of the Document to delete. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. current_document (Union[dict, ~google.cloud.firestore_v1beta1.types.Precondition]): An optional precondition on the document. The request will fail if this is set and not met by the target document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Precondition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
Below is the the instruction that describes the task: ### Input: Deletes a document. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') >>> >>> client.delete_document(name) Args: name (str): The resource name of the Document to delete. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. current_document (Union[dict, ~google.cloud.firestore_v1beta1.types.Precondition]): An optional precondition on the document. The request will fail if this is set and not met by the target document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Precondition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. ### Response: def delete_document( self, name, current_document=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a document. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') >>> >>> client.delete_document(name) Args: name (str): The resource name of the Document to delete. 
In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. current_document (Union[dict, ~google.cloud.firestore_v1beta1.types.Precondition]): An optional precondition on the document. The request will fail if this is set and not met by the target document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Precondition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_document" not in self._inner_api_calls: self._inner_api_calls[ "delete_document" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_document, default_retry=self._method_configs["DeleteDocument"].retry, default_timeout=self._method_configs["DeleteDocument"].timeout, client_info=self._client_info, ) request = firestore_pb2.DeleteDocumentRequest( name=name, current_document=current_document ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) self._inner_api_calls["delete_document"]( request, retry=retry, timeout=timeout, metadata=metadata )
def login_generic(request, username, password): """Helper method. Generic login with username and password.""" user = authenticate(username=username, password=password) if user is not None and user.is_active: login(request, user) return True return False
Helper method. Generic login with username and password.
Below is the the instruction that describes the task: ### Input: Helper method. Generic login with username and password. ### Response: def login_generic(request, username, password): """Helper method. Generic login with username and password.""" user = authenticate(username=username, password=password) if user is not None and user.is_active: login(request, user) return True return False
def handle_agent_tasks(self, task):
    """
    Process a request received from the host agent and post the
    resulting payload back via ``self.agent.task_response``.
    """
    logger.debug("Received agent request with messageId: %s" % task["messageId"])

    if "action" not in task:
        payload = {"error": "Instana Python: No action specified in request."}
    elif task["action"] == "python.source":
        payload = get_py_source(task["args"]["file"])
    else:
        message = ("Unrecognized action: %s. An newer Instana package may be required "
                   "for this. Current version: %s" % (task["action"], package_version()))
        payload = {"error": message}

    self.agent.task_response(task["messageId"], payload)
When request(s) are received by the host agent, it is sent here for handling & processing.
Below is the the instruction that describes the task: ### Input: When request(s) are received by the host agent, it is sent here for handling & processing. ### Response: def handle_agent_tasks(self, task): """ When request(s) are received by the host agent, it is sent here for handling & processing. """ logger.debug("Received agent request with messageId: %s" % task["messageId"]) if "action" in task: if task["action"] == "python.source": payload = get_py_source(task["args"]["file"]) else: message = "Unrecognized action: %s. An newer Instana package may be required " \ "for this. Current version: %s" % (task["action"], package_version()) payload = {"error": message} else: payload = {"error": "Instana Python: No action specified in request."} self.agent.task_response(task["messageId"], payload)
def clear(self):
    """Reset the KNNClassifier to its freshly-constructed state.

    Drops all stored patterns, category bookkeeping, cached PCA state and
    network-builder state.  When ``maxStoredPatterns`` is positive the
    classifier switches to fixed-capacity mode, which is only supported
    together with sparse memory.
    """
    # Pattern storage and category bookkeeping.
    self._Memory = self._M = None
    self._numPatterns = 0
    self._categoryList = []
    self._partitionIdList = []
    self._partitionIdMap = {}
    self._finishedLearning = False
    self._iterationIdx = -1

    # Fixed-capacity mode keeps a recency list so old patterns can be evicted.
    if self.maxStoredPatterns > 0:
        assert self.useSparseMemory, ("Fixed capacity KNN is implemented only "
                                      "in the sparse memory mode")
        self.fixedCapacity = True
        self._categoryRecencyList = []
    else:
        self.fixedCapacity = False

    # Cached prototype sizes.
    self._protoSizes = None

    # PCA-related caches.
    self._s = self._vt = self._nc = self._mean = None

    # Network-builder state.
    self._specificIndexTraining = False
    self._nextTrainingIndices = None
Clears the state of the KNNClassifier.
Below is the the instruction that describes the task: ### Input: Clears the state of the KNNClassifier. ### Response: def clear(self): """Clears the state of the KNNClassifier.""" self._Memory = None self._numPatterns = 0 self._M = None self._categoryList = [] self._partitionIdList = [] self._partitionIdMap = {} self._finishedLearning = False self._iterationIdx = -1 # Fixed capacity KNN if self.maxStoredPatterns > 0: assert self.useSparseMemory, ("Fixed capacity KNN is implemented only " "in the sparse memory mode") self.fixedCapacity = True self._categoryRecencyList = [] else: self.fixedCapacity = False # Cached value of the store prototype sizes self._protoSizes = None # Used by PCA self._s = None self._vt = None self._nc = None self._mean = None # Used by Network Builder self._specificIndexTraining = False self._nextTrainingIndices = None
def mget(self, *keys):
    """
    -> #list of values at the specified @keys

    Fetches the raw entries from the backing hash in one round-trip and
    deserializes each one with :attr:`_loads`.
    """
    raw_values = self._client.hmget(self.key_prefix, *keys)
    return [self._loads(value) for value in raw_values]
-> #list of values at the specified @keys
Below is the the instruction that describes the task: ### Input: -> #list of values at the specified @keys ### Response: def mget(self, *keys): """ -> #list of values at the specified @keys """ return list(map( self._loads, self._client.hmget(self.key_prefix, *keys)))
def check_key(request):
    """
    Check to see if we already have an access_key stored in the session.

    A stored (non-empty) token means the user has already completed the
    OAuth flow; a missing or empty token means they still need to.

    Returns:
        bool: True if a truthy ``oauth_token`` is present in the session,
        False otherwise.
    """
    # ``session.get`` never raises KeyError, so the original try/except
    # around it was dead code; a plain truthiness check is equivalent.
    return bool(request.session.get('oauth_token', None))
Check to see if we already have an access_key stored, if we do then we have already gone through OAuth. If not then we haven't and we probably need to.
Below is the the instruction that describes the task: ### Input: Check to see if we already have an access_key stored, if we do then we have already gone through OAuth. If not then we haven't and we probably need to. ### Response: def check_key(request): """ Check to see if we already have an access_key stored, if we do then we have already gone through OAuth. If not then we haven't and we probably need to. """ try: access_key = request.session.get('oauth_token', None) if not access_key: return False except KeyError: return False return True
def clear_dir(path):
    """Delete every regular file and symlink directly inside *path*.

    Subdirectories (and their contents) are left untouched; only the
    top level of the directory is emptied.
    """
    for entry in os.listdir(path):
        target = os.path.join(path, entry)
        # Remove plain files and symlinks only; never recurse into dirs.
        if os.path.islink(target) or os.path.isfile(target):
            os.unlink(target)
Empty out the image directory.
Below is the the instruction that describes the task: ### Input: Empty out the image directory. ### Response: def clear_dir(path): """Empty out the image directory.""" for f in os.listdir(path): f_path = os.path.join(path, f) if os.path.isfile(f_path) or os.path.islink(f_path): os.unlink(f_path)
def copy_object(ACL=None, Bucket=None, CacheControl=None, ContentDisposition=None, ContentEncoding=None, ContentLanguage=None, ContentType=None, CopySource=None, CopySourceIfMatch=None, CopySourceIfModifiedSince=None, CopySourceIfNoneMatch=None, CopySourceIfUnmodifiedSince=None, Expires=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWriteACP=None, Key=None, Metadata=None, MetadataDirective=None, TaggingDirective=None, ServerSideEncryption=None, StorageClass=None, WebsiteRedirectLocation=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, SSEKMSKeyId=None, CopySourceSSECustomerAlgorithm=None, CopySourceSSECustomerKey=None, CopySourceSSECustomerKeyMD5=None, RequestPayer=None, Tagging=None):
    """Create a copy of an object that is already stored in Amazon S3.

    Generated service stub (see the AWS API documentation for full detail);
    the body is provided at runtime by the service client.

    Required parameters:
        Bucket (string): Destination bucket name.
        Key (string): Destination object key.
        CopySource (str or dict): Source bucket/key (and optional version).
            String form: ``{bucket}/{key}`` or
            ``{bucket}/{key}?versionId={versionId}``.  Dict form (preferred):
            ``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}`` where
            ``VersionId`` may be omitted.

    Selected optional parameters:
        ACL: Canned ACL for the new object ('private', 'public-read', ...).
        CacheControl / ContentDisposition / ContentEncoding /
        ContentLanguage / ContentType: Standard response headers to store.
        CopySourceIfMatch / CopySourceIfNoneMatch: Copy only if the source
            ETag does / does not match.
        CopySourceIfModifiedSince / CopySourceIfUnmodifiedSince (datetime):
            Copy only if the source was / was not modified since the time.
        Expires (datetime): When the object stops being cacheable.
        GrantFullControl / GrantRead / GrantReadACP / GrantWriteACP:
            Explicit grantee permissions on the new object.
        Metadata (dict): Metadata map to store with the object.
        MetadataDirective / TaggingDirective: 'COPY' or 'REPLACE' -- whether
            metadata / tags come from the source or from this request.
        ServerSideEncryption: 'AES256' or 'aws:kms'.
        StorageClass: 'STANDARD' (default), 'REDUCED_REDUNDANCY' or
            'STANDARD_IA'.
        WebsiteRedirectLocation: Redirect target when the bucket is a website.
        SSECustomerAlgorithm / SSECustomerKey / SSECustomerKeyMD5:
            Customer-provided encryption key for the destination (the MD5 is
            auto-populated when omitted).
        SSEKMSKeyId: AWS KMS key id; KMS-protected requests require SSL/SigV4.
        CopySourceSSECustomerAlgorithm / CopySourceSSECustomerKey /
        CopySourceSSECustomerKeyMD5: Key used to decrypt the source object.
        RequestPayer: 'requester' to acknowledge requester-pays billing.
        Tagging: URL-encoded tag-set, used with TaggingDirective.

    Returns:
        dict: Keys include ``CopyObjectResult`` (``ETag``, ``LastModified``),
        ``Expiration``, ``CopySourceVersionId``, ``VersionId``,
        ``ServerSideEncryption``, ``SSECustomerAlgorithm``,
        ``SSECustomerKeyMD5``, ``SSEKMSKeyId`` and ``RequestCharged``.
    """
    pass
Creates a copy of an object that is already stored in Amazon S3. See also: AWS API Documentation :example: response = client.copy_object( ACL='private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control', Bucket='string', CacheControl='string', ContentDisposition='string', ContentEncoding='string', ContentLanguage='string', ContentType='string', CopySource='string' or {'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'}, CopySourceIfMatch='string', CopySourceIfModifiedSince=datetime(2015, 1, 1), CopySourceIfNoneMatch='string', CopySourceIfUnmodifiedSince=datetime(2015, 1, 1), Expires=datetime(2015, 1, 1), GrantFullControl='string', GrantRead='string', GrantReadACP='string', GrantWriteACP='string', Key='string', Metadata={ 'string': 'string' }, MetadataDirective='COPY'|'REPLACE', TaggingDirective='COPY'|'REPLACE', ServerSideEncryption='AES256'|'aws:kms', StorageClass='STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA', WebsiteRedirectLocation='string', SSECustomerAlgorithm='string', SSECustomerKey='string', SSEKMSKeyId='string', CopySourceSSECustomerAlgorithm='string', CopySourceSSECustomerKey='string', RequestPayer='requester', Tagging='string' ) :type ACL: string :param ACL: The canned ACL to apply to the object. :type Bucket: string :param Bucket: [REQUIRED] :type CacheControl: string :param CacheControl: Specifies caching behavior along the request/reply chain. :type ContentDisposition: string :param ContentDisposition: Specifies presentational information for the object. :type ContentEncoding: string :param ContentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. :type ContentLanguage: string :param ContentLanguage: The language the content is in. 
:type ContentType: string :param ContentType: A standard MIME type describing the format of the object data. :type CopySource: str or dict :param CopySource: [REQUIRED] The name of the source bucket, key name of the source object, and optional version ID of the source object. You can either provide this value as a string or a dictionary. The string form is {bucket}/{key} or {bucket}/{key}?versionId={versionId} if you want to copy a specific version. You can also provide this value as a dictionary. The dictionary format is recommended over the string format because it is more explicit. The dictionary format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}. Note that the VersionId key is optional and may be omitted. :type CopySourceIfMatch: string :param CopySourceIfMatch: Copies the object if its entity tag (ETag) matches the specified tag. :type CopySourceIfModifiedSince: datetime :param CopySourceIfModifiedSince: Copies the object if it has been modified since the specified time. :type CopySourceIfNoneMatch: string :param CopySourceIfNoneMatch: Copies the object if its entity tag (ETag) is different than the specified ETag. :type CopySourceIfUnmodifiedSince: datetime :param CopySourceIfUnmodifiedSince: Copies the object if it hasn't been modified since the specified time. :type Expires: datetime :param Expires: The date and time at which the object is no longer cacheable. :type GrantFullControl: string :param GrantFullControl: Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. :type GrantRead: string :param GrantRead: Allows grantee to read the object data and its metadata. :type GrantReadACP: string :param GrantReadACP: Allows grantee to read the object ACL. :type GrantWriteACP: string :param GrantWriteACP: Allows grantee to write the ACL for the applicable object. :type Key: string :param Key: [REQUIRED] :type Metadata: dict :param Metadata: A map of metadata to store with the object in S3. 
(string) -- (string) -- :type MetadataDirective: string :param MetadataDirective: Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request. :type TaggingDirective: string :param TaggingDirective: Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request. :type ServerSideEncryption: string :param ServerSideEncryption: The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms). :type StorageClass: string :param StorageClass: The type of storage to use for the object. Defaults to 'STANDARD'. :type WebsiteRedirectLocation: string :param WebsiteRedirectLocation: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. :type SSECustomerAlgorithm: string :param SSECustomerAlgorithm: Specifies the algorithm to use to when encrypting the object (e.g., AES256). :type SSECustomerKey: string :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side -encryption -customer-algorithm header. :type SSECustomerKeyMD5: string :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error. Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required :type SSEKMSKeyId: string :param SSEKMSKeyId: Specifies the AWS KMS key ID to use for object encryption. 
All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version :type CopySourceSSECustomerAlgorithm: string :param CopySourceSSECustomerAlgorithm: Specifies the algorithm to use when decrypting the source object (e.g., AES256). :type CopySourceSSECustomerKey: string :param CopySourceSSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created. :type CopySourceSSECustomerKeyMD5: string :param CopySourceSSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error. Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required :type RequestPayer: string :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html :type Tagging: string :param Tagging: The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. 
The tag-set must be encoded as URL Query parameters :rtype: dict :return: { 'CopyObjectResult': { 'ETag': 'string', 'LastModified': datetime(2015, 1, 1) }, 'Expiration': 'string', 'CopySourceVersionId': 'string', 'VersionId': 'string', 'ServerSideEncryption': 'AES256'|'aws:kms', 'SSECustomerAlgorithm': 'string', 'SSECustomerKeyMD5': 'string', 'SSEKMSKeyId': 'string', 'RequestCharged': 'requester' } :returns: (dict) -- CopyObjectResult (dict) -- ETag (string) -- LastModified (datetime) -- Expiration (string) -- If the object expiration is configured, the response includes this header. CopySourceVersionId (string) -- VersionId (string) -- Version ID of the newly created copy. ServerSideEncryption (string) -- The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms). SSECustomerAlgorithm (string) -- If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. SSECustomerKeyMD5 (string) -- If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key. SSEKMSKeyId (string) -- If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object. RequestCharged (string) -- If present, indicates that the requester was successfully charged for the request.
Below is the the instruction that describes the task: ### Input: Creates a copy of an object that is already stored in Amazon S3. See also: AWS API Documentation :example: response = client.copy_object( ACL='private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control', Bucket='string', CacheControl='string', ContentDisposition='string', ContentEncoding='string', ContentLanguage='string', ContentType='string', CopySource='string' or {'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'}, CopySourceIfMatch='string', CopySourceIfModifiedSince=datetime(2015, 1, 1), CopySourceIfNoneMatch='string', CopySourceIfUnmodifiedSince=datetime(2015, 1, 1), Expires=datetime(2015, 1, 1), GrantFullControl='string', GrantRead='string', GrantReadACP='string', GrantWriteACP='string', Key='string', Metadata={ 'string': 'string' }, MetadataDirective='COPY'|'REPLACE', TaggingDirective='COPY'|'REPLACE', ServerSideEncryption='AES256'|'aws:kms', StorageClass='STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA', WebsiteRedirectLocation='string', SSECustomerAlgorithm='string', SSECustomerKey='string', SSEKMSKeyId='string', CopySourceSSECustomerAlgorithm='string', CopySourceSSECustomerKey='string', RequestPayer='requester', Tagging='string' ) :type ACL: string :param ACL: The canned ACL to apply to the object. :type Bucket: string :param Bucket: [REQUIRED] :type CacheControl: string :param CacheControl: Specifies caching behavior along the request/reply chain. :type ContentDisposition: string :param ContentDisposition: Specifies presentational information for the object. :type ContentEncoding: string :param ContentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. :type ContentLanguage: string :param ContentLanguage: The language the content is in. 
:type ContentType: string :param ContentType: A standard MIME type describing the format of the object data. :type CopySource: str or dict :param CopySource: [REQUIRED] The name of the source bucket, key name of the source object, and optional version ID of the source object. You can either provide this value as a string or a dictionary. The string form is {bucket}/{key} or {bucket}/{key}?versionId={versionId} if you want to copy a specific version. You can also provide this value as a dictionary. The dictionary format is recommended over the string format because it is more explicit. The dictionary format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}. Note that the VersionId key is optional and may be omitted. :type CopySourceIfMatch: string :param CopySourceIfMatch: Copies the object if its entity tag (ETag) matches the specified tag. :type CopySourceIfModifiedSince: datetime :param CopySourceIfModifiedSince: Copies the object if it has been modified since the specified time. :type CopySourceIfNoneMatch: string :param CopySourceIfNoneMatch: Copies the object if its entity tag (ETag) is different than the specified ETag. :type CopySourceIfUnmodifiedSince: datetime :param CopySourceIfUnmodifiedSince: Copies the object if it hasn't been modified since the specified time. :type Expires: datetime :param Expires: The date and time at which the object is no longer cacheable. :type GrantFullControl: string :param GrantFullControl: Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. :type GrantRead: string :param GrantRead: Allows grantee to read the object data and its metadata. :type GrantReadACP: string :param GrantReadACP: Allows grantee to read the object ACL. :type GrantWriteACP: string :param GrantWriteACP: Allows grantee to write the ACL for the applicable object. :type Key: string :param Key: [REQUIRED] :type Metadata: dict :param Metadata: A map of metadata to store with the object in S3. 
(string) -- (string) -- :type MetadataDirective: string :param MetadataDirective: Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request. :type TaggingDirective: string :param TaggingDirective: Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request. :type ServerSideEncryption: string :param ServerSideEncryption: The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms). :type StorageClass: string :param StorageClass: The type of storage to use for the object. Defaults to 'STANDARD'. :type WebsiteRedirectLocation: string :param WebsiteRedirectLocation: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. :type SSECustomerAlgorithm: string :param SSECustomerAlgorithm: Specifies the algorithm to use to when encrypting the object (e.g., AES256). :type SSECustomerKey: string :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side -encryption -customer-algorithm header. :type SSECustomerKeyMD5: string :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error. Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required :type SSEKMSKeyId: string :param SSEKMSKeyId: Specifies the AWS KMS key ID to use for object encryption. 
All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version :type CopySourceSSECustomerAlgorithm: string :param CopySourceSSECustomerAlgorithm: Specifies the algorithm to use when decrypting the source object (e.g., AES256). :type CopySourceSSECustomerKey: string :param CopySourceSSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created. :type CopySourceSSECustomerKeyMD5: string :param CopySourceSSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error. Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required :type RequestPayer: string :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html :type Tagging: string :param Tagging: The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. 
The tag-set must be encoded as URL Query parameters :rtype: dict :return: { 'CopyObjectResult': { 'ETag': 'string', 'LastModified': datetime(2015, 1, 1) }, 'Expiration': 'string', 'CopySourceVersionId': 'string', 'VersionId': 'string', 'ServerSideEncryption': 'AES256'|'aws:kms', 'SSECustomerAlgorithm': 'string', 'SSECustomerKeyMD5': 'string', 'SSEKMSKeyId': 'string', 'RequestCharged': 'requester' } :returns: (dict) -- CopyObjectResult (dict) -- ETag (string) -- LastModified (datetime) -- Expiration (string) -- If the object expiration is configured, the response includes this header. CopySourceVersionId (string) -- VersionId (string) -- Version ID of the newly created copy. ServerSideEncryption (string) -- The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms). SSECustomerAlgorithm (string) -- If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. SSECustomerKeyMD5 (string) -- If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key. SSEKMSKeyId (string) -- If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object. RequestCharged (string) -- If present, indicates that the requester was successfully charged for the request. 
### Response: def copy_object(ACL=None, Bucket=None, CacheControl=None, ContentDisposition=None, ContentEncoding=None, ContentLanguage=None, ContentType=None, CopySource=None, CopySourceIfMatch=None, CopySourceIfModifiedSince=None, CopySourceIfNoneMatch=None, CopySourceIfUnmodifiedSince=None, Expires=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWriteACP=None, Key=None, Metadata=None, MetadataDirective=None, TaggingDirective=None, ServerSideEncryption=None, StorageClass=None, WebsiteRedirectLocation=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, SSEKMSKeyId=None, CopySourceSSECustomerAlgorithm=None, CopySourceSSECustomerKey=None, CopySourceSSECustomerKeyMD5=None, RequestPayer=None, Tagging=None): """ Creates a copy of an object that is already stored in Amazon S3. See also: AWS API Documentation :example: response = client.copy_object( ACL='private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control', Bucket='string', CacheControl='string', ContentDisposition='string', ContentEncoding='string', ContentLanguage='string', ContentType='string', CopySource='string' or {'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'}, CopySourceIfMatch='string', CopySourceIfModifiedSince=datetime(2015, 1, 1), CopySourceIfNoneMatch='string', CopySourceIfUnmodifiedSince=datetime(2015, 1, 1), Expires=datetime(2015, 1, 1), GrantFullControl='string', GrantRead='string', GrantReadACP='string', GrantWriteACP='string', Key='string', Metadata={ 'string': 'string' }, MetadataDirective='COPY'|'REPLACE', TaggingDirective='COPY'|'REPLACE', ServerSideEncryption='AES256'|'aws:kms', StorageClass='STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA', WebsiteRedirectLocation='string', SSECustomerAlgorithm='string', SSECustomerKey='string', SSEKMSKeyId='string', CopySourceSSECustomerAlgorithm='string', CopySourceSSECustomerKey='string', RequestPayer='requester', Tagging='string' ) 
:type ACL: string :param ACL: The canned ACL to apply to the object. :type Bucket: string :param Bucket: [REQUIRED] :type CacheControl: string :param CacheControl: Specifies caching behavior along the request/reply chain. :type ContentDisposition: string :param ContentDisposition: Specifies presentational information for the object. :type ContentEncoding: string :param ContentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. :type ContentLanguage: string :param ContentLanguage: The language the content is in. :type ContentType: string :param ContentType: A standard MIME type describing the format of the object data. :type CopySource: str or dict :param CopySource: [REQUIRED] The name of the source bucket, key name of the source object, and optional version ID of the source object. You can either provide this value as a string or a dictionary. The string form is {bucket}/{key} or {bucket}/{key}?versionId={versionId} if you want to copy a specific version. You can also provide this value as a dictionary. The dictionary format is recommended over the string format because it is more explicit. The dictionary format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}. Note that the VersionId key is optional and may be omitted. :type CopySourceIfMatch: string :param CopySourceIfMatch: Copies the object if its entity tag (ETag) matches the specified tag. :type CopySourceIfModifiedSince: datetime :param CopySourceIfModifiedSince: Copies the object if it has been modified since the specified time. :type CopySourceIfNoneMatch: string :param CopySourceIfNoneMatch: Copies the object if its entity tag (ETag) is different than the specified ETag. :type CopySourceIfUnmodifiedSince: datetime :param CopySourceIfUnmodifiedSince: Copies the object if it hasn't been modified since the specified time. 
:type Expires: datetime :param Expires: The date and time at which the object is no longer cacheable. :type GrantFullControl: string :param GrantFullControl: Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. :type GrantRead: string :param GrantRead: Allows grantee to read the object data and its metadata. :type GrantReadACP: string :param GrantReadACP: Allows grantee to read the object ACL. :type GrantWriteACP: string :param GrantWriteACP: Allows grantee to write the ACL for the applicable object. :type Key: string :param Key: [REQUIRED] :type Metadata: dict :param Metadata: A map of metadata to store with the object in S3. (string) -- (string) -- :type MetadataDirective: string :param MetadataDirective: Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request. :type TaggingDirective: string :param TaggingDirective: Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request. :type ServerSideEncryption: string :param ServerSideEncryption: The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms). :type StorageClass: string :param StorageClass: The type of storage to use for the object. Defaults to 'STANDARD'. :type WebsiteRedirectLocation: string :param WebsiteRedirectLocation: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. :type SSECustomerAlgorithm: string :param SSECustomerAlgorithm: Specifies the algorithm to use to when encrypting the object (e.g., AES256). :type SSECustomerKey: string :param SSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. 
The key must be appropriate for use with the algorithm specified in the x-amz-server-side -encryption -customer-algorithm header. :type SSECustomerKeyMD5: string :param SSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error. Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required :type SSEKMSKeyId: string :param SSEKMSKeyId: Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version :type CopySourceSSECustomerAlgorithm: string :param CopySourceSSECustomerAlgorithm: Specifies the algorithm to use when decrypting the source object (e.g., AES256). :type CopySourceSSECustomerKey: string :param CopySourceSSECustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created. :type CopySourceSSECustomerKeyMD5: string :param CopySourceSSECustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error. Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required :type RequestPayer: string :param RequestPayer: Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html :type Tagging: string :param Tagging: The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query parameters :rtype: dict :return: { 'CopyObjectResult': { 'ETag': 'string', 'LastModified': datetime(2015, 1, 1) }, 'Expiration': 'string', 'CopySourceVersionId': 'string', 'VersionId': 'string', 'ServerSideEncryption': 'AES256'|'aws:kms', 'SSECustomerAlgorithm': 'string', 'SSECustomerKeyMD5': 'string', 'SSEKMSKeyId': 'string', 'RequestCharged': 'requester' } :returns: (dict) -- CopyObjectResult (dict) -- ETag (string) -- LastModified (datetime) -- Expiration (string) -- If the object expiration is configured, the response includes this header. CopySourceVersionId (string) -- VersionId (string) -- Version ID of the newly created copy. ServerSideEncryption (string) -- The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms). SSECustomerAlgorithm (string) -- If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. SSECustomerKeyMD5 (string) -- If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key. SSEKMSKeyId (string) -- If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object. RequestCharged (string) -- If present, indicates that the requester was successfully charged for the request. """ pass
def rate_unstable(self): """Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute.""" if not self.started or self.stalled: return 0.0 x1, y1 = self._timing_data[-2] x2, y2 = self._timing_data[-1] return (y2 - y1) / (x2 - x1)
Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute.
Below is the the instruction that describes the task: ### Input: Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute. ### Response: def rate_unstable(self): """Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute.""" if not self.started or self.stalled: return 0.0 x1, y1 = self._timing_data[-2] x2, y2 = self._timing_data[-1] return (y2 - y1) / (x2 - x1)
def reshape(self, newshape, order='C'): """If axis 0 is unaffected by the reshape, then returns a Timeseries, otherwise returns an ndarray. Preserves labels of axis j only if all axes<=j are unaffected by the reshape. See ``numpy.ndarray.reshape()`` for more information """ oldshape = self.shape ar = np.asarray(self).reshape(newshape, order=order) if (newshape is -1 and len(oldshape) is 1 or (isinstance(newshape, numbers.Integral) and newshape == oldshape[0]) or (isinstance(newshape, Sequence) and (newshape[0] == oldshape[0] or (newshape[0] is -1 and np.array(oldshape[1:]).prod() == np.array(newshape[1:]).prod())))): # then axis 0 is unaffected by the reshape newlabels = [None] * ar.ndim i = 1 while i < ar.ndim and i < self.ndim and ar.shape[i] == oldshape[i]: newlabels[i] = self.labels[i] i += 1 return Timeseries(ar, self.tspan, newlabels) else: return ar
If axis 0 is unaffected by the reshape, then returns a Timeseries, otherwise returns an ndarray. Preserves labels of axis j only if all axes<=j are unaffected by the reshape. See ``numpy.ndarray.reshape()`` for more information
Below is the the instruction that describes the task: ### Input: If axis 0 is unaffected by the reshape, then returns a Timeseries, otherwise returns an ndarray. Preserves labels of axis j only if all axes<=j are unaffected by the reshape. See ``numpy.ndarray.reshape()`` for more information ### Response: def reshape(self, newshape, order='C'): """If axis 0 is unaffected by the reshape, then returns a Timeseries, otherwise returns an ndarray. Preserves labels of axis j only if all axes<=j are unaffected by the reshape. See ``numpy.ndarray.reshape()`` for more information """ oldshape = self.shape ar = np.asarray(self).reshape(newshape, order=order) if (newshape is -1 and len(oldshape) is 1 or (isinstance(newshape, numbers.Integral) and newshape == oldshape[0]) or (isinstance(newshape, Sequence) and (newshape[0] == oldshape[0] or (newshape[0] is -1 and np.array(oldshape[1:]).prod() == np.array(newshape[1:]).prod())))): # then axis 0 is unaffected by the reshape newlabels = [None] * ar.ndim i = 1 while i < ar.ndim and i < self.ndim and ar.shape[i] == oldshape[i]: newlabels[i] = self.labels[i] i += 1 return Timeseries(ar, self.tspan, newlabels) else: return ar
def bmrblex(text): """A lexical analyzer for the BMRB NMR-STAR format syntax. :param text: Input text. :type text: :py:class:`str` or :py:class:`bytes` :return: Current token. :rtype: :py:class:`str` """ stream = transform_text(text) wordchars = (u"abcdfeghijklmnopqrstuvwxyz" u"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_" u"ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" u"ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ" u"!@$%^&*()_+:;?/>.<,~`|\{[}]-=") whitespace = u" \t\v\r\n" comment = u"#" state = u" " token = u"" single_line_comment = u"" while len(stream) > 0: nextnextchar = stream.popleft() while True: nextchar = nextnextchar if len(stream) > 0: nextnextchar = stream.popleft() else: nextnextchar = u"" # Process multiline string, comment, or single line comment if len(nextchar) > 1: state = u" " token = nextchar break # emit current token elif nextchar in whitespace and nextnextchar in comment and state not in (u"'", u'"'): single_line_comment = u"" state = u"#" if state is None: token = u"" # past end of file break elif state == u" ": if not nextchar: state = None break elif nextchar in whitespace: if token: state = u" " break # emit current token else: continue elif nextchar in wordchars: token = nextchar state = u"a" elif nextchar == u"'" or nextchar == u'"': token = nextchar state = nextchar else: token = nextchar if token: state = u" " break # emit current token else: continue # Process single-quoted or double-quoted token elif state == u"'" or state == u'"': token += nextchar if nextchar == state: if nextnextchar in whitespace: state = u" " token = token[1:-1] # remove single or double quotes from the ends break # Process single line comment elif state == u"#": single_line_comment += nextchar if nextchar == u"\n": state = u" " break # Process regular (unquoted) token elif state == u"a": if not nextchar: state = None break elif nextchar in whitespace: state = u" " if token: break # emit current token else: continue else: token += nextchar if nextnextchar: stream.appendleft(nextnextchar) 
yield token token = u""
A lexical analyzer for the BMRB NMR-STAR format syntax. :param text: Input text. :type text: :py:class:`str` or :py:class:`bytes` :return: Current token. :rtype: :py:class:`str`
Below is the the instruction that describes the task: ### Input: A lexical analyzer for the BMRB NMR-STAR format syntax. :param text: Input text. :type text: :py:class:`str` or :py:class:`bytes` :return: Current token. :rtype: :py:class:`str` ### Response: def bmrblex(text): """A lexical analyzer for the BMRB NMR-STAR format syntax. :param text: Input text. :type text: :py:class:`str` or :py:class:`bytes` :return: Current token. :rtype: :py:class:`str` """ stream = transform_text(text) wordchars = (u"abcdfeghijklmnopqrstuvwxyz" u"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_" u"ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" u"ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ" u"!@$%^&*()_+:;?/>.<,~`|\{[}]-=") whitespace = u" \t\v\r\n" comment = u"#" state = u" " token = u"" single_line_comment = u"" while len(stream) > 0: nextnextchar = stream.popleft() while True: nextchar = nextnextchar if len(stream) > 0: nextnextchar = stream.popleft() else: nextnextchar = u"" # Process multiline string, comment, or single line comment if len(nextchar) > 1: state = u" " token = nextchar break # emit current token elif nextchar in whitespace and nextnextchar in comment and state not in (u"'", u'"'): single_line_comment = u"" state = u"#" if state is None: token = u"" # past end of file break elif state == u" ": if not nextchar: state = None break elif nextchar in whitespace: if token: state = u" " break # emit current token else: continue elif nextchar in wordchars: token = nextchar state = u"a" elif nextchar == u"'" or nextchar == u'"': token = nextchar state = nextchar else: token = nextchar if token: state = u" " break # emit current token else: continue # Process single-quoted or double-quoted token elif state == u"'" or state == u'"': token += nextchar if nextchar == state: if nextnextchar in whitespace: state = u" " token = token[1:-1] # remove single or double quotes from the ends break # Process single line comment elif state == u"#": single_line_comment += nextchar if nextchar == u"\n": state = u" " break # 
Process regular (unquoted) token elif state == u"a": if not nextchar: state = None break elif nextchar in whitespace: state = u" " if token: break # emit current token else: continue else: token += nextchar if nextnextchar: stream.appendleft(nextnextchar) yield token token = u""
def check_running_job_count(): """Check upper limit on running jobs.""" try: job_list = current_k8s_batchv1_api_client.\ list_job_for_all_namespaces() if len(job_list.items) > K8S_MAXIMUM_CONCURRENT_JOBS: return False except ApiException as e: log.error('Something went wrong while getting running job list.') log.error(e) return False return True
Check upper limit on running jobs.
Below is the the instruction that describes the task: ### Input: Check upper limit on running jobs. ### Response: def check_running_job_count(): """Check upper limit on running jobs.""" try: job_list = current_k8s_batchv1_api_client.\ list_job_for_all_namespaces() if len(job_list.items) > K8S_MAXIMUM_CONCURRENT_JOBS: return False except ApiException as e: log.error('Something went wrong while getting running job list.') log.error(e) return False return True
def maybe_reshape_4d_to_3d(x): """Reshape input from 4D to 3D if necessary.""" x_shape = common_layers.shape_list(x) is_4d = False if len(x_shape) == 4: x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], x_shape[3]]) is_4d = True return x, x_shape, is_4d
Reshape input from 4D to 3D if necessary.
Below is the the instruction that describes the task: ### Input: Reshape input from 4D to 3D if necessary. ### Response: def maybe_reshape_4d_to_3d(x): """Reshape input from 4D to 3D if necessary.""" x_shape = common_layers.shape_list(x) is_4d = False if len(x_shape) == 4: x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], x_shape[3]]) is_4d = True return x, x_shape, is_4d
def start(args_string): """Launch and display a TensorBoard instance as if at the command line. Args: args_string: Command-line arguments to TensorBoard, to be interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0". Shell metacharacters are not supported: e.g., "--logdir 2>&1" will point the logdir at the literal directory named "2>&1". """ context = _get_context() try: import IPython import IPython.display except ImportError: IPython = None if context == _CONTEXT_NONE: handle = None print("Launching TensorBoard...") else: handle = IPython.display.display( IPython.display.Pretty("Launching TensorBoard..."), display_id=True, ) def print_or_update(message): if handle is None: print(message) else: handle.update(IPython.display.Pretty(message)) parsed_args = shlex.split(args_string, comments=True, posix=True) start_result = manager.start(parsed_args) if isinstance(start_result, manager.StartLaunched): _display( port=start_result.info.port, print_message=False, display_handle=handle, ) elif isinstance(start_result, manager.StartReused): template = ( "Reusing TensorBoard on port {port} (pid {pid}), started {delta} ago. " "(Use '!kill {pid}' to kill it.)" ) message = template.format( port=start_result.info.port, pid=start_result.info.pid, delta=_time_delta_from_info(start_result.info), ) print_or_update(message) _display( port=start_result.info.port, print_message=False, display_handle=None, ) elif isinstance(start_result, manager.StartFailed): def format_stream(name, value): if value == "": return "" elif value is None: return "\n<could not read %s>" % name else: return "\nContents of %s:\n%s" % (name, value.strip()) message = ( "ERROR: Failed to launch TensorBoard (exited with %d).%s%s" % ( start_result.exit_code, format_stream("stderr", start_result.stderr), format_stream("stdout", start_result.stdout), ) ) print_or_update(message) elif isinstance(start_result, manager.StartTimedOut): message = ( "ERROR: Timed out waiting for TensorBoard to start. 
" "It may still be running as pid %d." % start_result.pid ) print_or_update(message) else: raise TypeError( "Unexpected result from `manager.start`: %r.\n" "This is a TensorBoard bug; please report it." % start_result )
Launch and display a TensorBoard instance as if at the command line. Args: args_string: Command-line arguments to TensorBoard, to be interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0". Shell metacharacters are not supported: e.g., "--logdir 2>&1" will point the logdir at the literal directory named "2>&1".
Below is the the instruction that describes the task: ### Input: Launch and display a TensorBoard instance as if at the command line. Args: args_string: Command-line arguments to TensorBoard, to be interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0". Shell metacharacters are not supported: e.g., "--logdir 2>&1" will point the logdir at the literal directory named "2>&1". ### Response: def start(args_string): """Launch and display a TensorBoard instance as if at the command line. Args: args_string: Command-line arguments to TensorBoard, to be interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0". Shell metacharacters are not supported: e.g., "--logdir 2>&1" will point the logdir at the literal directory named "2>&1". """ context = _get_context() try: import IPython import IPython.display except ImportError: IPython = None if context == _CONTEXT_NONE: handle = None print("Launching TensorBoard...") else: handle = IPython.display.display( IPython.display.Pretty("Launching TensorBoard..."), display_id=True, ) def print_or_update(message): if handle is None: print(message) else: handle.update(IPython.display.Pretty(message)) parsed_args = shlex.split(args_string, comments=True, posix=True) start_result = manager.start(parsed_args) if isinstance(start_result, manager.StartLaunched): _display( port=start_result.info.port, print_message=False, display_handle=handle, ) elif isinstance(start_result, manager.StartReused): template = ( "Reusing TensorBoard on port {port} (pid {pid}), started {delta} ago. 
" "(Use '!kill {pid}' to kill it.)" ) message = template.format( port=start_result.info.port, pid=start_result.info.pid, delta=_time_delta_from_info(start_result.info), ) print_or_update(message) _display( port=start_result.info.port, print_message=False, display_handle=None, ) elif isinstance(start_result, manager.StartFailed): def format_stream(name, value): if value == "": return "" elif value is None: return "\n<could not read %s>" % name else: return "\nContents of %s:\n%s" % (name, value.strip()) message = ( "ERROR: Failed to launch TensorBoard (exited with %d).%s%s" % ( start_result.exit_code, format_stream("stderr", start_result.stderr), format_stream("stdout", start_result.stdout), ) ) print_or_update(message) elif isinstance(start_result, manager.StartTimedOut): message = ( "ERROR: Timed out waiting for TensorBoard to start. " "It may still be running as pid %d." % start_result.pid ) print_or_update(message) else: raise TypeError( "Unexpected result from `manager.start`: %r.\n" "This is a TensorBoard bug; please report it." % start_result )
def run_once(self): """ Execute the worker once. This method will return after a file change is detected. """ self._capture_signals() self._start_monitor() try: self._run_worker() except KeyboardInterrupt: return finally: self._stop_monitor() self._restore_signals()
Execute the worker once. This method will return after a file change is detected.
Below is the the instruction that describes the task: ### Input: Execute the worker once. This method will return after a file change is detected. ### Response: def run_once(self): """ Execute the worker once. This method will return after a file change is detected. """ self._capture_signals() self._start_monitor() try: self._run_worker() except KeyboardInterrupt: return finally: self._stop_monitor() self._restore_signals()
def is_android_api(self): """ Tries to guess if the current class is an Android API class. This might be not very precise unless an apilist is given, with classes that are in fact known APIs. Such a list might be generated by using the android.jar files. :return: boolean """ # Packages found at https://developer.android.com/reference/packages.html api_candidates = ["Landroid/", "Lcom/android/internal/util", "Ldalvik/", "Ljava/", "Ljavax/", "Lorg/apache/", "Lorg/json/", "Lorg/w3c/dom/", "Lorg/xml/sax", "Lorg/xmlpull/v1/", "Ljunit/"] if not self.is_external(): # API must be external return False if self.apilist: return self.orig_class.get_name() in self.apilist else: for candidate in api_candidates: if self.orig_class.get_name().startswith(candidate): return True return False
Tries to guess if the current class is an Android API class. This might be not very precise unless an apilist is given, with classes that are in fact known APIs. Such a list might be generated by using the android.jar files. :return: boolean
Below is the the instruction that describes the task: ### Input: Tries to guess if the current class is an Android API class. This might be not very precise unless an apilist is given, with classes that are in fact known APIs. Such a list might be generated by using the android.jar files. :return: boolean ### Response: def is_android_api(self): """ Tries to guess if the current class is an Android API class. This might be not very precise unless an apilist is given, with classes that are in fact known APIs. Such a list might be generated by using the android.jar files. :return: boolean """ # Packages found at https://developer.android.com/reference/packages.html api_candidates = ["Landroid/", "Lcom/android/internal/util", "Ldalvik/", "Ljava/", "Ljavax/", "Lorg/apache/", "Lorg/json/", "Lorg/w3c/dom/", "Lorg/xml/sax", "Lorg/xmlpull/v1/", "Ljunit/"] if not self.is_external(): # API must be external return False if self.apilist: return self.orig_class.get_name() in self.apilist else: for candidate in api_candidates: if self.orig_class.get_name().startswith(candidate): return True return False
def _GetNormalizedTimestamp(self): """Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined. """ if self._normalized_timestamp is None: if self._timestamp is not None: self._normalized_timestamp = ( decimal.Decimal(self._timestamp) - self._DELPHI_TO_POSIX_BASE) self._normalized_timestamp *= definitions.SECONDS_PER_DAY return self._normalized_timestamp
Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined.
Below is the the instruction that describes the task: ### Input: Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined. ### Response: def _GetNormalizedTimestamp(self): """Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined. """ if self._normalized_timestamp is None: if self._timestamp is not None: self._normalized_timestamp = ( decimal.Decimal(self._timestamp) - self._DELPHI_TO_POSIX_BASE) self._normalized_timestamp *= definitions.SECONDS_PER_DAY return self._normalized_timestamp
def check_valid_package(package, cyg_arch='x86_64', mirrors=None): ''' Check if the package is valid on the given mirrors. Args: package: The name of the package cyg_arch: The cygwin architecture mirrors: any mirrors to check Returns (bool): True if Valid, otherwise False CLI Example: .. code-block:: bash salt '*' cyg.check_valid_package <package name> ''' if mirrors is None: mirrors = [{DEFAULT_MIRROR: DEFAULT_MIRROR_KEY}] LOG.debug('Checking Valid Mirrors: %s', mirrors) for mirror in mirrors: for mirror_url, key in mirror.items(): if package in _get_all_packages(mirror_url, cyg_arch): return True return False
Check if the package is valid on the given mirrors. Args: package: The name of the package cyg_arch: The cygwin architecture mirrors: any mirrors to check Returns (bool): True if Valid, otherwise False CLI Example: .. code-block:: bash salt '*' cyg.check_valid_package <package name>
Below is the the instruction that describes the task: ### Input: Check if the package is valid on the given mirrors. Args: package: The name of the package cyg_arch: The cygwin architecture mirrors: any mirrors to check Returns (bool): True if Valid, otherwise False CLI Example: .. code-block:: bash salt '*' cyg.check_valid_package <package name> ### Response: def check_valid_package(package, cyg_arch='x86_64', mirrors=None): ''' Check if the package is valid on the given mirrors. Args: package: The name of the package cyg_arch: The cygwin architecture mirrors: any mirrors to check Returns (bool): True if Valid, otherwise False CLI Example: .. code-block:: bash salt '*' cyg.check_valid_package <package name> ''' if mirrors is None: mirrors = [{DEFAULT_MIRROR: DEFAULT_MIRROR_KEY}] LOG.debug('Checking Valid Mirrors: %s', mirrors) for mirror in mirrors: for mirror_url, key in mirror.items(): if package in _get_all_packages(mirror_url, cyg_arch): return True return False
def set_up_phase(self, training_info, model, source): """ Prepare the phase for learning """ # To parameter groups handles properly filtering parameters that don't require gradient self._optimizer_instance = self.optimizer_factory.instantiate(model) self._source = source self.special_callback = CycleCallback( self._optimizer_instance, max_lr=self.max_lr, min_lr=self.min_lr, cycles=self.cycles, cycle_len=self.cycle_len, cycle_mult=self.cycle_mult, interpolate=self.interpolate, init_iter=self.init_iter, init_lr=self.init_lr ) return self._optimizer_instance
Prepare the phase for learning
Below is the the instruction that describes the task: ### Input: Prepare the phase for learning ### Response: def set_up_phase(self, training_info, model, source): """ Prepare the phase for learning """ # To parameter groups handles properly filtering parameters that don't require gradient self._optimizer_instance = self.optimizer_factory.instantiate(model) self._source = source self.special_callback = CycleCallback( self._optimizer_instance, max_lr=self.max_lr, min_lr=self.min_lr, cycles=self.cycles, cycle_len=self.cycle_len, cycle_mult=self.cycle_mult, interpolate=self.interpolate, init_iter=self.init_iter, init_lr=self.init_lr ) return self._optimizer_instance
def create_module(clear_target, target): """Creates a new template HFOS plugin module""" if os.path.exists(target): if clear_target: shutil.rmtree(target) else: log("Target exists! Use --clear to delete it first.", emitter='MANAGE') sys.exit(2) done = False info = None while not done: info = _ask_questionnaire() pprint(info) done = _ask('Is the above correct', default='y', data_type='bool') augmented_info = _augment_info(info) log("Constructing module %(plugin_name)s" % info) _construct_module(augmented_info, target)
Creates a new template HFOS plugin module
Below is the the instruction that describes the task: ### Input: Creates a new template HFOS plugin module ### Response: def create_module(clear_target, target): """Creates a new template HFOS plugin module""" if os.path.exists(target): if clear_target: shutil.rmtree(target) else: log("Target exists! Use --clear to delete it first.", emitter='MANAGE') sys.exit(2) done = False info = None while not done: info = _ask_questionnaire() pprint(info) done = _ask('Is the above correct', default='y', data_type='bool') augmented_info = _augment_info(info) log("Constructing module %(plugin_name)s" % info) _construct_module(augmented_info, target)
def start_server(data_stream, port=5557, hwm=10): """Start a data processing server. This command starts a server in the current process that performs the actual data processing (by retrieving data from the given data stream). It also starts a second process, the broker, which mediates between the server and the client. The broker also keeps a buffer of batches in memory. Parameters ---------- data_stream : :class:`.DataStream` The data stream to return examples from. port : int, optional The port the server and the client (training loop) will use to communicate. Defaults to 5557. hwm : int, optional The `ZeroMQ high-water mark (HWM) <http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the sending socket. Increasing this increases the buffer, which can be useful if your data preprocessing times are very random. However, it will increase memory usage. There is no easy way to tell how many batches will actually be queued with a particular HWM. Defaults to 10. Be sure to set the corresponding HWM on the receiving end as well. """ logging.basicConfig(level='INFO') context = zmq.Context() socket = context.socket(zmq.PUSH) socket.set_hwm(hwm) socket.bind('tcp://*:{}'.format(port)) it = data_stream.get_epoch_iterator() logger.info('server started') while True: try: data = next(it) stop = False logger.debug("sending {} arrays".format(len(data))) except StopIteration: it = data_stream.get_epoch_iterator() data = None stop = True logger.debug("sending StopIteration") send_arrays(socket, data, stop=stop)
Start a data processing server. This command starts a server in the current process that performs the actual data processing (by retrieving data from the given data stream). It also starts a second process, the broker, which mediates between the server and the client. The broker also keeps a buffer of batches in memory. Parameters ---------- data_stream : :class:`.DataStream` The data stream to return examples from. port : int, optional The port the server and the client (training loop) will use to communicate. Defaults to 5557. hwm : int, optional The `ZeroMQ high-water mark (HWM) <http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the sending socket. Increasing this increases the buffer, which can be useful if your data preprocessing times are very random. However, it will increase memory usage. There is no easy way to tell how many batches will actually be queued with a particular HWM. Defaults to 10. Be sure to set the corresponding HWM on the receiving end as well.
Below is the the instruction that describes the task: ### Input: Start a data processing server. This command starts a server in the current process that performs the actual data processing (by retrieving data from the given data stream). It also starts a second process, the broker, which mediates between the server and the client. The broker also keeps a buffer of batches in memory. Parameters ---------- data_stream : :class:`.DataStream` The data stream to return examples from. port : int, optional The port the server and the client (training loop) will use to communicate. Defaults to 5557. hwm : int, optional The `ZeroMQ high-water mark (HWM) <http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the sending socket. Increasing this increases the buffer, which can be useful if your data preprocessing times are very random. However, it will increase memory usage. There is no easy way to tell how many batches will actually be queued with a particular HWM. Defaults to 10. Be sure to set the corresponding HWM on the receiving end as well. ### Response: def start_server(data_stream, port=5557, hwm=10): """Start a data processing server. This command starts a server in the current process that performs the actual data processing (by retrieving data from the given data stream). It also starts a second process, the broker, which mediates between the server and the client. The broker also keeps a buffer of batches in memory. Parameters ---------- data_stream : :class:`.DataStream` The data stream to return examples from. port : int, optional The port the server and the client (training loop) will use to communicate. Defaults to 5557. hwm : int, optional The `ZeroMQ high-water mark (HWM) <http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the sending socket. Increasing this increases the buffer, which can be useful if your data preprocessing times are very random. However, it will increase memory usage. 
There is no easy way to tell how many batches will actually be queued with a particular HWM. Defaults to 10. Be sure to set the corresponding HWM on the receiving end as well. """ logging.basicConfig(level='INFO') context = zmq.Context() socket = context.socket(zmq.PUSH) socket.set_hwm(hwm) socket.bind('tcp://*:{}'.format(port)) it = data_stream.get_epoch_iterator() logger.info('server started') while True: try: data = next(it) stop = False logger.debug("sending {} arrays".format(len(data))) except StopIteration: it = data_stream.get_epoch_iterator() data = None stop = True logger.debug("sending StopIteration") send_arrays(socket, data, stop=stop)
def plane_errors(axes, covariance_matrix, sheet='upper',**kwargs): """ kwargs: traditional_layout boolean [True] Lay the stereonet out traditionally, with north at the pole of the diagram. The default is a more natural and intuitive visualization with vertical at the pole and the compass points of strike around the equator. Thus, longitude at the equator represents strike and latitude represents apparent dip at that azimuth. """ level = kwargs.pop('level',1) traditional_layout = kwargs.pop('traditional_layout',True) d = N.sqrt(covariance_matrix) ell = ellipse(**kwargs) bundle = dot(ell, d[:2]) res = d[2]*level # Switch hemispheres if PCA is upside-down # Normal vector is always correctly fit #if traditional_layout: #if axes[2,2] > 0: if axes[2,2] > 0: res *= -1 if sheet == 'upper': bundle += res elif sheet == 'lower': bundle -= res _ = dot(bundle,axes).T if traditional_layout: lon,lat = stereonet_math.cart2sph(_[2],_[0],_[1]) else: lon,lat = stereonet_math.cart2sph(-_[1],_[0],_[2]) return list(zip(lon,lat))
kwargs: traditional_layout boolean [True] Lay the stereonet out traditionally, with north at the pole of the diagram. The default is a more natural and intuitive visualization with vertical at the pole and the compass points of strike around the equator. Thus, longitude at the equator represents strike and latitude represents apparent dip at that azimuth.
Below is the the instruction that describes the task: ### Input: kwargs: traditional_layout boolean [True] Lay the stereonet out traditionally, with north at the pole of the diagram. The default is a more natural and intuitive visualization with vertical at the pole and the compass points of strike around the equator. Thus, longitude at the equator represents strike and latitude represents apparent dip at that azimuth. ### Response: def plane_errors(axes, covariance_matrix, sheet='upper',**kwargs): """ kwargs: traditional_layout boolean [True] Lay the stereonet out traditionally, with north at the pole of the diagram. The default is a more natural and intuitive visualization with vertical at the pole and the compass points of strike around the equator. Thus, longitude at the equator represents strike and latitude represents apparent dip at that azimuth. """ level = kwargs.pop('level',1) traditional_layout = kwargs.pop('traditional_layout',True) d = N.sqrt(covariance_matrix) ell = ellipse(**kwargs) bundle = dot(ell, d[:2]) res = d[2]*level # Switch hemispheres if PCA is upside-down # Normal vector is always correctly fit #if traditional_layout: #if axes[2,2] > 0: if axes[2,2] > 0: res *= -1 if sheet == 'upper': bundle += res elif sheet == 'lower': bundle -= res _ = dot(bundle,axes).T if traditional_layout: lon,lat = stereonet_math.cart2sph(_[2],_[0],_[1]) else: lon,lat = stereonet_math.cart2sph(-_[1],_[0],_[2]) return list(zip(lon,lat))
def instance_throughput_ratio(self, inst_id): """ The relative throughput of an instance compared to the backup instances. """ inst_thrp, otherThrp = self.getThroughputs(inst_id) # Backup throughput may be 0 so moving ahead only if it is not 0 r = inst_thrp / otherThrp if otherThrp and inst_thrp is not None \ else None return r
The relative throughput of an instance compared to the backup instances.
Below is the the instruction that describes the task: ### Input: The relative throughput of an instance compared to the backup instances. ### Response: def instance_throughput_ratio(self, inst_id): """ The relative throughput of an instance compared to the backup instances. """ inst_thrp, otherThrp = self.getThroughputs(inst_id) # Backup throughput may be 0 so moving ahead only if it is not 0 r = inst_thrp / otherThrp if otherThrp and inst_thrp is not None \ else None return r
def get_validators(self, id=None, endpoint=None): """ Returns the current NEO consensus nodes information and voting status. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_VALIDATORS, id=id, endpoint=endpoint)
Returns the current NEO consensus nodes information and voting status. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
Below is the the instruction that describes the task: ### Input: Returns the current NEO consensus nodes information and voting status. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call ### Response: def get_validators(self, id=None, endpoint=None): """ Returns the current NEO consensus nodes information and voting status. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_VALIDATORS, id=id, endpoint=endpoint)
def get_cache_key(datatable_class, view=None, user=None, **kwargs): """ Returns a cache key unique to the current table, and (if available) the request user. The ``view`` argument should be the class reference itself, since it is easily obtainable in contexts where the instance is not available. """ datatable_name = datatable_class.__name__ if datatable_name.endswith('_Synthesized'): datatable_name = datatable_name[:-12] datatable_id = '%s.%s' % (datatable_class.__module__, datatable_name) if CACHE_KEY_HASH: datatable_id = _hash_key_component(datatable_id) cache_key = 'datatable_%s' % (datatable_id,) if view: if not inspect.isclass(view): # Reduce view to its class view = view.__class__ view_id = '%s.%s' % (view.__module__, view.__name__) if CACHE_KEY_HASH: view_id = _hash_key_component(view_id) cache_key += '__view_%s' % (view_id,) if user and user.is_authenticated(): cache_key += '__user_%s' % (user.pk,) # All other kwargs are used directly to create a hashed suffix # Order the kwargs by key name, then convert them to their repr() values. items = sorted(kwargs.items(), key=lambda item: item[0]) values = [] for k, v in items: values.append('%r:%r' % (k, v)) if values: kwargs_id = '__'.join(values) kwargs_id = _hash_key_component(kwargs_id) cache_key += '__kwargs_%s' % (kwargs_id,) log.debug("Cache key derived for %r: %r (from kwargs %r)", datatable_class, cache_key, values) return cache_key
Returns a cache key unique to the current table, and (if available) the request user. The ``view`` argument should be the class reference itself, since it is easily obtainable in contexts where the instance is not available.
Below is the the instruction that describes the task: ### Input: Returns a cache key unique to the current table, and (if available) the request user. The ``view`` argument should be the class reference itself, since it is easily obtainable in contexts where the instance is not available. ### Response: def get_cache_key(datatable_class, view=None, user=None, **kwargs): """ Returns a cache key unique to the current table, and (if available) the request user. The ``view`` argument should be the class reference itself, since it is easily obtainable in contexts where the instance is not available. """ datatable_name = datatable_class.__name__ if datatable_name.endswith('_Synthesized'): datatable_name = datatable_name[:-12] datatable_id = '%s.%s' % (datatable_class.__module__, datatable_name) if CACHE_KEY_HASH: datatable_id = _hash_key_component(datatable_id) cache_key = 'datatable_%s' % (datatable_id,) if view: if not inspect.isclass(view): # Reduce view to its class view = view.__class__ view_id = '%s.%s' % (view.__module__, view.__name__) if CACHE_KEY_HASH: view_id = _hash_key_component(view_id) cache_key += '__view_%s' % (view_id,) if user and user.is_authenticated(): cache_key += '__user_%s' % (user.pk,) # All other kwargs are used directly to create a hashed suffix # Order the kwargs by key name, then convert them to their repr() values. items = sorted(kwargs.items(), key=lambda item: item[0]) values = [] for k, v in items: values.append('%r:%r' % (k, v)) if values: kwargs_id = '__'.join(values) kwargs_id = _hash_key_component(kwargs_id) cache_key += '__kwargs_%s' % (kwargs_id,) log.debug("Cache key derived for %r: %r (from kwargs %r)", datatable_class, cache_key, values) return cache_key
def getTableColumns(table, columns, namespace = "default", network = "current", host=cytoscape_host,port=cytoscape_port,verbose=False): """ Gets tables from cytoscape :param table: table to retrieve eg. node :param columns: columns to retrieve in list format :param namespace: namepsace, default="default" :param network: a network name or id, default="current" :param host: cytoscape host address, default=cytoscape_host :param port: cytoscape port, default=1234 :param verbose: print more information :returns: a pandas dataframe """ if type(network) != int: network=cytoscape("network", "get attribute",\ {"network":network,\ "namespace":namespace,\ "columnList":"SUID"},host=host,port=port) network=network[0]["SUID"] df=pd.DataFrame() def target(column): URL="http://"+str(host)+":"+str(port)+"/v1/networks/"+str(network)+"/tables/"+namespace+table+"/columns/"+column if verbose: print("'"+URL+"'") sys.stdout.flush() response = urllib2.urlopen(URL) response = response.read() colA=json.loads(response) col=pd.DataFrame() colHeader=colA["name"] colValues=colA["values"] col[colHeader]=colValues return col ncols=["name"] for c in columns: ncols.append(c.replace(" ","%20") ) for c in ncols: try: col=target(c) df=pd.concat([df,col],axis=1) except: print("Could not find "+c) sys.stdout.flush() df.index=df["name"].tolist() df=df.drop(["name"],axis=1) return df
Gets tables from cytoscape :param table: table to retrieve eg. node :param columns: columns to retrieve in list format :param namespace: namepsace, default="default" :param network: a network name or id, default="current" :param host: cytoscape host address, default=cytoscape_host :param port: cytoscape port, default=1234 :param verbose: print more information :returns: a pandas dataframe
Below is the the instruction that describes the task: ### Input: Gets tables from cytoscape :param table: table to retrieve eg. node :param columns: columns to retrieve in list format :param namespace: namepsace, default="default" :param network: a network name or id, default="current" :param host: cytoscape host address, default=cytoscape_host :param port: cytoscape port, default=1234 :param verbose: print more information :returns: a pandas dataframe ### Response: def getTableColumns(table, columns, namespace = "default", network = "current", host=cytoscape_host,port=cytoscape_port,verbose=False): """ Gets tables from cytoscape :param table: table to retrieve eg. node :param columns: columns to retrieve in list format :param namespace: namepsace, default="default" :param network: a network name or id, default="current" :param host: cytoscape host address, default=cytoscape_host :param port: cytoscape port, default=1234 :param verbose: print more information :returns: a pandas dataframe """ if type(network) != int: network=cytoscape("network", "get attribute",\ {"network":network,\ "namespace":namespace,\ "columnList":"SUID"},host=host,port=port) network=network[0]["SUID"] df=pd.DataFrame() def target(column): URL="http://"+str(host)+":"+str(port)+"/v1/networks/"+str(network)+"/tables/"+namespace+table+"/columns/"+column if verbose: print("'"+URL+"'") sys.stdout.flush() response = urllib2.urlopen(URL) response = response.read() colA=json.loads(response) col=pd.DataFrame() colHeader=colA["name"] colValues=colA["values"] col[colHeader]=colValues return col ncols=["name"] for c in columns: ncols.append(c.replace(" ","%20") ) for c in ncols: try: col=target(c) df=pd.concat([df,col],axis=1) except: print("Could not find "+c) sys.stdout.flush() df.index=df["name"].tolist() df=df.drop(["name"],axis=1) return df
def cleanup(self): """Gracefully exit the SSH session.""" self.exit_config_mode() self.write_channel("logout" + self.RETURN) count = 0 while count <= 5: time.sleep(0.5) output = self.read_channel() if "Do you want to log out" in output: self._session_log_fin = True self.write_channel("y" + self.RETURN) # Don't automatically save the config (user's responsibility) elif "Do you want to save the current" in output: self._session_log_fin = True self.write_channel("n" + self.RETURN) try: self.write_channel(self.RETURN) except socket.error: break count += 1
Gracefully exit the SSH session.
Below is the the instruction that describes the task: ### Input: Gracefully exit the SSH session. ### Response: def cleanup(self): """Gracefully exit the SSH session.""" self.exit_config_mode() self.write_channel("logout" + self.RETURN) count = 0 while count <= 5: time.sleep(0.5) output = self.read_channel() if "Do you want to log out" in output: self._session_log_fin = True self.write_channel("y" + self.RETURN) # Don't automatically save the config (user's responsibility) elif "Do you want to save the current" in output: self._session_log_fin = True self.write_channel("n" + self.RETURN) try: self.write_channel(self.RETURN) except socket.error: break count += 1
def max_tangent_sphere(mesh, points, inwards=True, normals=None, threshold=1e-6, max_iter=100): """ Find the center and radius of the sphere which is tangent to the mesh at the given point and at least one more point with no non-tangential intersections with the mesh. Masatomo Inui, Nobuyuki Umezu & Ryohei Shimane (2016) Shrinking sphere: A parallel algorithm for computing the thickness of 3D objects, Computer-Aided Design and Applications, 13:2, 199-207, DOI: 10.1080/16864360.2015.1084186 Parameters ---------- points : (n,3) float, list of points in space inwards : bool, whether to have the sphere inside or outside the mesh normals : (n,3) float, normals of the mesh at the given points None, compute this automatically. Returns ---------- centers : (n,3) float, centers of spheres radii : (n,) float, radii of spheres """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): raise ValueError('points must be (n,3)!') if normals is not None: normals = np.asanyarray(normals, dtype=np.float64) if not util.is_shape(normals, (-1, 3)): raise ValueError('normals must be (n,3)!') if len(points) != len(normals): raise ValueError('number of points must equal number of normals!') else: normals = mesh.face_normals[closest_point(mesh, points)[2]] if inwards: normals = -normals # Find initial tangent spheres distances = longest_ray(mesh, points, normals) radii = distances * 0.5 not_converged = np.ones(len(points), dtype=np.bool) # boolean mask # If ray is infinite, find the vertex which is furthest from our point # when projected onto the ray. I.e. find v which maximises # (v-p).n = v.n - p.n. # We use a loop rather a vectorised approach to reduce memory cost # it also seems to run faster. 
for i in np.where(np.isinf(distances))[0]: projections = np.dot(mesh.vertices - points[i], normals[i]) # If no points lie outside the tangent plane, then the radius is infinite # otherwise we have a point outside the tangent plane, take the one with maximal # projection if projections.max() < tol.planar: radii[i] = np.inf not_converged[i] = False else: vertex = mesh.vertices[projections.argmax()] radii[i] = (np.dot(vertex - points[i], vertex - points[i]) / (2 * np.dot(vertex - points[i], normals[i]))) # Compute centers centers = points + normals * np.nan_to_num(radii.reshape(-1, 1)) centers[np.isinf(radii)] = [np.nan, np.nan, np.nan] # Our iterative process terminates when the difference in sphere # radius is less than threshold*D D = np.linalg.norm(mesh.bounds[1] - mesh.bounds[0]) convergence_threshold = threshold * D n_iter = 0 while not_converged.sum() > 0 and n_iter < max_iter: n_iter += 1 n_points, n_dists, n_faces = mesh.nearest.on_surface( centers[not_converged]) # If the distance to the nearest point is the same as the distance # to the start point then we are done. done = np.abs( n_dists - np.linalg.norm( centers[not_converged] - points[not_converged], axis=1)) < tol.planar not_converged[np.where(not_converged)[0][done]] = False # Otherwise find the radius and center of the sphere tangent to the mesh # at the point and the nearest point. diff = n_points[~done] - points[not_converged] old_radii = radii[not_converged].copy() # np.einsum produces element wise dot product radii[not_converged] = (np.einsum('ij, ij->i', diff, diff) / (2 * np.einsum('ij, ij->i', diff, normals[not_converged]))) centers[not_converged] = points[not_converged] + \ normals[not_converged] * radii[not_converged].reshape(-1, 1) # If change in radius is less than threshold we have converged cvged = old_radii - radii[not_converged] < convergence_threshold not_converged[np.where(not_converged)[0][cvged]] = False return centers, radii
Find the center and radius of the sphere which is tangent to the mesh at the given point and at least one more point with no non-tangential intersections with the mesh. Masatomo Inui, Nobuyuki Umezu & Ryohei Shimane (2016) Shrinking sphere: A parallel algorithm for computing the thickness of 3D objects, Computer-Aided Design and Applications, 13:2, 199-207, DOI: 10.1080/16864360.2015.1084186 Parameters ---------- points : (n,3) float, list of points in space inwards : bool, whether to have the sphere inside or outside the mesh normals : (n,3) float, normals of the mesh at the given points None, compute this automatically. Returns ---------- centers : (n,3) float, centers of spheres radii : (n,) float, radii of spheres
Below is the the instruction that describes the task: ### Input: Find the center and radius of the sphere which is tangent to the mesh at the given point and at least one more point with no non-tangential intersections with the mesh. Masatomo Inui, Nobuyuki Umezu & Ryohei Shimane (2016) Shrinking sphere: A parallel algorithm for computing the thickness of 3D objects, Computer-Aided Design and Applications, 13:2, 199-207, DOI: 10.1080/16864360.2015.1084186 Parameters ---------- points : (n,3) float, list of points in space inwards : bool, whether to have the sphere inside or outside the mesh normals : (n,3) float, normals of the mesh at the given points None, compute this automatically. Returns ---------- centers : (n,3) float, centers of spheres radii : (n,) float, radii of spheres ### Response: def max_tangent_sphere(mesh, points, inwards=True, normals=None, threshold=1e-6, max_iter=100): """ Find the center and radius of the sphere which is tangent to the mesh at the given point and at least one more point with no non-tangential intersections with the mesh. Masatomo Inui, Nobuyuki Umezu & Ryohei Shimane (2016) Shrinking sphere: A parallel algorithm for computing the thickness of 3D objects, Computer-Aided Design and Applications, 13:2, 199-207, DOI: 10.1080/16864360.2015.1084186 Parameters ---------- points : (n,3) float, list of points in space inwards : bool, whether to have the sphere inside or outside the mesh normals : (n,3) float, normals of the mesh at the given points None, compute this automatically. 
Returns ---------- centers : (n,3) float, centers of spheres radii : (n,) float, radii of spheres """ points = np.asanyarray(points, dtype=np.float64) if not util.is_shape(points, (-1, 3)): raise ValueError('points must be (n,3)!') if normals is not None: normals = np.asanyarray(normals, dtype=np.float64) if not util.is_shape(normals, (-1, 3)): raise ValueError('normals must be (n,3)!') if len(points) != len(normals): raise ValueError('number of points must equal number of normals!') else: normals = mesh.face_normals[closest_point(mesh, points)[2]] if inwards: normals = -normals # Find initial tangent spheres distances = longest_ray(mesh, points, normals) radii = distances * 0.5 not_converged = np.ones(len(points), dtype=np.bool) # boolean mask # If ray is infinite, find the vertex which is furthest from our point # when projected onto the ray. I.e. find v which maximises # (v-p).n = v.n - p.n. # We use a loop rather a vectorised approach to reduce memory cost # it also seems to run faster. 
for i in np.where(np.isinf(distances))[0]: projections = np.dot(mesh.vertices - points[i], normals[i]) # If no points lie outside the tangent plane, then the radius is infinite # otherwise we have a point outside the tangent plane, take the one with maximal # projection if projections.max() < tol.planar: radii[i] = np.inf not_converged[i] = False else: vertex = mesh.vertices[projections.argmax()] radii[i] = (np.dot(vertex - points[i], vertex - points[i]) / (2 * np.dot(vertex - points[i], normals[i]))) # Compute centers centers = points + normals * np.nan_to_num(radii.reshape(-1, 1)) centers[np.isinf(radii)] = [np.nan, np.nan, np.nan] # Our iterative process terminates when the difference in sphere # radius is less than threshold*D D = np.linalg.norm(mesh.bounds[1] - mesh.bounds[0]) convergence_threshold = threshold * D n_iter = 0 while not_converged.sum() > 0 and n_iter < max_iter: n_iter += 1 n_points, n_dists, n_faces = mesh.nearest.on_surface( centers[not_converged]) # If the distance to the nearest point is the same as the distance # to the start point then we are done. done = np.abs( n_dists - np.linalg.norm( centers[not_converged] - points[not_converged], axis=1)) < tol.planar not_converged[np.where(not_converged)[0][done]] = False # Otherwise find the radius and center of the sphere tangent to the mesh # at the point and the nearest point. diff = n_points[~done] - points[not_converged] old_radii = radii[not_converged].copy() # np.einsum produces element wise dot product radii[not_converged] = (np.einsum('ij, ij->i', diff, diff) / (2 * np.einsum('ij, ij->i', diff, normals[not_converged]))) centers[not_converged] = points[not_converged] + \ normals[not_converged] * radii[not_converged].reshape(-1, 1) # If change in radius is less than threshold we have converged cvged = old_radii - radii[not_converged] < convergence_threshold not_converged[np.where(not_converged)[0][cvged]] = False return centers, radii
def variable_length_to_fixed_length_categorical( self, left_edge=4, right_edge=4, max_length=15): """ Encode variable-length sequences using a fixed-length encoding designed for preserving the anchor positions of class I peptides. The sequences must be of length at least left_edge + right_edge, and at most max_length. Parameters ---------- left_edge : int, size of fixed-position left side right_edge : int, size of the fixed-position right side max_length : sequence length of the resulting encoding Returns ------- numpy.array of integers with shape (num sequences, max_length) """ cache_key = ( "fixed_length_categorical", left_edge, right_edge, max_length) if cache_key not in self.encoding_cache: fixed_length_sequences = ( self.sequences_to_fixed_length_index_encoded_array( self.sequences, left_edge=left_edge, right_edge=right_edge, max_length=max_length)) self.encoding_cache[cache_key] = fixed_length_sequences return self.encoding_cache[cache_key]
Encode variable-length sequences using a fixed-length encoding designed for preserving the anchor positions of class I peptides. The sequences must be of length at least left_edge + right_edge, and at most max_length. Parameters ---------- left_edge : int, size of fixed-position left side right_edge : int, size of the fixed-position right side max_length : sequence length of the resulting encoding Returns ------- numpy.array of integers with shape (num sequences, max_length)
Below is the the instruction that describes the task: ### Input: Encode variable-length sequences using a fixed-length encoding designed for preserving the anchor positions of class I peptides. The sequences must be of length at least left_edge + right_edge, and at most max_length. Parameters ---------- left_edge : int, size of fixed-position left side right_edge : int, size of the fixed-position right side max_length : sequence length of the resulting encoding Returns ------- numpy.array of integers with shape (num sequences, max_length) ### Response: def variable_length_to_fixed_length_categorical( self, left_edge=4, right_edge=4, max_length=15): """ Encode variable-length sequences using a fixed-length encoding designed for preserving the anchor positions of class I peptides. The sequences must be of length at least left_edge + right_edge, and at most max_length. Parameters ---------- left_edge : int, size of fixed-position left side right_edge : int, size of the fixed-position right side max_length : sequence length of the resulting encoding Returns ------- numpy.array of integers with shape (num sequences, max_length) """ cache_key = ( "fixed_length_categorical", left_edge, right_edge, max_length) if cache_key not in self.encoding_cache: fixed_length_sequences = ( self.sequences_to_fixed_length_index_encoded_array( self.sequences, left_edge=left_edge, right_edge=right_edge, max_length=max_length)) self.encoding_cache[cache_key] = fixed_length_sequences return self.encoding_cache[cache_key]
def _create_vec_field(fval, gradf, d1x, d2x, color_axis, smooth=0): """Calculate the deformation vector field In: fval: float gradf: numpy.ndarray of shape C x h x w with C = 3 or C = 1 (color channels), h, w >= 1. d1x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. d2x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. smooth: float Width of the Gaussian kernel used for smoothing (default is 0 for no smoothing). Out: vec_field: numpy.ndarray of shape (2, h, w). """ if color_axis == 2: gradf = _transpose_image(gradf) c, h, w = gradf.shape # colors, height, width # Sum over color channels alpha1 = np.sum(gradf * d1x, axis=0) alpha2 = np.sum(gradf * d2x, axis=0) norm_squared_alpha = (alpha1 ** 2).sum() + (alpha2 ** 2).sum() # Smoothing if smooth > 0: alpha1 = gaussian_filter(alpha1, smooth) alpha2 = gaussian_filter(alpha2, smooth) norm_squared_alpha = (alpha1 ** 2).sum() + (alpha2 ** 2).sum() # In theory, we need to apply the filter a second time. alpha1 = gaussian_filter(alpha1, smooth) alpha2 = gaussian_filter(alpha2, smooth) vec_field = np.empty((h, w, 2)) vec_field[:, :, 0] = -fval * alpha1 / norm_squared_alpha vec_field[:, :, 1] = -fval * alpha2 / norm_squared_alpha return vec_field
Calculate the deformation vector field In: fval: float gradf: numpy.ndarray of shape C x h x w with C = 3 or C = 1 (color channels), h, w >= 1. d1x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. d2x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. smooth: float Width of the Gaussian kernel used for smoothing (default is 0 for no smoothing). Out: vec_field: numpy.ndarray of shape (2, h, w).
Below is the the instruction that describes the task: ### Input: Calculate the deformation vector field In: fval: float gradf: numpy.ndarray of shape C x h x w with C = 3 or C = 1 (color channels), h, w >= 1. d1x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. d2x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. smooth: float Width of the Gaussian kernel used for smoothing (default is 0 for no smoothing). Out: vec_field: numpy.ndarray of shape (2, h, w). ### Response: def _create_vec_field(fval, gradf, d1x, d2x, color_axis, smooth=0): """Calculate the deformation vector field In: fval: float gradf: numpy.ndarray of shape C x h x w with C = 3 or C = 1 (color channels), h, w >= 1. d1x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. d2x: numpy.ndarray of shape C x h x w and [type] = 'Float' or 'Double'. smooth: float Width of the Gaussian kernel used for smoothing (default is 0 for no smoothing). Out: vec_field: numpy.ndarray of shape (2, h, w). """ if color_axis == 2: gradf = _transpose_image(gradf) c, h, w = gradf.shape # colors, height, width # Sum over color channels alpha1 = np.sum(gradf * d1x, axis=0) alpha2 = np.sum(gradf * d2x, axis=0) norm_squared_alpha = (alpha1 ** 2).sum() + (alpha2 ** 2).sum() # Smoothing if smooth > 0: alpha1 = gaussian_filter(alpha1, smooth) alpha2 = gaussian_filter(alpha2, smooth) norm_squared_alpha = (alpha1 ** 2).sum() + (alpha2 ** 2).sum() # In theory, we need to apply the filter a second time. alpha1 = gaussian_filter(alpha1, smooth) alpha2 = gaussian_filter(alpha2, smooth) vec_field = np.empty((h, w, 2)) vec_field[:, :, 0] = -fval * alpha1 / norm_squared_alpha vec_field[:, :, 1] = -fval * alpha2 / norm_squared_alpha return vec_field
def start_scan(self, active):
    """Begin scanning.

    Dispatches the start-scan command (with the requested *active*
    flag) to the command task and records that scanning is underway.
    """
    command = ['_start_scan', active]
    self._command_task.sync_command(command)
    self.scanning = True
Start the scanning task
Below is the the instruction that describes the task: ### Input: Start the scanning task ### Response: def start_scan(self, active): """Start the scanning task""" self._command_task.sync_command(['_start_scan', active]) self.scanning = True
def greater_than_pi_constraints(self):
    """get the names of the prior information eqs that are listed as
    greater than inequality constraints.  Zero-weighted pi are skipped.

    Returns
    -------
    pandas.Series :
        pilbl of prior information that are non-zero weighted
        greater than constraints
    """
    pi = self.prior_information

    def _active_gt(row):
        # Keep the row only if its group marks a greater-than
        # constraint AND it carries a non-zero weight.
        return self._is_greater_const(row.obgnme) and row.weight != 0.0

    keep = pi.apply(_active_gt, axis=1)
    return pi.loc[keep, "pilbl"]
get the names of the prior information eqs that are listed as greater than inequality constraints. Zero- weighted pi are skipped Returns ------- pandas.Series : pilbl of prior information that are non-zero weighted greater than constraints
Below is the the instruction that describes the task: ### Input: get the names of the prior information eqs that are listed as greater than inequality constraints. Zero- weighted pi are skipped Returns ------- pandas.Series : pilbl of prior information that are non-zero weighted greater than constraints ### Response: def greater_than_pi_constraints(self): """get the names of the prior information eqs that are listed as greater than inequality constraints. Zero- weighted pi are skipped Returns ------- pandas.Series : pilbl of prior information that are non-zero weighted greater than constraints """ pi = self.prior_information gt_pi = pi.loc[pi.apply(lambda x: self._is_greater_const(x.obgnme) \ and x.weight != 0.0, axis=1), "pilbl"] return gt_pi
async def run_script(self, script):
    """Send *script* to the subprocess over stdin, then close the stream.

    The script is wrapped with ``set -x``/``set +B`` preamble lines and
    a trailing ``exit`` before being encoded and written.
    """
    lines = ['set -x', 'set +B', script, 'exit']
    payload = os.linesep.join(lines) + os.linesep
    stdin = self.proc.stdin
    stdin.write(payload.encode('utf-8'))
    await stdin.drain()
    stdin.close()
Execute the script and save results.
Below is the the instruction that describes the task: ### Input: Execute the script and save results. ### Response: async def run_script(self, script): """Execute the script and save results.""" script = os.linesep.join(['set -x', 'set +B', script, 'exit']) + os.linesep self.proc.stdin.write(script.encode('utf-8')) await self.proc.stdin.drain() self.proc.stdin.close()
def array_2d_from_array_1d(self, array_1d):
    """Map a 1D array the same dimension as the grid back to its
    original masked 2D array.

    Parameters
    ----------
    array_1d : ndarray
        The 1D array which is mapped to its masked 2D array.
    """
    mask = self.mask
    return mapping_util.map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(
        array_1d=array_1d,
        shape=mask.shape,
        one_to_two=mask.masked_grid_index_to_pixel)
Map a 1D array the same dimension as the grid to its original masked 2D array. Parameters ----------- array_1d : ndarray The 1D array which is mapped to its masked 2D array.
Below is the the instruction that describes the task: ### Input: Map a 1D array the same dimension as the grid to its original masked 2D array. Parameters ----------- array_1d : ndarray The 1D array which is mapped to its masked 2D array. ### Response: def array_2d_from_array_1d(self, array_1d): """ Map a 1D array the same dimension as the grid to its original masked 2D array. Parameters ----------- array_1d : ndarray The 1D array which is mapped to its masked 2D array. """ return mapping_util.map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two( array_1d=array_1d, shape=self.mask.shape, one_to_two=self.mask.masked_grid_index_to_pixel)
def _get_response(self, endpoint, request_dict):
    """ Returns a dictionary of data requested by each function.

    Arguments:
    ----------
    endpoint: string, mandatory
        Set in all other methods, this is the API endpoint specific to each function.
    request_dict: string, mandatory
        A dictionary of parameters that are formatted into the API call.

    Returns:
    --------
        response: A dictionary that has been dumped from JSON.

    Raises:
    -------
        MesoPyError: Overrides the exceptions given in the requests library to give more
        custom error messages. Connection_error occurs if no internet connection exists.
        Timeout_error occurs if the request takes too long and redirect_error is shown if
        the url is formatted incorrectly.
    """
    http_error = 'Could not connect to the API. This could be because you have no internet connection, a parameter' \
                 ' was input incorrectly, or the API is currently down. Please try again.'
    json_error = 'Could not retrieve JSON values. Try again with a shorter date range.'

    # For python 3.4
    try:
        qsp = urllib.parse.urlencode(request_dict, doseq=True)
        resp = urllib.request.urlopen(self.base_url + endpoint + '?' + qsp).read()
    # For python 2.7.
    # BUG FIX: the original clause was ``except AttributeError or NameError:``.
    # Python evaluates ``AttributeError or NameError`` to just
    # ``AttributeError`` (``or`` returns the first truthy operand), so a
    # NameError from the py3-only names was never caught here.  A tuple of
    # exception classes is required to catch both.
    except (AttributeError, NameError):
        try:
            qsp = urllib.urlencode(request_dict, doseq=True)
            resp = urllib2.urlopen(self.base_url + endpoint + '?' + qsp).read()
        except urllib2.URLError:
            raise MesoPyError(http_error)
    except urllib.error.URLError:
        raise MesoPyError(http_error)

    try:
        json_data = json.loads(resp.decode('utf-8'))
    except ValueError:
        raise MesoPyError(json_error)
    return self._checkresponse(json_data)
Returns a dictionary of data requested by each function. Arguments: ---------- endpoint: string, mandatory Set in all other methods, this is the API endpoint specific to each function. request_dict: string, mandatory A dictionary of parameters that are formatted into the API call. Returns: -------- response: A dictionary that has been dumped from JSON. Raises: ------- MesoPyError: Overrides the exceptions given in the requests library to give more custom error messages. Connection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too long and redirect_error is shown if the url is formatted incorrectly.
Below is the the instruction that describes the task: ### Input: Returns a dictionary of data requested by each function. Arguments: ---------- endpoint: string, mandatory Set in all other methods, this is the API endpoint specific to each function. request_dict: string, mandatory A dictionary of parameters that are formatted into the API call. Returns: -------- response: A dictionary that has been dumped from JSON. Raises: ------- MesoPyError: Overrides the exceptions given in the requests library to give more custom error messages. Connection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too long and redirect_error is shown if the url is formatted incorrectly. ### Response: def _get_response(self, endpoint, request_dict): """ Returns a dictionary of data requested by each function. Arguments: ---------- endpoint: string, mandatory Set in all other methods, this is the API endpoint specific to each function. request_dict: string, mandatory A dictionary of parameters that are formatted into the API call. Returns: -------- response: A dictionary that has been dumped from JSON. Raises: ------- MesoPyError: Overrides the exceptions given in the requests library to give more custom error messages. Connection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too long and redirect_error is shown if the url is formatted incorrectly. """ http_error = 'Could not connect to the API. This could be because you have no internet connection, a parameter' \ ' was input incorrectly, or the API is currently down. Please try again.' json_error = 'Could not retrieve JSON values. Try again with a shorter date range.' # For python 3.4 try: qsp = urllib.parse.urlencode(request_dict, doseq=True) resp = urllib.request.urlopen(self.base_url + endpoint + '?' 
+ qsp).read() # For python 2.7 except AttributeError or NameError: try: qsp = urllib.urlencode(request_dict, doseq=True) resp = urllib2.urlopen(self.base_url + endpoint + '?' + qsp).read() except urllib2.URLError: raise MesoPyError(http_error) except urllib.error.URLError: raise MesoPyError(http_error) try: json_data = json.loads(resp.decode('utf-8')) except ValueError: raise MesoPyError(json_error) return self._checkresponse(json_data)