id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
20,200
plivo/sharq-server
sharq_server/server.py
SharQServer._view_clear_queue
def _view_clear_queue(self, queue_type, queue_id): """remove queueu from SharQ based on the queue_type and queue_id.""" response = { 'status': 'failure' } try: request_data = json.loads(request.data) except Exception, e: response['message'] = e.message return jsonify(**response), 400 request_data.update({ 'queue_type': queue_type, 'queue_id': queue_id }) try: response = self.sq.clear_queue(**request_data) except Exception, e: response['message'] = e.message return jsonify(**response), 400 return jsonify(**response)
python
def _view_clear_queue(self, queue_type, queue_id):
    """Remove a queue from SharQ based on the queue_type and queue_id.

    Flask view: returns (json, 400) on bad request data or a SharQ
    failure, otherwise the JSON response from ``clear_queue``.
    """
    response = {
        'status': 'failure'
    }
    try:
        request_data = json.loads(request.data)
    except Exception as e:  # fixed Py2-only `except Exception, e` syntax
        # str(e) replaces `e.message`, which no longer exists on Python 3
        response['message'] = str(e)
        return jsonify(**response), 400
    request_data.update({
        'queue_type': queue_type,
        'queue_id': queue_id
    })
    try:
        response = self.sq.clear_queue(**request_data)
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400
    return jsonify(**response)
[ "def", "_view_clear_queue", "(", "self", ",", "queue_type", ",", "queue_id", ")", ":", "response", "=", "{", "'status'", ":", "'failure'", "}", "try", ":", "request_data", "=", "json", ".", "loads", "(", "request", ".", "data", ")", "except", "Exception", ...
remove queue from SharQ based on the queue_type and queue_id.
[ "remove", "queueu", "from", "SharQ", "based", "on", "the", "queue_type", "and", "queue_id", "." ]
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L180-L201
20,201
depop/python-automock
automock/base.py
start_patching
def start_patching(name=None): # type: (Optional[str]) -> None """ Initiate mocking of the functions listed in `_factory_map`. For this to work reliably all mocked helper functions should be imported and used like this: import dp_paypal.client as paypal res = paypal.do_paypal_express_checkout(...) (i.e. don't use `from dp_paypal.client import x` import style) Kwargs: name (Optional[str]): if given, only patch the specified path, else all defined default mocks """ global _factory_map, _patchers, _mocks if _patchers and name is None: warnings.warn('start_patching() called again, already patched') _pre_import() if name is not None: factory = _factory_map[name] items = [(name, factory)] else: items = _factory_map.items() for name, factory in items: patcher = mock.patch(name, new=factory()) mocked = patcher.start() _patchers[name] = patcher _mocks[name] = mocked
python
def start_patching(name=None):
    # type: (Optional[str]) -> None
    """Begin mocking the registered default factories.

    When `name` is given only that path is patched; otherwise every
    entry in `_factory_map` is patched. Warns if mocking is already
    active and no specific name was requested.
    """
    global _factory_map, _patchers, _mocks
    if name is None and _patchers:
        warnings.warn('start_patching() called again, already patched')
    _pre_import()
    if name is None:
        to_patch = _factory_map.items()
    else:
        to_patch = [(name, _factory_map[name])]
    for target, factory in to_patch:
        active_patcher = mock.patch(target, new=factory())
        _mocks[target] = active_patcher.start()
        _patchers[target] = active_patcher
[ "def", "start_patching", "(", "name", "=", "None", ")", ":", "# type: (Optional[str]) -> None", "global", "_factory_map", ",", "_patchers", ",", "_mocks", "if", "_patchers", "and", "name", "is", "None", ":", "warnings", ".", "warn", "(", "'start_patching() called ...
Initiate mocking of the functions listed in `_factory_map`. For this to work reliably all mocked helper functions should be imported and used like this: import dp_paypal.client as paypal res = paypal.do_paypal_express_checkout(...) (i.e. don't use `from dp_paypal.client import x` import style) Kwargs: name (Optional[str]): if given, only patch the specified path, else all defined default mocks
[ "Initiate", "mocking", "of", "the", "functions", "listed", "in", "_factory_map", "." ]
8a02acecd9265c8f9a00d7b8e097cae87cdf28bd
https://github.com/depop/python-automock/blob/8a02acecd9265c8f9a00d7b8e097cae87cdf28bd/automock/base.py#L88-L121
20,202
depop/python-automock
automock/base.py
stop_patching
def stop_patching(name=None): # type: (Optional[str]) -> None """ Finish the mocking initiated by `start_patching` Kwargs: name (Optional[str]): if given, only unpatch the specified path, else all defined default mocks """ global _patchers, _mocks if not _patchers: warnings.warn('stop_patching() called again, already stopped') if name is not None: items = [(name, _patchers[name])] else: items = list(_patchers.items()) for name, patcher in items: patcher.stop() del _patchers[name] del _mocks[name]
python
def stop_patching(name=None):
    # type: (Optional[str]) -> None
    """Undo the mocking started by `start_patching`.

    When `name` is given only that path is unpatched; otherwise all
    active patchers are stopped. Warns if nothing is patched.
    """
    global _patchers, _mocks
    if not _patchers:
        warnings.warn('stop_patching() called again, already stopped')
    if name is None:
        to_stop = list(_patchers.items())
    else:
        to_stop = [(name, _patchers[name])]
    for target, active_patcher in to_stop:
        active_patcher.stop()
        del _mocks[target]
        del _patchers[target]
[ "def", "stop_patching", "(", "name", "=", "None", ")", ":", "# type: (Optional[str]) -> None", "global", "_patchers", ",", "_mocks", "if", "not", "_patchers", ":", "warnings", ".", "warn", "(", "'stop_patching() called again, already stopped'", ")", "if", "name", "i...
Finish the mocking initiated by `start_patching` Kwargs: name (Optional[str]): if given, only unpatch the specified path, else all defined default mocks
[ "Finish", "the", "mocking", "initiated", "by", "start_patching" ]
8a02acecd9265c8f9a00d7b8e097cae87cdf28bd
https://github.com/depop/python-automock/blob/8a02acecd9265c8f9a00d7b8e097cae87cdf28bd/automock/base.py#L124-L145
20,203
matousc89/padasip
padasip/preprocess/standardize_back.py
standardize_back
def standardize_back(xs, offset, scale): """ This is function for de-standarization of input series. **Args:** * `xs` : standardized input (1 dimensional array) * `offset` : offset to add (float). * `scale` : scale (float). **Returns:** * `x` : original (destandardised) series """ try: offset = float(offset) except: raise ValueError('The argument offset is not None or float.') try: scale = float(scale) except: raise ValueError('The argument scale is not None or float.') try: xs = np.array(xs, dtype="float64") except: raise ValueError('The argument xs is not numpy array or similar.') return xs*scale + offset
python
def standardize_back(xs, offset, scale):
    """De-standardize a series: returns ``xs * scale + offset``.

    Args:
        xs: standardized input (1d array-like)
        offset: offset to add back (float or convertible)
        scale: scale to multiply by (float or convertible)

    Returns:
        numpy float64 array with the original (de-standardized) series.

    Raises:
        ValueError: if any argument cannot be converted.
    """
    # `except Exception` instead of bare `except:` so KeyboardInterrupt
    # and SystemExit are not swallowed and re-reported as ValueError.
    try:
        offset = float(offset)
    except Exception:
        raise ValueError('The argument offset is not None or float.')
    try:
        scale = float(scale)
    except Exception:
        raise ValueError('The argument scale is not None or float.')
    try:
        xs = np.array(xs, dtype="float64")
    except Exception:
        raise ValueError('The argument xs is not numpy array or similar.')
    return xs*scale + offset
[ "def", "standardize_back", "(", "xs", ",", "offset", ",", "scale", ")", ":", "try", ":", "offset", "=", "float", "(", "offset", ")", "except", ":", "raise", "ValueError", "(", "'The argument offset is not None or float.'", ")", "try", ":", "scale", "=", "flo...
This is function for de-standardization of input series. **Args:** * `xs` : standardized input (1 dimensional array) * `offset` : offset to add (float). * `scale` : scale (float). **Returns:** * `x` : original (de-standardized) series
[ "This", "is", "function", "for", "de", "-", "standarization", "of", "input", "series", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/standardize_back.py#L33-L62
20,204
matousc89/padasip
padasip/preprocess/standardize.py
standardize
def standardize(x, offset=None, scale=None): """ This is function for standarization of input series. **Args:** * `x` : series (1 dimensional array) **Kwargs:** * `offset` : offset to remove (float). If not given, \ the mean value of `x` is used. * `scale` : scale (float). If not given, \ the standard deviation of `x` is used. **Returns:** * `xs` : standardized series """ if offset == None: offset = np.array(x).mean() else: try: offset = float(offset) except: raise ValueError('The argument offset is not None or float') if scale == None: scale = np.array(x).std() else: try: scale = float(scale) except: raise ValueError('The argument scale is not None or float') try: x = np.array(x, dtype="float64") except: raise ValueError('The argument x is not numpy array or similar.') return (x - offset) / scale
python
def standardize(x, offset=None, scale=None):
    """Standardize a series: returns ``(x - offset) / scale``.

    Args:
        x: series (1d array-like)
        offset: offset to remove (float); defaults to mean of `x`
        scale: scale (float); defaults to standard deviation of `x`

    Returns:
        numpy float64 array with the standardized series.

    Raises:
        ValueError: if an argument cannot be converted.
    """
    # `is None` rather than `== None`: equality against a numpy array is
    # elementwise and would make the `if` raise an ambiguity error.
    if offset is None:
        offset = np.array(x).mean()
    else:
        try:
            offset = float(offset)
        except Exception:
            raise ValueError('The argument offset is not None or float')
    if scale is None:
        scale = np.array(x).std()
    else:
        try:
            scale = float(scale)
        except Exception:
            raise ValueError('The argument scale is not None or float')
    try:
        x = np.array(x, dtype="float64")
    except Exception:
        raise ValueError('The argument x is not numpy array or similar.')
    return (x - offset) / scale
[ "def", "standardize", "(", "x", ",", "offset", "=", "None", ",", "scale", "=", "None", ")", ":", "if", "offset", "==", "None", ":", "offset", "=", "np", ".", "array", "(", "x", ")", ".", "mean", "(", ")", "else", ":", "try", ":", "offset", "=",...
This is function for standardization of input series. **Args:** * `x` : series (1 dimensional array) **Kwargs:** * `offset` : offset to remove (float). If not given, \ the mean value of `x` is used. * `scale` : scale (float). If not given, \ the standard deviation of `x` is used. **Returns:** * `xs` : standardized series
[ "This", "is", "function", "for", "standarization", "of", "input", "series", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/standardize.py#L62-L100
20,205
matousc89/padasip
padasip/preprocess/input_from_history.py
input_from_history
def input_from_history(a, n, bias=False): """ This is function for creation of input matrix. **Args:** * `a` : series (1 dimensional array) * `n` : size of input matrix row (int). It means how many samples \ of previous history you want to use \ as the filter input. It also represents the filter length. **Kwargs:** * `bias` : decides if the bias is used (Boolean). If True, \ array of all ones is appended as a last column to matrix `x`. \ So matrix `x` has `n`+1 columns. **Returns:** * `x` : input matrix (2 dimensional array) \ constructed from an array `a`. The length of `x` \ is calculated as length of `a` - `n` + 1. \ If the `bias` is used, then the amount of columns is `n` if not then \ amount of columns is `n`+1). """ if not type(n) == int: raise ValueError('The argument n must be int.') if not n > 0: raise ValueError('The argument n must be greater than 0') try: a = np.array(a, dtype="float64") except: raise ValueError('The argument a is not numpy array or similar.') x = np.array([a[i:i+n] for i in range(len(a)-n+1)]) if bias: x = np.vstack((x.T, np.ones(len(x)))).T return x
python
def input_from_history(a, n, bias=False):
    """Build a filter input matrix from the history of a series.

    Args:
        a: source series (1d array-like)
        n: row length -- how many consecutive past samples form one row;
           also the filter length
        bias: when True, a column of ones is appended to the matrix

    Returns:
        2d numpy array with ``len(a) - n + 1`` rows.

    Raises:
        ValueError: on an invalid `n` or unconvertible `a`.
    """
    if type(n) is not int:
        raise ValueError('The argument n must be int.')
    if n <= 0:
        raise ValueError('The argument n must be greater than 0')
    try:
        a = np.array(a, dtype="float64")
    except:
        raise ValueError('The argument a is not numpy array or similar.')
    rows = len(a) - n + 1
    x = np.array([a[start:start + n] for start in range(rows)])
    if bias:
        x = np.vstack((x.T, np.ones(len(x)))).T
    return x
[ "def", "input_from_history", "(", "a", ",", "n", ",", "bias", "=", "False", ")", ":", "if", "not", "type", "(", "n", ")", "==", "int", ":", "raise", "ValueError", "(", "'The argument n must be int.'", ")", "if", "not", "n", ">", "0", ":", "raise", "V...
This is function for creation of input matrix. **Args:** * `a` : series (1 dimensional array) * `n` : size of input matrix row (int). It means how many samples \ of previous history you want to use \ as the filter input. It also represents the filter length. **Kwargs:** * `bias` : decides if the bias is used (Boolean). If True, \ array of all ones is appended as a last column to matrix `x`. \ So matrix `x` has `n`+1 columns. **Returns:** * `x` : input matrix (2 dimensional array) \ constructed from an array `a`. The length of `x` \ is calculated as length of `a` - `n` + 1. \ If the `bias` is used, then the amount of columns is `n` if not then \ amount of columns is `n`+1).
[ "This", "is", "function", "for", "creation", "of", "input", "matrix", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/input_from_history.py#L34-L72
20,206
matousc89/padasip
padasip/filters/base_filter.py
AdaptiveFilter.init_weights
def init_weights(self, w, n=-1): """ This function initialises the adaptive weights of the filter. **Args:** * `w` : initial weights of filter. Possible values are: * array with initial weights (1 dimensional array) of filter size * "random" : create random weights * "zeros" : create zero value weights **Kwargs:** * `n` : size of filter (int) - number of filter coefficients. **Returns:** * `y` : output value (float) calculated from input array. """ if n == -1: n = self.n if type(w) == str: if w == "random": w = np.random.normal(0, 0.5, n) elif w == "zeros": w = np.zeros(n) else: raise ValueError('Impossible to understand the w') elif len(w) == n: try: w = np.array(w, dtype="float64") except: raise ValueError('Impossible to understand the w') else: raise ValueError('Impossible to understand the w') self.w = w
python
def init_weights(self, w, n=-1):
    """Initialise the adaptive weight vector of the filter.

    Args:
        w: initial weights -- a 1d array of length `n`, or one of the
           strings "random" (normal, std 0.5) or "zeros".
        n: number of filter coefficients (int); -1 means use ``self.n``.

    Raises:
        ValueError: when `w` cannot be interpreted.
    """
    if n == -1:
        n = self.n
    if type(w) == str:
        if w == "random":
            weights = np.random.normal(0, 0.5, n)
        elif w == "zeros":
            weights = np.zeros(n)
        else:
            raise ValueError('Impossible to understand the w')
    else:
        if len(w) != n:
            raise ValueError('Impossible to understand the w')
        try:
            weights = np.array(w, dtype="float64")
        except:
            raise ValueError('Impossible to understand the w')
    self.w = weights
[ "def", "init_weights", "(", "self", ",", "w", ",", "n", "=", "-", "1", ")", ":", "if", "n", "==", "-", "1", ":", "n", "=", "self", ".", "n", "if", "type", "(", "w", ")", "==", "str", ":", "if", "w", "==", "\"random\"", ":", "w", "=", "np"...
This function initialises the adaptive weights of the filter. **Args:** * `w` : initial weights of filter. Possible values are: * array with initial weights (1 dimensional array) of filter size * "random" : create random weights * "zeros" : create zero value weights **Kwargs:** * `n` : size of filter (int) - number of filter coefficients. **Returns:** * `y` : output value (float) calculated from input array.
[ "This", "function", "initialises", "the", "adaptive", "weights", "of", "the", "filter", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L16-L56
20,207
matousc89/padasip
padasip/filters/base_filter.py
AdaptiveFilter.predict
def predict(self, x): """ This function calculates the new output value `y` from input array `x`. **Args:** * `x` : input vector (1 dimension array) in length of filter. **Returns:** * `y` : output value (float) calculated from input array. """ y = np.dot(self.w, x) return y
python
def predict(self, x):
    """Compute the filter output for one input vector.

    Args:
        x: input vector (1d array) of filter length.

    Returns:
        float: dot product of the current weights ``self.w`` with `x`.
    """
    return np.dot(self.w, x)
[ "def", "predict", "(", "self", ",", "x", ")", ":", "y", "=", "np", ".", "dot", "(", "self", ".", "w", ",", "x", ")", "return", "y" ]
This function calculates the new output value `y` from input array `x`. **Args:** * `x` : input vector (1 dimension array) in length of filter. **Returns:** * `y` : output value (float) calculated from input array.
[ "This", "function", "calculates", "the", "new", "output", "value", "y", "from", "input", "array", "x", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L58-L72
20,208
matousc89/padasip
padasip/filters/base_filter.py
AdaptiveFilter.explore_learning
def explore_learning(self, d, x, mu_start=0, mu_end=1., steps=100, ntrain=0.5, epochs=1, criteria="MSE", target_w=False): """ Test what learning rate is the best. **Args:** * `d` : desired value (1 dimensional array) * `x` : input matrix (2-dimensional array). Rows are samples, columns are input arrays. **Kwargs:** * `mu_start` : starting learning rate (float) * `mu_end` : final learning rate (float) * `steps` : how many learning rates should be tested between `mu_start` and `mu_end`. * `ntrain` : train to test ratio (float), default value is 0.5 (that means 50% of data is used for training) * `epochs` : number of training epochs (int), default value is 1. This number describes how many times the training will be repeated on dedicated part of data. * `criteria` : how should be measured the mean error (str), default value is "MSE". * `target_w` : target weights (str or 1d array), default value is False. If False, the mean error is estimated from prediction error. If an array is provided, the error between weights and `target_w` is used. **Returns:** * `errors` : mean error for tested learning rates (1 dimensional array). * `mu_range` : range of used learning rates (1d array). Every value corresponds with one value from `errors` """ mu_range = np.linspace(mu_start, mu_end, steps) errors = np.zeros(len(mu_range)) for i, mu in enumerate(mu_range): # init self.init_weights("zeros") self.mu = mu # run y, e, w = self.pretrained_run(d, x, ntrain=ntrain, epochs=epochs) if type(target_w) != bool: errors[i] = get_mean_error(w[-1]-target_w, function=criteria) else: errors[i] = get_mean_error(e, function=criteria) return errors, mu_range
python
def explore_learning(self, d, x, mu_start=0, mu_end=1., steps=100,
        ntrain=0.5, epochs=1, criteria="MSE", target_w=False):
    """Sweep the learning rate and report the mean error for each value.

    Args:
        d: desired values (1d array)
        x: input matrix (2d array), one sample per row

    Kwargs:
        mu_start, mu_end: bounds of the tested learning-rate range
        steps: number of learning rates tried between the bounds
        ntrain: train/test split ratio (default 0.5)
        epochs: training repetitions on the training part
        criteria: error measure name passed to `get_mean_error`
        target_w: if an array, error is measured against these target
            weights instead of the prediction error

    Returns:
        (errors, mu_range): mean error per tested rate, and the rates.
    """
    mu_range = np.linspace(mu_start, mu_end, steps)
    errors = np.zeros(len(mu_range))
    for idx, candidate_mu in enumerate(mu_range):
        # reset the filter, then train it with this learning rate
        self.init_weights("zeros")
        self.mu = candidate_mu
        y, e, w = self.pretrained_run(d, x, ntrain=ntrain, epochs=epochs)
        # score either against target weights or the prediction error
        if type(target_w) != bool:
            errors[idx] = get_mean_error(w[-1] - target_w, function=criteria)
        else:
            errors[idx] = get_mean_error(e, function=criteria)
    return errors, mu_range
[ "def", "explore_learning", "(", "self", ",", "d", ",", "x", ",", "mu_start", "=", "0", ",", "mu_end", "=", "1.", ",", "steps", "=", "100", ",", "ntrain", "=", "0.5", ",", "epochs", "=", "1", ",", "criteria", "=", "\"MSE\"", ",", "target_w", "=", ...
Test what learning rate is the best. **Args:** * `d` : desired value (1 dimensional array) * `x` : input matrix (2-dimensional array). Rows are samples, columns are input arrays. **Kwargs:** * `mu_start` : starting learning rate (float) * `mu_end` : final learning rate (float) * `steps` : how many learning rates should be tested between `mu_start` and `mu_end`. * `ntrain` : train to test ratio (float), default value is 0.5 (that means 50% of data is used for training) * `epochs` : number of training epochs (int), default value is 1. This number describes how many times the training will be repeated on dedicated part of data. * `criteria` : how should be measured the mean error (str), default value is "MSE". * `target_w` : target weights (str or 1d array), default value is False. If False, the mean error is estimated from prediction error. If an array is provided, the error between weights and `target_w` is used. **Returns:** * `errors` : mean error for tested learning rates (1 dimensional array). * `mu_range` : range of used learning rates (1d array). Every value corresponds with one value from `errors`
[ "Test", "what", "learning", "rate", "is", "the", "best", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L112-L168
20,209
matousc89/padasip
padasip/filters/base_filter.py
AdaptiveFilter.check_float_param
def check_float_param(self, param, low, high, name): """ Check if the value of the given parameter is in the given range and a float. Designed for testing parameters like `mu` and `eps`. To pass this function the variable `param` must be able to be converted into a float with a value between `low` and `high`. **Args:** * `param` : parameter to check (float or similar) * `low` : lowest allowed value (float), or None * `high` : highest allowed value (float), or None * `name` : name of the parameter (string), it is used for an error message **Returns:** * `param` : checked parameter converted to float """ try: param = float(param) except: raise ValueError( 'Parameter {} is not float or similar'.format(name) ) if low != None or high != None: if not low <= param <= high: raise ValueError('Parameter {} is not in range <{}, {}>' .format(name, low, high)) return param
python
def check_float_param(self, param, low, high, name):
    """Validate that `param` converts to float and lies within bounds.

    Args:
        param: value to check (float or convertible)
        low: lowest allowed value (float), or None for no lower bound
        high: highest allowed value (float), or None for no upper bound
        name: parameter name used in error messages

    Returns:
        float: the validated, converted parameter.

    Raises:
        ValueError: if conversion fails or the value is out of range.
    """
    try:
        param = float(param)
    except Exception:
        raise ValueError(
            'Parameter {} is not float or similar'.format(name)
            )
    # Check each bound independently: the original chained comparison
    # `low <= param <= high` raised TypeError on Python 3 whenever
    # exactly one bound was None instead of the intended ValueError.
    if (low is not None and param < low) or \
            (high is not None and param > high):
        raise ValueError('Parameter {} is not in range <{}, {}>'
            .format(name, low, high))
    return param
[ "def", "check_float_param", "(", "self", ",", "param", ",", "low", ",", "high", ",", "name", ")", ":", "try", ":", "param", "=", "float", "(", "param", ")", "except", ":", "raise", "ValueError", "(", "'Parameter {} is not float or similar'", ".", "format", ...
Check if the value of the given parameter is in the given range and a float. Designed for testing parameters like `mu` and `eps`. To pass this function the variable `param` must be able to be converted into a float with a value between `low` and `high`. **Args:** * `param` : parameter to check (float or similar) * `low` : lowest allowed value (float), or None * `high` : highest allowed value (float), or None * `name` : name of the parameter (string), it is used for an error message **Returns:** * `param` : checked parameter converted to float
[ "Check", "if", "the", "value", "of", "the", "given", "parameter", "is", "in", "the", "given", "range", "and", "a", "float", ".", "Designed", "for", "testing", "parameters", "like", "mu", "and", "eps", ".", "To", "pass", "this", "function", "the", "variab...
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L170-L203
20,210
matousc89/padasip
padasip/filters/base_filter.py
AdaptiveFilter.check_int_param
def check_int_param(self, param, low, high, name): """ Check if the value of the given parameter is in the given range and an int. Designed for testing parameters like `mu` and `eps`. To pass this function the variable `param` must be able to be converted into a float with a value between `low` and `high`. **Args:** * `param` : parameter to check (int or similar) * `low` : lowest allowed value (int), or None * `high` : highest allowed value (int), or None * `name` : name of the parameter (string), it is used for an error message **Returns:** * `param` : checked parameter converted to float """ try: param = int(param) except: raise ValueError( 'Parameter {} is not int or similar'.format(name) ) if low != None or high != None: if not low <= param <= high: raise ValueError('Parameter {} is not in range <{}, {}>' .format(name, low, high)) return param
python
def check_int_param(self, param, low, high, name):
    """Validate that `param` converts to int and lies within bounds.

    Args:
        param: value to check (int or convertible)
        low: lowest allowed value (int), or None for no lower bound
        high: highest allowed value (int), or None for no upper bound
        name: parameter name used in error messages

    Returns:
        int: the validated, converted parameter.

    Raises:
        ValueError: if conversion fails or the value is out of range.
    """
    try:
        param = int(param)
    except Exception:
        raise ValueError(
            'Parameter {} is not int or similar'.format(name)
            )
    # Check each bound independently: the original chained comparison
    # `low <= param <= high` raised TypeError on Python 3 whenever
    # exactly one bound was None instead of the intended ValueError.
    if (low is not None and param < low) or \
            (high is not None and param > high):
        raise ValueError('Parameter {} is not in range <{}, {}>'
            .format(name, low, high))
    return param
[ "def", "check_int_param", "(", "self", ",", "param", ",", "low", ",", "high", ",", "name", ")", ":", "try", ":", "param", "=", "int", "(", "param", ")", "except", ":", "raise", "ValueError", "(", "'Parameter {} is not int or similar'", ".", "format", "(", ...
Check if the value of the given parameter is in the given range and an int. Designed for testing parameters like `mu` and `eps`. To pass this function the variable `param` must be able to be converted into a float with a value between `low` and `high`. **Args:** * `param` : parameter to check (int or similar) * `low` : lowest allowed value (int), or None * `high` : highest allowed value (int), or None * `name` : name of the parameter (string), it is used for an error message **Returns:** * `param` : checked parameter converted to float
[ "Check", "if", "the", "value", "of", "the", "given", "parameter", "is", "in", "the", "given", "range", "and", "an", "int", ".", "Designed", "for", "testing", "parameters", "like", "mu", "and", "eps", ".", "To", "pass", "this", "function", "the", "variabl...
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L226-L259
20,211
matousc89/padasip
padasip/misc/error_evaluation.py
MAE
def MAE(x1, x2=-1): """ Mean absolute error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - MAE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2` """ e = get_valid_error(x1, x2) return np.sum(np.abs(e)) / float(len(e))
python
def MAE(x1, x2=-1):
    """Mean absolute error.

    Accepts either one error series directly (`x1`), or two data series
    whose difference is the error.

    Args:
        x1: error series, or the first data series (1d array)

    Kwargs:
        x2: second data series, when `x1` is not the error itself

    Returns:
        float: mean of the absolute error values.
    """
    err = get_valid_error(x1, x2)
    return np.abs(err).sum() / float(len(err))
[ "def", "MAE", "(", "x1", ",", "x2", "=", "-", "1", ")", ":", "e", "=", "get_valid_error", "(", "x1", ",", "x2", ")", "return", "np", ".", "sum", "(", "np", ".", "abs", "(", "e", ")", ")", "/", "float", "(", "len", "(", "e", ")", ")" ]
Mean absolute error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - MAE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2`
[ "Mean", "absolute", "error", "-", "this", "function", "accepts", "two", "series", "of", "data", "or", "directly", "one", "series", "with", "error", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/misc/error_evaluation.py#L152-L173
20,212
matousc89/padasip
padasip/misc/error_evaluation.py
MSE
def MSE(x1, x2=-1): """ Mean squared error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - MSE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2` """ e = get_valid_error(x1, x2) return np.dot(e, e) / float(len(e))
python
def MSE(x1, x2=-1):
    """Mean squared error.

    Accepts either one error series directly (`x1`), or two data series
    whose difference is the error.

    Args:
        x1: error series, or the first data series (1d array)

    Kwargs:
        x2: second data series, when `x1` is not the error itself

    Returns:
        float: mean of the squared error values.
    """
    err = get_valid_error(x1, x2)
    sample_count = float(len(err))
    return np.dot(err, err) / sample_count
[ "def", "MSE", "(", "x1", ",", "x2", "=", "-", "1", ")", ":", "e", "=", "get_valid_error", "(", "x1", ",", "x2", ")", "return", "np", ".", "dot", "(", "e", ",", "e", ")", "/", "float", "(", "len", "(", "e", ")", ")" ]
Mean squared error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - MSE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2`
[ "Mean", "squared", "error", "-", "this", "function", "accepts", "two", "series", "of", "data", "or", "directly", "one", "series", "with", "error", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/misc/error_evaluation.py#L175-L196
20,213
matousc89/padasip
padasip/misc/error_evaluation.py
RMSE
def RMSE(x1, x2=-1): """ Root-mean-square error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - RMSE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2` """ e = get_valid_error(x1, x2) return np.sqrt(np.dot(e, e) / float(len(e)))
python
def RMSE(x1, x2=-1):
    """Root-mean-square error.

    Accepts either one error series directly (`x1`), or two data series
    whose difference is the error.

    Args:
        x1: error series, or the first data series (1d array)

    Kwargs:
        x2: second data series, when `x1` is not the error itself

    Returns:
        float: square root of the mean squared error.
    """
    err = get_valid_error(x1, x2)
    mean_sq = np.dot(err, err) / float(len(err))
    return np.sqrt(mean_sq)
[ "def", "RMSE", "(", "x1", ",", "x2", "=", "-", "1", ")", ":", "e", "=", "get_valid_error", "(", "x1", ",", "x2", ")", "return", "np", ".", "sqrt", "(", "np", ".", "dot", "(", "e", ",", "e", ")", "/", "float", "(", "len", "(", "e", ")", ")...
Root-mean-square error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - RMSE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2`
[ "Root", "-", "mean", "-", "square", "error", "-", "this", "function", "accepts", "two", "series", "of", "data", "or", "directly", "one", "series", "with", "error", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/misc/error_evaluation.py#L198-L219
20,214
matousc89/padasip
padasip/detection/elbnd.py
ELBND
def ELBND(w, e, function="max"): """ This function estimates Error and Learning Based Novelty Detection measure from given data. **Args:** * `w` : history of adaptive parameters of an adaptive model (2d array), every row represents parameters in given time index. * `e` : error of adaptive model (1d array) **Kwargs:** * `functions` : output function (str). The way how to produce single value for every sample (from all parameters) * `max` - maximal value * `sum` - sum of values **Returns:** * ELBND values (1d array). This vector has same lenght as `w`. """ # check if the function is known if not function in ["max", "sum"]: raise ValueError('Unknown output function') # get length of data and number of parameters N = w.shape[0] n = w.shape[1] # get abs dw from w dw = np.zeros(w.shape) dw[:-1] = np.abs(np.diff(w, axis=0)) # absolute values of product of increments and error a = np.random.random((5,2)) b = a.T*np.array([1,2,3,4,5]) elbnd = np.abs((dw.T*e).T) # apply output function if function == "max": elbnd = np.max(elbnd, axis=1) elif function == "sum": elbnd = np.sum(elbnd, axis=1) # return output return elbnd
python
def ELBND(w, e, function="max"): # check if the function is known if not function in ["max", "sum"]: raise ValueError('Unknown output function') # get length of data and number of parameters N = w.shape[0] n = w.shape[1] # get abs dw from w dw = np.zeros(w.shape) dw[:-1] = np.abs(np.diff(w, axis=0)) # absolute values of product of increments and error a = np.random.random((5,2)) b = a.T*np.array([1,2,3,4,5]) elbnd = np.abs((dw.T*e).T) # apply output function if function == "max": elbnd = np.max(elbnd, axis=1) elif function == "sum": elbnd = np.sum(elbnd, axis=1) # return output return elbnd
[ "def", "ELBND", "(", "w", ",", "e", ",", "function", "=", "\"max\"", ")", ":", "# check if the function is known", "if", "not", "function", "in", "[", "\"max\"", ",", "\"sum\"", "]", ":", "raise", "ValueError", "(", "'Unknown output function'", ")", "# get len...
This function estimates Error and Learning Based Novelty Detection measure from given data. **Args:** * `w` : history of adaptive parameters of an adaptive model (2d array), every row represents parameters in given time index. * `e` : error of adaptive model (1d array) **Kwargs:** * `functions` : output function (str). The way how to produce single value for every sample (from all parameters) * `max` - maximal value * `sum` - sum of values **Returns:** * ELBND values (1d array). This vector has same lenght as `w`.
[ "This", "function", "estimates", "Error", "and", "Learning", "Based", "Novelty", "Detection", "measure", "from", "given", "data", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/detection/elbnd.py#L93-L138
20,215
matousc89/padasip
padasip/preprocess/lda.py
LDA_base
def LDA_base(x, labels): """ Base function used for Linear Discriminant Analysis. **Args:** * `x` : input matrix (2d array), every row represents new sample * `labels` : list of labels (iterable), every item should be label for \ sample with corresponding index **Returns:** * `eigenvalues`, `eigenvectors` : eigenvalues and eigenvectors \ from LDA analysis """ classes = np.array(tuple(set(labels))) cols = x.shape[1] # mean values for every class means = np.zeros((len(classes), cols)) for i, cl in enumerate(classes): means[i] = np.mean(x[labels==cl], axis=0) # scatter matrices scatter_within = np.zeros((cols, cols)) for cl, mean in zip(classes, means): scatter_class = np.zeros((cols, cols)) for row in x[labels == cl]: dif = row - mean scatter_class += np.dot(dif.reshape(cols, 1), dif.reshape(1, cols)) scatter_within += scatter_class total_mean = np.mean(x, axis=0) scatter_between = np.zeros((cols, cols)) for cl, mean in zip(classes, means): dif = mean - total_mean dif_product = np.dot(dif.reshape(cols, 1), dif.reshape(1, cols)) scatter_between += x[labels == cl, :].shape[0] * dif_product # eigenvalues and eigenvectors from scatter matrices scatter_product = np.dot(np.linalg.inv(scatter_within), scatter_between) eigen_values, eigen_vectors = np.linalg.eig(scatter_product) return eigen_values, eigen_vectors
python
def LDA_base(x, labels): classes = np.array(tuple(set(labels))) cols = x.shape[1] # mean values for every class means = np.zeros((len(classes), cols)) for i, cl in enumerate(classes): means[i] = np.mean(x[labels==cl], axis=0) # scatter matrices scatter_within = np.zeros((cols, cols)) for cl, mean in zip(classes, means): scatter_class = np.zeros((cols, cols)) for row in x[labels == cl]: dif = row - mean scatter_class += np.dot(dif.reshape(cols, 1), dif.reshape(1, cols)) scatter_within += scatter_class total_mean = np.mean(x, axis=0) scatter_between = np.zeros((cols, cols)) for cl, mean in zip(classes, means): dif = mean - total_mean dif_product = np.dot(dif.reshape(cols, 1), dif.reshape(1, cols)) scatter_between += x[labels == cl, :].shape[0] * dif_product # eigenvalues and eigenvectors from scatter matrices scatter_product = np.dot(np.linalg.inv(scatter_within), scatter_between) eigen_values, eigen_vectors = np.linalg.eig(scatter_product) return eigen_values, eigen_vectors
[ "def", "LDA_base", "(", "x", ",", "labels", ")", ":", "classes", "=", "np", ".", "array", "(", "tuple", "(", "set", "(", "labels", ")", ")", ")", "cols", "=", "x", ".", "shape", "[", "1", "]", "# mean values for every class", "means", "=", "np", "....
Base function used for Linear Discriminant Analysis. **Args:** * `x` : input matrix (2d array), every row represents new sample * `labels` : list of labels (iterable), every item should be label for \ sample with corresponding index **Returns:** * `eigenvalues`, `eigenvectors` : eigenvalues and eigenvectors \ from LDA analysis
[ "Base", "function", "used", "for", "Linear", "Discriminant", "Analysis", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/lda.py#L104-L144
20,216
matousc89/padasip
padasip/preprocess/lda.py
LDA
def LDA(x, labels, n=False): """ Linear Discriminant Analysis function. **Args:** * `x` : input matrix (2d array), every row represents new sample * `labels` : list of labels (iterable), every item should be label for \ sample with corresponding index **Kwargs:** * `n` : number of features returned (integer) - how many columns should the output keep **Returns:** * new_x : matrix with reduced size (number of columns are equal `n`) """ # select n if not provided if not n: n = x.shape[1] - 1 # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') assert type(n) == int, "Provided n is not an integer." assert x.shape[1] > n, "The requested n is bigger than \ number of features in x." # make the LDA eigen_values, eigen_vectors = LDA_base(x, labels) # sort the eigen vectors according to eigen values eigen_order = eigen_vectors.T[(-eigen_values).argsort()] return eigen_order[:n].dot(x.T).T
python
def LDA(x, labels, n=False): # select n if not provided if not n: n = x.shape[1] - 1 # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') assert type(n) == int, "Provided n is not an integer." assert x.shape[1] > n, "The requested n is bigger than \ number of features in x." # make the LDA eigen_values, eigen_vectors = LDA_base(x, labels) # sort the eigen vectors according to eigen values eigen_order = eigen_vectors.T[(-eigen_values).argsort()] return eigen_order[:n].dot(x.T).T
[ "def", "LDA", "(", "x", ",", "labels", ",", "n", "=", "False", ")", ":", "# select n if not provided", "if", "not", "n", ":", "n", "=", "x", ".", "shape", "[", "1", "]", "-", "1", "# validate inputs", "try", ":", "x", "=", "np", ".", "array", "("...
Linear Discriminant Analysis function. **Args:** * `x` : input matrix (2d array), every row represents new sample * `labels` : list of labels (iterable), every item should be label for \ sample with corresponding index **Kwargs:** * `n` : number of features returned (integer) - how many columns should the output keep **Returns:** * new_x : matrix with reduced size (number of columns are equal `n`)
[ "Linear", "Discriminant", "Analysis", "function", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/lda.py#L146-L181
20,217
matousc89/padasip
padasip/preprocess/lda.py
LDA_discriminants
def LDA_discriminants(x, labels): """ Linear Discriminant Analysis helper for determination how many columns of data should be reduced. **Args:** * `x` : input matrix (2d array), every row represents new sample * `labels` : list of labels (iterable), every item should be label for \ sample with corresponding index **Returns:** * `discriminants` : array of eigenvalues sorted in descending order """ # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') # make the LDA eigen_values, eigen_vectors = LDA_base(x, labels) return eigen_values[(-eigen_values).argsort()]
python
def LDA_discriminants(x, labels): # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') # make the LDA eigen_values, eigen_vectors = LDA_base(x, labels) return eigen_values[(-eigen_values).argsort()]
[ "def", "LDA_discriminants", "(", "x", ",", "labels", ")", ":", "# validate inputs", "try", ":", "x", "=", "np", ".", "array", "(", "x", ")", "except", ":", "raise", "ValueError", "(", "'Impossible to convert x to a numpy array.'", ")", "# make the LDA", "eigen_v...
Linear Discriminant Analysis helper for determination how many columns of data should be reduced. **Args:** * `x` : input matrix (2d array), every row represents new sample * `labels` : list of labels (iterable), every item should be label for \ sample with corresponding index **Returns:** * `discriminants` : array of eigenvalues sorted in descending order
[ "Linear", "Discriminant", "Analysis", "helper", "for", "determination", "how", "many", "columns", "of", "data", "should", "be", "reduced", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/lda.py#L184-L208
20,218
matousc89/padasip
padasip/filters/ocnlms.py
FilterOCNLMS.read_memory
def read_memory(self): """ This function read mean value of target`d` and input vector `x` from history """ if self.mem_empty == True: if self.mem_idx == 0: m_x = np.zeros(self.n) m_d = 0 else: m_x = np.mean(self.mem_x[:self.mem_idx+1], axis=0) m_d = np.mean(self.mem_d[:self.mem_idx]) else: m_x = np.mean(self.mem_x, axis=0) m_d = np.mean(np.delete(self.mem_d, self.mem_idx)) self.mem_idx += 1 if self.mem_idx > len(self.mem_x)-1: self.mem_idx = 0 self.mem_empty = False return m_d, m_x
python
def read_memory(self): if self.mem_empty == True: if self.mem_idx == 0: m_x = np.zeros(self.n) m_d = 0 else: m_x = np.mean(self.mem_x[:self.mem_idx+1], axis=0) m_d = np.mean(self.mem_d[:self.mem_idx]) else: m_x = np.mean(self.mem_x, axis=0) m_d = np.mean(np.delete(self.mem_d, self.mem_idx)) self.mem_idx += 1 if self.mem_idx > len(self.mem_x)-1: self.mem_idx = 0 self.mem_empty = False return m_d, m_x
[ "def", "read_memory", "(", "self", ")", ":", "if", "self", ".", "mem_empty", "==", "True", ":", "if", "self", ".", "mem_idx", "==", "0", ":", "m_x", "=", "np", ".", "zeros", "(", "self", ".", "n", ")", "m_d", "=", "0", "else", ":", "m_x", "=", ...
This function read mean value of target`d` and input vector `x` from history
[ "This", "function", "read", "mean", "value", "of", "target", "d", "and", "input", "vector", "x", "from", "history" ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/ocnlms.py#L86-L105
20,219
matousc89/padasip
padasip/detection/le.py
learning_entropy
def learning_entropy(w, m=10, order=1, alpha=False): """ This function estimates Learning Entropy. **Args:** * `w` : history of adaptive parameters of an adaptive model (2d array), every row represents parameters in given time index. **Kwargs:** * `m` : window size (1d array) - how many last samples are used for evaluation of every sample. * `order` : order of the LE (int) - order of weights differention * `alpha` : list of senstitivites (1d array). If not provided, the LE direct approach is used. **Returns:** * Learning Entropy of data (1 d array) - one value for every sample """ w = np.array(w) # get length of data and number of parameters N = w.shape[0] n = w.shape[1] # get abs dw from w dw = np.copy(w) dw[order:] = np.abs(np.diff(dw, n=order, axis=0)) # average floting window - window is k-m ... k-1 awd = np.zeros(w.shape) if not alpha: # estimate the ALPHA with multiscale approach swd = np.zeros(w.shape) for k in range(m, N): awd[k] = np.mean(dw[k-m:k], axis=0) swd[k] = np.std(dw[k-m:k], axis=0) # estimate the points of entropy eps = 1e-10 # regularization term le = (dw - awd) / (swd+eps) else: # estimate the ALPHA with direct approach for k in range(m, N): awd[k] = np.mean(dw[k-m:k], axis=0) # estimate the points of entropy alphas = np.array(alpha) fh = np.zeros(N) for alpha in alphas: fh += np.sum(awd*alpha < dw, axis=1) le = fh / float(n*len(alphas)) # clear unknown zone on begining le[:m] = 0 # return output return le
python
def learning_entropy(w, m=10, order=1, alpha=False): w = np.array(w) # get length of data and number of parameters N = w.shape[0] n = w.shape[1] # get abs dw from w dw = np.copy(w) dw[order:] = np.abs(np.diff(dw, n=order, axis=0)) # average floting window - window is k-m ... k-1 awd = np.zeros(w.shape) if not alpha: # estimate the ALPHA with multiscale approach swd = np.zeros(w.shape) for k in range(m, N): awd[k] = np.mean(dw[k-m:k], axis=0) swd[k] = np.std(dw[k-m:k], axis=0) # estimate the points of entropy eps = 1e-10 # regularization term le = (dw - awd) / (swd+eps) else: # estimate the ALPHA with direct approach for k in range(m, N): awd[k] = np.mean(dw[k-m:k], axis=0) # estimate the points of entropy alphas = np.array(alpha) fh = np.zeros(N) for alpha in alphas: fh += np.sum(awd*alpha < dw, axis=1) le = fh / float(n*len(alphas)) # clear unknown zone on begining le[:m] = 0 # return output return le
[ "def", "learning_entropy", "(", "w", ",", "m", "=", "10", ",", "order", "=", "1", ",", "alpha", "=", "False", ")", ":", "w", "=", "np", ".", "array", "(", "w", ")", "# get length of data and number of parameters", "N", "=", "w", ".", "shape", "[", "0...
This function estimates Learning Entropy. **Args:** * `w` : history of adaptive parameters of an adaptive model (2d array), every row represents parameters in given time index. **Kwargs:** * `m` : window size (1d array) - how many last samples are used for evaluation of every sample. * `order` : order of the LE (int) - order of weights differention * `alpha` : list of senstitivites (1d array). If not provided, the LE direct approach is used. **Returns:** * Learning Entropy of data (1 d array) - one value for every sample
[ "This", "function", "estimates", "Learning", "Entropy", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/detection/le.py#L145-L200
20,220
matousc89/padasip
padasip/ann/mlp.py
Layer.activation
def activation(self, x, f="sigmoid", der=False): """ This function process values of layer outputs with activation function. **Args:** * `x` : array to process (1-dimensional array) **Kwargs:** * `f` : activation function * `der` : normal output, or its derivation (bool) **Returns:** * values processed with activation function (1-dimensional array) """ if f == "sigmoid": if der: return x * (1 - x) return 1. / (1 + np.exp(-x)) elif f == "tanh": if der: return 1 - x**2 return (2. / (1 + np.exp(-2*x))) - 1
python
def activation(self, x, f="sigmoid", der=False): if f == "sigmoid": if der: return x * (1 - x) return 1. / (1 + np.exp(-x)) elif f == "tanh": if der: return 1 - x**2 return (2. / (1 + np.exp(-2*x))) - 1
[ "def", "activation", "(", "self", ",", "x", ",", "f", "=", "\"sigmoid\"", ",", "der", "=", "False", ")", ":", "if", "f", "==", "\"sigmoid\"", ":", "if", "der", ":", "return", "x", "*", "(", "1", "-", "x", ")", "return", "1.", "/", "(", "1", "...
This function process values of layer outputs with activation function. **Args:** * `x` : array to process (1-dimensional array) **Kwargs:** * `f` : activation function * `der` : normal output, or its derivation (bool) **Returns:** * values processed with activation function (1-dimensional array)
[ "This", "function", "process", "values", "of", "layer", "outputs", "with", "activation", "function", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/ann/mlp.py#L126-L152
20,221
matousc89/padasip
padasip/ann/mlp.py
NetworkMLP.train
def train(self, x, d, epochs=10, shuffle=False): """ Function for batch training of MLP. **Args:** * `x` : input array (2-dimensional array). Every row represents one input vector (features). * `d` : input array (n-dimensional array). Every row represents target for one input vector. Target can be one or more values (in case of multiple outputs). **Kwargs:** * `epochs` : amount of epochs (int). That means how many times the MLP will iterate over the passed set of data (`x`, `d`). * `shuffle` : if true, the order of inputs and outpust are shuffled (bool). That means the pairs input-output are in different order in every epoch. **Returns:** * `e`: output vector (m-dimensional array). Every row represents error (or errors) for an input and output in given epoch. The size of this array is length of provided data times amount of epochs (`N*epochs`). * `MSE` : mean squared error (1-dimensional array). Every value stands for MSE of one epoch. """ # measure the data and check if the dimmension agree N = len(x) if not len(d) == N: raise ValueError('The length of vector d and matrix x must agree.') if not len(x[0]) == self.n_input: raise ValueError('The number of network inputs is not correct.') if self.outputs == 1: if not len(d.shape) == 1: raise ValueError('For one output MLP the d must have one dimension') else: if not d.shape[1] == self.outputs: raise ValueError('The number of outputs must agree with number of columns in d') try: x = np.array(x) d = np.array(d) except: raise ValueError('Impossible to convert x or d to a numpy array') # create empty arrays if self.outputs == 1: e = np.zeros(epochs*N) else: e = np.zeros((epochs*N, self.outputs)) MSE = np.zeros(epochs) # shuffle data if demanded if shuffle: randomize = np.arange(len(x)) np.random.shuffle(randomize) x = x[randomize] d = d[randomize] # adaptation loop for epoch in range(epochs): for k in range(N): self.predict(x[k]) e[(epoch*N)+k] = self.update(d[k]) MSE[epoch] = np.sum(e[epoch*N:(epoch+1)*N-1]**2) / N 
return e, MSE
python
def train(self, x, d, epochs=10, shuffle=False): # measure the data and check if the dimmension agree N = len(x) if not len(d) == N: raise ValueError('The length of vector d and matrix x must agree.') if not len(x[0]) == self.n_input: raise ValueError('The number of network inputs is not correct.') if self.outputs == 1: if not len(d.shape) == 1: raise ValueError('For one output MLP the d must have one dimension') else: if not d.shape[1] == self.outputs: raise ValueError('The number of outputs must agree with number of columns in d') try: x = np.array(x) d = np.array(d) except: raise ValueError('Impossible to convert x or d to a numpy array') # create empty arrays if self.outputs == 1: e = np.zeros(epochs*N) else: e = np.zeros((epochs*N, self.outputs)) MSE = np.zeros(epochs) # shuffle data if demanded if shuffle: randomize = np.arange(len(x)) np.random.shuffle(randomize) x = x[randomize] d = d[randomize] # adaptation loop for epoch in range(epochs): for k in range(N): self.predict(x[k]) e[(epoch*N)+k] = self.update(d[k]) MSE[epoch] = np.sum(e[epoch*N:(epoch+1)*N-1]**2) / N return e, MSE
[ "def", "train", "(", "self", ",", "x", ",", "d", ",", "epochs", "=", "10", ",", "shuffle", "=", "False", ")", ":", "# measure the data and check if the dimmension agree", "N", "=", "len", "(", "x", ")", "if", "not", "len", "(", "d", ")", "==", "N", "...
Function for batch training of MLP. **Args:** * `x` : input array (2-dimensional array). Every row represents one input vector (features). * `d` : input array (n-dimensional array). Every row represents target for one input vector. Target can be one or more values (in case of multiple outputs). **Kwargs:** * `epochs` : amount of epochs (int). That means how many times the MLP will iterate over the passed set of data (`x`, `d`). * `shuffle` : if true, the order of inputs and outpust are shuffled (bool). That means the pairs input-output are in different order in every epoch. **Returns:** * `e`: output vector (m-dimensional array). Every row represents error (or errors) for an input and output in given epoch. The size of this array is length of provided data times amount of epochs (`N*epochs`). * `MSE` : mean squared error (1-dimensional array). Every value stands for MSE of one epoch.
[ "Function", "for", "batch", "training", "of", "MLP", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/ann/mlp.py#L267-L334
20,222
matousc89/padasip
padasip/ann/mlp.py
NetworkMLP.run
def run(self, x): """ Function for batch usage of already trained and tested MLP. **Args:** * `x` : input array (2-dimensional array). Every row represents one input vector (features). **Returns:** * `y`: output vector (n-dimensional array). Every row represents output (outputs) for an input vector. """ # measure the data and check if the dimmension agree try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array') N = len(x) # create empty arrays if self.outputs == 1: y = np.zeros(N) else: y = np.zeros((N, self.outputs)) # predict data in loop for k in range(N): y[k] = self.predict(x[k]) return y
python
def run(self, x): # measure the data and check if the dimmension agree try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array') N = len(x) # create empty arrays if self.outputs == 1: y = np.zeros(N) else: y = np.zeros((N, self.outputs)) # predict data in loop for k in range(N): y[k] = self.predict(x[k]) return y
[ "def", "run", "(", "self", ",", "x", ")", ":", "# measure the data and check if the dimmension agree", "try", ":", "x", "=", "np", ".", "array", "(", "x", ")", "except", ":", "raise", "ValueError", "(", "'Impossible to convert x to a numpy array'", ")", "N", "="...
Function for batch usage of already trained and tested MLP. **Args:** * `x` : input array (2-dimensional array). Every row represents one input vector (features). **Returns:** * `y`: output vector (n-dimensional array). Every row represents output (outputs) for an input vector.
[ "Function", "for", "batch", "usage", "of", "already", "trained", "and", "tested", "MLP", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/ann/mlp.py#L336-L365
20,223
matousc89/padasip
padasip/preprocess/pca.py
PCA_components
def PCA_components(x): """ Principal Component Analysis helper to check out eigenvalues of components. **Args:** * `x` : input matrix (2d array), every row represents new sample **Returns:** * `components`: sorted array of principal components eigenvalues """ # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') # eigen values and eigen vectors of data covariance matrix eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T)) # sort eigen vectors according biggest eigen value eigen_order = eigen_vectors.T[(-eigen_values).argsort()] # form output - order the eigenvalues return eigen_values[(-eigen_values).argsort()]
python
def PCA_components(x): # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') # eigen values and eigen vectors of data covariance matrix eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T)) # sort eigen vectors according biggest eigen value eigen_order = eigen_vectors.T[(-eigen_values).argsort()] # form output - order the eigenvalues return eigen_values[(-eigen_values).argsort()]
[ "def", "PCA_components", "(", "x", ")", ":", "# validate inputs", "try", ":", "x", "=", "np", ".", "array", "(", "x", ")", "except", ":", "raise", "ValueError", "(", "'Impossible to convert x to a numpy array.'", ")", "# eigen values and eigen vectors of data covarian...
Principal Component Analysis helper to check out eigenvalues of components. **Args:** * `x` : input matrix (2d array), every row represents new sample **Returns:** * `components`: sorted array of principal components eigenvalues
[ "Principal", "Component", "Analysis", "helper", "to", "check", "out", "eigenvalues", "of", "components", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/pca.py#L68-L91
20,224
matousc89/padasip
padasip/preprocess/pca.py
PCA
def PCA(x, n=False): """ Principal component analysis function. **Args:** * `x` : input matrix (2d array), every row represents new sample **Kwargs:** * `n` : number of features returned (integer) - how many columns should the output keep **Returns:** * `new_x` : matrix with reduced size (lower number of columns) """ # select n if not provided if not n: n = x.shape[1] - 1 # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') assert type(n) == int, "Provided n is not an integer." assert x.shape[1] > n, "The requested n is bigger than \ number of features in x." # eigen values and eigen vectors of data covariance matrix eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T)) # sort eigen vectors according biggest eigen value eigen_order = eigen_vectors.T[(-eigen_values).argsort()] # form output - reduced x matrix return eigen_order[:n].dot(x.T).T
python
def PCA(x, n=False): # select n if not provided if not n: n = x.shape[1] - 1 # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') assert type(n) == int, "Provided n is not an integer." assert x.shape[1] > n, "The requested n is bigger than \ number of features in x." # eigen values and eigen vectors of data covariance matrix eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T)) # sort eigen vectors according biggest eigen value eigen_order = eigen_vectors.T[(-eigen_values).argsort()] # form output - reduced x matrix return eigen_order[:n].dot(x.T).T
[ "def", "PCA", "(", "x", ",", "n", "=", "False", ")", ":", "# select n if not provided", "if", "not", "n", ":", "n", "=", "x", ".", "shape", "[", "1", "]", "-", "1", "# validate inputs", "try", ":", "x", "=", "np", ".", "array", "(", "x", ")", "...
Principal component analysis function. **Args:** * `x` : input matrix (2d array), every row represents new sample **Kwargs:** * `n` : number of features returned (integer) - how many columns should the output keep **Returns:** * `new_x` : matrix with reduced size (lower number of columns)
[ "Principal", "component", "analysis", "function", "." ]
c969eadd7fa181a84da0554d737fc13c6450d16f
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/pca.py#L94-L127
20,225
widdowquinn/pyani
pyani/pyani_graphics.py
clean_axis
def clean_axis(axis): """Remove ticks, tick labels, and frame from axis""" axis.get_xaxis().set_ticks([]) axis.get_yaxis().set_ticks([]) for spine in list(axis.spines.values()): spine.set_visible(False)
python
def clean_axis(axis): axis.get_xaxis().set_ticks([]) axis.get_yaxis().set_ticks([]) for spine in list(axis.spines.values()): spine.set_visible(False)
[ "def", "clean_axis", "(", "axis", ")", ":", "axis", ".", "get_xaxis", "(", ")", ".", "set_ticks", "(", "[", "]", ")", "axis", ".", "get_yaxis", "(", ")", ".", "set_ticks", "(", "[", "]", ")", "for", "spine", "in", "list", "(", "axis", ".", "spine...
Remove ticks, tick labels, and frame from axis
[ "Remove", "ticks", "tick", "labels", "and", "frame", "from", "axis" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L63-L68
20,226
widdowquinn/pyani
pyani/pyani_graphics.py
get_seaborn_colorbar
def get_seaborn_colorbar(dfr, classes): """Return a colorbar representing classes, for a Seaborn plot. The aim is to get a pd.Series for the passed dataframe columns, in the form: 0 colour for class in col 0 1 colour for class in col 1 ... colour for class in col ... n colour for class in col n """ levels = sorted(list(set(classes.values()))) paldict = { lvl: pal for (lvl, pal) in zip( levels, sns.cubehelix_palette( len(levels), light=0.9, dark=0.1, reverse=True, start=1, rot=-2 ), ) } lvl_pal = {cls: paldict[lvl] for (cls, lvl) in list(classes.items())} col_cb = pd.Series(dfr.index).map(lvl_pal) # The col_cb Series index now has to match the dfr.index, but # we don't create the Series with this (and if we try, it # fails) - so change it with this line col_cb.index = dfr.index return col_cb
python
def get_seaborn_colorbar(dfr, classes): levels = sorted(list(set(classes.values()))) paldict = { lvl: pal for (lvl, pal) in zip( levels, sns.cubehelix_palette( len(levels), light=0.9, dark=0.1, reverse=True, start=1, rot=-2 ), ) } lvl_pal = {cls: paldict[lvl] for (cls, lvl) in list(classes.items())} col_cb = pd.Series(dfr.index).map(lvl_pal) # The col_cb Series index now has to match the dfr.index, but # we don't create the Series with this (and if we try, it # fails) - so change it with this line col_cb.index = dfr.index return col_cb
[ "def", "get_seaborn_colorbar", "(", "dfr", ",", "classes", ")", ":", "levels", "=", "sorted", "(", "list", "(", "set", "(", "classes", ".", "values", "(", ")", ")", ")", ")", "paldict", "=", "{", "lvl", ":", "pal", "for", "(", "lvl", ",", "pal", ...
Return a colorbar representing classes, for a Seaborn plot. The aim is to get a pd.Series for the passed dataframe columns, in the form: 0 colour for class in col 0 1 colour for class in col 1 ... colour for class in col ... n colour for class in col n
[ "Return", "a", "colorbar", "representing", "classes", "for", "a", "Seaborn", "plot", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L72-L98
20,227
widdowquinn/pyani
pyani/pyani_graphics.py
get_safe_seaborn_labels
def get_safe_seaborn_labels(dfr, labels): """Returns labels guaranteed to correspond to the dataframe.""" if labels is not None: return [labels.get(i, i) for i in dfr.index] return [i for i in dfr.index]
python
def get_safe_seaborn_labels(dfr, labels): if labels is not None: return [labels.get(i, i) for i in dfr.index] return [i for i in dfr.index]
[ "def", "get_safe_seaborn_labels", "(", "dfr", ",", "labels", ")", ":", "if", "labels", "is", "not", "None", ":", "return", "[", "labels", ".", "get", "(", "i", ",", "i", ")", "for", "i", "in", "dfr", ".", "index", "]", "return", "[", "i", "for", ...
Returns labels guaranteed to correspond to the dataframe.
[ "Returns", "labels", "guaranteed", "to", "correspond", "to", "the", "dataframe", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L102-L106
20,228
widdowquinn/pyani
pyani/pyani_graphics.py
get_seaborn_clustermap
def get_seaborn_clustermap(dfr, params, title=None, annot=True): """Returns a Seaborn clustermap.""" fig = sns.clustermap( dfr, cmap=params.cmap, vmin=params.vmin, vmax=params.vmax, col_colors=params.colorbar, row_colors=params.colorbar, figsize=(params.figsize, params.figsize), linewidths=params.linewidths, xticklabels=params.labels, yticklabels=params.labels, annot=annot, ) fig.cax.yaxis.set_label_position("left") if title: fig.cax.set_ylabel(title) # Rotate ticklabels fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(), rotation=90) fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(), rotation=0) # Return clustermap return fig
python
def get_seaborn_clustermap(dfr, params, title=None, annot=True): fig = sns.clustermap( dfr, cmap=params.cmap, vmin=params.vmin, vmax=params.vmax, col_colors=params.colorbar, row_colors=params.colorbar, figsize=(params.figsize, params.figsize), linewidths=params.linewidths, xticklabels=params.labels, yticklabels=params.labels, annot=annot, ) fig.cax.yaxis.set_label_position("left") if title: fig.cax.set_ylabel(title) # Rotate ticklabels fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(), rotation=90) fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(), rotation=0) # Return clustermap return fig
[ "def", "get_seaborn_clustermap", "(", "dfr", ",", "params", ",", "title", "=", "None", ",", "annot", "=", "True", ")", ":", "fig", "=", "sns", ".", "clustermap", "(", "dfr", ",", "cmap", "=", "params", ".", "cmap", ",", "vmin", "=", "params", ".", ...
Returns a Seaborn clustermap.
[ "Returns", "a", "Seaborn", "clustermap", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L110-L134
20,229
widdowquinn/pyani
pyani/pyani_graphics.py
heatmap_seaborn
def heatmap_seaborn(dfr, outfilename=None, title=None, params=None): """Returns seaborn heatmap with cluster dendrograms. - dfr - pandas DataFrame with relevant data - outfilename - path to output file (indicates output format) """ # Decide on figure layout size: a minimum size is required for # aesthetics, and a maximum to avoid core dumps on rendering. # If we hit the maximum size, we should modify font size. maxfigsize = 120 calcfigsize = dfr.shape[0] * 1.1 figsize = min(max(8, calcfigsize), maxfigsize) if figsize == maxfigsize: scale = maxfigsize / calcfigsize sns.set_context("notebook", font_scale=scale) # Add a colorbar? if params.classes is None: col_cb = None else: col_cb = get_seaborn_colorbar(dfr, params.classes) # Labels are defined before we build the clustering # If a label mapping is missing, use the key text as fall back params.labels = get_safe_seaborn_labels(dfr, params.labels) # Add attributes to parameter object, and draw heatmap params.colorbar = col_cb params.figsize = figsize params.linewidths = 0.25 fig = get_seaborn_clustermap(dfr, params, title=title) # Save to file if outfilename: fig.savefig(outfilename) # Return clustermap return fig
python
def heatmap_seaborn(dfr, outfilename=None, title=None, params=None): # Decide on figure layout size: a minimum size is required for # aesthetics, and a maximum to avoid core dumps on rendering. # If we hit the maximum size, we should modify font size. maxfigsize = 120 calcfigsize = dfr.shape[0] * 1.1 figsize = min(max(8, calcfigsize), maxfigsize) if figsize == maxfigsize: scale = maxfigsize / calcfigsize sns.set_context("notebook", font_scale=scale) # Add a colorbar? if params.classes is None: col_cb = None else: col_cb = get_seaborn_colorbar(dfr, params.classes) # Labels are defined before we build the clustering # If a label mapping is missing, use the key text as fall back params.labels = get_safe_seaborn_labels(dfr, params.labels) # Add attributes to parameter object, and draw heatmap params.colorbar = col_cb params.figsize = figsize params.linewidths = 0.25 fig = get_seaborn_clustermap(dfr, params, title=title) # Save to file if outfilename: fig.savefig(outfilename) # Return clustermap return fig
[ "def", "heatmap_seaborn", "(", "dfr", ",", "outfilename", "=", "None", ",", "title", "=", "None", ",", "params", "=", "None", ")", ":", "# Decide on figure layout size: a minimum size is required for", "# aesthetics, and a maximum to avoid core dumps on rendering.", "# If we ...
Returns seaborn heatmap with cluster dendrograms. - dfr - pandas DataFrame with relevant data - outfilename - path to output file (indicates output format)
[ "Returns", "seaborn", "heatmap", "with", "cluster", "dendrograms", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L138-L175
20,230
widdowquinn/pyani
pyani/pyani_graphics.py
add_mpl_dendrogram
def add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col"): """Return a dendrogram and corresponding gridspec, attached to the fig Modifies the fig in-place. Orientation is either 'row' or 'col' and determines location and orientation of the rendered dendrogram. """ # Row or column axes? if orientation == "row": dists = distance.squareform(distance.pdist(dfr)) spec = heatmap_gs[1, 0] orient = "left" nrows, ncols = 1, 2 height_ratios = [1] else: # Column dendrogram dists = distance.squareform(distance.pdist(dfr.T)) spec = heatmap_gs[0, 1] orient = "top" nrows, ncols = 2, 1 height_ratios = [1, 0.15] # Create row dendrogram axis gspec = gridspec.GridSpecFromSubplotSpec( nrows, ncols, subplot_spec=spec, wspace=0.0, hspace=0.1, height_ratios=height_ratios, ) dend_axes = fig.add_subplot(gspec[0, 0]) dend = sch.dendrogram( sch.linkage(distance.squareform(dists), method="complete"), color_threshold=np.inf, orientation=orient, ) clean_axis(dend_axes) return {"dendrogram": dend, "gridspec": gspec}
python
def add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col"): # Row or column axes? if orientation == "row": dists = distance.squareform(distance.pdist(dfr)) spec = heatmap_gs[1, 0] orient = "left" nrows, ncols = 1, 2 height_ratios = [1] else: # Column dendrogram dists = distance.squareform(distance.pdist(dfr.T)) spec = heatmap_gs[0, 1] orient = "top" nrows, ncols = 2, 1 height_ratios = [1, 0.15] # Create row dendrogram axis gspec = gridspec.GridSpecFromSubplotSpec( nrows, ncols, subplot_spec=spec, wspace=0.0, hspace=0.1, height_ratios=height_ratios, ) dend_axes = fig.add_subplot(gspec[0, 0]) dend = sch.dendrogram( sch.linkage(distance.squareform(dists), method="complete"), color_threshold=np.inf, orientation=orient, ) clean_axis(dend_axes) return {"dendrogram": dend, "gridspec": gspec}
[ "def", "add_mpl_dendrogram", "(", "dfr", ",", "fig", ",", "heatmap_gs", ",", "orientation", "=", "\"col\"", ")", ":", "# Row or column axes?", "if", "orientation", "==", "\"row\"", ":", "dists", "=", "distance", ".", "squareform", "(", "distance", ".", "pdist"...
Return a dendrogram and corresponding gridspec, attached to the fig Modifies the fig in-place. Orientation is either 'row' or 'col' and determines location and orientation of the rendered dendrogram.
[ "Return", "a", "dendrogram", "and", "corresponding", "gridspec", "attached", "to", "the", "fig" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L179-L215
20,231
widdowquinn/pyani
pyani/pyani_graphics.py
get_mpl_heatmap_axes
def get_mpl_heatmap_axes(dfr, fig, heatmap_gs): """Return axis for Matplotlib heatmap.""" # Create heatmap axis heatmap_axes = fig.add_subplot(heatmap_gs[1, 1]) heatmap_axes.set_xticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0])) heatmap_axes.set_yticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0])) heatmap_axes.grid(False) heatmap_axes.xaxis.tick_bottom() heatmap_axes.yaxis.tick_right() return heatmap_axes
python
def get_mpl_heatmap_axes(dfr, fig, heatmap_gs): # Create heatmap axis heatmap_axes = fig.add_subplot(heatmap_gs[1, 1]) heatmap_axes.set_xticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0])) heatmap_axes.set_yticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0])) heatmap_axes.grid(False) heatmap_axes.xaxis.tick_bottom() heatmap_axes.yaxis.tick_right() return heatmap_axes
[ "def", "get_mpl_heatmap_axes", "(", "dfr", ",", "fig", ",", "heatmap_gs", ")", ":", "# Create heatmap axis", "heatmap_axes", "=", "fig", ".", "add_subplot", "(", "heatmap_gs", "[", "1", ",", "1", "]", ")", "heatmap_axes", ".", "set_xticks", "(", "np", ".", ...
Return axis for Matplotlib heatmap.
[ "Return", "axis", "for", "Matplotlib", "heatmap", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L219-L228
20,232
widdowquinn/pyani
pyani/pyani_graphics.py
add_mpl_colorbar
def add_mpl_colorbar(dfr, fig, dend, params, orientation="row"): """Add class colorbars to Matplotlib heatmap.""" for name in dfr.index[dend["dendrogram"]["leaves"]]: if name not in params.classes: params.classes[name] = name # Assign a numerical value to each class, for mpl classdict = {cls: idx for (idx, cls) in enumerate(params.classes.values())} # colourbar cblist = [] for name in dfr.index[dend["dendrogram"]["leaves"]]: try: cblist.append(classdict[params.classes[name]]) except KeyError: cblist.append(classdict[name]) colbar = pd.Series(cblist) # Create colourbar axis - could capture if needed if orientation == "row": cbaxes = fig.add_subplot(dend["gridspec"][0, 1]) cbaxes.imshow( [[cbar] for cbar in colbar.values], cmap=plt.get_cmap(pyani_config.MPL_CBAR), interpolation="nearest", aspect="auto", origin="lower", ) else: cbaxes = fig.add_subplot(dend["gridspec"][1, 0]) cbaxes.imshow( [colbar], cmap=plt.get_cmap(pyani_config.MPL_CBAR), interpolation="nearest", aspect="auto", origin="lower", ) clean_axis(cbaxes) return colbar
python
def add_mpl_colorbar(dfr, fig, dend, params, orientation="row"): for name in dfr.index[dend["dendrogram"]["leaves"]]: if name not in params.classes: params.classes[name] = name # Assign a numerical value to each class, for mpl classdict = {cls: idx for (idx, cls) in enumerate(params.classes.values())} # colourbar cblist = [] for name in dfr.index[dend["dendrogram"]["leaves"]]: try: cblist.append(classdict[params.classes[name]]) except KeyError: cblist.append(classdict[name]) colbar = pd.Series(cblist) # Create colourbar axis - could capture if needed if orientation == "row": cbaxes = fig.add_subplot(dend["gridspec"][0, 1]) cbaxes.imshow( [[cbar] for cbar in colbar.values], cmap=plt.get_cmap(pyani_config.MPL_CBAR), interpolation="nearest", aspect="auto", origin="lower", ) else: cbaxes = fig.add_subplot(dend["gridspec"][1, 0]) cbaxes.imshow( [colbar], cmap=plt.get_cmap(pyani_config.MPL_CBAR), interpolation="nearest", aspect="auto", origin="lower", ) clean_axis(cbaxes) return colbar
[ "def", "add_mpl_colorbar", "(", "dfr", ",", "fig", ",", "dend", ",", "params", ",", "orientation", "=", "\"row\"", ")", ":", "for", "name", "in", "dfr", ".", "index", "[", "dend", "[", "\"dendrogram\"", "]", "[", "\"leaves\"", "]", "]", ":", "if", "n...
Add class colorbars to Matplotlib heatmap.
[ "Add", "class", "colorbars", "to", "Matplotlib", "heatmap", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L231-L269
20,233
widdowquinn/pyani
pyani/pyani_graphics.py
add_mpl_labels
def add_mpl_labels(heatmap_axes, rowlabels, collabels, params): """Add labels to Matplotlib heatmap axes, in-place.""" if params.labels: # If a label mapping is missing, use the key text as fall back rowlabels = [params.labels.get(lab, lab) for lab in rowlabels] collabels = [params.labels.get(lab, lab) for lab in collabels] xlabs = heatmap_axes.set_xticklabels(collabels) ylabs = heatmap_axes.set_yticklabels(rowlabels) for label in xlabs: # Rotate column labels label.set_rotation(90) for labset in (xlabs, ylabs): # Smaller font for label in labset: label.set_fontsize(8)
python
def add_mpl_labels(heatmap_axes, rowlabels, collabels, params): if params.labels: # If a label mapping is missing, use the key text as fall back rowlabels = [params.labels.get(lab, lab) for lab in rowlabels] collabels = [params.labels.get(lab, lab) for lab in collabels] xlabs = heatmap_axes.set_xticklabels(collabels) ylabs = heatmap_axes.set_yticklabels(rowlabels) for label in xlabs: # Rotate column labels label.set_rotation(90) for labset in (xlabs, ylabs): # Smaller font for label in labset: label.set_fontsize(8)
[ "def", "add_mpl_labels", "(", "heatmap_axes", ",", "rowlabels", ",", "collabels", ",", "params", ")", ":", "if", "params", ".", "labels", ":", "# If a label mapping is missing, use the key text as fall back", "rowlabels", "=", "[", "params", ".", "labels", ".", "get...
Add labels to Matplotlib heatmap axes, in-place.
[ "Add", "labels", "to", "Matplotlib", "heatmap", "axes", "in", "-", "place", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L273-L285
20,234
widdowquinn/pyani
pyani/pyani_graphics.py
add_mpl_colorscale
def add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title=None): """Add colour scale to heatmap.""" # Set tick intervals cbticks = [params.vmin + e * params.vdiff for e in (0, 0.25, 0.5, 0.75, 1)] if params.vmax > 10: exponent = int(floor(log10(params.vmax))) - 1 cbticks = [int(round(e, -exponent)) for e in cbticks] scale_subplot = gridspec.GridSpecFromSubplotSpec( 1, 3, subplot_spec=heatmap_gs[0, 0], wspace=0.0, hspace=0.0 ) scale_ax = fig.add_subplot(scale_subplot[0, 1]) cbar = fig.colorbar(ax_map, scale_ax, ticks=cbticks) if title: cbar.set_label(title, fontsize=6) cbar.ax.yaxis.set_ticks_position("left") cbar.ax.yaxis.set_label_position("left") cbar.ax.tick_params(labelsize=6) cbar.outline.set_linewidth(0) return cbar
python
def add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title=None): # Set tick intervals cbticks = [params.vmin + e * params.vdiff for e in (0, 0.25, 0.5, 0.75, 1)] if params.vmax > 10: exponent = int(floor(log10(params.vmax))) - 1 cbticks = [int(round(e, -exponent)) for e in cbticks] scale_subplot = gridspec.GridSpecFromSubplotSpec( 1, 3, subplot_spec=heatmap_gs[0, 0], wspace=0.0, hspace=0.0 ) scale_ax = fig.add_subplot(scale_subplot[0, 1]) cbar = fig.colorbar(ax_map, scale_ax, ticks=cbticks) if title: cbar.set_label(title, fontsize=6) cbar.ax.yaxis.set_ticks_position("left") cbar.ax.yaxis.set_label_position("left") cbar.ax.tick_params(labelsize=6) cbar.outline.set_linewidth(0) return cbar
[ "def", "add_mpl_colorscale", "(", "fig", ",", "heatmap_gs", ",", "ax_map", ",", "params", ",", "title", "=", "None", ")", ":", "# Set tick intervals", "cbticks", "=", "[", "params", ".", "vmin", "+", "e", "*", "params", ".", "vdiff", "for", "e", "in", ...
Add colour scale to heatmap.
[ "Add", "colour", "scale", "to", "heatmap", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L289-L308
20,235
widdowquinn/pyani
pyani/pyani_graphics.py
heatmap_mpl
def heatmap_mpl(dfr, outfilename=None, title=None, params=None): """Returns matplotlib heatmap with cluster dendrograms. - dfr - pandas DataFrame with relevant data - outfilename - path to output file (indicates output format) - params - a list of parameters for plotting: [colormap, vmin, vmax] - labels - dictionary of alternative labels, keyed by default sequence labels - classes - dictionary of sequence classes, keyed by default sequence labels """ # Layout figure grid and add title # Set figure size by the number of rows in the dataframe figsize = max(8, dfr.shape[0] * 0.175) fig = plt.figure(figsize=(figsize, figsize)) # if title: # fig.suptitle(title) heatmap_gs = gridspec.GridSpec( 2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.3, 1], height_ratios=[0.3, 1] ) # Add column and row dendrograms/axes to figure coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col") rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row") # Add heatmap axes to figure, with rows/columns as in the dendrograms heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs) ax_map = heatmap_axes.imshow( dfr.iloc[rowdend["dendrogram"]["leaves"], coldend["dendrogram"]["leaves"]], interpolation="nearest", cmap=params.cmap, origin="lower", vmin=params.vmin, vmax=params.vmax, aspect="auto", ) # Are there class colourbars to add? if params.classes is not None: add_mpl_colorbar(dfr, fig, coldend, params, orientation="col") add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row") # Add heatmap labels add_mpl_labels( heatmap_axes, dfr.index[rowdend["dendrogram"]["leaves"]], dfr.index[coldend["dendrogram"]["leaves"]], params, ) # Add colour scale add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title) # Return figure output, and write, if required plt.subplots_adjust(top=0.85) # Leave room for title # fig.set_tight_layout(True) # We know that there is a UserWarning here about tight_layout and # using the Agg renderer on OSX, so catch and ignore it, for cleanliness. 
with warnings.catch_warnings(): warnings.simplefilter("ignore") heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5) if outfilename: fig.savefig(outfilename) return fig
python
def heatmap_mpl(dfr, outfilename=None, title=None, params=None): # Layout figure grid and add title # Set figure size by the number of rows in the dataframe figsize = max(8, dfr.shape[0] * 0.175) fig = plt.figure(figsize=(figsize, figsize)) # if title: # fig.suptitle(title) heatmap_gs = gridspec.GridSpec( 2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.3, 1], height_ratios=[0.3, 1] ) # Add column and row dendrograms/axes to figure coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col") rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row") # Add heatmap axes to figure, with rows/columns as in the dendrograms heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs) ax_map = heatmap_axes.imshow( dfr.iloc[rowdend["dendrogram"]["leaves"], coldend["dendrogram"]["leaves"]], interpolation="nearest", cmap=params.cmap, origin="lower", vmin=params.vmin, vmax=params.vmax, aspect="auto", ) # Are there class colourbars to add? if params.classes is not None: add_mpl_colorbar(dfr, fig, coldend, params, orientation="col") add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row") # Add heatmap labels add_mpl_labels( heatmap_axes, dfr.index[rowdend["dendrogram"]["leaves"]], dfr.index[coldend["dendrogram"]["leaves"]], params, ) # Add colour scale add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title) # Return figure output, and write, if required plt.subplots_adjust(top=0.85) # Leave room for title # fig.set_tight_layout(True) # We know that there is a UserWarning here about tight_layout and # using the Agg renderer on OSX, so catch and ignore it, for cleanliness. with warnings.catch_warnings(): warnings.simplefilter("ignore") heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5) if outfilename: fig.savefig(outfilename) return fig
[ "def", "heatmap_mpl", "(", "dfr", ",", "outfilename", "=", "None", ",", "title", "=", "None", ",", "params", "=", "None", ")", ":", "# Layout figure grid and add title", "# Set figure size by the number of rows in the dataframe", "figsize", "=", "max", "(", "8", ","...
Returns matplotlib heatmap with cluster dendrograms. - dfr - pandas DataFrame with relevant data - outfilename - path to output file (indicates output format) - params - a list of parameters for plotting: [colormap, vmin, vmax] - labels - dictionary of alternative labels, keyed by default sequence labels - classes - dictionary of sequence classes, keyed by default sequence labels
[ "Returns", "matplotlib", "heatmap", "with", "cluster", "dendrograms", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L312-L375
20,236
widdowquinn/pyani
pyani/run_multiprocessing.py
run_dependency_graph
def run_dependency_graph(jobgraph, workers=None, logger=None): """Creates and runs pools of jobs based on the passed jobgraph. - jobgraph - list of jobs, which may have dependencies. - verbose - flag for multiprocessing verbosity - logger - a logger module logger (optional) The strategy here is to loop over each job in the list of jobs (jobgraph), and create/populate a series of Sets of commands, to be run in reverse order with multiprocessing_run as asynchronous pools. """ cmdsets = [] for job in jobgraph: cmdsets = populate_cmdsets(job, cmdsets, depth=1) # Put command sets in reverse order, and submit to multiprocessing_run cmdsets.reverse() cumretval = 0 for cmdset in cmdsets: if logger: # Try to be informative, if the logger module is being used logger.info("Command pool now running:") for cmd in cmdset: logger.info(cmd) cumretval += multiprocessing_run(cmdset, workers) if logger: # Try to be informative, if the logger module is being used logger.info("Command pool done.") return cumretval
python
def run_dependency_graph(jobgraph, workers=None, logger=None): cmdsets = [] for job in jobgraph: cmdsets = populate_cmdsets(job, cmdsets, depth=1) # Put command sets in reverse order, and submit to multiprocessing_run cmdsets.reverse() cumretval = 0 for cmdset in cmdsets: if logger: # Try to be informative, if the logger module is being used logger.info("Command pool now running:") for cmd in cmdset: logger.info(cmd) cumretval += multiprocessing_run(cmdset, workers) if logger: # Try to be informative, if the logger module is being used logger.info("Command pool done.") return cumretval
[ "def", "run_dependency_graph", "(", "jobgraph", ",", "workers", "=", "None", ",", "logger", "=", "None", ")", ":", "cmdsets", "=", "[", "]", "for", "job", "in", "jobgraph", ":", "cmdsets", "=", "populate_cmdsets", "(", "job", ",", "cmdsets", ",", "depth"...
Creates and runs pools of jobs based on the passed jobgraph. - jobgraph - list of jobs, which may have dependencies. - verbose - flag for multiprocessing verbosity - logger - a logger module logger (optional) The strategy here is to loop over each job in the list of jobs (jobgraph), and create/populate a series of Sets of commands, to be run in reverse order with multiprocessing_run as asynchronous pools.
[ "Creates", "and", "runs", "pools", "of", "jobs", "based", "on", "the", "passed", "jobgraph", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_multiprocessing.py#L22-L48
20,237
widdowquinn/pyani
pyani/run_multiprocessing.py
populate_cmdsets
def populate_cmdsets(job, cmdsets, depth): """Creates a list of sets containing jobs at different depths of the dependency tree. This is a recursive function (is there something quicker in the itertools module?) that descends each 'root' job in turn, populating each """ if len(cmdsets) < depth: cmdsets.append(set()) cmdsets[depth-1].add(job.command) if len(job.dependencies) == 0: return cmdsets for j in job.dependencies: cmdsets = populate_cmdsets(j, cmdsets, depth+1) return cmdsets
python
def populate_cmdsets(job, cmdsets, depth): if len(cmdsets) < depth: cmdsets.append(set()) cmdsets[depth-1].add(job.command) if len(job.dependencies) == 0: return cmdsets for j in job.dependencies: cmdsets = populate_cmdsets(j, cmdsets, depth+1) return cmdsets
[ "def", "populate_cmdsets", "(", "job", ",", "cmdsets", ",", "depth", ")", ":", "if", "len", "(", "cmdsets", ")", "<", "depth", ":", "cmdsets", ".", "append", "(", "set", "(", ")", ")", "cmdsets", "[", "depth", "-", "1", "]", ".", "add", "(", "job...
Creates a list of sets containing jobs at different depths of the dependency tree. This is a recursive function (is there something quicker in the itertools module?) that descends each 'root' job in turn, populating each
[ "Creates", "a", "list", "of", "sets", "containing", "jobs", "at", "different", "depths", "of", "the", "dependency", "tree", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_multiprocessing.py#L51-L65
20,238
widdowquinn/pyani
pyani/run_multiprocessing.py
multiprocessing_run
def multiprocessing_run(cmdlines, workers=None): """Distributes passed command-line jobs using multiprocessing. - cmdlines - an iterable of command line strings Returns the sum of exit codes from each job that was run. If all goes well, this should be 0. Anything else and the calling function should act accordingly. """ # Run jobs # If workers is None or greater than the number of cores available, # it will be set to the maximum number of cores pool = multiprocessing.Pool(processes=workers) results = [pool.apply_async(subprocess.run, (str(cline), ), {'shell': sys.platform != "win32", 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}) for cline in cmdlines] pool.close() pool.join() return sum([r.get().returncode for r in results])
python
def multiprocessing_run(cmdlines, workers=None): # Run jobs # If workers is None or greater than the number of cores available, # it will be set to the maximum number of cores pool = multiprocessing.Pool(processes=workers) results = [pool.apply_async(subprocess.run, (str(cline), ), {'shell': sys.platform != "win32", 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}) for cline in cmdlines] pool.close() pool.join() return sum([r.get().returncode for r in results])
[ "def", "multiprocessing_run", "(", "cmdlines", ",", "workers", "=", "None", ")", ":", "# Run jobs", "# If workers is None or greater than the number of cores available,", "# it will be set to the maximum number of cores", "pool", "=", "multiprocessing", ".", "Pool", "(", "proce...
Distributes passed command-line jobs using multiprocessing. - cmdlines - an iterable of command line strings Returns the sum of exit codes from each job that was run. If all goes well, this should be 0. Anything else and the calling function should act accordingly.
[ "Distributes", "passed", "command", "-", "line", "jobs", "using", "multiprocessing", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_multiprocessing.py#L69-L89
20,239
widdowquinn/pyani
pyani/pyani_files.py
get_input_files
def get_input_files(dirname, *ext): """Returns files in passed directory, filtered by extension. - dirname - path to input directory - *ext - list of arguments describing permitted file extensions """ filelist = [f for f in os.listdir(dirname) if os.path.splitext(f)[-1] in ext] return [os.path.join(dirname, f) for f in filelist]
python
def get_input_files(dirname, *ext): filelist = [f for f in os.listdir(dirname) if os.path.splitext(f)[-1] in ext] return [os.path.join(dirname, f) for f in filelist]
[ "def", "get_input_files", "(", "dirname", ",", "*", "ext", ")", ":", "filelist", "=", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "dirname", ")", "if", "os", ".", "path", ".", "splitext", "(", "f", ")", "[", "-", "1", "]", "in", "ext...
Returns files in passed directory, filtered by extension. - dirname - path to input directory - *ext - list of arguments describing permitted file extensions
[ "Returns", "files", "in", "passed", "directory", "filtered", "by", "extension", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_files.py#L27-L35
20,240
widdowquinn/pyani
pyani/pyani_files.py
get_sequence_lengths
def get_sequence_lengths(fastafilenames): """Returns dictionary of sequence lengths, keyed by organism. Biopython's SeqIO module is used to parse all sequences in the FASTA file corresponding to each organism, and the total base count in each is obtained. NOTE: ambiguity symbols are not discounted. """ tot_lengths = {} for fn in fastafilenames: tot_lengths[os.path.splitext(os.path.split(fn)[-1])[0]] = \ sum([len(s) for s in SeqIO.parse(fn, 'fasta')]) return tot_lengths
python
def get_sequence_lengths(fastafilenames): tot_lengths = {} for fn in fastafilenames: tot_lengths[os.path.splitext(os.path.split(fn)[-1])[0]] = \ sum([len(s) for s in SeqIO.parse(fn, 'fasta')]) return tot_lengths
[ "def", "get_sequence_lengths", "(", "fastafilenames", ")", ":", "tot_lengths", "=", "{", "}", "for", "fn", "in", "fastafilenames", ":", "tot_lengths", "[", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "fn", ")", "[", "...
Returns dictionary of sequence lengths, keyed by organism. Biopython's SeqIO module is used to parse all sequences in the FASTA file corresponding to each organism, and the total base count in each is obtained. NOTE: ambiguity symbols are not discounted.
[ "Returns", "dictionary", "of", "sequence", "lengths", "keyed", "by", "organism", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_files.py#L39-L52
20,241
widdowquinn/pyani
bin/average_nucleotide_identity.py
last_exception
def last_exception(): """ Returns last exception as a string, or use in logging. """ exc_type, exc_value, exc_traceback = sys.exc_info() return "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
python
def last_exception(): exc_type, exc_value, exc_traceback = sys.exc_info() return "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
[ "def", "last_exception", "(", ")", ":", "exc_type", ",", "exc_value", ",", "exc_traceback", "=", "sys", ".", "exc_info", "(", ")", "return", "\"\"", ".", "join", "(", "traceback", ".", "format_exception", "(", "exc_type", ",", "exc_value", ",", "exc_tracebac...
Returns last exception as a string, or use in logging.
[ "Returns", "last", "exception", "as", "a", "string", "or", "use", "in", "logging", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L439-L443
20,242
widdowquinn/pyani
bin/average_nucleotide_identity.py
make_outdir
def make_outdir(): """Make the output directory, if required. This is a little involved. If the output directory already exists, we take the safe option by default, and stop with an error. We can, however, choose to force the program to go on, in which case we can either clobber the existing directory, or not. The options turn out as the following, if the directory exists: DEFAULT: stop and report the collision FORCE: continue, and remove the existing output directory NOCLOBBER+FORCE: continue, but do not remove the existing output """ if os.path.exists(args.outdirname): if not args.force: logger.error( "Output directory %s would overwrite existing " + "files (exiting)", args.outdirname, ) sys.exit(1) elif args.noclobber: logger.warning( "NOCLOBBER: not actually deleting directory %s", args.outdirname ) else: logger.info( "Removing directory %s and everything below it", args.outdirname ) shutil.rmtree(args.outdirname) logger.info("Creating directory %s", args.outdirname) try: os.makedirs(args.outdirname) # We make the directory recursively # Depending on the choice of method, a subdirectory will be made for # alignment output files if args.method != "TETRA": os.makedirs(os.path.join(args.outdirname, ALIGNDIR[args.method])) except OSError: # This gets thrown if the directory exists. If we've forced overwrite/ # delete and we're not clobbering, we let things slide if args.noclobber and args.force: logger.info("NOCLOBBER+FORCE: not creating directory") else: logger.error(last_exception) sys.exit(1)
python
def make_outdir(): if os.path.exists(args.outdirname): if not args.force: logger.error( "Output directory %s would overwrite existing " + "files (exiting)", args.outdirname, ) sys.exit(1) elif args.noclobber: logger.warning( "NOCLOBBER: not actually deleting directory %s", args.outdirname ) else: logger.info( "Removing directory %s and everything below it", args.outdirname ) shutil.rmtree(args.outdirname) logger.info("Creating directory %s", args.outdirname) try: os.makedirs(args.outdirname) # We make the directory recursively # Depending on the choice of method, a subdirectory will be made for # alignment output files if args.method != "TETRA": os.makedirs(os.path.join(args.outdirname, ALIGNDIR[args.method])) except OSError: # This gets thrown if the directory exists. If we've forced overwrite/ # delete and we're not clobbering, we let things slide if args.noclobber and args.force: logger.info("NOCLOBBER+FORCE: not creating directory") else: logger.error(last_exception) sys.exit(1)
[ "def", "make_outdir", "(", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "args", ".", "outdirname", ")", ":", "if", "not", "args", ".", "force", ":", "logger", ".", "error", "(", "\"Output directory %s would overwrite existing \"", "+", "\"files (ex...
Make the output directory, if required. This is a little involved. If the output directory already exists, we take the safe option by default, and stop with an error. We can, however, choose to force the program to go on, in which case we can either clobber the existing directory, or not. The options turn out as the following, if the directory exists: DEFAULT: stop and report the collision FORCE: continue, and remove the existing output directory NOCLOBBER+FORCE: continue, but do not remove the existing output
[ "Make", "the", "output", "directory", "if", "required", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L447-L490
20,243
widdowquinn/pyani
bin/average_nucleotide_identity.py
compress_delete_outdir
def compress_delete_outdir(outdir): """Compress the contents of the passed directory to .tar.gz and delete.""" # Compress output in .tar.gz file and remove raw output tarfn = outdir + ".tar.gz" logger.info("\tCompressing output from %s to %s", outdir, tarfn) with tarfile.open(tarfn, "w:gz") as fh: fh.add(outdir) logger.info("\tRemoving output directory %s", outdir) shutil.rmtree(outdir)
python
def compress_delete_outdir(outdir): # Compress output in .tar.gz file and remove raw output tarfn = outdir + ".tar.gz" logger.info("\tCompressing output from %s to %s", outdir, tarfn) with tarfile.open(tarfn, "w:gz") as fh: fh.add(outdir) logger.info("\tRemoving output directory %s", outdir) shutil.rmtree(outdir)
[ "def", "compress_delete_outdir", "(", "outdir", ")", ":", "# Compress output in .tar.gz file and remove raw output", "tarfn", "=", "outdir", "+", "\".tar.gz\"", "logger", ".", "info", "(", "\"\\tCompressing output from %s to %s\"", ",", "outdir", ",", "tarfn", ")", "with"...
Compress the contents of the passed directory to .tar.gz and delete.
[ "Compress", "the", "contents", "of", "the", "passed", "directory", "to", ".", "tar", ".", "gz", "and", "delete", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L494-L502
20,244
widdowquinn/pyani
bin/average_nucleotide_identity.py
calculate_anim
def calculate_anim(infiles, org_lengths): """Returns ANIm result dataframes for files in input directory. - infiles - paths to each input file - org_lengths - dictionary of input sequence lengths, keyed by sequence Finds ANI by the ANIm method, as described in Richter et al (2009) Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106. All FASTA format files (selected by suffix) in the input directory are compared against each other, pairwise, using NUCmer (which must be in the path). NUCmer output is stored in the output directory. The NUCmer .delta file output is parsed to obtain an alignment length and similarity error count for every unique region alignment between the two organisms, as represented by the sequences in the FASTA files. These are processed to give matrices of aligned sequence lengths, average nucleotide identity (ANI) percentages, coverage (aligned percentage of whole genome), and similarity error cound for each pairwise comparison. """ logger.info("Running ANIm") logger.info("Generating NUCmer command-lines") deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"]) logger.info("Writing nucmer output to %s", deltadir) # Schedule NUCmer runs if not args.skip_nucmer: joblist = anim.generate_nucmer_jobs( infiles, args.outdirname, nucmer_exe=args.nucmer_exe, filter_exe=args.filter_exe, maxmatch=args.maxmatch, jobprefix=args.jobprefix, ) if args.scheduler == "multiprocessing": logger.info("Running jobs with multiprocessing") if args.workers is None: logger.info("(using maximum number of available " + "worker threads)") else: logger.info("(using %d worker threads, if available)", args.workers) cumval = run_mp.run_dependency_graph( joblist, workers=args.workers, logger=logger ) logger.info("Cumulative return value: %d", cumval) if 0 < cumval: logger.warning( "At least one NUCmer comparison failed. " + "ANIm may fail." 
) else: logger.info("All multiprocessing jobs complete.") else: logger.info("Running jobs with SGE") logger.info("Jobarray group size set to %d", args.sgegroupsize) run_sge.run_dependency_graph( joblist, logger=logger, jgprefix=args.jobprefix, sgegroupsize=args.sgegroupsize, sgeargs=args.sgeargs, ) else: logger.warning("Skipping NUCmer run (as instructed)!") # Process resulting .delta files logger.info("Processing NUCmer .delta files.") results = anim.process_deltadir(deltadir, org_lengths, logger=logger) if results.zero_error: # zero percentage identity error if not args.skip_nucmer and args.scheduler == "multiprocessing": if 0 < cumval: logger.error( "This has possibly been a NUCmer run failure, " + "please investigate" ) logger.error(last_exception()) sys.exit(1) else: logger.error( "This is possibly due to a NUCmer comparison " + "being too distant for use. Please consider " + "using the --maxmatch option." ) logger.error( "This is alternatively due to NUCmer run " + "failure, analysis will continue, but please " + "investigate." ) if not args.nocompress: logger.info("Compressing/deleting %s", deltadir) compress_delete_outdir(deltadir) # Return processed data from .delta files return results
python
def calculate_anim(infiles, org_lengths): logger.info("Running ANIm") logger.info("Generating NUCmer command-lines") deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"]) logger.info("Writing nucmer output to %s", deltadir) # Schedule NUCmer runs if not args.skip_nucmer: joblist = anim.generate_nucmer_jobs( infiles, args.outdirname, nucmer_exe=args.nucmer_exe, filter_exe=args.filter_exe, maxmatch=args.maxmatch, jobprefix=args.jobprefix, ) if args.scheduler == "multiprocessing": logger.info("Running jobs with multiprocessing") if args.workers is None: logger.info("(using maximum number of available " + "worker threads)") else: logger.info("(using %d worker threads, if available)", args.workers) cumval = run_mp.run_dependency_graph( joblist, workers=args.workers, logger=logger ) logger.info("Cumulative return value: %d", cumval) if 0 < cumval: logger.warning( "At least one NUCmer comparison failed. " + "ANIm may fail." ) else: logger.info("All multiprocessing jobs complete.") else: logger.info("Running jobs with SGE") logger.info("Jobarray group size set to %d", args.sgegroupsize) run_sge.run_dependency_graph( joblist, logger=logger, jgprefix=args.jobprefix, sgegroupsize=args.sgegroupsize, sgeargs=args.sgeargs, ) else: logger.warning("Skipping NUCmer run (as instructed)!") # Process resulting .delta files logger.info("Processing NUCmer .delta files.") results = anim.process_deltadir(deltadir, org_lengths, logger=logger) if results.zero_error: # zero percentage identity error if not args.skip_nucmer and args.scheduler == "multiprocessing": if 0 < cumval: logger.error( "This has possibly been a NUCmer run failure, " + "please investigate" ) logger.error(last_exception()) sys.exit(1) else: logger.error( "This is possibly due to a NUCmer comparison " + "being too distant for use. Please consider " + "using the --maxmatch option." ) logger.error( "This is alternatively due to NUCmer run " + "failure, analysis will continue, but please " + "investigate." 
) if not args.nocompress: logger.info("Compressing/deleting %s", deltadir) compress_delete_outdir(deltadir) # Return processed data from .delta files return results
[ "def", "calculate_anim", "(", "infiles", ",", "org_lengths", ")", ":", "logger", ".", "info", "(", "\"Running ANIm\"", ")", "logger", ".", "info", "(", "\"Generating NUCmer command-lines\"", ")", "deltadir", "=", "os", ".", "path", ".", "join", "(", "args", ...
Returns ANIm result dataframes for files in input directory. - infiles - paths to each input file - org_lengths - dictionary of input sequence lengths, keyed by sequence Finds ANI by the ANIm method, as described in Richter et al (2009) Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106. All FASTA format files (selected by suffix) in the input directory are compared against each other, pairwise, using NUCmer (which must be in the path). NUCmer output is stored in the output directory. The NUCmer .delta file output is parsed to obtain an alignment length and similarity error count for every unique region alignment between the two organisms, as represented by the sequences in the FASTA files. These are processed to give matrices of aligned sequence lengths, average nucleotide identity (ANI) percentages, coverage (aligned percentage of whole genome), and similarity error cound for each pairwise comparison.
[ "Returns", "ANIm", "result", "dataframes", "for", "files", "in", "input", "directory", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L506-L599
20,245
widdowquinn/pyani
bin/average_nucleotide_identity.py
calculate_tetra
def calculate_tetra(infiles): """Calculate TETRA for files in input directory. - infiles - paths to each input file - org_lengths - dictionary of input sequence lengths, keyed by sequence Calculates TETRA correlation scores, as described in: Richter M, Rossello-Mora R (2009) Shifting the genomic gold standard for the prokaryotic species definition. Proc Natl Acad Sci USA 106: 19126-19131. doi:10.1073/pnas.0906412106. and Teeling et al. (2004) Application of tetranucleotide frequencies for the assignment of genomic fragments. Env. Microbiol. 6(9): 938-947. doi:10.1111/j.1462-2920.2004.00624.x """ logger.info("Running TETRA.") # First, find Z-scores logger.info("Calculating TETRA Z-scores for each sequence.") tetra_zscores = {} for filename in infiles: logger.info("Calculating TETRA Z-scores for %s", filename) org = os.path.splitext(os.path.split(filename)[-1])[0] tetra_zscores[org] = tetra.calculate_tetra_zscore(filename) # Then calculate Pearson correlation between Z-scores for each sequence logger.info("Calculating TETRA correlation scores.") tetra_correlations = tetra.calculate_correlations(tetra_zscores) return tetra_correlations
python
def calculate_tetra(infiles): logger.info("Running TETRA.") # First, find Z-scores logger.info("Calculating TETRA Z-scores for each sequence.") tetra_zscores = {} for filename in infiles: logger.info("Calculating TETRA Z-scores for %s", filename) org = os.path.splitext(os.path.split(filename)[-1])[0] tetra_zscores[org] = tetra.calculate_tetra_zscore(filename) # Then calculate Pearson correlation between Z-scores for each sequence logger.info("Calculating TETRA correlation scores.") tetra_correlations = tetra.calculate_correlations(tetra_zscores) return tetra_correlations
[ "def", "calculate_tetra", "(", "infiles", ")", ":", "logger", ".", "info", "(", "\"Running TETRA.\"", ")", "# First, find Z-scores", "logger", ".", "info", "(", "\"Calculating TETRA Z-scores for each sequence.\"", ")", "tetra_zscores", "=", "{", "}", "for", "filename"...
Calculate TETRA for files in input directory. - infiles - paths to each input file - org_lengths - dictionary of input sequence lengths, keyed by sequence Calculates TETRA correlation scores, as described in: Richter M, Rossello-Mora R (2009) Shifting the genomic gold standard for the prokaryotic species definition. Proc Natl Acad Sci USA 106: 19126-19131. doi:10.1073/pnas.0906412106. and Teeling et al. (2004) Application of tetranucleotide frequencies for the assignment of genomic fragments. Env. Microbiol. 6(9): 938-947. doi:10.1111/j.1462-2920.2004.00624.x
[ "Calculate", "TETRA", "for", "files", "in", "input", "directory", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L603-L632
20,246
widdowquinn/pyani
bin/average_nucleotide_identity.py
unified_anib
def unified_anib(infiles, org_lengths): """Calculate ANIb for files in input directory. - infiles - paths to each input file - org_lengths - dictionary of input sequence lengths, keyed by sequence Calculates ANI by the ANIb method, as described in Goris et al. (2007) Int J Syst Evol Micr 57: 81-91. doi:10.1099/ijs.0.64483-0. There are some minor differences depending on whether BLAST+ or legacy BLAST (BLASTALL) methods are used. All FASTA format files (selected by suffix) in the input directory are used to construct BLAST databases, placed in the output directory. Each file's contents are also split into sequence fragments of length options.fragsize, and the multiple FASTA file that results written to the output directory. These are BLASTNed, pairwise, against the databases. The BLAST output is interrogated for all fragment matches that cover at least 70% of the query sequence, with at least 30% nucleotide identity over the full length of the query sequence. This is an odd choice and doesn't correspond to the twilight zone limit as implied by Goris et al. We persist with their definition, however. Only these qualifying matches contribute to the total aligned length, and total aligned sequence identity used to calculate ANI. The results are processed to give matrices of aligned sequence length (aln_lengths.tab), similarity error counts (sim_errors.tab), ANIs (perc_ids.tab), and minimum aligned percentage (perc_aln.tab) of each genome, for each pairwise comparison. These are written to the output directory in plain text tab-separated format. 
""" logger.info("Running %s", args.method) blastdir = os.path.join(args.outdirname, ALIGNDIR[args.method]) logger.info("Writing BLAST output to %s", blastdir) # Build BLAST databases and run pairwise BLASTN if not args.skip_blastn: # Make sequence fragments logger.info("Fragmenting input files, and writing to %s", args.outdirname) # Fraglengths does not get reused with BLASTN fragfiles, fraglengths = anib.fragment_fasta_files( infiles, blastdir, args.fragsize ) # Export fragment lengths as JSON, in case we re-run with --skip_blastn with open(os.path.join(blastdir, "fraglengths.json"), "w") as outfile: json.dump(fraglengths, outfile) # Which executables are we using? # if args.method == "ANIblastall": # format_exe = args.formatdb_exe # blast_exe = args.blastall_exe # else: # format_exe = args.makeblastdb_exe # blast_exe = args.blastn_exe # Run BLAST database-building and executables from a jobgraph logger.info("Creating job dependency graph") jobgraph = anib.make_job_graph( infiles, fragfiles, anib.make_blastcmd_builder(args.method, blastdir) ) # jobgraph = anib.make_job_graph(infiles, fragfiles, blastdir, # format_exe, blast_exe, args.method, # jobprefix=args.jobprefix) if args.scheduler == "multiprocessing": logger.info("Running jobs with multiprocessing") logger.info("Running job dependency graph") if args.workers is None: logger.info("(using maximum number of available " + "worker threads)") else: logger.info("(using %d worker threads, if available)", args.workers) cumval = run_mp.run_dependency_graph( jobgraph, workers=args.workers, logger=logger ) if 0 < cumval: logger.warning( "At least one BLAST run failed. 
" + "%s may fail.", args.method ) else: logger.info("All multiprocessing jobs complete.") else: run_sge.run_dependency_graph(jobgraph, logger=logger) logger.info("Running jobs with SGE") else: # Import fragment lengths from JSON if args.method == "ANIblastall": with open(os.path.join(blastdir, "fraglengths.json"), "rU") as infile: fraglengths = json.load(infile) else: fraglengths = None logger.warning("Skipping BLASTN runs (as instructed)!") # Process pairwise BLASTN output logger.info("Processing pairwise %s BLAST output.", args.method) try: data = anib.process_blast( blastdir, org_lengths, fraglengths=fraglengths, mode=args.method ) except ZeroDivisionError: logger.error("One or more BLAST output files has a problem.") if not args.skip_blastn: if 0 < cumval: logger.error( "This is possibly due to BLASTN run failure, " + "please investigate" ) else: logger.error( "This is possibly due to a BLASTN comparison " + "being too distant for use." ) logger.error(last_exception()) if not args.nocompress: logger.info("Compressing/deleting %s", blastdir) compress_delete_outdir(blastdir) # Return processed BLAST data return data
python
def unified_anib(infiles, org_lengths): logger.info("Running %s", args.method) blastdir = os.path.join(args.outdirname, ALIGNDIR[args.method]) logger.info("Writing BLAST output to %s", blastdir) # Build BLAST databases and run pairwise BLASTN if not args.skip_blastn: # Make sequence fragments logger.info("Fragmenting input files, and writing to %s", args.outdirname) # Fraglengths does not get reused with BLASTN fragfiles, fraglengths = anib.fragment_fasta_files( infiles, blastdir, args.fragsize ) # Export fragment lengths as JSON, in case we re-run with --skip_blastn with open(os.path.join(blastdir, "fraglengths.json"), "w") as outfile: json.dump(fraglengths, outfile) # Which executables are we using? # if args.method == "ANIblastall": # format_exe = args.formatdb_exe # blast_exe = args.blastall_exe # else: # format_exe = args.makeblastdb_exe # blast_exe = args.blastn_exe # Run BLAST database-building and executables from a jobgraph logger.info("Creating job dependency graph") jobgraph = anib.make_job_graph( infiles, fragfiles, anib.make_blastcmd_builder(args.method, blastdir) ) # jobgraph = anib.make_job_graph(infiles, fragfiles, blastdir, # format_exe, blast_exe, args.method, # jobprefix=args.jobprefix) if args.scheduler == "multiprocessing": logger.info("Running jobs with multiprocessing") logger.info("Running job dependency graph") if args.workers is None: logger.info("(using maximum number of available " + "worker threads)") else: logger.info("(using %d worker threads, if available)", args.workers) cumval = run_mp.run_dependency_graph( jobgraph, workers=args.workers, logger=logger ) if 0 < cumval: logger.warning( "At least one BLAST run failed. 
" + "%s may fail.", args.method ) else: logger.info("All multiprocessing jobs complete.") else: run_sge.run_dependency_graph(jobgraph, logger=logger) logger.info("Running jobs with SGE") else: # Import fragment lengths from JSON if args.method == "ANIblastall": with open(os.path.join(blastdir, "fraglengths.json"), "rU") as infile: fraglengths = json.load(infile) else: fraglengths = None logger.warning("Skipping BLASTN runs (as instructed)!") # Process pairwise BLASTN output logger.info("Processing pairwise %s BLAST output.", args.method) try: data = anib.process_blast( blastdir, org_lengths, fraglengths=fraglengths, mode=args.method ) except ZeroDivisionError: logger.error("One or more BLAST output files has a problem.") if not args.skip_blastn: if 0 < cumval: logger.error( "This is possibly due to BLASTN run failure, " + "please investigate" ) else: logger.error( "This is possibly due to a BLASTN comparison " + "being too distant for use." ) logger.error(last_exception()) if not args.nocompress: logger.info("Compressing/deleting %s", blastdir) compress_delete_outdir(blastdir) # Return processed BLAST data return data
[ "def", "unified_anib", "(", "infiles", ",", "org_lengths", ")", ":", "logger", ".", "info", "(", "\"Running %s\"", ",", "args", ".", "method", ")", "blastdir", "=", "os", ".", "path", ".", "join", "(", "args", ".", "outdirname", ",", "ALIGNDIR", "[", "...
Calculate ANIb for files in input directory. - infiles - paths to each input file - org_lengths - dictionary of input sequence lengths, keyed by sequence Calculates ANI by the ANIb method, as described in Goris et al. (2007) Int J Syst Evol Micr 57: 81-91. doi:10.1099/ijs.0.64483-0. There are some minor differences depending on whether BLAST+ or legacy BLAST (BLASTALL) methods are used. All FASTA format files (selected by suffix) in the input directory are used to construct BLAST databases, placed in the output directory. Each file's contents are also split into sequence fragments of length options.fragsize, and the multiple FASTA file that results written to the output directory. These are BLASTNed, pairwise, against the databases. The BLAST output is interrogated for all fragment matches that cover at least 70% of the query sequence, with at least 30% nucleotide identity over the full length of the query sequence. This is an odd choice and doesn't correspond to the twilight zone limit as implied by Goris et al. We persist with their definition, however. Only these qualifying matches contribute to the total aligned length, and total aligned sequence identity used to calculate ANI. The results are processed to give matrices of aligned sequence length (aln_lengths.tab), similarity error counts (sim_errors.tab), ANIs (perc_ids.tab), and minimum aligned percentage (perc_aln.tab) of each genome, for each pairwise comparison. These are written to the output directory in plain text tab-separated format.
[ "Calculate", "ANIb", "for", "files", "in", "input", "directory", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L636-L752
20,247
widdowquinn/pyani
bin/average_nucleotide_identity.py
subsample_input
def subsample_input(infiles): """Returns a random subsample of the input files. - infiles: a list of input files for analysis """ logger.info("--subsample: %s", args.subsample) try: samplesize = float(args.subsample) except TypeError: # Not a number logger.error( "--subsample must be int or float, got %s (exiting)", type(args.subsample) ) sys.exit(1) if samplesize <= 0: # Not a positive value logger.error("--subsample must be positive value, got %s", str(args.subsample)) sys.exit(1) if int(samplesize) > 1: logger.info("Sample size integer > 1: %d", samplesize) k = min(int(samplesize), len(infiles)) else: logger.info("Sample size proportion in (0, 1]: %.3f", samplesize) k = int(min(samplesize, 1.0) * len(infiles)) logger.info("Randomly subsampling %d sequences for analysis", k) if args.seed: logger.info("Setting random seed with: %s", args.seed) random.seed(args.seed) else: logger.warning("Subsampling without specified random seed!") logger.warning("Subsampling may NOT be easily reproducible!") return random.sample(infiles, k)
python
def subsample_input(infiles): logger.info("--subsample: %s", args.subsample) try: samplesize = float(args.subsample) except TypeError: # Not a number logger.error( "--subsample must be int or float, got %s (exiting)", type(args.subsample) ) sys.exit(1) if samplesize <= 0: # Not a positive value logger.error("--subsample must be positive value, got %s", str(args.subsample)) sys.exit(1) if int(samplesize) > 1: logger.info("Sample size integer > 1: %d", samplesize) k = min(int(samplesize), len(infiles)) else: logger.info("Sample size proportion in (0, 1]: %.3f", samplesize) k = int(min(samplesize, 1.0) * len(infiles)) logger.info("Randomly subsampling %d sequences for analysis", k) if args.seed: logger.info("Setting random seed with: %s", args.seed) random.seed(args.seed) else: logger.warning("Subsampling without specified random seed!") logger.warning("Subsampling may NOT be easily reproducible!") return random.sample(infiles, k)
[ "def", "subsample_input", "(", "infiles", ")", ":", "logger", ".", "info", "(", "\"--subsample: %s\"", ",", "args", ".", "subsample", ")", "try", ":", "samplesize", "=", "float", "(", "args", ".", "subsample", ")", "except", "TypeError", ":", "# Not a number...
Returns a random subsample of the input files. - infiles: a list of input files for analysis
[ "Returns", "a", "random", "subsample", "of", "the", "input", "files", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L813-L842
20,248
widdowquinn/pyani
pyani/pyani_jobs.py
Job.wait
def wait(self, interval=SGE_WAIT): """Wait until the job finishes, and poll SGE on its status.""" finished = False while not finished: time.sleep(interval) interval = min(2 * interval, 60) finished = os.system("qstat -j %s > /dev/null" % (self.name))
python
def wait(self, interval=SGE_WAIT): finished = False while not finished: time.sleep(interval) interval = min(2 * interval, 60) finished = os.system("qstat -j %s > /dev/null" % (self.name))
[ "def", "wait", "(", "self", ",", "interval", "=", "SGE_WAIT", ")", ":", "finished", "=", "False", "while", "not", "finished", ":", "time", ".", "sleep", "(", "interval", ")", "interval", "=", "min", "(", "2", "*", "interval", ",", "60", ")", "finishe...
Wait until the job finishes, and poll SGE on its status.
[ "Wait", "until", "the", "job", "finishes", "and", "poll", "SGE", "on", "its", "status", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_jobs.py#L77-L83
20,249
widdowquinn/pyani
pyani/anim.py
generate_nucmer_jobs
def generate_nucmer_jobs( filenames, outdir=".", nucmer_exe=pyani_config.NUCMER_DEFAULT, filter_exe=pyani_config.FILTER_DEFAULT, maxmatch=False, jobprefix="ANINUCmer", ): """Return a list of Jobs describing NUCmer command-lines for ANIm - filenames - a list of paths to input FASTA files - outdir - path to output directory - nucmer_exe - location of the nucmer binary - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option Loop over all FASTA files, generating Jobs describing NUCmer command lines for each pairwise comparison. """ ncmds, fcmds = generate_nucmer_commands( filenames, outdir, nucmer_exe, filter_exe, maxmatch ) joblist = [] for idx, ncmd in enumerate(ncmds): njob = pyani_jobs.Job("%s_%06d-n" % (jobprefix, idx), ncmd) fjob = pyani_jobs.Job("%s_%06d-f" % (jobprefix, idx), fcmds[idx]) fjob.add_dependency(njob) # joblist.append(njob) # not required: dependency in fjob joblist.append(fjob) return joblist
python
def generate_nucmer_jobs( filenames, outdir=".", nucmer_exe=pyani_config.NUCMER_DEFAULT, filter_exe=pyani_config.FILTER_DEFAULT, maxmatch=False, jobprefix="ANINUCmer", ): ncmds, fcmds = generate_nucmer_commands( filenames, outdir, nucmer_exe, filter_exe, maxmatch ) joblist = [] for idx, ncmd in enumerate(ncmds): njob = pyani_jobs.Job("%s_%06d-n" % (jobprefix, idx), ncmd) fjob = pyani_jobs.Job("%s_%06d-f" % (jobprefix, idx), fcmds[idx]) fjob.add_dependency(njob) # joblist.append(njob) # not required: dependency in fjob joblist.append(fjob) return joblist
[ "def", "generate_nucmer_jobs", "(", "filenames", ",", "outdir", "=", "\".\"", ",", "nucmer_exe", "=", "pyani_config", ".", "NUCMER_DEFAULT", ",", "filter_exe", "=", "pyani_config", ".", "FILTER_DEFAULT", ",", "maxmatch", "=", "False", ",", "jobprefix", "=", "\"A...
Return a list of Jobs describing NUCmer command-lines for ANIm - filenames - a list of paths to input FASTA files - outdir - path to output directory - nucmer_exe - location of the nucmer binary - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option Loop over all FASTA files, generating Jobs describing NUCmer command lines for each pairwise comparison.
[ "Return", "a", "list", "of", "Jobs", "describing", "NUCmer", "command", "-", "lines", "for", "ANIm" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anim.py#L33-L61
20,250
widdowquinn/pyani
pyani/anim.py
generate_nucmer_commands
def generate_nucmer_commands( filenames, outdir=".", nucmer_exe=pyani_config.NUCMER_DEFAULT, filter_exe=pyani_config.FILTER_DEFAULT, maxmatch=False, ): """Return a tuple of lists of NUCmer command-lines for ANIm The first element is a list of NUCmer commands, the second a list of delta_filter_wrapper.py commands. These are ordered such that commands are paired. The NUCmer commands should be run before the delta-filter commands. - filenames - a list of paths to input FASTA files - outdir - path to output directory - nucmer_exe - location of the nucmer binary - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option Loop over all FASTA files generating NUCmer command lines for each pairwise comparison. """ nucmer_cmdlines, delta_filter_cmdlines = [], [] for idx, fname1 in enumerate(filenames[:-1]): for fname2 in filenames[idx + 1 :]: ncmd, dcmd = construct_nucmer_cmdline( fname1, fname2, outdir, nucmer_exe, filter_exe, maxmatch ) nucmer_cmdlines.append(ncmd) delta_filter_cmdlines.append(dcmd) return (nucmer_cmdlines, delta_filter_cmdlines)
python
def generate_nucmer_commands( filenames, outdir=".", nucmer_exe=pyani_config.NUCMER_DEFAULT, filter_exe=pyani_config.FILTER_DEFAULT, maxmatch=False, ): nucmer_cmdlines, delta_filter_cmdlines = [], [] for idx, fname1 in enumerate(filenames[:-1]): for fname2 in filenames[idx + 1 :]: ncmd, dcmd = construct_nucmer_cmdline( fname1, fname2, outdir, nucmer_exe, filter_exe, maxmatch ) nucmer_cmdlines.append(ncmd) delta_filter_cmdlines.append(dcmd) return (nucmer_cmdlines, delta_filter_cmdlines)
[ "def", "generate_nucmer_commands", "(", "filenames", ",", "outdir", "=", "\".\"", ",", "nucmer_exe", "=", "pyani_config", ".", "NUCMER_DEFAULT", ",", "filter_exe", "=", "pyani_config", ".", "FILTER_DEFAULT", ",", "maxmatch", "=", "False", ",", ")", ":", "nucmer_...
Return a tuple of lists of NUCmer command-lines for ANIm The first element is a list of NUCmer commands, the second a list of delta_filter_wrapper.py commands. These are ordered such that commands are paired. The NUCmer commands should be run before the delta-filter commands. - filenames - a list of paths to input FASTA files - outdir - path to output directory - nucmer_exe - location of the nucmer binary - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option Loop over all FASTA files generating NUCmer command lines for each pairwise comparison.
[ "Return", "a", "tuple", "of", "lists", "of", "NUCmer", "command", "-", "lines", "for", "ANIm" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anim.py#L66-L96
20,251
widdowquinn/pyani
pyani/anim.py
construct_nucmer_cmdline
def construct_nucmer_cmdline( fname1, fname2, outdir=".", nucmer_exe=pyani_config.NUCMER_DEFAULT, filter_exe=pyani_config.FILTER_DEFAULT, maxmatch=False, ): """Returns a tuple of NUCmer and delta-filter commands The split into a tuple was made necessary by changes to SGE/OGE. The delta-filter command must now be run as a dependency of the NUCmer command, and be wrapped in a Python script to capture STDOUT. NOTE: This command-line writes output data to a subdirectory of the passed outdir, called "nucmer_output". - fname1 - query FASTA filepath - fname2 - subject FASTA filepath - outdir - path to output directory - maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch option. If not, the -mum option is used instead """ outsubdir = os.path.join(outdir, pyani_config.ALIGNDIR["ANIm"]) outprefix = os.path.join( outsubdir, "%s_vs_%s" % ( os.path.splitext(os.path.split(fname1)[-1])[0], os.path.splitext(os.path.split(fname2)[-1])[0], ), ) if maxmatch: mode = "--maxmatch" else: mode = "--mum" nucmercmd = "{0} {1} -p {2} {3} {4}".format( nucmer_exe, mode, outprefix, fname1, fname2 ) filtercmd = "delta_filter_wrapper.py " + "{0} -1 {1} {2}".format( filter_exe, outprefix + ".delta", outprefix + ".filter" ) return (nucmercmd, filtercmd)
python
def construct_nucmer_cmdline( fname1, fname2, outdir=".", nucmer_exe=pyani_config.NUCMER_DEFAULT, filter_exe=pyani_config.FILTER_DEFAULT, maxmatch=False, ): outsubdir = os.path.join(outdir, pyani_config.ALIGNDIR["ANIm"]) outprefix = os.path.join( outsubdir, "%s_vs_%s" % ( os.path.splitext(os.path.split(fname1)[-1])[0], os.path.splitext(os.path.split(fname2)[-1])[0], ), ) if maxmatch: mode = "--maxmatch" else: mode = "--mum" nucmercmd = "{0} {1} -p {2} {3} {4}".format( nucmer_exe, mode, outprefix, fname1, fname2 ) filtercmd = "delta_filter_wrapper.py " + "{0} -1 {1} {2}".format( filter_exe, outprefix + ".delta", outprefix + ".filter" ) return (nucmercmd, filtercmd)
[ "def", "construct_nucmer_cmdline", "(", "fname1", ",", "fname2", ",", "outdir", "=", "\".\"", ",", "nucmer_exe", "=", "pyani_config", ".", "NUCMER_DEFAULT", ",", "filter_exe", "=", "pyani_config", ".", "FILTER_DEFAULT", ",", "maxmatch", "=", "False", ",", ")", ...
Returns a tuple of NUCmer and delta-filter commands The split into a tuple was made necessary by changes to SGE/OGE. The delta-filter command must now be run as a dependency of the NUCmer command, and be wrapped in a Python script to capture STDOUT. NOTE: This command-line writes output data to a subdirectory of the passed outdir, called "nucmer_output". - fname1 - query FASTA filepath - fname2 - subject FASTA filepath - outdir - path to output directory - maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch option. If not, the -mum option is used instead
[ "Returns", "a", "tuple", "of", "NUCmer", "and", "delta", "-", "filter", "commands" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anim.py#L101-L143
20,252
widdowquinn/pyani
pyani/anim.py
process_deltadir
def process_deltadir(delta_dir, org_lengths, logger=None): """Returns a tuple of ANIm results for .deltas in passed directory. - delta_dir - path to the directory containing .delta files - org_lengths - dictionary of total sequence lengths, keyed by sequence Returns the following pandas dataframes in an ANIResults object; query sequences are rows, subject sequences are columns: - alignment_lengths - symmetrical: total length of alignment - percentage_identity - symmetrical: percentage identity of alignment - alignment_coverage - non-symmetrical: coverage of query and subject - similarity_errors - symmetrical: count of similarity errors May throw a ZeroDivisionError if one or more NUCmer runs failed, or a very distant sequence was included in the analysis. """ # Process directory to identify input files - as of v0.2.4 we use the # .filter files that result from delta-filter (1:1 alignments) deltafiles = pyani_files.get_input_files(delta_dir, ".filter") # Hold data in ANIResults object results = ANIResults(list(org_lengths.keys()), "ANIm") # Fill diagonal NA values for alignment_length with org_lengths for org, length in list(org_lengths.items()): results.alignment_lengths[org][org] = length # Process .delta files assuming that the filename format holds: # org1_vs_org2.delta for deltafile in deltafiles: qname, sname = os.path.splitext(os.path.split(deltafile)[-1])[0].split("_vs_") # We may have .delta files from other analyses in the same directory # If this occurs, we raise a warning, and skip the .delta file if qname not in list(org_lengths.keys()): if logger: logger.warning( "Query name %s not in input " % qname + "sequence list, skipping %s" % deltafile ) continue if sname not in list(org_lengths.keys()): if logger: logger.warning( "Subject name %s not in input " % sname + "sequence list, skipping %s" % deltafile ) continue tot_length, tot_sim_error = parse_delta(deltafile) if tot_length == 0 and logger is not None: if logger: logger.warning( "Total alignment 
length reported in " + "%s is zero!" % deltafile ) query_cover = float(tot_length) / org_lengths[qname] sbjct_cover = float(tot_length) / org_lengths[sname] # Calculate percentage ID of aligned length. This may fail if # total length is zero. # The ZeroDivisionError that would arise should be handled # Common causes are that a NUCmer run failed, or that a very # distant sequence was included in the analysis. try: perc_id = 1 - float(tot_sim_error) / tot_length except ZeroDivisionError: perc_id = 0 # set arbitrary value of zero identity results.zero_error = True # Populate dataframes: when assigning data from symmetrical MUMmer # output, both upper and lower triangles will be populated results.add_tot_length(qname, sname, tot_length) results.add_sim_errors(qname, sname, tot_sim_error) results.add_pid(qname, sname, perc_id) results.add_coverage(qname, sname, query_cover, sbjct_cover) return results
python
def process_deltadir(delta_dir, org_lengths, logger=None): # Process directory to identify input files - as of v0.2.4 we use the # .filter files that result from delta-filter (1:1 alignments) deltafiles = pyani_files.get_input_files(delta_dir, ".filter") # Hold data in ANIResults object results = ANIResults(list(org_lengths.keys()), "ANIm") # Fill diagonal NA values for alignment_length with org_lengths for org, length in list(org_lengths.items()): results.alignment_lengths[org][org] = length # Process .delta files assuming that the filename format holds: # org1_vs_org2.delta for deltafile in deltafiles: qname, sname = os.path.splitext(os.path.split(deltafile)[-1])[0].split("_vs_") # We may have .delta files from other analyses in the same directory # If this occurs, we raise a warning, and skip the .delta file if qname not in list(org_lengths.keys()): if logger: logger.warning( "Query name %s not in input " % qname + "sequence list, skipping %s" % deltafile ) continue if sname not in list(org_lengths.keys()): if logger: logger.warning( "Subject name %s not in input " % sname + "sequence list, skipping %s" % deltafile ) continue tot_length, tot_sim_error = parse_delta(deltafile) if tot_length == 0 and logger is not None: if logger: logger.warning( "Total alignment length reported in " + "%s is zero!" % deltafile ) query_cover = float(tot_length) / org_lengths[qname] sbjct_cover = float(tot_length) / org_lengths[sname] # Calculate percentage ID of aligned length. This may fail if # total length is zero. # The ZeroDivisionError that would arise should be handled # Common causes are that a NUCmer run failed, or that a very # distant sequence was included in the analysis. 
try: perc_id = 1 - float(tot_sim_error) / tot_length except ZeroDivisionError: perc_id = 0 # set arbitrary value of zero identity results.zero_error = True # Populate dataframes: when assigning data from symmetrical MUMmer # output, both upper and lower triangles will be populated results.add_tot_length(qname, sname, tot_length) results.add_sim_errors(qname, sname, tot_sim_error) results.add_pid(qname, sname, perc_id) results.add_coverage(qname, sname, query_cover, sbjct_cover) return results
[ "def", "process_deltadir", "(", "delta_dir", ",", "org_lengths", ",", "logger", "=", "None", ")", ":", "# Process directory to identify input files - as of v0.2.4 we use the", "# .filter files that result from delta-filter (1:1 alignments)", "deltafiles", "=", "pyani_files", ".", ...
Returns a tuple of ANIm results for .deltas in passed directory. - delta_dir - path to the directory containing .delta files - org_lengths - dictionary of total sequence lengths, keyed by sequence Returns the following pandas dataframes in an ANIResults object; query sequences are rows, subject sequences are columns: - alignment_lengths - symmetrical: total length of alignment - percentage_identity - symmetrical: percentage identity of alignment - alignment_coverage - non-symmetrical: coverage of query and subject - similarity_errors - symmetrical: count of similarity errors May throw a ZeroDivisionError if one or more NUCmer runs failed, or a very distant sequence was included in the analysis.
[ "Returns", "a", "tuple", "of", "ANIm", "results", "for", ".", "deltas", "in", "passed", "directory", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anim.py#L169-L244
20,253
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
set_ncbi_email
def set_ncbi_email(): """Set contact email for NCBI.""" Entrez.email = args.email logger.info("Set NCBI contact email to %s", args.email) Entrez.tool = "genbank_get_genomes_by_taxon.py"
python
def set_ncbi_email(): Entrez.email = args.email logger.info("Set NCBI contact email to %s", args.email) Entrez.tool = "genbank_get_genomes_by_taxon.py"
[ "def", "set_ncbi_email", "(", ")", ":", "Entrez", ".", "email", "=", "args", ".", "email", "logger", ".", "info", "(", "\"Set NCBI contact email to %s\"", ",", "args", ".", "email", ")", "Entrez", ".", "tool", "=", "\"genbank_get_genomes_by_taxon.py\"" ]
Set contact email for NCBI.
[ "Set", "contact", "email", "for", "NCBI", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L139-L143
20,254
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
entrez_retry
def entrez_retry(func, *fnargs, **fnkwargs): """Retries the passed function up to the number of times specified by args.retries """ tries, success = 0, False while not success and tries < args.retries: try: output = func(*fnargs, **fnkwargs) success = True except (HTTPError, URLError): tries += 1 logger.warning("Entrez query %s(%s, %s) failed (%d/%d)", func, fnargs, fnkwargs, tries + 1, args.retries) logger.warning(last_exception()) if not success: logger.error("Too many Entrez failures (exiting)") sys.exit(1) return output
python
def entrez_retry(func, *fnargs, **fnkwargs): tries, success = 0, False while not success and tries < args.retries: try: output = func(*fnargs, **fnkwargs) success = True except (HTTPError, URLError): tries += 1 logger.warning("Entrez query %s(%s, %s) failed (%d/%d)", func, fnargs, fnkwargs, tries + 1, args.retries) logger.warning(last_exception()) if not success: logger.error("Too many Entrez failures (exiting)") sys.exit(1) return output
[ "def", "entrez_retry", "(", "func", ",", "*", "fnargs", ",", "*", "*", "fnkwargs", ")", ":", "tries", ",", "success", "=", "0", ",", "False", "while", "not", "success", "and", "tries", "<", "args", ".", "retries", ":", "try", ":", "output", "=", "f...
Retries the passed function up to the number of times specified by args.retries
[ "Retries", "the", "passed", "function", "up", "to", "the", "number", "of", "times", "specified", "by", "args", ".", "retries" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L187-L204
20,255
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
entrez_batch_webhistory
def entrez_batch_webhistory(record, expected, batchsize, *fnargs, **fnkwargs): """Recovers the Entrez data from a prior NCBI webhistory search, in batches of defined size, using Efetch. Returns all results as a list. - record: Entrez webhistory record - expected: number of expected search returns - batchsize: how many search returns to retrieve in a batch - *fnargs: arguments to Efetch - **fnkwargs: keyword arguments to Efetch """ results = [] for start in range(0, expected, batchsize): batch_handle = entrez_retry( Entrez.efetch, retstart=start, retmax=batchsize, webenv=record["WebEnv"], query_key=record["QueryKey"], *fnargs, **fnkwargs) batch_record = Entrez.read(batch_handle, validate=False) results.extend(batch_record) return results
python
def entrez_batch_webhistory(record, expected, batchsize, *fnargs, **fnkwargs): results = [] for start in range(0, expected, batchsize): batch_handle = entrez_retry( Entrez.efetch, retstart=start, retmax=batchsize, webenv=record["WebEnv"], query_key=record["QueryKey"], *fnargs, **fnkwargs) batch_record = Entrez.read(batch_handle, validate=False) results.extend(batch_record) return results
[ "def", "entrez_batch_webhistory", "(", "record", ",", "expected", ",", "batchsize", ",", "*", "fnargs", ",", "*", "*", "fnkwargs", ")", ":", "results", "=", "[", "]", "for", "start", "in", "range", "(", "0", ",", "expected", ",", "batchsize", ")", ":",...
Recovers the Entrez data from a prior NCBI webhistory search, in batches of defined size, using Efetch. Returns all results as a list. - record: Entrez webhistory record - expected: number of expected search returns - batchsize: how many search returns to retrieve in a batch - *fnargs: arguments to Efetch - **fnkwargs: keyword arguments to Efetch
[ "Recovers", "the", "Entrez", "data", "from", "a", "prior", "NCBI", "webhistory", "search", "in", "batches", "of", "defined", "size", "using", "Efetch", ".", "Returns", "all", "results", "as", "a", "list", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L208-L230
20,256
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
get_asm_uids
def get_asm_uids(taxon_uid): """Returns a set of NCBI UIDs associated with the passed taxon. This query at NCBI returns all assemblies for the taxon subtree rooted at the passed taxon_uid. """ query = "txid%s[Organism:exp]" % taxon_uid logger.info("Entrez ESearch with query: %s", query) # Perform initial search for assembly UIDs with taxon ID as query. # Use NCBI history for the search. handle = entrez_retry( Entrez.esearch, db="assembly", term=query, format="xml", usehistory="y") record = Entrez.read(handle, validate=False) result_count = int(record['Count']) logger.info("Entrez ESearch returns %d assembly IDs", result_count) # Recover assembly UIDs from the web history asm_ids = entrez_batch_webhistory( record, result_count, 250, db="assembly", retmode="xml") logger.info("Identified %d unique assemblies", len(asm_ids)) return asm_ids
python
def get_asm_uids(taxon_uid): query = "txid%s[Organism:exp]" % taxon_uid logger.info("Entrez ESearch with query: %s", query) # Perform initial search for assembly UIDs with taxon ID as query. # Use NCBI history for the search. handle = entrez_retry( Entrez.esearch, db="assembly", term=query, format="xml", usehistory="y") record = Entrez.read(handle, validate=False) result_count = int(record['Count']) logger.info("Entrez ESearch returns %d assembly IDs", result_count) # Recover assembly UIDs from the web history asm_ids = entrez_batch_webhistory( record, result_count, 250, db="assembly", retmode="xml") logger.info("Identified %d unique assemblies", len(asm_ids)) return asm_ids
[ "def", "get_asm_uids", "(", "taxon_uid", ")", ":", "query", "=", "\"txid%s[Organism:exp]\"", "%", "taxon_uid", "logger", ".", "info", "(", "\"Entrez ESearch with query: %s\"", ",", "query", ")", "# Perform initial search for assembly UIDs with taxon ID as query.", "# Use NCBI...
Returns a set of NCBI UIDs associated with the passed taxon. This query at NCBI returns all assemblies for the taxon subtree rooted at the passed taxon_uid.
[ "Returns", "a", "set", "of", "NCBI", "UIDs", "associated", "with", "the", "passed", "taxon", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L234-L259
20,257
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
extract_filestem
def extract_filestem(data): """Extract filestem from Entrez eSummary data. Function expects esummary['DocumentSummarySet']['DocumentSummary'][0] Some illegal characters may occur in AssemblyName - for these, a more robust regex replace/escape may be required. Sadly, NCBI don't just use standard percent escapes, but instead replace certain characters with underscores: white space, slash, comma, hash, brackets. """ escapes = re.compile(r"[\s/,#\(\)]") escname = re.sub(escapes, '_', data['AssemblyName']) return '_'.join([data['AssemblyAccession'], escname])
python
def extract_filestem(data): escapes = re.compile(r"[\s/,#\(\)]") escname = re.sub(escapes, '_', data['AssemblyName']) return '_'.join([data['AssemblyAccession'], escname])
[ "def", "extract_filestem", "(", "data", ")", ":", "escapes", "=", "re", ".", "compile", "(", "r\"[\\s/,#\\(\\)]\"", ")", "escname", "=", "re", ".", "sub", "(", "escapes", ",", "'_'", ",", "data", "[", "'AssemblyName'", "]", ")", "return", "'_'", ".", "...
Extract filestem from Entrez eSummary data. Function expects esummary['DocumentSummarySet']['DocumentSummary'][0] Some illegal characters may occur in AssemblyName - for these, a more robust regex replace/escape may be required. Sadly, NCBI don't just use standard percent escapes, but instead replace certain characters with underscores: white space, slash, comma, hash, brackets.
[ "Extract", "filestem", "from", "Entrez", "eSummary", "data", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L263-L275
20,258
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
write_contigs
def write_contigs(asm_uid, contig_uids, batchsize=10000): """Writes assembly contigs out to a single FASTA file in the script's designated output directory. FASTA records are returned, as GenBank and even GenBankWithParts format records don't reliably give correct sequence in all cases. The script returns two strings for each assembly, a 'class' and a 'label' string - this is for use with, e.g. pyani. """ # Has duplicate code with get_class_label_info() - needs refactoring logger.info("Collecting contig data for %s", asm_uid) # Assembly record - get binomial and strain names asm_record = Entrez.read( entrez_retry( Entrez.esummary, db='assembly', id=asm_uid, rettype='text'), validate=False) asm_organism = asm_record['DocumentSummarySet']['DocumentSummary'][0][ 'SpeciesName'] try: asm_strain = asm_record['DocumentSummarySet']['DocumentSummary'][0][ 'Biosource']['InfraspeciesList'][0]['Sub_value'] except KeyError: asm_strain = "" # Assembly UID (long form) for the output filename outfilename = "%s.fasta" % os.path.join(args.outdirname, asm_record[ 'DocumentSummarySet']['DocumentSummary'][0]['AssemblyAccession']) # Create label and class strings genus, species = asm_organism.split(' ', 1) # Get FASTA records for contigs logger.info("Downloading FASTA records for assembly %s (%s)", asm_uid, ' '.join([genus[0] + '.', species, asm_strain])) # We're doing an explicit outer retry loop here because we want to confirm # we have the correct data, as well as test for Entrez connection errors, # which is all the entrez_retry function does. 
tries, success = 0, False while not success and tries < args.retries: records = [] # Holds all return records # We may need to batch contigs query_uids = ','.join(contig_uids) try: for start in range(0, len(contig_uids), batchsize): logger.info("Batch: %d-%d", start, start + batchsize) records.extend( list( SeqIO.parse( entrez_retry( Entrez.efetch, db='nucleotide', id=query_uids, rettype='fasta', retmode='text', retstart=start, retmax=batchsize), 'fasta'))) tries += 1 # Check only that correct number of records returned. if len(records) == len(contig_uids): success = True else: logger.warning("%d contigs expected, %d contigs returned", len(contig_uids), len(records)) logger.warning("FASTA download for assembly %s failed", asm_uid) logger.warning("try %d/20", tries) # Could also check expected assembly sequence length? logger.info("Downloaded genome size: %d", sum([len(r) for r in records])) except: logger.warning("FASTA download for assembly %s failed", asm_uid) logger.warning(last_exception()) logger.warning("try %d/20", tries) if not success: # Could place option on command-line to stop or continue here. logger.error("Failed to download records for %s (continuing)", asm_uid) # Write contigs to file retval = SeqIO.write(records, outfilename, 'fasta') logger.info("Wrote %d contigs to %s", retval, outfilename)
python
def write_contigs(asm_uid, contig_uids, batchsize=10000): # Has duplicate code with get_class_label_info() - needs refactoring logger.info("Collecting contig data for %s", asm_uid) # Assembly record - get binomial and strain names asm_record = Entrez.read( entrez_retry( Entrez.esummary, db='assembly', id=asm_uid, rettype='text'), validate=False) asm_organism = asm_record['DocumentSummarySet']['DocumentSummary'][0][ 'SpeciesName'] try: asm_strain = asm_record['DocumentSummarySet']['DocumentSummary'][0][ 'Biosource']['InfraspeciesList'][0]['Sub_value'] except KeyError: asm_strain = "" # Assembly UID (long form) for the output filename outfilename = "%s.fasta" % os.path.join(args.outdirname, asm_record[ 'DocumentSummarySet']['DocumentSummary'][0]['AssemblyAccession']) # Create label and class strings genus, species = asm_organism.split(' ', 1) # Get FASTA records for contigs logger.info("Downloading FASTA records for assembly %s (%s)", asm_uid, ' '.join([genus[0] + '.', species, asm_strain])) # We're doing an explicit outer retry loop here because we want to confirm # we have the correct data, as well as test for Entrez connection errors, # which is all the entrez_retry function does. tries, success = 0, False while not success and tries < args.retries: records = [] # Holds all return records # We may need to batch contigs query_uids = ','.join(contig_uids) try: for start in range(0, len(contig_uids), batchsize): logger.info("Batch: %d-%d", start, start + batchsize) records.extend( list( SeqIO.parse( entrez_retry( Entrez.efetch, db='nucleotide', id=query_uids, rettype='fasta', retmode='text', retstart=start, retmax=batchsize), 'fasta'))) tries += 1 # Check only that correct number of records returned. 
if len(records) == len(contig_uids): success = True else: logger.warning("%d contigs expected, %d contigs returned", len(contig_uids), len(records)) logger.warning("FASTA download for assembly %s failed", asm_uid) logger.warning("try %d/20", tries) # Could also check expected assembly sequence length? logger.info("Downloaded genome size: %d", sum([len(r) for r in records])) except: logger.warning("FASTA download for assembly %s failed", asm_uid) logger.warning(last_exception()) logger.warning("try %d/20", tries) if not success: # Could place option on command-line to stop or continue here. logger.error("Failed to download records for %s (continuing)", asm_uid) # Write contigs to file retval = SeqIO.write(records, outfilename, 'fasta') logger.info("Wrote %d contigs to %s", retval, outfilename)
[ "def", "write_contigs", "(", "asm_uid", ",", "contig_uids", ",", "batchsize", "=", "10000", ")", ":", "# Has duplicate code with get_class_label_info() - needs refactoring", "logger", ".", "info", "(", "\"Collecting contig data for %s\"", ",", "asm_uid", ")", "# Assembly re...
Writes assembly contigs out to a single FASTA file in the script's designated output directory. FASTA records are returned, as GenBank and even GenBankWithParts format records don't reliably give correct sequence in all cases. The script returns two strings for each assembly, a 'class' and a 'label' string - this is for use with, e.g. pyani.
[ "Writes", "assembly", "contigs", "out", "to", "a", "single", "FASTA", "file", "in", "the", "script", "s", "designated", "output", "directory", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L481-L560
20,259
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
logreport_downloaded
def logreport_downloaded(accession, skippedlist, accessiondict, uidaccdict): """Reports to logger whether alternative assemblies for an accession that was missing have been downloaded """ for vid in accessiondict[accession.split('.')[0]]: if vid in skippedlist: status = "NOT DOWNLOADED" else: status = "DOWNLOADED" logger.warning("\t\t%s: %s - %s", vid, uidaccdict[vid], status)
python
def logreport_downloaded(accession, skippedlist, accessiondict, uidaccdict): for vid in accessiondict[accession.split('.')[0]]: if vid in skippedlist: status = "NOT DOWNLOADED" else: status = "DOWNLOADED" logger.warning("\t\t%s: %s - %s", vid, uidaccdict[vid], status)
[ "def", "logreport_downloaded", "(", "accession", ",", "skippedlist", ",", "accessiondict", ",", "uidaccdict", ")", ":", "for", "vid", "in", "accessiondict", "[", "accession", ".", "split", "(", "'.'", ")", "[", "0", "]", "]", ":", "if", "vid", "in", "ski...
Reports to logger whether alternative assemblies for an accession that was missing have been downloaded
[ "Reports", "to", "logger", "whether", "alternative", "assemblies", "for", "an", "accession", "that", "was", "missing", "have", "been", "downloaded" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L564-L573
20,260
widdowquinn/pyani
pyani/tetra.py
calculate_tetra_zscores
def calculate_tetra_zscores(infilenames): """Returns dictionary of TETRA Z-scores for each input file. - infilenames - collection of paths to sequence files """ org_tetraz = {} for filename in infilenames: org = os.path.splitext(os.path.split(filename)[-1])[0] org_tetraz[org] = calculate_tetra_zscore(filename) return org_tetraz
python
def calculate_tetra_zscores(infilenames): org_tetraz = {} for filename in infilenames: org = os.path.splitext(os.path.split(filename)[-1])[0] org_tetraz[org] = calculate_tetra_zscore(filename) return org_tetraz
[ "def", "calculate_tetra_zscores", "(", "infilenames", ")", ":", "org_tetraz", "=", "{", "}", "for", "filename", "in", "infilenames", ":", "org", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "filename", ")", "[", "...
Returns dictionary of TETRA Z-scores for each input file. - infilenames - collection of paths to sequence files
[ "Returns", "dictionary", "of", "TETRA", "Z", "-", "scores", "for", "each", "input", "file", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/tetra.py#L33-L42
20,261
widdowquinn/pyani
pyani/tetra.py
calculate_tetra_zscore
def calculate_tetra_zscore(filename): """Returns TETRA Z-score for the sequence in the passed file. - filename - path to sequence file Calculates mono-, di-, tri- and tetranucleotide frequencies for each sequence, on each strand, and follows Teeling et al. (2004) in calculating a corresponding Z-score for each observed tetranucleotide frequency, dependent on the mono-, di- and tri- nucleotide frequencies for that input sequence. """ # For the Teeling et al. method, the Z-scores require us to count # mono, di, tri and tetranucleotide sequences - these are stored # (in order) in the counts tuple counts = (collections.defaultdict(int), collections.defaultdict(int), collections.defaultdict(int), collections.defaultdict(int)) for rec in SeqIO.parse(filename, 'fasta'): for seq in [str(rec.seq).upper(), str(rec.seq.reverse_complement()).upper()]: # The Teeling et al. algorithm requires us to consider # both strand orientations, so monocounts are easy for base in ('G', 'C', 'T', 'A'): counts[0][base] += seq.count(base) # For di, tri and tetranucleotide counts, loop over the # sequence and its reverse complement, until near the end: for i in range(len(seq[:-4])): din, tri, tetra = seq[i:i+2], seq[i:i+3], seq[i:i+4] counts[1][str(din)] += 1 counts[2][str(tri)] += 1 counts[3][str(tetra)] += 1 # Then clean up the straggling bit at the end: counts[2][str(seq[-4:-1])] += 1 counts[2][str(seq[-3:])] += 1 counts[1][str(seq[-4:-2])] += 1 counts[1][str(seq[-3:-1])] += 1 counts[1][str(seq[-2:])] += 1 # Following Teeling (2004), calculate expected frequencies for each # tetranucleotide; we ignore ambiguity symbols tetra_exp = {} for tet in [tetn for tetn in counts[3] if tetra_clean(tetn)]: tetra_exp[tet] = 1. 
* counts[2][tet[:3]] * counts[2][tet[1:]] / \ counts[1][tet[1:3]] # Following Teeling (2004) we approximate the std dev and Z-score for each # tetranucleotide tetra_sd = {} tetra_z = {} for tet, exp in list(tetra_exp.items()): den = counts[1][tet[1:3]] tetra_sd[tet] = math.sqrt(exp * (den - counts[2][tet[:3]]) * (den - counts[2][tet[1:]]) / (den * den)) try: tetra_z[tet] = (counts[3][tet] - exp)/tetra_sd[tet] except ZeroDivisionError: # To record if we hit a zero in the estimation of variance # zeroes = [k for k, v in list(tetra_sd.items()) if v == 0] tetra_z[tet] = 1 / (counts[1][tet[1:3]] * counts[1][tet[1:3]]) return tetra_z
python
def calculate_tetra_zscore(filename): # For the Teeling et al. method, the Z-scores require us to count # mono, di, tri and tetranucleotide sequences - these are stored # (in order) in the counts tuple counts = (collections.defaultdict(int), collections.defaultdict(int), collections.defaultdict(int), collections.defaultdict(int)) for rec in SeqIO.parse(filename, 'fasta'): for seq in [str(rec.seq).upper(), str(rec.seq.reverse_complement()).upper()]: # The Teeling et al. algorithm requires us to consider # both strand orientations, so monocounts are easy for base in ('G', 'C', 'T', 'A'): counts[0][base] += seq.count(base) # For di, tri and tetranucleotide counts, loop over the # sequence and its reverse complement, until near the end: for i in range(len(seq[:-4])): din, tri, tetra = seq[i:i+2], seq[i:i+3], seq[i:i+4] counts[1][str(din)] += 1 counts[2][str(tri)] += 1 counts[3][str(tetra)] += 1 # Then clean up the straggling bit at the end: counts[2][str(seq[-4:-1])] += 1 counts[2][str(seq[-3:])] += 1 counts[1][str(seq[-4:-2])] += 1 counts[1][str(seq[-3:-1])] += 1 counts[1][str(seq[-2:])] += 1 # Following Teeling (2004), calculate expected frequencies for each # tetranucleotide; we ignore ambiguity symbols tetra_exp = {} for tet in [tetn for tetn in counts[3] if tetra_clean(tetn)]: tetra_exp[tet] = 1. * counts[2][tet[:3]] * counts[2][tet[1:]] / \ counts[1][tet[1:3]] # Following Teeling (2004) we approximate the std dev and Z-score for each # tetranucleotide tetra_sd = {} tetra_z = {} for tet, exp in list(tetra_exp.items()): den = counts[1][tet[1:3]] tetra_sd[tet] = math.sqrt(exp * (den - counts[2][tet[:3]]) * (den - counts[2][tet[1:]]) / (den * den)) try: tetra_z[tet] = (counts[3][tet] - exp)/tetra_sd[tet] except ZeroDivisionError: # To record if we hit a zero in the estimation of variance # zeroes = [k for k, v in list(tetra_sd.items()) if v == 0] tetra_z[tet] = 1 / (counts[1][tet[1:3]] * counts[1][tet[1:3]]) return tetra_z
[ "def", "calculate_tetra_zscore", "(", "filename", ")", ":", "# For the Teeling et al. method, the Z-scores require us to count", "# mono, di, tri and tetranucleotide sequences - these are stored", "# (in order) in the counts tuple", "counts", "=", "(", "collections", ".", "defaultdict", ...
Returns TETRA Z-score for the sequence in the passed file. - filename - path to sequence file Calculates mono-, di-, tri- and tetranucleotide frequencies for each sequence, on each strand, and follows Teeling et al. (2004) in calculating a corresponding Z-score for each observed tetranucleotide frequency, dependent on the mono-, di- and tri- nucleotide frequencies for that input sequence.
[ "Returns", "TETRA", "Z", "-", "score", "for", "the", "sequence", "in", "the", "passed", "file", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/tetra.py#L46-L102
20,262
widdowquinn/pyani
pyani/tetra.py
calculate_correlations
def calculate_correlations(tetra_z): """Returns dataframe of Pearson correlation coefficients. - tetra_z - dictionary of Z-scores, keyed by sequence ID Calculates Pearson correlation coefficient from Z scores for each tetranucleotide. This is done longhand here, which is fast enough, but for robustness we might want to do something else... (TODO). Note that we report a correlation by this method, rather than a percentage identity. """ orgs = sorted(tetra_z.keys()) correlations = pd.DataFrame(index=orgs, columns=orgs, dtype=float).fillna(1.0) for idx, org1 in enumerate(orgs[:-1]): for org2 in orgs[idx+1:]: assert sorted(tetra_z[org1].keys()) == sorted(tetra_z[org2].keys()) tets = sorted(tetra_z[org1].keys()) zscores = [[tetra_z[org1][t] for t in tets], [tetra_z[org2][t] for t in tets]] zmeans = [sum(zscore)/len(zscore) for zscore in zscores] zdiffs = [[z - zmeans[0] for z in zscores[0]], [z - zmeans[1] for z in zscores[1]]] diffprods = sum([zdiffs[0][i] * zdiffs[1][i] for i in range(len(zdiffs[0]))]) zdiffs2 = [sum([z * z for z in zdiffs[0]]), sum([z * z for z in zdiffs[1]])] correlations[org1][org2] = diffprods / \ math.sqrt(zdiffs2[0] * zdiffs2[1]) correlations[org2][org1] = correlations[org1][org2] return correlations
python
def calculate_correlations(tetra_z): orgs = sorted(tetra_z.keys()) correlations = pd.DataFrame(index=orgs, columns=orgs, dtype=float).fillna(1.0) for idx, org1 in enumerate(orgs[:-1]): for org2 in orgs[idx+1:]: assert sorted(tetra_z[org1].keys()) == sorted(tetra_z[org2].keys()) tets = sorted(tetra_z[org1].keys()) zscores = [[tetra_z[org1][t] for t in tets], [tetra_z[org2][t] for t in tets]] zmeans = [sum(zscore)/len(zscore) for zscore in zscores] zdiffs = [[z - zmeans[0] for z in zscores[0]], [z - zmeans[1] for z in zscores[1]]] diffprods = sum([zdiffs[0][i] * zdiffs[1][i] for i in range(len(zdiffs[0]))]) zdiffs2 = [sum([z * z for z in zdiffs[0]]), sum([z * z for z in zdiffs[1]])] correlations[org1][org2] = diffprods / \ math.sqrt(zdiffs2[0] * zdiffs2[1]) correlations[org2][org1] = correlations[org1][org2] return correlations
[ "def", "calculate_correlations", "(", "tetra_z", ")", ":", "orgs", "=", "sorted", "(", "tetra_z", ".", "keys", "(", ")", ")", "correlations", "=", "pd", ".", "DataFrame", "(", "index", "=", "orgs", ",", "columns", "=", "orgs", ",", "dtype", "=", "float...
Returns dataframe of Pearson correlation coefficients. - tetra_z - dictionary of Z-scores, keyed by sequence ID Calculates Pearson correlation coefficient from Z scores for each tetranucleotide. This is done longhand here, which is fast enough, but for robustness we might want to do something else... (TODO). Note that we report a correlation by this method, rather than a percentage identity.
[ "Returns", "dataframe", "of", "Pearson", "correlation", "coefficients", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/tetra.py#L118-L149
20,263
widdowquinn/pyani
pyani/pyani_tools.py
get_labels
def get_labels(filename, logger=None): """Returns a dictionary of alternative sequence labels, or None - filename - path to file containing tab-separated table of labels Input files should be formatted as <key>\t<label>, one pair per line. """ labeldict = {} if filename is not None: if logger: logger.info("Reading labels from %s", filename) with open(filename, "r") as ifh: count = 0 for line in ifh.readlines(): count += 1 try: key, label = line.strip().split("\t") except ValueError: if logger: logger.warning("Problem with class file: %s", filename) logger.warning("%d: %s", (count, line.strip())) logger.warning("(skipping line)") continue else: labeldict[key] = label return labeldict
python
def get_labels(filename, logger=None): labeldict = {} if filename is not None: if logger: logger.info("Reading labels from %s", filename) with open(filename, "r") as ifh: count = 0 for line in ifh.readlines(): count += 1 try: key, label = line.strip().split("\t") except ValueError: if logger: logger.warning("Problem with class file: %s", filename) logger.warning("%d: %s", (count, line.strip())) logger.warning("(skipping line)") continue else: labeldict[key] = label return labeldict
[ "def", "get_labels", "(", "filename", ",", "logger", "=", "None", ")", ":", "labeldict", "=", "{", "}", "if", "filename", "is", "not", "None", ":", "if", "logger", ":", "logger", ".", "info", "(", "\"Reading labels from %s\"", ",", "filename", ")", "with...
Returns a dictionary of alternative sequence labels, or None - filename - path to file containing tab-separated table of labels Input files should be formatted as <key>\t<label>, one pair per line.
[ "Returns", "a", "dictionary", "of", "alternative", "sequence", "labels", "or", "None" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L131-L156
20,264
widdowquinn/pyani
pyani/pyani_tools.py
ANIResults.add_tot_length
def add_tot_length(self, qname, sname, value, sym=True): """Add a total length value to self.alignment_lengths.""" self.alignment_lengths.loc[qname, sname] = value if sym: self.alignment_lengths.loc[sname, qname] = value
python
def add_tot_length(self, qname, sname, value, sym=True): self.alignment_lengths.loc[qname, sname] = value if sym: self.alignment_lengths.loc[sname, qname] = value
[ "def", "add_tot_length", "(", "self", ",", "qname", ",", "sname", ",", "value", ",", "sym", "=", "True", ")", ":", "self", ".", "alignment_lengths", ".", "loc", "[", "qname", ",", "sname", "]", "=", "value", "if", "sym", ":", "self", ".", "alignment_...
Add a total length value to self.alignment_lengths.
[ "Add", "a", "total", "length", "value", "to", "self", ".", "alignment_lengths", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L33-L37
20,265
widdowquinn/pyani
pyani/pyani_tools.py
ANIResults.add_sim_errors
def add_sim_errors(self, qname, sname, value, sym=True): """Add a similarity error value to self.similarity_errors.""" self.similarity_errors.loc[qname, sname] = value if sym: self.similarity_errors.loc[sname, qname] = value
python
def add_sim_errors(self, qname, sname, value, sym=True): self.similarity_errors.loc[qname, sname] = value if sym: self.similarity_errors.loc[sname, qname] = value
[ "def", "add_sim_errors", "(", "self", ",", "qname", ",", "sname", ",", "value", ",", "sym", "=", "True", ")", ":", "self", ".", "similarity_errors", ".", "loc", "[", "qname", ",", "sname", "]", "=", "value", "if", "sym", ":", "self", ".", "similarity...
Add a similarity error value to self.similarity_errors.
[ "Add", "a", "similarity", "error", "value", "to", "self", ".", "similarity_errors", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L39-L43
20,266
widdowquinn/pyani
pyani/pyani_tools.py
ANIResults.add_pid
def add_pid(self, qname, sname, value, sym=True): """Add a percentage identity value to self.percentage_identity.""" self.percentage_identity.loc[qname, sname] = value if sym: self.percentage_identity.loc[sname, qname] = value
python
def add_pid(self, qname, sname, value, sym=True): self.percentage_identity.loc[qname, sname] = value if sym: self.percentage_identity.loc[sname, qname] = value
[ "def", "add_pid", "(", "self", ",", "qname", ",", "sname", ",", "value", ",", "sym", "=", "True", ")", ":", "self", ".", "percentage_identity", ".", "loc", "[", "qname", ",", "sname", "]", "=", "value", "if", "sym", ":", "self", ".", "percentage_iden...
Add a percentage identity value to self.percentage_identity.
[ "Add", "a", "percentage", "identity", "value", "to", "self", ".", "percentage_identity", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L45-L49
20,267
widdowquinn/pyani
pyani/pyani_tools.py
ANIResults.add_coverage
def add_coverage(self, qname, sname, qcover, scover=None): """Add percentage coverage values to self.alignment_coverage.""" self.alignment_coverage.loc[qname, sname] = qcover if scover: self.alignment_coverage.loc[sname, qname] = scover
python
def add_coverage(self, qname, sname, qcover, scover=None): self.alignment_coverage.loc[qname, sname] = qcover if scover: self.alignment_coverage.loc[sname, qname] = scover
[ "def", "add_coverage", "(", "self", ",", "qname", ",", "sname", ",", "qcover", ",", "scover", "=", "None", ")", ":", "self", ".", "alignment_coverage", ".", "loc", "[", "qname", ",", "sname", "]", "=", "qcover", "if", "scover", ":", "self", ".", "ali...
Add percentage coverage values to self.alignment_coverage.
[ "Add", "percentage", "coverage", "values", "to", "self", ".", "alignment_coverage", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L51-L55
20,268
widdowquinn/pyani
pyani/pyani_tools.py
BLASTcmds.get_db_name
def get_db_name(self, fname): """Return database filename""" return self.funcs.db_func(fname, self.outdir, self.exes.format_exe)[1]
python
def get_db_name(self, fname): return self.funcs.db_func(fname, self.outdir, self.exes.format_exe)[1]
[ "def", "get_db_name", "(", "self", ",", "fname", ")", ":", "return", "self", ".", "funcs", ".", "db_func", "(", "fname", ",", "self", ".", "outdir", ",", "self", ".", "exes", ".", "format_exe", ")", "[", "1", "]" ]
Return database filename
[ "Return", "database", "filename" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L121-L123
20,269
widdowquinn/pyani
pyani/pyani_tools.py
BLASTcmds.build_blast_cmd
def build_blast_cmd(self, fname, dbname): """Return BLASTN command""" return self.funcs.blastn_func(fname, dbname, self.outdir, self.exes.blast_exe)
python
def build_blast_cmd(self, fname, dbname): return self.funcs.blastn_func(fname, dbname, self.outdir, self.exes.blast_exe)
[ "def", "build_blast_cmd", "(", "self", ",", "fname", ",", "dbname", ")", ":", "return", "self", ".", "funcs", ".", "blastn_func", "(", "fname", ",", "dbname", ",", "self", ".", "outdir", ",", "self", ".", "exes", ".", "blast_exe", ")" ]
Return BLASTN command
[ "Return", "BLASTN", "command" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L125-L127
20,270
widdowquinn/pyani
pyani/anib.py
fragment_fasta_files
def fragment_fasta_files(infiles, outdirname, fragsize): """Chops sequences of the passed files into fragments, returns filenames. - infiles - paths to each input sequence file - outdirname - path to output directory - fragsize - the size of sequence fragments Takes every sequence from every file in infiles, and splits them into consecutive fragments of length fragsize, (with any trailing sequences being included, even if shorter than fragsize), and writes the resulting set of sequences to a file with the same name in the output directory. All fragments are named consecutively and uniquely (within a file) as fragNNNNN. Sequence description fields are retained. """ outfnames = [] for fname in infiles: outstem, outext = os.path.splitext(os.path.split(fname)[-1]) outfname = os.path.join(outdirname, outstem) + "-fragments" + outext outseqs = [] count = 0 for seq in SeqIO.parse(fname, "fasta"): idx = 0 while idx < len(seq): count += 1 newseq = seq[idx : idx + fragsize] newseq.id = "frag%05d" % count outseqs.append(newseq) idx += fragsize outfnames.append(outfname) SeqIO.write(outseqs, outfname, "fasta") return outfnames, get_fraglength_dict(outfnames)
python
def fragment_fasta_files(infiles, outdirname, fragsize): outfnames = [] for fname in infiles: outstem, outext = os.path.splitext(os.path.split(fname)[-1]) outfname = os.path.join(outdirname, outstem) + "-fragments" + outext outseqs = [] count = 0 for seq in SeqIO.parse(fname, "fasta"): idx = 0 while idx < len(seq): count += 1 newseq = seq[idx : idx + fragsize] newseq.id = "frag%05d" % count outseqs.append(newseq) idx += fragsize outfnames.append(outfname) SeqIO.write(outseqs, outfname, "fasta") return outfnames, get_fraglength_dict(outfnames)
[ "def", "fragment_fasta_files", "(", "infiles", ",", "outdirname", ",", "fragsize", ")", ":", "outfnames", "=", "[", "]", "for", "fname", "in", "infiles", ":", "outstem", ",", "outext", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", "....
Chops sequences of the passed files into fragments, returns filenames. - infiles - paths to each input sequence file - outdirname - path to output directory - fragsize - the size of sequence fragments Takes every sequence from every file in infiles, and splits them into consecutive fragments of length fragsize, (with any trailing sequences being included, even if shorter than fragsize), and writes the resulting set of sequences to a file with the same name in the output directory. All fragments are named consecutively and uniquely (within a file) as fragNNNNN. Sequence description fields are retained.
[ "Chops", "sequences", "of", "the", "passed", "files", "into", "fragments", "returns", "filenames", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L100-L130
20,271
widdowquinn/pyani
pyani/anib.py
get_fraglength_dict
def get_fraglength_dict(fastafiles): """Returns dictionary of sequence fragment lengths, keyed by query name. - fastafiles - list of FASTA input whole sequence files Loops over input files and, for each, produces a dictionary with fragment lengths, keyed by sequence ID. These are returned as a dictionary with the keys being query IDs derived from filenames. """ fraglength_dict = {} for filename in fastafiles: qname = os.path.split(filename)[-1].split("-fragments")[0] fraglength_dict[qname] = get_fragment_lengths(filename) return fraglength_dict
python
def get_fraglength_dict(fastafiles): fraglength_dict = {} for filename in fastafiles: qname = os.path.split(filename)[-1].split("-fragments")[0] fraglength_dict[qname] = get_fragment_lengths(filename) return fraglength_dict
[ "def", "get_fraglength_dict", "(", "fastafiles", ")", ":", "fraglength_dict", "=", "{", "}", "for", "filename", "in", "fastafiles", ":", "qname", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "[", "-", "1", "]", ".", "split", "(", "\"-fra...
Returns dictionary of sequence fragment lengths, keyed by query name. - fastafiles - list of FASTA input whole sequence files Loops over input files and, for each, produces a dictionary with fragment lengths, keyed by sequence ID. These are returned as a dictionary with the keys being query IDs derived from filenames.
[ "Returns", "dictionary", "of", "sequence", "fragment", "lengths", "keyed", "by", "query", "name", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L134-L147
20,272
widdowquinn/pyani
pyani/anib.py
get_fragment_lengths
def get_fragment_lengths(fastafile): """Returns dictionary of sequence fragment lengths, keyed by fragment ID. Biopython's SeqIO module is used to parse all sequences in the FASTA file. NOTE: ambiguity symbols are not discounted. """ fraglengths = {} for seq in SeqIO.parse(fastafile, "fasta"): fraglengths[seq.id] = len(seq) return fraglengths
python
def get_fragment_lengths(fastafile): fraglengths = {} for seq in SeqIO.parse(fastafile, "fasta"): fraglengths[seq.id] = len(seq) return fraglengths
[ "def", "get_fragment_lengths", "(", "fastafile", ")", ":", "fraglengths", "=", "{", "}", "for", "seq", "in", "SeqIO", ".", "parse", "(", "fastafile", ",", "\"fasta\"", ")", ":", "fraglengths", "[", "seq", ".", "id", "]", "=", "len", "(", "seq", ")", ...
Returns dictionary of sequence fragment lengths, keyed by fragment ID. Biopython's SeqIO module is used to parse all sequences in the FASTA file. NOTE: ambiguity symbols are not discounted.
[ "Returns", "dictionary", "of", "sequence", "fragment", "lengths", "keyed", "by", "fragment", "ID", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L151-L162
20,273
widdowquinn/pyani
pyani/anib.py
build_db_jobs
def build_db_jobs(infiles, blastcmds): """Returns dictionary of db-building commands, keyed by dbname.""" dbjobdict = {} # Dict of database construction jobs, keyed by filename # Create dictionary of database building jobs, keyed by db name # defining jobnum for later use as last job index used for idx, fname in enumerate(infiles): dbjobdict[blastcmds.get_db_name(fname)] = pyani_jobs.Job( "%s_db_%06d" % (blastcmds.prefix, idx), blastcmds.build_db_cmd(fname) ) return dbjobdict
python
def build_db_jobs(infiles, blastcmds): dbjobdict = {} # Dict of database construction jobs, keyed by filename # Create dictionary of database building jobs, keyed by db name # defining jobnum for later use as last job index used for idx, fname in enumerate(infiles): dbjobdict[blastcmds.get_db_name(fname)] = pyani_jobs.Job( "%s_db_%06d" % (blastcmds.prefix, idx), blastcmds.build_db_cmd(fname) ) return dbjobdict
[ "def", "build_db_jobs", "(", "infiles", ",", "blastcmds", ")", ":", "dbjobdict", "=", "{", "}", "# Dict of database construction jobs, keyed by filename", "# Create dictionary of database building jobs, keyed by db name", "# defining jobnum for later use as last job index used", "for",...
Returns dictionary of db-building commands, keyed by dbname.
[ "Returns", "dictionary", "of", "db", "-", "building", "commands", "keyed", "by", "dbname", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L166-L175
20,274
widdowquinn/pyani
pyani/anib.py
make_blastcmd_builder
def make_blastcmd_builder( mode, outdir, format_exe=None, blast_exe=None, prefix="ANIBLAST" ): """Returns BLASTcmds object for construction of BLAST commands.""" if mode == "ANIb": # BLAST/formatting executable depends on mode blastcmds = BLASTcmds( BLASTfunctions(construct_makeblastdb_cmd, construct_blastn_cmdline), BLASTexes( format_exe or pyani_config.MAKEBLASTDB_DEFAULT, blast_exe or pyani_config.BLASTN_DEFAULT, ), prefix, outdir, ) else: blastcmds = BLASTcmds( BLASTfunctions(construct_formatdb_cmd, construct_blastall_cmdline), BLASTexes( format_exe or pyani_config.FORMATDB_DEFAULT, blast_exe or pyani_config.BLASTALL_DEFAULT, ), prefix, outdir, ) return blastcmds
python
def make_blastcmd_builder( mode, outdir, format_exe=None, blast_exe=None, prefix="ANIBLAST" ): if mode == "ANIb": # BLAST/formatting executable depends on mode blastcmds = BLASTcmds( BLASTfunctions(construct_makeblastdb_cmd, construct_blastn_cmdline), BLASTexes( format_exe or pyani_config.MAKEBLASTDB_DEFAULT, blast_exe or pyani_config.BLASTN_DEFAULT, ), prefix, outdir, ) else: blastcmds = BLASTcmds( BLASTfunctions(construct_formatdb_cmd, construct_blastall_cmdline), BLASTexes( format_exe or pyani_config.FORMATDB_DEFAULT, blast_exe or pyani_config.BLASTALL_DEFAULT, ), prefix, outdir, ) return blastcmds
[ "def", "make_blastcmd_builder", "(", "mode", ",", "outdir", ",", "format_exe", "=", "None", ",", "blast_exe", "=", "None", ",", "prefix", "=", "\"ANIBLAST\"", ")", ":", "if", "mode", "==", "\"ANIb\"", ":", "# BLAST/formatting executable depends on mode", "blastcmd...
Returns BLASTcmds object for construction of BLAST commands.
[ "Returns", "BLASTcmds", "object", "for", "construction", "of", "BLAST", "commands", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L178-L202
20,275
widdowquinn/pyani
pyani/anib.py
make_job_graph
def make_job_graph(infiles, fragfiles, blastcmds): """Return a job dependency graph, based on the passed input sequence files. - infiles - a list of paths to input FASTA files - fragfiles - a list of paths to fragmented input FASTA files By default, will run ANIb - it *is* possible to make a mess of passing the wrong executable for the mode you're using. All items in the returned graph list are BLAST executable jobs that must be run *after* the corresponding database creation. The Job objects corresponding to the database creation are contained as dependencies. How those jobs are scheduled depends on the scheduler (see run_multiprocessing.py, run_sge.py) """ joblist = [] # Holds list of job dependency graphs # Get dictionary of database-building jobs dbjobdict = build_db_jobs(infiles, blastcmds) # Create list of BLAST executable jobs, with dependencies jobnum = len(dbjobdict) for idx, fname1 in enumerate(fragfiles[:-1]): for fname2 in fragfiles[idx + 1 :]: jobnum += 1 jobs = [ pyani_jobs.Job( "%s_exe_%06d_a" % (blastcmds.prefix, jobnum), blastcmds.build_blast_cmd(fname1, fname2.replace("-fragments", "")), ), pyani_jobs.Job( "%s_exe_%06d_b" % (blastcmds.prefix, jobnum), blastcmds.build_blast_cmd(fname2, fname1.replace("-fragments", "")), ), ] jobs[0].add_dependency(dbjobdict[fname1.replace("-fragments", "")]) jobs[1].add_dependency(dbjobdict[fname2.replace("-fragments", "")]) joblist.extend(jobs) # Return the dependency graph return joblist
python
def make_job_graph(infiles, fragfiles, blastcmds): joblist = [] # Holds list of job dependency graphs # Get dictionary of database-building jobs dbjobdict = build_db_jobs(infiles, blastcmds) # Create list of BLAST executable jobs, with dependencies jobnum = len(dbjobdict) for idx, fname1 in enumerate(fragfiles[:-1]): for fname2 in fragfiles[idx + 1 :]: jobnum += 1 jobs = [ pyani_jobs.Job( "%s_exe_%06d_a" % (blastcmds.prefix, jobnum), blastcmds.build_blast_cmd(fname1, fname2.replace("-fragments", "")), ), pyani_jobs.Job( "%s_exe_%06d_b" % (blastcmds.prefix, jobnum), blastcmds.build_blast_cmd(fname2, fname1.replace("-fragments", "")), ), ] jobs[0].add_dependency(dbjobdict[fname1.replace("-fragments", "")]) jobs[1].add_dependency(dbjobdict[fname2.replace("-fragments", "")]) joblist.extend(jobs) # Return the dependency graph return joblist
[ "def", "make_job_graph", "(", "infiles", ",", "fragfiles", ",", "blastcmds", ")", ":", "joblist", "=", "[", "]", "# Holds list of job dependency graphs", "# Get dictionary of database-building jobs", "dbjobdict", "=", "build_db_jobs", "(", "infiles", ",", "blastcmds", "...
Return a job dependency graph, based on the passed input sequence files. - infiles - a list of paths to input FASTA files - fragfiles - a list of paths to fragmented input FASTA files By default, will run ANIb - it *is* possible to make a mess of passing the wrong executable for the mode you're using. All items in the returned graph list are BLAST executable jobs that must be run *after* the corresponding database creation. The Job objects corresponding to the database creation are contained as dependencies. How those jobs are scheduled depends on the scheduler (see run_multiprocessing.py, run_sge.py)
[ "Return", "a", "job", "dependency", "graph", "based", "on", "the", "passed", "input", "sequence", "files", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L206-L246
20,276
widdowquinn/pyani
pyani/anib.py
construct_makeblastdb_cmd
def construct_makeblastdb_cmd( filename, outdir, blastdb_exe=pyani_config.MAKEBLASTDB_DEFAULT ): """Returns a single makeblastdb command. - filename - input filename - blastdb_exe - path to the makeblastdb executable """ title = os.path.splitext(os.path.split(filename)[-1])[0] outfilename = os.path.join(outdir, os.path.split(filename)[-1]) return ( "{0} -dbtype nucl -in {1} -title {2} -out {3}".format( blastdb_exe, filename, title, outfilename ), outfilename, )
python
def construct_makeblastdb_cmd( filename, outdir, blastdb_exe=pyani_config.MAKEBLASTDB_DEFAULT ): title = os.path.splitext(os.path.split(filename)[-1])[0] outfilename = os.path.join(outdir, os.path.split(filename)[-1]) return ( "{0} -dbtype nucl -in {1} -title {2} -out {3}".format( blastdb_exe, filename, title, outfilename ), outfilename, )
[ "def", "construct_makeblastdb_cmd", "(", "filename", ",", "outdir", ",", "blastdb_exe", "=", "pyani_config", ".", "MAKEBLASTDB_DEFAULT", ")", ":", "title", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "filename", ")", ...
Returns a single makeblastdb command. - filename - input filename - blastdb_exe - path to the makeblastdb executable
[ "Returns", "a", "single", "makeblastdb", "command", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L271-L286
20,277
widdowquinn/pyani
pyani/anib.py
construct_formatdb_cmd
def construct_formatdb_cmd(filename, outdir, blastdb_exe=pyani_config.FORMATDB_DEFAULT): """Returns a single formatdb command. - filename - input filename - blastdb_exe - path to the formatdb executable """ title = os.path.splitext(os.path.split(filename)[-1])[0] newfilename = os.path.join(outdir, os.path.split(filename)[-1]) shutil.copy(filename, newfilename) return ( "{0} -p F -i {1} -t {2}".format(blastdb_exe, newfilename, title), newfilename, )
python
def construct_formatdb_cmd(filename, outdir, blastdb_exe=pyani_config.FORMATDB_DEFAULT): title = os.path.splitext(os.path.split(filename)[-1])[0] newfilename = os.path.join(outdir, os.path.split(filename)[-1]) shutil.copy(filename, newfilename) return ( "{0} -p F -i {1} -t {2}".format(blastdb_exe, newfilename, title), newfilename, )
[ "def", "construct_formatdb_cmd", "(", "filename", ",", "outdir", ",", "blastdb_exe", "=", "pyani_config", ".", "FORMATDB_DEFAULT", ")", ":", "title", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "filename", ")", "[", ...
Returns a single formatdb command. - filename - input filename - blastdb_exe - path to the formatdb executable
[ "Returns", "a", "single", "formatdb", "command", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L290-L302
20,278
widdowquinn/pyani
pyani/anib.py
generate_blastn_commands
def generate_blastn_commands(filenames, outdir, blast_exe=None, mode="ANIb"): """Return a list of blastn command-lines for ANIm - filenames - a list of paths to fragmented input FASTA files - outdir - path to output directory - blastn_exe - path to BLASTN executable Assumes that the fragment sequence input filenames have the form ACCESSION-fragments.ext, where the corresponding BLAST database filenames have the form ACCESSION.ext. This is the convention followed by the fragment_FASTA_files() function above. """ if mode == "ANIb": construct_blast_cmdline = construct_blastn_cmdline else: construct_blast_cmdline = construct_blastall_cmdline cmdlines = [] for idx, fname1 in enumerate(filenames[:-1]): dbname1 = fname1.replace("-fragments", "") for fname2 in filenames[idx + 1 :]: dbname2 = fname2.replace("-fragments", "") if blast_exe is None: cmdlines.append(construct_blast_cmdline(fname1, dbname2, outdir)) cmdlines.append(construct_blast_cmdline(fname2, dbname1, outdir)) else: cmdlines.append( construct_blast_cmdline(fname1, dbname2, outdir, blast_exe) ) cmdlines.append( construct_blast_cmdline(fname2, dbname1, outdir, blast_exe) ) return cmdlines
python
def generate_blastn_commands(filenames, outdir, blast_exe=None, mode="ANIb"): if mode == "ANIb": construct_blast_cmdline = construct_blastn_cmdline else: construct_blast_cmdline = construct_blastall_cmdline cmdlines = [] for idx, fname1 in enumerate(filenames[:-1]): dbname1 = fname1.replace("-fragments", "") for fname2 in filenames[idx + 1 :]: dbname2 = fname2.replace("-fragments", "") if blast_exe is None: cmdlines.append(construct_blast_cmdline(fname1, dbname2, outdir)) cmdlines.append(construct_blast_cmdline(fname2, dbname1, outdir)) else: cmdlines.append( construct_blast_cmdline(fname1, dbname2, outdir, blast_exe) ) cmdlines.append( construct_blast_cmdline(fname2, dbname1, outdir, blast_exe) ) return cmdlines
[ "def", "generate_blastn_commands", "(", "filenames", ",", "outdir", ",", "blast_exe", "=", "None", ",", "mode", "=", "\"ANIb\"", ")", ":", "if", "mode", "==", "\"ANIb\"", ":", "construct_blast_cmdline", "=", "construct_blastn_cmdline", "else", ":", "construct_blas...
Return a list of blastn command-lines for ANIm - filenames - a list of paths to fragmented input FASTA files - outdir - path to output directory - blastn_exe - path to BLASTN executable Assumes that the fragment sequence input filenames have the form ACCESSION-fragments.ext, where the corresponding BLAST database filenames have the form ACCESSION.ext. This is the convention followed by the fragment_FASTA_files() function above.
[ "Return", "a", "list", "of", "blastn", "command", "-", "lines", "for", "ANIm" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L306-L337
20,279
widdowquinn/pyani
pyani/anib.py
construct_blastn_cmdline
def construct_blastn_cmdline( fname1, fname2, outdir, blastn_exe=pyani_config.BLASTN_DEFAULT ): """Returns a single blastn command. - filename - input filename - blastn_exe - path to BLASTN executable """ fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0] fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0] fstem1 = fstem1.replace("-fragments", "") prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2)) cmd = ( "{0} -out {1}.blast_tab -query {2} -db {3} " + "-xdrop_gap_final 150 -dust no -evalue 1e-15 " + "-max_target_seqs 1 -outfmt '6 qseqid sseqid length mismatch " + "pident nident qlen slen qstart qend sstart send positive " + "ppos gaps' -task blastn" ) return cmd.format(blastn_exe, prefix, fname1, fname2)
python
def construct_blastn_cmdline( fname1, fname2, outdir, blastn_exe=pyani_config.BLASTN_DEFAULT ): fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0] fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0] fstem1 = fstem1.replace("-fragments", "") prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2)) cmd = ( "{0} -out {1}.blast_tab -query {2} -db {3} " + "-xdrop_gap_final 150 -dust no -evalue 1e-15 " + "-max_target_seqs 1 -outfmt '6 qseqid sseqid length mismatch " + "pident nident qlen slen qstart qend sstart send positive " + "ppos gaps' -task blastn" ) return cmd.format(blastn_exe, prefix, fname1, fname2)
[ "def", "construct_blastn_cmdline", "(", "fname1", ",", "fname2", ",", "outdir", ",", "blastn_exe", "=", "pyani_config", ".", "BLASTN_DEFAULT", ")", ":", "fstem1", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "fname1",...
Returns a single blastn command. - filename - input filename - blastn_exe - path to BLASTN executable
[ "Returns", "a", "single", "blastn", "command", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L341-L360
20,280
widdowquinn/pyani
pyani/anib.py
construct_blastall_cmdline
def construct_blastall_cmdline( fname1, fname2, outdir, blastall_exe=pyani_config.BLASTALL_DEFAULT ): """Returns a single blastall command. - blastall_exe - path to BLASTALL executable """ fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0] fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0] fstem1 = fstem1.replace("-fragments", "") prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2)) cmd = ( "{0} -p blastn -o {1}.blast_tab -i {2} -d {3} " + "-X 150 -q -1 -F F -e 1e-15 " + "-b 1 -v 1 -m 8" ) return cmd.format(blastall_exe, prefix, fname1, fname2)
python
def construct_blastall_cmdline( fname1, fname2, outdir, blastall_exe=pyani_config.BLASTALL_DEFAULT ): fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0] fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0] fstem1 = fstem1.replace("-fragments", "") prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2)) cmd = ( "{0} -p blastn -o {1}.blast_tab -i {2} -d {3} " + "-X 150 -q -1 -F F -e 1e-15 " + "-b 1 -v 1 -m 8" ) return cmd.format(blastall_exe, prefix, fname1, fname2)
[ "def", "construct_blastall_cmdline", "(", "fname1", ",", "fname2", ",", "outdir", ",", "blastall_exe", "=", "pyani_config", ".", "BLASTALL_DEFAULT", ")", ":", "fstem1", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "fn...
Returns a single blastall command. - blastall_exe - path to BLASTALL executable
[ "Returns", "a", "single", "blastall", "command", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L364-L380
20,281
widdowquinn/pyani
pyani/anib.py
process_blast
def process_blast( blast_dir, org_lengths, fraglengths=None, mode="ANIb", identity=0.3, coverage=0.7, logger=None, ): """Returns a tuple of ANIb results for .blast_tab files in the output dir. - blast_dir - path to the directory containing .blast_tab files - org_lengths - the base count for each input sequence - fraglengths - dictionary of query sequence fragment lengths, only needed for BLASTALL output - mode - parsing BLASTN+ or BLASTALL output? - logger - a logger for messages Returns the following pandas dataframes in an ANIResults object; query sequences are rows, subject sequences are columns: - alignment_lengths - non-symmetrical: total length of alignment - percentage_identity - non-symmetrical: ANIb (Goris) percentage identity - alignment_coverage - non-symmetrical: coverage of query - similarity_errors - non-symmetrical: count of similarity errors May throw a ZeroDivisionError if one or more BLAST runs failed, or a very distant sequence was included in the analysis. """ # Process directory to identify input files blastfiles = pyani_files.get_input_files(blast_dir, ".blast_tab") # Hold data in ANIResults object results = ANIResults(list(org_lengths.keys()), mode) # Fill diagonal NA values for alignment_length with org_lengths for org, length in list(org_lengths.items()): results.alignment_lengths[org][org] = length # Process .blast_tab files assuming that the filename format holds: # org1_vs_org2.blast_tab: for blastfile in blastfiles: qname, sname = os.path.splitext(os.path.split(blastfile)[-1])[0].split("_vs_") # We may have BLAST files from other analyses in the same directory # If this occurs, we raise a warning, and skip the file if qname not in list(org_lengths.keys()): if logger: logger.warning( "Query name %s not in input " % qname + "sequence list, skipping %s" % blastfile ) continue if sname not in list(org_lengths.keys()): if logger: logger.warning( "Subject name %s not in input " % sname + "sequence list, skipping %s" % blastfile ) continue 
resultvals = parse_blast_tab(blastfile, fraglengths, identity, coverage, mode) query_cover = float(resultvals[0]) / org_lengths[qname] # Populate dataframes: when assigning data, we need to note that # we have asymmetrical data from BLAST output, so only the # upper triangle is populated results.add_tot_length(qname, sname, resultvals[0], sym=False) results.add_sim_errors(qname, sname, resultvals[1], sym=False) results.add_pid(qname, sname, 0.01 * resultvals[2], sym=False) results.add_coverage(qname, sname, query_cover) return results
python
def process_blast( blast_dir, org_lengths, fraglengths=None, mode="ANIb", identity=0.3, coverage=0.7, logger=None, ): # Process directory to identify input files blastfiles = pyani_files.get_input_files(blast_dir, ".blast_tab") # Hold data in ANIResults object results = ANIResults(list(org_lengths.keys()), mode) # Fill diagonal NA values for alignment_length with org_lengths for org, length in list(org_lengths.items()): results.alignment_lengths[org][org] = length # Process .blast_tab files assuming that the filename format holds: # org1_vs_org2.blast_tab: for blastfile in blastfiles: qname, sname = os.path.splitext(os.path.split(blastfile)[-1])[0].split("_vs_") # We may have BLAST files from other analyses in the same directory # If this occurs, we raise a warning, and skip the file if qname not in list(org_lengths.keys()): if logger: logger.warning( "Query name %s not in input " % qname + "sequence list, skipping %s" % blastfile ) continue if sname not in list(org_lengths.keys()): if logger: logger.warning( "Subject name %s not in input " % sname + "sequence list, skipping %s" % blastfile ) continue resultvals = parse_blast_tab(blastfile, fraglengths, identity, coverage, mode) query_cover = float(resultvals[0]) / org_lengths[qname] # Populate dataframes: when assigning data, we need to note that # we have asymmetrical data from BLAST output, so only the # upper triangle is populated results.add_tot_length(qname, sname, resultvals[0], sym=False) results.add_sim_errors(qname, sname, resultvals[1], sym=False) results.add_pid(qname, sname, 0.01 * resultvals[2], sym=False) results.add_coverage(qname, sname, query_cover) return results
[ "def", "process_blast", "(", "blast_dir", ",", "org_lengths", ",", "fraglengths", "=", "None", ",", "mode", "=", "\"ANIb\"", ",", "identity", "=", "0.3", ",", "coverage", "=", "0.7", ",", "logger", "=", "None", ",", ")", ":", "# Process directory to identify...
Returns a tuple of ANIb results for .blast_tab files in the output dir. - blast_dir - path to the directory containing .blast_tab files - org_lengths - the base count for each input sequence - fraglengths - dictionary of query sequence fragment lengths, only needed for BLASTALL output - mode - parsing BLASTN+ or BLASTALL output? - logger - a logger for messages Returns the following pandas dataframes in an ANIResults object; query sequences are rows, subject sequences are columns: - alignment_lengths - non-symmetrical: total length of alignment - percentage_identity - non-symmetrical: ANIb (Goris) percentage identity - alignment_coverage - non-symmetrical: coverage of query - similarity_errors - non-symmetrical: count of similarity errors May throw a ZeroDivisionError if one or more BLAST runs failed, or a very distant sequence was included in the analysis.
[ "Returns", "a", "tuple", "of", "ANIb", "results", "for", ".", "blast_tab", "files", "in", "the", "output", "dir", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L384-L453
20,282
widdowquinn/pyani
pyani/run_sge.py
split_seq
def split_seq(iterable, size): """Splits a passed iterable into chunks of a given size.""" elm = iter(iterable) item = list(itertools.islice(elm, size)) while item: yield item item = list(itertools.islice(elm, size))
python
def split_seq(iterable, size): elm = iter(iterable) item = list(itertools.islice(elm, size)) while item: yield item item = list(itertools.islice(elm, size))
[ "def", "split_seq", "(", "iterable", ",", "size", ")", ":", "elm", "=", "iter", "(", "iterable", ")", "item", "=", "list", "(", "itertools", ".", "islice", "(", "elm", ",", "size", ")", ")", "while", "item", ":", "yield", "item", "item", "=", "list...
Splits a passed iterable into chunks of a given size.
[ "Splits", "a", "passed", "iterable", "into", "chunks", "of", "a", "given", "size", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L23-L29
20,283
widdowquinn/pyani
pyani/run_sge.py
build_joblist
def build_joblist(jobgraph): """Returns a list of jobs, from a passed jobgraph.""" jobset = set() for job in jobgraph: jobset = populate_jobset(job, jobset, depth=1) return list(jobset)
python
def build_joblist(jobgraph): jobset = set() for job in jobgraph: jobset = populate_jobset(job, jobset, depth=1) return list(jobset)
[ "def", "build_joblist", "(", "jobgraph", ")", ":", "jobset", "=", "set", "(", ")", "for", "job", "in", "jobgraph", ":", "jobset", "=", "populate_jobset", "(", "job", ",", "jobset", ",", "depth", "=", "1", ")", "return", "list", "(", "jobset", ")" ]
Returns a list of jobs, from a passed jobgraph.
[ "Returns", "a", "list", "of", "jobs", "from", "a", "passed", "jobgraph", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L33-L38
20,284
widdowquinn/pyani
pyani/run_sge.py
compile_jobgroups_from_joblist
def compile_jobgroups_from_joblist(joblist, jgprefix, sgegroupsize): """Return list of jobgroups, rather than list of jobs.""" jobcmds = defaultdict(list) for job in joblist: jobcmds[job.command.split(' ', 1)[0]].append(job.command) jobgroups = [] for cmds in list(jobcmds.items()): # Break arglist up into batches of sgegroupsize (default: 10,000) sublists = split_seq(cmds[1], sgegroupsize) count = 0 for sublist in sublists: count += 1 sge_jobcmdlist = ['\"%s\"' % jc for jc in sublist] jobgroups.append(JobGroup("%s_%d" % (jgprefix, count), "$cmds", arguments={'cmds': sge_jobcmdlist})) return jobgroups
python
def compile_jobgroups_from_joblist(joblist, jgprefix, sgegroupsize): jobcmds = defaultdict(list) for job in joblist: jobcmds[job.command.split(' ', 1)[0]].append(job.command) jobgroups = [] for cmds in list(jobcmds.items()): # Break arglist up into batches of sgegroupsize (default: 10,000) sublists = split_seq(cmds[1], sgegroupsize) count = 0 for sublist in sublists: count += 1 sge_jobcmdlist = ['\"%s\"' % jc for jc in sublist] jobgroups.append(JobGroup("%s_%d" % (jgprefix, count), "$cmds", arguments={'cmds': sge_jobcmdlist})) return jobgroups
[ "def", "compile_jobgroups_from_joblist", "(", "joblist", ",", "jgprefix", ",", "sgegroupsize", ")", ":", "jobcmds", "=", "defaultdict", "(", "list", ")", "for", "job", "in", "joblist", ":", "jobcmds", "[", "job", ".", "command", ".", "split", "(", "' '", "...
Return list of jobgroups, rather than list of jobs.
[ "Return", "list", "of", "jobgroups", "rather", "than", "list", "of", "jobs", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L42-L58
20,285
widdowquinn/pyani
pyani/run_sge.py
run_dependency_graph
def run_dependency_graph(jobgraph, logger=None, jgprefix="ANIm_SGE_JG", sgegroupsize=10000, sgeargs=None): """Creates and runs GridEngine scripts for jobs based on the passed jobgraph. - jobgraph - list of jobs, which may have dependencies. - verbose - flag for multiprocessing verbosity - logger - a logger module logger (optional) - jgprefix - a prefix for the submitted jobs, in the scheduler - sgegroupsize - the maximum size for an array job submission - sgeargs - additional arguments to qsub The strategy here is to loop over each job in the list of jobs (jobgraph), and create/populate a series of Sets of commands, to be run in reverse order with multiprocessing_run as asynchronous pools. The strategy here is to loop over each job in the dependency graph, and add the job to a new list of jobs, swapping out the Job dependency for the name of the Job on which it depends. """ joblist = build_joblist(jobgraph) # Try to be informative by telling the user what jobs will run dep_count = 0 # how many dependencies are there if logger: logger.info("Jobs to run with scheduler") for job in joblist: logger.info("{0}: {1}".format(job.name, job.command)) if len(job.dependencies): dep_count += len(job.dependencies) for dep in job.dependencies: logger.info("\t[^ depends on: %s]" % dep.name) logger.info("There are %d job dependencies" % dep_count) # If there are no job dependencies, we can use an array (or series of # arrays) to schedule our jobs. This cuts down on problems with long # job lists choking up the queue. if dep_count == 0: logger.info("Compiling jobs into JobGroups") joblist = compile_jobgroups_from_joblist(joblist, jgprefix, sgegroupsize) # Send jobs to scheduler logger.info("Running jobs with scheduler...") logger.info("Jobs passed to scheduler in order:") for job in joblist: logger.info("\t%s" % job.name) build_and_submit_jobs(os.curdir, joblist, sgeargs) logger.info("Waiting for SGE-submitted jobs to finish (polling)") for job in joblist: job.wait()
python
def run_dependency_graph(jobgraph, logger=None, jgprefix="ANIm_SGE_JG", sgegroupsize=10000, sgeargs=None): joblist = build_joblist(jobgraph) # Try to be informative by telling the user what jobs will run dep_count = 0 # how many dependencies are there if logger: logger.info("Jobs to run with scheduler") for job in joblist: logger.info("{0}: {1}".format(job.name, job.command)) if len(job.dependencies): dep_count += len(job.dependencies) for dep in job.dependencies: logger.info("\t[^ depends on: %s]" % dep.name) logger.info("There are %d job dependencies" % dep_count) # If there are no job dependencies, we can use an array (or series of # arrays) to schedule our jobs. This cuts down on problems with long # job lists choking up the queue. if dep_count == 0: logger.info("Compiling jobs into JobGroups") joblist = compile_jobgroups_from_joblist(joblist, jgprefix, sgegroupsize) # Send jobs to scheduler logger.info("Running jobs with scheduler...") logger.info("Jobs passed to scheduler in order:") for job in joblist: logger.info("\t%s" % job.name) build_and_submit_jobs(os.curdir, joblist, sgeargs) logger.info("Waiting for SGE-submitted jobs to finish (polling)") for job in joblist: job.wait()
[ "def", "run_dependency_graph", "(", "jobgraph", ",", "logger", "=", "None", ",", "jgprefix", "=", "\"ANIm_SGE_JG\"", ",", "sgegroupsize", "=", "10000", ",", "sgeargs", "=", "None", ")", ":", "joblist", "=", "build_joblist", "(", "jobgraph", ")", "# Try to be i...
Creates and runs GridEngine scripts for jobs based on the passed jobgraph. - jobgraph - list of jobs, which may have dependencies. - verbose - flag for multiprocessing verbosity - logger - a logger module logger (optional) - jgprefix - a prefix for the submitted jobs, in the scheduler - sgegroupsize - the maximum size for an array job submission - sgeargs - additional arguments to qsub The strategy here is to loop over each job in the list of jobs (jobgraph), and create/populate a series of Sets of commands, to be run in reverse order with multiprocessing_run as asynchronous pools. The strategy here is to loop over each job in the dependency graph, and add the job to a new list of jobs, swapping out the Job dependency for the name of the Job on which it depends.
[ "Creates", "and", "runs", "GridEngine", "scripts", "for", "jobs", "based", "on", "the", "passed", "jobgraph", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L62-L112
20,286
widdowquinn/pyani
pyani/run_sge.py
populate_jobset
def populate_jobset(job, jobset, depth): """ Creates a set of jobs, containing jobs at difference depths of the dependency tree, retaining dependencies as strings, not Jobs. """ jobset.add(job) if len(job.dependencies) == 0: return jobset for j in job.dependencies: jobset = populate_jobset(j, jobset, depth+1) return jobset
python
def populate_jobset(job, jobset, depth): jobset.add(job) if len(job.dependencies) == 0: return jobset for j in job.dependencies: jobset = populate_jobset(j, jobset, depth+1) return jobset
[ "def", "populate_jobset", "(", "job", ",", "jobset", ",", "depth", ")", ":", "jobset", ".", "add", "(", "job", ")", "if", "len", "(", "job", ".", "dependencies", ")", "==", "0", ":", "return", "jobset", "for", "j", "in", "job", ".", "dependencies", ...
Creates a set of jobs, containing jobs at difference depths of the dependency tree, retaining dependencies as strings, not Jobs.
[ "Creates", "a", "set", "of", "jobs", "containing", "jobs", "at", "difference", "depths", "of", "the", "dependency", "tree", "retaining", "dependencies", "as", "strings", "not", "Jobs", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L115-L124
20,287
widdowquinn/pyani
pyani/run_sge.py
build_job_scripts
def build_job_scripts(root_dir, jobs): """Constructs the script for each passed Job in the jobs iterable - root_dir Path to output directory """ # Loop over the job list, creating each job script in turn, and then adding # scriptPath to the Job object for job in jobs: scriptpath = os.path.join(root_dir, "jobs", job.name) with open(scriptpath, "w") as scriptfile: scriptfile.write("#!/bin/sh\n#$ -S /bin/bash\n%s\n" % job.script) job.scriptpath = scriptpath
python
def build_job_scripts(root_dir, jobs): # Loop over the job list, creating each job script in turn, and then adding # scriptPath to the Job object for job in jobs: scriptpath = os.path.join(root_dir, "jobs", job.name) with open(scriptpath, "w") as scriptfile: scriptfile.write("#!/bin/sh\n#$ -S /bin/bash\n%s\n" % job.script) job.scriptpath = scriptpath
[ "def", "build_job_scripts", "(", "root_dir", ",", "jobs", ")", ":", "# Loop over the job list, creating each job script in turn, and then adding", "# scriptPath to the Job object", "for", "job", "in", "jobs", ":", "scriptpath", "=", "os", ".", "path", ".", "join", "(", ...
Constructs the script for each passed Job in the jobs iterable - root_dir Path to output directory
[ "Constructs", "the", "script", "for", "each", "passed", "Job", "in", "the", "jobs", "iterable" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L149-L160
20,288
widdowquinn/pyani
pyani/run_sge.py
extract_submittable_jobs
def extract_submittable_jobs(waiting): """Obtain a list of jobs that are able to be submitted from the passed list of pending jobs - waiting List of Job objects """ submittable = set() # Holds jobs that are able to be submitted # Loop over each job, and check all the subjobs in that job's dependency # list. If there are any, and all of these have been submitted, then # append the job to the list of submittable jobs. for job in waiting: unsatisfied = sum([(subjob.submitted is False) for subjob in job.dependencies]) if unsatisfied == 0: submittable.add(job) return list(submittable)
python
def extract_submittable_jobs(waiting): submittable = set() # Holds jobs that are able to be submitted # Loop over each job, and check all the subjobs in that job's dependency # list. If there are any, and all of these have been submitted, then # append the job to the list of submittable jobs. for job in waiting: unsatisfied = sum([(subjob.submitted is False) for subjob in job.dependencies]) if unsatisfied == 0: submittable.add(job) return list(submittable)
[ "def", "extract_submittable_jobs", "(", "waiting", ")", ":", "submittable", "=", "set", "(", ")", "# Holds jobs that are able to be submitted", "# Loop over each job, and check all the subjobs in that job's dependency", "# list. If there are any, and all of these have been submitted, then...
Obtain a list of jobs that are able to be submitted from the passed list of pending jobs - waiting List of Job objects
[ "Obtain", "a", "list", "of", "jobs", "that", "are", "able", "to", "be", "submitted", "from", "the", "passed", "list", "of", "pending", "jobs" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L163-L178
20,289
widdowquinn/pyani
pyani/run_sge.py
submit_safe_jobs
def submit_safe_jobs(root_dir, jobs, sgeargs=None): """Submit the passed list of jobs to the Grid Engine server, using the passed directory as the root for scheduler output. - root_dir Path to output directory - jobs Iterable of Job objects """ # Loop over each job, constructing SGE command-line based on job settings for job in jobs: job.out = os.path.join(root_dir, "stdout") job.err = os.path.join(root_dir, "stderr") # Add the job name, current working directory, and SGE stdout/stderr # directories to the SGE command line args = " -N %s " % (job.name) args += " -cwd " args += " -o %s -e %s " % (job.out, job.err) # If a queue is specified, add this to the SGE command line # LP: This has an undeclared variable, not sure why - delete? #if job.queue is not None and job.queue in local_queues: # args += local_queues[job.queue] # If the job is actually a JobGroup, add the task numbering argument if isinstance(job, JobGroup): args += "-t 1:%d " % (job.tasks) # If there are dependencies for this job, hold the job until they are # complete if len(job.dependencies) > 0: args += "-hold_jid " for dep in job.dependencies: args += dep.name + "," args = args[:-1] # Build the qsub SGE commandline (passing local environment) qsubcmd = ("%s -V %s %s" % (pyani_config.QSUB_DEFAULT, args, job.scriptpath)) if sgeargs is not None: qsubcmd = "%s %s" % (qsubcmd, sgeargs) os.system(qsubcmd) # Run the command job.submitted = True
python
def submit_safe_jobs(root_dir, jobs, sgeargs=None): # Loop over each job, constructing SGE command-line based on job settings for job in jobs: job.out = os.path.join(root_dir, "stdout") job.err = os.path.join(root_dir, "stderr") # Add the job name, current working directory, and SGE stdout/stderr # directories to the SGE command line args = " -N %s " % (job.name) args += " -cwd " args += " -o %s -e %s " % (job.out, job.err) # If a queue is specified, add this to the SGE command line # LP: This has an undeclared variable, not sure why - delete? #if job.queue is not None and job.queue in local_queues: # args += local_queues[job.queue] # If the job is actually a JobGroup, add the task numbering argument if isinstance(job, JobGroup): args += "-t 1:%d " % (job.tasks) # If there are dependencies for this job, hold the job until they are # complete if len(job.dependencies) > 0: args += "-hold_jid " for dep in job.dependencies: args += dep.name + "," args = args[:-1] # Build the qsub SGE commandline (passing local environment) qsubcmd = ("%s -V %s %s" % (pyani_config.QSUB_DEFAULT, args, job.scriptpath)) if sgeargs is not None: qsubcmd = "%s %s" % (qsubcmd, sgeargs) os.system(qsubcmd) # Run the command job.submitted = True
[ "def", "submit_safe_jobs", "(", "root_dir", ",", "jobs", ",", "sgeargs", "=", "None", ")", ":", "# Loop over each job, constructing SGE command-line based on job settings", "for", "job", "in", "jobs", ":", "job", ".", "out", "=", "os", ".", "path", ".", "join", ...
Submit the passed list of jobs to the Grid Engine server, using the passed directory as the root for scheduler output. - root_dir Path to output directory - jobs Iterable of Job objects
[ "Submit", "the", "passed", "list", "of", "jobs", "to", "the", "Grid", "Engine", "server", "using", "the", "passed", "directory", "as", "the", "root", "for", "scheduler", "output", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L181-L222
20,290
widdowquinn/pyani
pyani/run_sge.py
submit_jobs
def submit_jobs(root_dir, jobs, sgeargs=None): """ Submit each of the passed jobs to the SGE server, using the passed directory as root for SGE output. - root_dir Path to output directory - jobs List of Job objects """ waiting = list(jobs) # List of jobs still to be done # Loop over the list of pending jobs, while there still are any while len(waiting) > 0: # extract submittable jobs submittable = extract_submittable_jobs(waiting) # run those jobs submit_safe_jobs(root_dir, submittable, sgeargs) # remove those from the waiting list for job in submittable: waiting.remove(job)
python
def submit_jobs(root_dir, jobs, sgeargs=None): waiting = list(jobs) # List of jobs still to be done # Loop over the list of pending jobs, while there still are any while len(waiting) > 0: # extract submittable jobs submittable = extract_submittable_jobs(waiting) # run those jobs submit_safe_jobs(root_dir, submittable, sgeargs) # remove those from the waiting list for job in submittable: waiting.remove(job)
[ "def", "submit_jobs", "(", "root_dir", ",", "jobs", ",", "sgeargs", "=", "None", ")", ":", "waiting", "=", "list", "(", "jobs", ")", "# List of jobs still to be done", "# Loop over the list of pending jobs, while there still are any", "while", "len", "(", "waiting", "...
Submit each of the passed jobs to the SGE server, using the passed directory as root for SGE output. - root_dir Path to output directory - jobs List of Job objects
[ "Submit", "each", "of", "the", "passed", "jobs", "to", "the", "SGE", "server", "using", "the", "passed", "directory", "as", "root", "for", "SGE", "output", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L225-L241
20,291
widdowquinn/pyani
pyani/run_sge.py
build_and_submit_jobs
def build_and_submit_jobs(root_dir, jobs, sgeargs=None): """Submits the passed iterable of Job objects to SGE, placing SGE's output in the passed root directory - root_dir Root directory for SGE and job output - jobs List of Job objects, describing each job to be submitted - sgeargs Additional arguments to qsub """ # If the passed set of jobs is not a list, turn it into one. This makes the # use of a single JobGroup a little more intutitive if not isinstance(jobs, list): jobs = [jobs] # Build and submit the passed jobs build_directories(root_dir) # build all necessary directories build_job_scripts(root_dir, jobs) # build job scripts submit_jobs(root_dir, jobs, sgeargs)
python
def build_and_submit_jobs(root_dir, jobs, sgeargs=None): # If the passed set of jobs is not a list, turn it into one. This makes the # use of a single JobGroup a little more intutitive if not isinstance(jobs, list): jobs = [jobs] # Build and submit the passed jobs build_directories(root_dir) # build all necessary directories build_job_scripts(root_dir, jobs) # build job scripts submit_jobs(root_dir, jobs, sgeargs)
[ "def", "build_and_submit_jobs", "(", "root_dir", ",", "jobs", ",", "sgeargs", "=", "None", ")", ":", "# If the passed set of jobs is not a list, turn it into one. This makes the", "# use of a single JobGroup a little more intutitive", "if", "not", "isinstance", "(", "jobs", ","...
Submits the passed iterable of Job objects to SGE, placing SGE's output in the passed root directory - root_dir Root directory for SGE and job output - jobs List of Job objects, describing each job to be submitted - sgeargs Additional arguments to qsub
[ "Submits", "the", "passed", "iterable", "of", "Job", "objects", "to", "SGE", "placing", "SGE", "s", "output", "in", "the", "passed", "root", "directory" ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L244-L260
20,292
widdowquinn/pyani
pyani/pyani_config.py
params_mpl
def params_mpl(df): """Returns dict of matplotlib parameters, dependent on dataframe.""" return {'ANIb_alignment_lengths': ('afmhot', df.values.min(), df.values.max()), 'ANIb_percentage_identity': ('spbnd_BuRd', 0, 1), 'ANIb_alignment_coverage': ('BuRd', 0, 1), 'ANIb_hadamard': ('hadamard_BuRd', 0, 1), 'ANIb_similarity_errors': ('afmhot', df.values.min(), df.values.max()), 'ANIm_alignment_lengths': ('afmhot', df.values.min(), df.values.max()), 'ANIm_percentage_identity': ('spbnd_BuRd', 0, 1), 'ANIm_alignment_coverage': ('BuRd', 0, 1), 'ANIm_hadamard': ('hadamard_BuRd', 0, 1), 'ANIm_similarity_errors': ('afmhot', df.values.min(), df.values.max()), 'TETRA_correlations': ('spbnd_BuRd', 0, 1), 'ANIblastall_alignment_lengths': ('afmhot', df.values.min(), df.values.max()), 'ANIblastall_percentage_identity': ('spbnd_BuRd', 0, 1), 'ANIblastall_alignment_coverage': ('BuRd', 0, 1), 'ANIblastall_hadamard': ('hadamard_BuRd', 0, 1), 'ANIblastall_similarity_errors': ('afmhot', df.values.min(), df.values.max())}
python
def params_mpl(df): return {'ANIb_alignment_lengths': ('afmhot', df.values.min(), df.values.max()), 'ANIb_percentage_identity': ('spbnd_BuRd', 0, 1), 'ANIb_alignment_coverage': ('BuRd', 0, 1), 'ANIb_hadamard': ('hadamard_BuRd', 0, 1), 'ANIb_similarity_errors': ('afmhot', df.values.min(), df.values.max()), 'ANIm_alignment_lengths': ('afmhot', df.values.min(), df.values.max()), 'ANIm_percentage_identity': ('spbnd_BuRd', 0, 1), 'ANIm_alignment_coverage': ('BuRd', 0, 1), 'ANIm_hadamard': ('hadamard_BuRd', 0, 1), 'ANIm_similarity_errors': ('afmhot', df.values.min(), df.values.max()), 'TETRA_correlations': ('spbnd_BuRd', 0, 1), 'ANIblastall_alignment_lengths': ('afmhot', df.values.min(), df.values.max()), 'ANIblastall_percentage_identity': ('spbnd_BuRd', 0, 1), 'ANIblastall_alignment_coverage': ('BuRd', 0, 1), 'ANIblastall_hadamard': ('hadamard_BuRd', 0, 1), 'ANIblastall_similarity_errors': ('afmhot', df.values.min(), df.values.max())}
[ "def", "params_mpl", "(", "df", ")", ":", "return", "{", "'ANIb_alignment_lengths'", ":", "(", "'afmhot'", ",", "df", ".", "values", ".", "min", "(", ")", ",", "df", ".", "values", ".", "max", "(", ")", ")", ",", "'ANIb_percentage_identity'", ":", "(",...
Returns dict of matplotlib parameters, dependent on dataframe.
[ "Returns", "dict", "of", "matplotlib", "parameters", "dependent", "on", "dataframe", "." ]
2b24ec971401e04024bba896e4011984fe3f53f0
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_config.py#L107-L130
20,293
datapythonista/mnist
mnist/__init__.py
download_file
def download_file(fname, target_dir=None, force=False): """Download fname from the datasets_url, and save it to target_dir, unless the file already exists, and force is False. Parameters ---------- fname : str Name of the file to download target_dir : str Directory where to store the file force : bool Force downloading the file, if it already exists Returns ------- fname : str Full path of the downloaded file """ target_dir = target_dir or temporary_dir() target_fname = os.path.join(target_dir, fname) if force or not os.path.isfile(target_fname): url = urljoin(datasets_url, fname) urlretrieve(url, target_fname) return target_fname
python
def download_file(fname, target_dir=None, force=False): target_dir = target_dir or temporary_dir() target_fname = os.path.join(target_dir, fname) if force or not os.path.isfile(target_fname): url = urljoin(datasets_url, fname) urlretrieve(url, target_fname) return target_fname
[ "def", "download_file", "(", "fname", ",", "target_dir", "=", "None", ",", "force", "=", "False", ")", ":", "target_dir", "=", "target_dir", "or", "temporary_dir", "(", ")", "target_fname", "=", "os", ".", "path", ".", "join", "(", "target_dir", ",", "fn...
Download fname from the datasets_url, and save it to target_dir, unless the file already exists, and force is False. Parameters ---------- fname : str Name of the file to download target_dir : str Directory where to store the file force : bool Force downloading the file, if it already exists Returns ------- fname : str Full path of the downloaded file
[ "Download", "fname", "from", "the", "datasets_url", "and", "save", "it", "to", "target_dir", "unless", "the", "file", "already", "exists", "and", "force", "is", "False", "." ]
d91df2b27ee62d07396b5b64c7cfead59833b563
https://github.com/datapythonista/mnist/blob/d91df2b27ee62d07396b5b64c7cfead59833b563/mnist/__init__.py#L34-L61
20,294
datapythonista/mnist
mnist/__init__.py
parse_idx
def parse_idx(fd): """Parse an IDX file, and return it as a numpy array. Parameters ---------- fd : file File descriptor of the IDX file to parse endian : str Byte order of the IDX file. See [1] for available options Returns ------- data : numpy.ndarray Numpy array with the dimensions and the data in the IDX file 1. https://docs.python.org/3/library/struct.html #byte-order-size-and-alignment """ DATA_TYPES = {0x08: 'B', # unsigned byte 0x09: 'b', # signed byte 0x0b: 'h', # short (2 bytes) 0x0c: 'i', # int (4 bytes) 0x0d: 'f', # float (4 bytes) 0x0e: 'd'} # double (8 bytes) header = fd.read(4) if len(header) != 4: raise IdxDecodeError('Invalid IDX file, ' 'file empty or does not contain a full header.') zeros, data_type, num_dimensions = struct.unpack('>HBB', header) if zeros != 0: raise IdxDecodeError('Invalid IDX file, ' 'file must start with two zero bytes. ' 'Found 0x%02x' % zeros) try: data_type = DATA_TYPES[data_type] except KeyError: raise IdxDecodeError('Unknown data type ' '0x%02x in IDX file' % data_type) dimension_sizes = struct.unpack('>' + 'I' * num_dimensions, fd.read(4 * num_dimensions)) data = array.array(data_type, fd.read()) data.byteswap() # looks like array.array reads data as little endian expected_items = functools.reduce(operator.mul, dimension_sizes) if len(data) != expected_items: raise IdxDecodeError('IDX file has wrong number of items. ' 'Expected: %d. Found: %d' % (expected_items, len(data))) return numpy.array(data).reshape(dimension_sizes)
python
def parse_idx(fd): DATA_TYPES = {0x08: 'B', # unsigned byte 0x09: 'b', # signed byte 0x0b: 'h', # short (2 bytes) 0x0c: 'i', # int (4 bytes) 0x0d: 'f', # float (4 bytes) 0x0e: 'd'} # double (8 bytes) header = fd.read(4) if len(header) != 4: raise IdxDecodeError('Invalid IDX file, ' 'file empty or does not contain a full header.') zeros, data_type, num_dimensions = struct.unpack('>HBB', header) if zeros != 0: raise IdxDecodeError('Invalid IDX file, ' 'file must start with two zero bytes. ' 'Found 0x%02x' % zeros) try: data_type = DATA_TYPES[data_type] except KeyError: raise IdxDecodeError('Unknown data type ' '0x%02x in IDX file' % data_type) dimension_sizes = struct.unpack('>' + 'I' * num_dimensions, fd.read(4 * num_dimensions)) data = array.array(data_type, fd.read()) data.byteswap() # looks like array.array reads data as little endian expected_items = functools.reduce(operator.mul, dimension_sizes) if len(data) != expected_items: raise IdxDecodeError('IDX file has wrong number of items. ' 'Expected: %d. Found: %d' % (expected_items, len(data))) return numpy.array(data).reshape(dimension_sizes)
[ "def", "parse_idx", "(", "fd", ")", ":", "DATA_TYPES", "=", "{", "0x08", ":", "'B'", ",", "# unsigned byte", "0x09", ":", "'b'", ",", "# signed byte", "0x0b", ":", "'h'", ",", "# short (2 bytes)", "0x0c", ":", "'i'", ",", "# int (4 bytes)", "0x0d", ":", ...
Parse an IDX file, and return it as a numpy array. Parameters ---------- fd : file File descriptor of the IDX file to parse endian : str Byte order of the IDX file. See [1] for available options Returns ------- data : numpy.ndarray Numpy array with the dimensions and the data in the IDX file 1. https://docs.python.org/3/library/struct.html #byte-order-size-and-alignment
[ "Parse", "an", "IDX", "file", "and", "return", "it", "as", "a", "numpy", "array", "." ]
d91df2b27ee62d07396b5b64c7cfead59833b563
https://github.com/datapythonista/mnist/blob/d91df2b27ee62d07396b5b64c7cfead59833b563/mnist/__init__.py#L64-L120
20,295
datapythonista/mnist
mnist/__init__.py
download_and_parse_mnist_file
def download_and_parse_mnist_file(fname, target_dir=None, force=False): """Download the IDX file named fname from the URL specified in dataset_url and return it as a numpy array. Parameters ---------- fname : str File name to download and parse target_dir : str Directory where to store the file force : bool Force downloading the file, if it already exists Returns ------- data : numpy.ndarray Numpy array with the dimensions and the data in the IDX file """ fname = download_file(fname, target_dir=target_dir, force=force) fopen = gzip.open if os.path.splitext(fname)[1] == '.gz' else open with fopen(fname, 'rb') as fd: return parse_idx(fd)
python
def download_and_parse_mnist_file(fname, target_dir=None, force=False): fname = download_file(fname, target_dir=target_dir, force=force) fopen = gzip.open if os.path.splitext(fname)[1] == '.gz' else open with fopen(fname, 'rb') as fd: return parse_idx(fd)
[ "def", "download_and_parse_mnist_file", "(", "fname", ",", "target_dir", "=", "None", ",", "force", "=", "False", ")", ":", "fname", "=", "download_file", "(", "fname", ",", "target_dir", "=", "target_dir", ",", "force", "=", "force", ")", "fopen", "=", "g...
Download the IDX file named fname from the URL specified in dataset_url and return it as a numpy array. Parameters ---------- fname : str File name to download and parse target_dir : str Directory where to store the file force : bool Force downloading the file, if it already exists Returns ------- data : numpy.ndarray Numpy array with the dimensions and the data in the IDX file
[ "Download", "the", "IDX", "file", "named", "fname", "from", "the", "URL", "specified", "in", "dataset_url", "and", "return", "it", "as", "a", "numpy", "array", "." ]
d91df2b27ee62d07396b5b64c7cfead59833b563
https://github.com/datapythonista/mnist/blob/d91df2b27ee62d07396b5b64c7cfead59833b563/mnist/__init__.py#L123-L146
20,296
Bogdanp/anom-py
anom/query.py
Pages.fetch_next_page
def fetch_next_page(self): """Fetch the next Page of results. Returns: Page: The next page of results. """ for page in self: return page else: return Page(self._resultset.cursor, iter(()))
python
def fetch_next_page(self): for page in self: return page else: return Page(self._resultset.cursor, iter(()))
[ "def", "fetch_next_page", "(", "self", ")", ":", "for", "page", "in", "self", ":", "return", "page", "else", ":", "return", "Page", "(", "self", ".", "_resultset", ".", "cursor", ",", "iter", "(", "(", ")", ")", ")" ]
Fetch the next Page of results. Returns: Page: The next page of results.
[ "Fetch", "the", "next", "Page", "of", "results", "." ]
519078b6d1570fa63c5f17cf98817c7bb5588136
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L204-L213
20,297
Bogdanp/anom-py
anom/query.py
Query.count
def count(self, *, page_size=DEFAULT_BATCH_SIZE, **options): """Counts the number of entities that match this query. Note: Since Datastore doesn't provide a native way to count entities by query, this method paginates through all the entities' keys and counts them. Parameters: \**options(QueryOptions, optional) Returns: int: The number of entities. """ entities = 0 options = QueryOptions(self).replace(keys_only=True) for page in self.paginate(page_size=page_size, **options): entities += len(list(page)) return entities
python
def count(self, *, page_size=DEFAULT_BATCH_SIZE, **options): entities = 0 options = QueryOptions(self).replace(keys_only=True) for page in self.paginate(page_size=page_size, **options): entities += len(list(page)) return entities
[ "def", "count", "(", "self", ",", "*", ",", "page_size", "=", "DEFAULT_BATCH_SIZE", ",", "*", "*", "options", ")", ":", "entities", "=", "0", "options", "=", "QueryOptions", "(", "self", ")", ".", "replace", "(", "keys_only", "=", "True", ")", "for", ...
Counts the number of entities that match this query. Note: Since Datastore doesn't provide a native way to count entities by query, this method paginates through all the entities' keys and counts them. Parameters: \**options(QueryOptions, optional) Returns: int: The number of entities.
[ "Counts", "the", "number", "of", "entities", "that", "match", "this", "query", "." ]
519078b6d1570fa63c5f17cf98817c7bb5588136
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L383-L401
20,298
Bogdanp/anom-py
anom/query.py
Query.delete
def delete(self, *, page_size=DEFAULT_BATCH_SIZE, **options): """Deletes all the entities that match this query. Note: Since Datasotre doesn't provide a native way to delete entities by query, this method paginates through all the entities' keys and issues a single delete_multi call per page. Parameters: \**options(QueryOptions, optional) Returns: int: The number of deleted entities. """ from .model import delete_multi deleted = 0 options = QueryOptions(self).replace(keys_only=True) for page in self.paginate(page_size=page_size, **options): keys = list(page) deleted += len(keys) delete_multi(keys) return deleted
python
def delete(self, *, page_size=DEFAULT_BATCH_SIZE, **options): from .model import delete_multi deleted = 0 options = QueryOptions(self).replace(keys_only=True) for page in self.paginate(page_size=page_size, **options): keys = list(page) deleted += len(keys) delete_multi(keys) return deleted
[ "def", "delete", "(", "self", ",", "*", ",", "page_size", "=", "DEFAULT_BATCH_SIZE", ",", "*", "*", "options", ")", ":", "from", ".", "model", "import", "delete_multi", "deleted", "=", "0", "options", "=", "QueryOptions", "(", "self", ")", ".", "replace"...
Deletes all the entities that match this query. Note: Since Datasotre doesn't provide a native way to delete entities by query, this method paginates through all the entities' keys and issues a single delete_multi call per page. Parameters: \**options(QueryOptions, optional) Returns: int: The number of deleted entities.
[ "Deletes", "all", "the", "entities", "that", "match", "this", "query", "." ]
519078b6d1570fa63c5f17cf98817c7bb5588136
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L403-L427
20,299
Bogdanp/anom-py
anom/query.py
Query.get
def get(self, **options): """Run this query and get the first result. Parameters: \**options(QueryOptions, optional) Returns: Model: An entity or None if there were no results. """ sub_query = self.with_limit(1) options = QueryOptions(sub_query).replace(batch_size=1) for result in sub_query.run(**options): return result return None
python
def get(self, **options): sub_query = self.with_limit(1) options = QueryOptions(sub_query).replace(batch_size=1) for result in sub_query.run(**options): return result return None
[ "def", "get", "(", "self", ",", "*", "*", "options", ")", ":", "sub_query", "=", "self", ".", "with_limit", "(", "1", ")", "options", "=", "QueryOptions", "(", "sub_query", ")", ".", "replace", "(", "batch_size", "=", "1", ")", "for", "result", "in",...
Run this query and get the first result. Parameters: \**options(QueryOptions, optional) Returns: Model: An entity or None if there were no results.
[ "Run", "this", "query", "and", "get", "the", "first", "result", "." ]
519078b6d1570fa63c5f17cf98817c7bb5588136
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L429-L442