repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
asottile/aspy.refactor_imports
aspy/refactor_imports/import_obj.py
AbstractImportObj.from_str
def from_str(cls, s):
    """Parse *s* and wrap its first statement in an import object.

    Raises AssertionError when the parsed node is not of the ast type
    the class expects.
    """
    node = ast.parse(s).body[0]
    expected = cls._expected_ast_type
    if isinstance(node, expected):
        return cls(node)
    raise AssertionError(
        'Expected ast of type {!r} but got {!r}'.format(
            expected, node
        )
    )
python
def from_str(cls, s):
    """Parse *s* and wrap its first statement in an import object.

    Raises AssertionError when the parsed node is not of the ast type
    the class expects.
    """
    node = ast.parse(s).body[0]
    expected = cls._expected_ast_type
    if isinstance(node, expected):
        return cls(node)
    raise AssertionError(
        'Expected ast of type {!r} but got {!r}'.format(
            expected, node
        )
    )
[ "def", "from_str", "(", "cls", ",", "s", ")", ":", "ast_obj", "=", "ast", ".", "parse", "(", "s", ")", ".", "body", "[", "0", "]", "if", "not", "isinstance", "(", "ast_obj", ",", "cls", ".", "_expected_ast_type", ")", ":", "raise", "AssertionError", ...
Construct an import object from a string.
[ "Construct", "an", "import", "object", "from", "a", "string", "." ]
8815983d373f734bca2007ea598020a6b23d7c59
https://github.com/asottile/aspy.refactor_imports/blob/8815983d373f734bca2007ea598020a6b23d7c59/aspy/refactor_imports/import_obj.py#L26-L36
train
41,000
asottile/aspy.refactor_imports
aspy/refactor_imports/sort.py
sort
def sort(imports, separate=True, import_before_from=True, **classify_kwargs):
    """Sort import objects into groups.

    :param list imports: FromImport / ImportImport objects
    :param bool separate: Whether to classify and return separate segments
        of imports based on classification.
    :param bool import_before_from: Whether to sort `import ...` imports
        before `from ...` imports.

    For example:

        from os import path
        from aspy import refactor_imports
        import sys
        import pyramid

    separate = True, import_before_from = True

        import sys
        from os import path

        import pyramid
        from aspy import refactor_imports

    separate = True, import_before_from = False

        from os import path
        import sys

        import pyramid
        from aspy import refactor_imports

    separate = False, import_before_from = True

        import pyramid
        import sys
        from aspy import refactor_imports
        from os import path

    separate = False, import_before_from = False

        from aspy import refactor_imports
        from os import path
        import pyramid
        import sys
    """
    if separate:
        # One output segment per classification (e.g. FUTURE, BUILTIN, ...).
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            )
        types = ImportType.__all__
    else:
        # A little cheaty, this allows future imports to sort before others
        # by classifying into exactly two buckets: "is __future__" or not.
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            ) == ImportType.FUTURE
        types = [True, False]

    if import_before_from:
        # Prefix the sort key with the statement kind so plain `import x`
        # sorts ahead of `from x import y` inside each segment.
        def sort_within(obj):
            return (CLS_TO_INDEX[type(obj)],) + obj.sort_key
    else:
        def sort_within(obj):
            return tuple(obj.sort_key)

    # Partition the imports
    imports_partitioned = collections.defaultdict(list)
    for import_obj in imports:
        imports_partitioned[classify_func(import_obj)].append(import_obj)

    # sort each of the segments
    for segment_key, val in imports_partitioned.items():
        imports_partitioned[segment_key] = sorted(val, key=sort_within)

    # Emit segments in the canonical `types` order, skipping empty ones.
    return tuple(
        tuple(imports_partitioned[key])
        for key in types
        if key in imports_partitioned
    )
python
def sort(imports, separate=True, import_before_from=True, **classify_kwargs):
    """Sort import objects into groups.

    :param list imports: FromImport / ImportImport objects
    :param bool separate: Whether to classify and return separate segments
        of imports based on classification.
    :param bool import_before_from: Whether to sort `import ...` imports
        before `from ...` imports.

    For example:

        from os import path
        from aspy import refactor_imports
        import sys
        import pyramid

    separate = True, import_before_from = True

        import sys
        from os import path

        import pyramid
        from aspy import refactor_imports

    separate = True, import_before_from = False

        from os import path
        import sys

        import pyramid
        from aspy import refactor_imports

    separate = False, import_before_from = True

        import pyramid
        import sys
        from aspy import refactor_imports
        from os import path

    separate = False, import_before_from = False

        from aspy import refactor_imports
        from os import path
        import pyramid
        import sys
    """
    if separate:
        # One output segment per classification (e.g. FUTURE, BUILTIN, ...).
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            )
        types = ImportType.__all__
    else:
        # A little cheaty, this allows future imports to sort before others
        # by classifying into exactly two buckets: "is __future__" or not.
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            ) == ImportType.FUTURE
        types = [True, False]

    if import_before_from:
        # Prefix the sort key with the statement kind so plain `import x`
        # sorts ahead of `from x import y` inside each segment.
        def sort_within(obj):
            return (CLS_TO_INDEX[type(obj)],) + obj.sort_key
    else:
        def sort_within(obj):
            return tuple(obj.sort_key)

    # Partition the imports
    imports_partitioned = collections.defaultdict(list)
    for import_obj in imports:
        imports_partitioned[classify_func(import_obj)].append(import_obj)

    # sort each of the segments
    for segment_key, val in imports_partitioned.items():
        imports_partitioned[segment_key] = sorted(val, key=sort_within)

    # Emit segments in the canonical `types` order, skipping empty ones.
    return tuple(
        tuple(imports_partitioned[key])
        for key in types
        if key in imports_partitioned
    )
[ "def", "sort", "(", "imports", ",", "separate", "=", "True", ",", "import_before_from", "=", "True", ",", "*", "*", "classify_kwargs", ")", ":", "if", "separate", ":", "def", "classify_func", "(", "obj", ")", ":", "return", "classify_import", "(", "obj", ...
Sort import objects into groups. :param list imports: FromImport / ImportImport objects :param bool separate: Whether to classify and return separate segments of imports based on classification. :param bool import_before_from: Whether to sort `import ...` imports before `from ...` imports. For example: from os import path from aspy import refactor_imports import sys import pyramid separate = True, import_before_from = True import sys from os import path import pyramid from aspy import refactor_imports separate = True, import_before_from = False from os import path import sys import pyramid from aspy import refactor_imports separate = False, import_before_from = True import pyramid import sys from aspy import refactor_imports from os import path separate = False, import_before_from = False from aspy import refactor_imports from os import path import pyramid import sys
[ "Sort", "import", "objects", "into", "groups", "." ]
8815983d373f734bca2007ea598020a6b23d7c59
https://github.com/asottile/aspy.refactor_imports/blob/8815983d373f734bca2007ea598020a6b23d7c59/aspy/refactor_imports/sort.py#L19-L99
train
41,001
asottile/aspy.refactor_imports
aspy/refactor_imports/classify.py
classify_import
def classify_import(module_name, application_directories=('.',)): """Classifies an import by its package. Returns a value in ImportType.__all__ :param text module_name: The dotted notation of a module :param tuple application_directories: tuple of paths which are considered application roots. """ # Only really care about the first part of the path base, _, _ = module_name.partition('.') found, module_path, is_builtin = _get_module_info( base, application_directories, ) if base == '__future__': return ImportType.FUTURE # Relative imports: `from .foo import bar` elif base == '': return ImportType.APPLICATION # If imp tells us it is builtin, it is builtin elif is_builtin: return ImportType.BUILTIN # If the module path exists in the project directories elif _module_path_is_local_and_is_not_symlinked( module_path, application_directories, ): return ImportType.APPLICATION # Otherwise we assume it is a system module or a third party module elif ( found and PACKAGES_PATH not in module_path and not _due_to_pythonpath(module_path) ): return ImportType.BUILTIN else: return ImportType.THIRD_PARTY
python
def classify_import(module_name, application_directories=('.',)): """Classifies an import by its package. Returns a value in ImportType.__all__ :param text module_name: The dotted notation of a module :param tuple application_directories: tuple of paths which are considered application roots. """ # Only really care about the first part of the path base, _, _ = module_name.partition('.') found, module_path, is_builtin = _get_module_info( base, application_directories, ) if base == '__future__': return ImportType.FUTURE # Relative imports: `from .foo import bar` elif base == '': return ImportType.APPLICATION # If imp tells us it is builtin, it is builtin elif is_builtin: return ImportType.BUILTIN # If the module path exists in the project directories elif _module_path_is_local_and_is_not_symlinked( module_path, application_directories, ): return ImportType.APPLICATION # Otherwise we assume it is a system module or a third party module elif ( found and PACKAGES_PATH not in module_path and not _due_to_pythonpath(module_path) ): return ImportType.BUILTIN else: return ImportType.THIRD_PARTY
[ "def", "classify_import", "(", "module_name", ",", "application_directories", "=", "(", "'.'", ",", ")", ")", ":", "# Only really care about the first part of the path", "base", ",", "_", ",", "_", "=", "module_name", ".", "partition", "(", "'.'", ")", "found", ...
Classifies an import by its package. Returns a value in ImportType.__all__ :param text module_name: The dotted notation of a module :param tuple application_directories: tuple of paths which are considered application roots.
[ "Classifies", "an", "import", "by", "its", "package", "." ]
8815983d373f734bca2007ea598020a6b23d7c59
https://github.com/asottile/aspy.refactor_imports/blob/8815983d373f734bca2007ea598020a6b23d7c59/aspy/refactor_imports/classify.py#L124-L159
train
41,002
openstax/cnxml
cnxml/cli.py
_arg_parser
def _arg_parser(): """Factory for creating the argument parser""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('xml', nargs='*') return parser
python
def _arg_parser(): """Factory for creating the argument parser""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('xml', nargs='*') return parser
[ "def", "_arg_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "__doc__", ")", "parser", ".", "add_argument", "(", "'xml'", ",", "nargs", "=", "'*'", ")", "return", "parser" ]
Factory for creating the argument parser
[ "Factory", "for", "creating", "the", "argument", "parser" ]
ddce4016ef204c509861cdc328815ddc361378c9
https://github.com/openstax/cnxml/blob/ddce4016ef204c509861cdc328815ddc361378c9/cnxml/cli.py#L27-L31
train
41,003
openstax/cnxml
cnxml/validation.py
validate_cnxml
def validate_cnxml(*content_filepaths):
    """Validate CNXML files against the cnxml-jing.rng RNG schema.

    Each given path is resolved to an absolute location before being
    handed to the jing validator.
    """
    resolved = []
    for path in content_filepaths:
        resolved.append(Path(path).resolve())
    return jing(CNXML_JING_RNG, *resolved)
python
def validate_cnxml(*content_filepaths):
    """Validate CNXML files against the cnxml-jing.rng RNG schema.

    Each given path is resolved to an absolute location before being
    handed to the jing validator.
    """
    resolved = []
    for path in content_filepaths:
        resolved.append(Path(path).resolve())
    return jing(CNXML_JING_RNG, *resolved)
[ "def", "validate_cnxml", "(", "*", "content_filepaths", ")", ":", "content_filepaths", "=", "[", "Path", "(", "path", ")", ".", "resolve", "(", ")", "for", "path", "in", "content_filepaths", "]", "return", "jing", "(", "CNXML_JING_RNG", ",", "*", "content_fi...
Validates the given CNXML file against the cnxml-jing.rng RNG.
[ "Validates", "the", "given", "CNXML", "file", "against", "the", "cnxml", "-", "jing", ".", "rng", "RNG", "." ]
ddce4016ef204c509861cdc328815ddc361378c9
https://github.com/openstax/cnxml/blob/ddce4016ef204c509861cdc328815ddc361378c9/cnxml/validation.py#L20-L23
train
41,004
openstax/cnxml
cnxml/validation.py
validate_collxml
def validate_collxml(*content_filepaths):
    """Validate COLLXML files against the collxml-jing.rng RNG schema.

    Each given path is resolved to an absolute location before being
    handed to the jing validator.
    """
    resolved = []
    for path in content_filepaths:
        resolved.append(Path(path).resolve())
    return jing(COLLXML_JING_RNG, *resolved)
python
def validate_collxml(*content_filepaths):
    """Validate COLLXML files against the collxml-jing.rng RNG schema.

    Each given path is resolved to an absolute location before being
    handed to the jing validator.
    """
    resolved = []
    for path in content_filepaths:
        resolved.append(Path(path).resolve())
    return jing(COLLXML_JING_RNG, *resolved)
[ "def", "validate_collxml", "(", "*", "content_filepaths", ")", ":", "content_filepaths", "=", "[", "Path", "(", "path", ")", ".", "resolve", "(", ")", "for", "path", "in", "content_filepaths", "]", "return", "jing", "(", "COLLXML_JING_RNG", ",", "*", "conten...
Validates the given COLLXML file against the collxml-jing.rng RNG.
[ "Validates", "the", "given", "COLLXML", "file", "against", "the", "collxml", "-", "jing", ".", "rng", "RNG", "." ]
ddce4016ef204c509861cdc328815ddc361378c9
https://github.com/openstax/cnxml/blob/ddce4016ef204c509861cdc328815ddc361378c9/cnxml/validation.py#L26-L29
train
41,005
fhcrc/taxtastic
taxtastic/subcommands/rollback.py
action
def action(args):
    """Roll back commands on a refpkg.

    *args* should be an argparse object with fields refpkg (giving the
    path to the refpkg to operate on) and n (giving the number of
    operations to roll back).

    Returns 0 on success, 1 when the refpkg does not record enough
    history for the requested number of rollbacks.
    """
    log.info('loading reference package')
    pkg = refpkg.Refpkg(args.refpkg, create=False)

    # Walk the rollback chain first to verify enough history exists
    # before mutating anything.
    node = pkg.contents
    for depth in range(args.n):
        if node['rollback'] is None:
            log.error('Cannot rollback {} changes; '
                      'refpkg only records {} changes.'.format(args.n, depth))
            return 1
        node = node['rollback']

    # History is deep enough; actually undo the operations.
    for _ in range(args.n):
        pkg.rollback()
    return 0
python
def action(args):
    """Roll back commands on a refpkg.

    *args* should be an argparse object with fields refpkg (giving the
    path to the refpkg to operate on) and n (giving the number of
    operations to roll back).

    Returns 0 on success, 1 when the refpkg does not record enough
    history for the requested number of rollbacks.
    """
    log.info('loading reference package')
    pkg = refpkg.Refpkg(args.refpkg, create=False)

    # Walk the rollback chain first to verify enough history exists
    # before mutating anything.
    node = pkg.contents
    for depth in range(args.n):
        if node['rollback'] is None:
            log.error('Cannot rollback {} changes; '
                      'refpkg only records {} changes.'.format(args.n, depth))
            return 1
        node = node['rollback']

    # History is deep enough; actually undo the operations.
    for _ in range(args.n):
        pkg.rollback()
    return 0
[ "def", "action", "(", "args", ")", ":", "log", ".", "info", "(", "'loading reference package'", ")", "r", "=", "refpkg", ".", "Refpkg", "(", "args", ".", "refpkg", ",", "create", "=", "False", ")", "# First check if we can do n rollbacks", "q", "=", "r", "...
Roll back commands on a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on) and n (giving the number of operations to roll back).
[ "Roll", "back", "commands", "on", "a", "refpkg", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/subcommands/rollback.py#L39-L63
train
41,006
amicks/Speculator
speculator/market.py
set_targets
def set_targets(x, delta=10):
    """ Sets target market trend for a date

    Args:
        x: Pandas DataFrame of market features.  Assumes the default
            RangeIndex (row labels equal positions), since labels are
            compared against positional offsets below -- TODO confirm.
        delta: Positive number defining a price buffer between what is
            classified as a bullish/bearish market for the training set.
            delta is equivalent to the total size of the neutral price zone.
            delta / 2 is equivalent to either the positive or negative
            threshold of the neutral price zone.

    Returns:
        Pandas Series of numpy int32 market trend targets
        (the data is created with dtype=np.int32, not int8).
    """
    data = []  # Keep track of targets
    for row, _ in x.iterrows():
        if row == x.shape[0] - 1:  # Can't predict yet, done.
            break

        # Get closing prices
        curr_close = x.close[row]
        next_close = x.close[row + 1]
        high_close = next_close + (delta / 2)  # Pos. neutral zone threshold
        low_close = next_close - (delta / 2)  # Neg. neutral zone threshold

        # Get target: compare today's close against the neutral zone
        # centered on tomorrow's close.
        if curr_close < low_close:
            target = TARGET_CODES['bearish']
        elif curr_close > high_close:
            target = TARGET_CODES['bullish']
        else:
            target = TARGET_CODES['neutral']
        data.append(target)
    return pd.Series(data=data, dtype=np.int32, name='target')
python
def set_targets(x, delta=10):
    """ Sets target market trend for a date

    Args:
        x: Pandas DataFrame of market features.  Assumes the default
            RangeIndex (row labels equal positions), since labels are
            compared against positional offsets below -- TODO confirm.
        delta: Positive number defining a price buffer between what is
            classified as a bullish/bearish market for the training set.
            delta is equivalent to the total size of the neutral price zone.
            delta / 2 is equivalent to either the positive or negative
            threshold of the neutral price zone.

    Returns:
        Pandas Series of numpy int32 market trend targets
        (the data is created with dtype=np.int32, not int8).
    """
    data = []  # Keep track of targets
    for row, _ in x.iterrows():
        if row == x.shape[0] - 1:  # Can't predict yet, done.
            break

        # Get closing prices
        curr_close = x.close[row]
        next_close = x.close[row + 1]
        high_close = next_close + (delta / 2)  # Pos. neutral zone threshold
        low_close = next_close - (delta / 2)  # Neg. neutral zone threshold

        # Get target: compare today's close against the neutral zone
        # centered on tomorrow's close.
        if curr_close < low_close:
            target = TARGET_CODES['bearish']
        elif curr_close > high_close:
            target = TARGET_CODES['bullish']
        else:
            target = TARGET_CODES['neutral']
        data.append(target)
    return pd.Series(data=data, dtype=np.int32, name='target')
[ "def", "set_targets", "(", "x", ",", "delta", "=", "10", ")", ":", "data", "=", "[", "]", "# Keep track of targets", "for", "row", ",", "_", "in", "x", ".", "iterrows", "(", ")", ":", "if", "row", "==", "x", ".", "shape", "[", "0", "]", "-", "1...
Sets target market trend for a date Args: x: Pandas DataFrame of market features delta: Positive number defining a price buffer between what is classified as a bullish/bearish market for the training set. delta is equivalent to the total size of the neutral price zone. delta / 2 is equivalent to either the positive or negative threshold of the neutral price zone. Returns: Pandas Series of numpy int8 market trend targets
[ "Sets", "target", "market", "trend", "for", "a", "date" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/market.py#L111-L145
train
41,007
amicks/Speculator
speculator/market.py
eval_features
def eval_features(json):
    """ Gets technical analysis features from market data JSONs

    Args:
        json: JSON data as a list of dict dates, where the keys are
            the raw market statistics.

    Returns:
        Dict of market features and their values
    """
    # Seed with the most recent closing price, then add each indicator.
    features = {'close': json[-1]['close']}
    features['sma'] = SMA.eval_from_json(json)
    features['rsi'] = RSI.eval_from_json(json)
    features['so'] = SO.eval_from_json(json)
    features['obv'] = OBV.eval_from_json(json)
    return features
python
def eval_features(json):
    """ Gets technical analysis features from market data JSONs

    Args:
        json: JSON data as a list of dict dates, where the keys are
            the raw market statistics.

    Returns:
        Dict of market features and their values
    """
    # Seed with the most recent closing price, then add each indicator.
    features = {'close': json[-1]['close']}
    features['sma'] = SMA.eval_from_json(json)
    features['rsi'] = RSI.eval_from_json(json)
    features['so'] = SO.eval_from_json(json)
    features['obv'] = OBV.eval_from_json(json)
    return features
[ "def", "eval_features", "(", "json", ")", ":", "return", "{", "'close'", ":", "json", "[", "-", "1", "]", "[", "'close'", "]", ",", "'sma'", ":", "SMA", ".", "eval_from_json", "(", "json", ")", ",", "'rsi'", ":", "RSI", ".", "eval_from_json", "(", ...
Gets technical analysis features from market data JSONs Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. Returns: Dict of market features and their values
[ "Gets", "technical", "analysis", "features", "from", "market", "data", "JSONs" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/market.py#L147-L161
train
41,008
amicks/Speculator
speculator/market.py
target_code_to_name
def target_code_to_name(code):
    """ Converts an int target code to a target name

    Since TARGET_CODES is a 1:1 mapping, perform a reverse lookup
    to get the more readable name.

    Args:
        code: Value from self.TARGET_CODES

    Returns:
        String target name corresponding to the given code.
    """
    names_by_code = dict((value, key) for key, value in TARGET_CODES.items())
    return names_by_code[code]
python
def target_code_to_name(code):
    """ Converts an int target code to a target name

    Since TARGET_CODES is a 1:1 mapping, perform a reverse lookup
    to get the more readable name.

    Args:
        code: Value from self.TARGET_CODES

    Returns:
        String target name corresponding to the given code.
    """
    names_by_code = dict((value, key) for key, value in TARGET_CODES.items())
    return names_by_code[code]
[ "def", "target_code_to_name", "(", "code", ")", ":", "TARGET_NAMES", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "TARGET_CODES", ".", "items", "(", ")", "}", "return", "TARGET_NAMES", "[", "code", "]" ]
Converts an int target code to a target name Since self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup to get the more readable name. Args: code: Value from self.TARGET_CODES Returns: String target name corresponding to the given code.
[ "Converts", "an", "int", "target", "code", "to", "a", "target", "name" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/market.py#L163-L176
train
41,009
amicks/Speculator
speculator/market.py
setup_model
def setup_model(x, y, model_type='random_forest', seed=None, **kwargs):
    """ Initializes a machine learning model

    Args:
        x: Pandas DataFrame, X axis of features
        y: Pandas Series, Y axis of targets
        model_type: Machine Learning model to use
            Valid values: 'random_forest' (or 'rf'),
                          'deep_neural_network' (or 'dnn')
        seed: Random state to use when splitting sets and creating the model
        **kwargs: Scikit Learn's RandomForestClassifier kwargs

    Returns:
        Trained model instance of model_type

    Raises:
        AssertionError: if x or y has fewer than two samples
        ValueError: if model_type is not one of the valid values
    """
    # NOTE(review): assert is stripped under `python -O`; a ValueError
    # would be sturdier, but the exception type is kept as-is for callers.
    assert len(x) > 1 and len(y) > 1, 'Not enough data objects to train on (minimum is at least two, you have (x: {0}) and (y: {1}))'.format(len(x), len(y))

    # Split into ordered (shuffle=False) train/test partitions and bundle
    # each axis into a named (train, test) pair.
    sets = namedtuple('Datasets', ['train', 'test'])
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=seed, shuffle=False)
    x = sets(x_train, x_test)
    y = sets(y_train, y_test)

    if model_type == 'random_forest' or model_type == 'rf':
        model = rf.RandomForest(x, y, random_state=seed, **kwargs)
    elif model_type == 'deep_neural_network' or model_type == 'dnn':
        model = dnn.DeepNeuralNetwork(x, y, **kwargs)
    else:
        raise ValueError('Invalid model type kwarg')
    return model
python
def setup_model(x, y, model_type='random_forest', seed=None, **kwargs):
    """ Initializes a machine learning model

    Args:
        x: Pandas DataFrame, X axis of features
        y: Pandas Series, Y axis of targets
        model_type: Machine Learning model to use
            Valid values: 'random_forest' (or 'rf'),
                          'deep_neural_network' (or 'dnn')
        seed: Random state to use when splitting sets and creating the model
        **kwargs: Scikit Learn's RandomForestClassifier kwargs

    Returns:
        Trained model instance of model_type

    Raises:
        AssertionError: if x or y has fewer than two samples
        ValueError: if model_type is not one of the valid values
    """
    # NOTE(review): assert is stripped under `python -O`; a ValueError
    # would be sturdier, but the exception type is kept as-is for callers.
    assert len(x) > 1 and len(y) > 1, 'Not enough data objects to train on (minimum is at least two, you have (x: {0}) and (y: {1}))'.format(len(x), len(y))

    # Split into ordered (shuffle=False) train/test partitions and bundle
    # each axis into a named (train, test) pair.
    sets = namedtuple('Datasets', ['train', 'test'])
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=seed, shuffle=False)
    x = sets(x_train, x_test)
    y = sets(y_train, y_test)

    if model_type == 'random_forest' or model_type == 'rf':
        model = rf.RandomForest(x, y, random_state=seed, **kwargs)
    elif model_type == 'deep_neural_network' or model_type == 'dnn':
        model = dnn.DeepNeuralNetwork(x, y, **kwargs)
    else:
        raise ValueError('Invalid model type kwarg')
    return model
[ "def", "setup_model", "(", "x", ",", "y", ",", "model_type", "=", "'random_forest'", ",", "seed", "=", "None", ",", "*", "*", "kwargs", ")", ":", "assert", "len", "(", "x", ")", ">", "1", "and", "len", "(", "y", ")", ">", "1", ",", "'Not enough d...
Initializes a machine learning model Args: x: Pandas DataFrame, X axis of features y: Pandas Series, Y axis of targets model_type: Machine Learning model to use Valid values: 'random_forest' seed: Random state to use when splitting sets and creating the model **kwargs: Scikit Learn's RandomForestClassifier kwargs Returns: Trained model instance of model_type
[ "Initializes", "a", "machine", "learning", "model" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/market.py#L178-L208
train
41,010
amicks/Speculator
speculator/market.py
Market.get_json
def get_json(self):
    """ Gets market chart data from today to a previous date """
    DIRECTION = 'last'
    now = dt.now()
    # Resolve the epoch window ending today, spanning self.count units.
    epochs = date.get_end_start_epochs(now.year, now.month, now.day,
                                       DIRECTION, self.unit, self.count)
    response = poloniex.chart_json(epochs['shifted'], epochs['initial'],
                                   self.period, self.symbol)
    return response[0]
python
def get_json(self):
    """ Gets market chart data from today to a previous date """
    DIRECTION = 'last'
    now = dt.now()
    # Resolve the epoch window ending today, spanning self.count units.
    epochs = date.get_end_start_epochs(now.year, now.month, now.day,
                                       DIRECTION, self.unit, self.count)
    response = poloniex.chart_json(epochs['shifted'], epochs['initial'],
                                   self.period, self.symbol)
    return response[0]
[ "def", "get_json", "(", "self", ")", ":", "today", "=", "dt", ".", "now", "(", ")", "DIRECTION", "=", "'last'", "epochs", "=", "date", ".", "get_end_start_epochs", "(", "today", ".", "year", ",", "today", ".", "month", ",", "today", ".", "day", ",", ...
Gets market chart data from today to a previous date
[ "Gets", "market", "chart", "data", "from", "today", "to", "a", "previous", "date" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/market.py#L52-L59
train
41,011
amicks/Speculator
speculator/market.py
Market.set_features
def set_features(self, partition=1):
    """ Parses market data JSON for technical analysis indicators

    Args:
        partition: Int of how many dates to take into consideration
            when evaluating technical analysis indicators.

    Returns:
        Pandas DataFrame instance with columns as numpy.float32 features.

    Raises:
        ValueError: if there are not at least partition + 1 dates.
    """
    total_dates = len(self.json)
    if total_dates < partition + 1:
        raise ValueError('Not enough dates for the specified partition size: {0}. Try a smaller partition.'.format(partition))

    # Slide a window of `partition` dates across the data, evaluating
    # the indicator set once per window.
    windows = (
        self.json[start:start + partition]
        for start in range(total_dates - partition)
    )
    rows = [eval_features(window) for window in windows]
    return pd.DataFrame(data=rows, dtype=np.float32)
python
def set_features(self, partition=1):
    """ Parses market data JSON for technical analysis indicators

    Args:
        partition: Int of how many dates to take into consideration
            when evaluating technical analysis indicators.

    Returns:
        Pandas DataFrame instance with columns as numpy.float32 features.

    Raises:
        ValueError: if there are not at least partition + 1 dates.
    """
    total_dates = len(self.json)
    if total_dates < partition + 1:
        raise ValueError('Not enough dates for the specified partition size: {0}. Try a smaller partition.'.format(partition))

    # Slide a window of `partition` dates across the data, evaluating
    # the indicator set once per window.
    windows = (
        self.json[start:start + partition]
        for start in range(total_dates - partition)
    )
    rows = [eval_features(window) for window in windows]
    return pd.DataFrame(data=rows, dtype=np.float32)
[ "def", "set_features", "(", "self", ",", "partition", "=", "1", ")", ":", "if", "len", "(", "self", ".", "json", ")", "<", "partition", "+", "1", ":", "raise", "ValueError", "(", "'Not enough dates for the specified partition size: {0}. Try a smaller partition.'", ...
Parses market data JSON for technical analysis indicators Args: partition: Int of how many dates to take into consideration when evaluating technical analysis indicators. Returns: Pandas DataFrame instance with columns as numpy.float32 features.
[ "Parses", "market", "data", "JSON", "for", "technical", "analysis", "indicators" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/market.py#L61-L78
train
41,012
amicks/Speculator
speculator/market.py
Market.set_long_features
def set_long_features(self, features, columns_to_set=None, partition=2):
    """ Sets features of double the duration

    Example: Setting 14 day RSIs to longer will create add a feature
             column of a 28 day RSIs.

    Args:
        features: Pandas DataFrame instance with columns as
            numpy.float32 features.
        columns_to_set: List of strings of feature names to make longer.
            Defaults to no columns.
        partition: Int of how many dates to take into consideration
            when evaluating technical analysis indicators.

    Returns:
        Pandas DataFrame instance with columns as numpy.float32 features.
    """
    # FIX: the previous default was a mutable list literal (`[]`); use a
    # None sentinel and materialize a fresh list per call instead.
    if columns_to_set is None:
        columns_to_set = []

    # Create long features DataFrame (double-length windows)
    features_long = self.set_features(partition=2 * partition)

    # Remove features not specified by args.long
    unwanted_features = [f for f in features.columns if f not in columns_to_set]
    features_long = features_long.drop(unwanted_features, axis=1)

    # Prefix long columns with 'long_' to fix naming conflicts
    features_long.columns = ['long_{0}'.format(f) for f in features_long.columns]

    # Merge the two DataFrames, skipping the short rows that have no
    # corresponding long-window row.
    skip = partition
    return pd.concat([features[skip:].reset_index(drop=True), features_long], axis=1)
python
def set_long_features(self, features, columns_to_set=None, partition=2):
    """ Sets features of double the duration

    Example: Setting 14 day RSIs to longer will create add a feature
             column of a 28 day RSIs.

    Args:
        features: Pandas DataFrame instance with columns as
            numpy.float32 features.
        columns_to_set: List of strings of feature names to make longer.
            Defaults to no columns.
        partition: Int of how many dates to take into consideration
            when evaluating technical analysis indicators.

    Returns:
        Pandas DataFrame instance with columns as numpy.float32 features.
    """
    # FIX: the previous default was a mutable list literal (`[]`); use a
    # None sentinel and materialize a fresh list per call instead.
    if columns_to_set is None:
        columns_to_set = []

    # Create long features DataFrame (double-length windows)
    features_long = self.set_features(partition=2 * partition)

    # Remove features not specified by args.long
    unwanted_features = [f for f in features.columns if f not in columns_to_set]
    features_long = features_long.drop(unwanted_features, axis=1)

    # Prefix long columns with 'long_' to fix naming conflicts
    features_long.columns = ['long_{0}'.format(f) for f in features_long.columns]

    # Merge the two DataFrames, skipping the short rows that have no
    # corresponding long-window row.
    skip = partition
    return pd.concat([features[skip:].reset_index(drop=True), features_long], axis=1)
[ "def", "set_long_features", "(", "self", ",", "features", ",", "columns_to_set", "=", "[", "]", ",", "partition", "=", "2", ")", ":", "# Create long features DataFrame", "features_long", "=", "self", ".", "set_features", "(", "partition", "=", "2", "*", "parti...
Sets features of double the duration Example: Setting 14 day RSIs to longer will create add a feature column of a 28 day RSIs. Args: features: Pandas DataFrame instance with columns as numpy.float32 features. columns_to_set: List of strings of feature names to make longer partition: Int of how many dates to take into consideration when evaluating technical analysis indicators. Returns: Pandas DataFrame instance with columns as numpy.float32 features.
[ "Sets", "features", "of", "double", "the", "duration" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/market.py#L80-L109
train
41,013
amicks/Speculator
speculator/models/random_forest.py
RandomForest.feature_importances
def feature_importances(self):
    """ Return list of features and their importance in classification """
    # Pair each training feature name with its fitted importance score.
    names = list(self.features.train)
    return list(zip(names, self.feature_importances_))
python
def feature_importances(self):
    """ Return list of features and their importance in classification """
    # Pair each training feature name with its fitted importance score.
    names = list(self.features.train)
    return list(zip(names, self.feature_importances_))
[ "def", "feature_importances", "(", "self", ")", ":", "feature_names", "=", "[", "feature", "for", "feature", "in", "self", ".", "features", ".", "train", "]", "return", "list", "(", "zip", "(", "feature_names", ",", "self", ".", "feature_importances_", ")", ...
Return list of features and their importance in classification
[ "Return", "list", "of", "features", "and", "their", "importance", "in", "classification" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/models/random_forest.py#L31-L34
train
41,014
seznam/shelter
shelter/commands/runserver.py
TornadoProcess.stop
def stop(self):
    """
    Stop the worker.
    """
    # Stop accepting new connections first, then schedule the IOLoop
    # shutdown on the loop's own thread via add_callback (safe from
    # outside the loop).
    # NOTE(review): run() assigns `self.http_server`, but this guard
    # checks `self._http_server` -- unless a property bridges the two
    # names elsewhere in the class, this branch may never fire; confirm.
    if self._http_server is not None:
        self._http_server.stop()
    tornado.ioloop.IOLoop.instance().add_callback(
        tornado.ioloop.IOLoop.instance().stop)
python
def stop(self): """ Stop the worker. """ if self._http_server is not None: self._http_server.stop() tornado.ioloop.IOLoop.instance().add_callback( tornado.ioloop.IOLoop.instance().stop)
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_http_server", "is", "not", "None", ":", "self", ".", "_http_server", ".", "stop", "(", ")", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "add_callback", "(", "torn...
Stop the worker.
[ "Stop", "the", "worker", "." ]
c652b0ff1cca70158f8fc97d9210c1fa5961ac1c
https://github.com/seznam/shelter/blob/c652b0ff1cca70158f8fc97d9210c1fa5961ac1c/shelter/commands/runserver.py#L164-L171
train
41,015
seznam/shelter
shelter/commands/runserver.py
TornadoProcess.run
def run(self): """ Tornado worker which handles HTTP requests. """ setproctitle.setproctitle("{:s}: worker {:s}".format( self.context.config.name, self._tornado_app.settings['interface'].name)) self.logger.info( "Worker '%s' has been started with pid %d", self._tornado_app.settings['interface'].name, os.getpid()) # Configure logging self.context.config.configure_logging() # Create HTTP server instance self.http_server = tornado.httpserver.HTTPServer(self._tornado_app) # Initialize child self.context.initialize_child(TORNADO_WORKER, process=self) # Register SIGINT handler which will stop worker def sigint_handler(unused_signum, unused_frame): """ Call :meth:`stop` method when SIGINT is reached. """ io_loop = tornado.ioloop.IOLoop.instance() io_loop.add_callback_from_signal(self.stop) signal.signal(signal.SIGINT, sigint_handler) # Register callback which is called when IOLoop is started def run_ioloop_callback(): """ Set ready flag. Callback is called when worker is started. """ self._ready.value = True tornado.ioloop.IOLoop.instance().add_callback(run_ioloop_callback) # Register job which will stop worker if parent process PID is changed def check_parent_callback(): """ Tornado's callback function which checks PID of the parent process. If PID of the parent process is changed (parent has stopped), call :meth:`stop` method. """ if os.getppid() != self._parent_pid: self.stop() stop_callback = tornado.ioloop.PeriodicCallback( check_parent_callback, 250) stop_callback.start() # Run HTTP server self.http_server.add_sockets(self._sockets) # Run IOLoop tornado.ioloop.IOLoop.instance().start()
python
def run(self): """ Tornado worker which handles HTTP requests. """ setproctitle.setproctitle("{:s}: worker {:s}".format( self.context.config.name, self._tornado_app.settings['interface'].name)) self.logger.info( "Worker '%s' has been started with pid %d", self._tornado_app.settings['interface'].name, os.getpid()) # Configure logging self.context.config.configure_logging() # Create HTTP server instance self.http_server = tornado.httpserver.HTTPServer(self._tornado_app) # Initialize child self.context.initialize_child(TORNADO_WORKER, process=self) # Register SIGINT handler which will stop worker def sigint_handler(unused_signum, unused_frame): """ Call :meth:`stop` method when SIGINT is reached. """ io_loop = tornado.ioloop.IOLoop.instance() io_loop.add_callback_from_signal(self.stop) signal.signal(signal.SIGINT, sigint_handler) # Register callback which is called when IOLoop is started def run_ioloop_callback(): """ Set ready flag. Callback is called when worker is started. """ self._ready.value = True tornado.ioloop.IOLoop.instance().add_callback(run_ioloop_callback) # Register job which will stop worker if parent process PID is changed def check_parent_callback(): """ Tornado's callback function which checks PID of the parent process. If PID of the parent process is changed (parent has stopped), call :meth:`stop` method. """ if os.getppid() != self._parent_pid: self.stop() stop_callback = tornado.ioloop.PeriodicCallback( check_parent_callback, 250) stop_callback.start() # Run HTTP server self.http_server.add_sockets(self._sockets) # Run IOLoop tornado.ioloop.IOLoop.instance().start()
[ "def", "run", "(", "self", ")", ":", "setproctitle", ".", "setproctitle", "(", "\"{:s}: worker {:s}\"", ".", "format", "(", "self", ".", "context", ".", "config", ".", "name", ",", "self", ".", "_tornado_app", ".", "settings", "[", "'interface'", "]", ".",...
Tornado worker which handles HTTP requests.
[ "Tornado", "worker", "which", "handles", "HTTP", "requests", "." ]
c652b0ff1cca70158f8fc97d9210c1fa5961ac1c
https://github.com/seznam/shelter/blob/c652b0ff1cca70158f8fc97d9210c1fa5961ac1c/shelter/commands/runserver.py#L173-L224
train
41,016
seznam/shelter
shelter/commands/runserver.py
RunServer.initialize
def initialize(self): """ Initialize instance attributes. You can override this method in the subclasses. """ self.main_pid = os.getpid() self.processes.extend(self.init_service_processes()) self.processes.extend(self.init_tornado_workers())
python
def initialize(self): """ Initialize instance attributes. You can override this method in the subclasses. """ self.main_pid = os.getpid() self.processes.extend(self.init_service_processes()) self.processes.extend(self.init_tornado_workers())
[ "def", "initialize", "(", "self", ")", ":", "self", ".", "main_pid", "=", "os", ".", "getpid", "(", ")", "self", ".", "processes", ".", "extend", "(", "self", ".", "init_service_processes", "(", ")", ")", "self", ".", "processes", ".", "extend", "(", ...
Initialize instance attributes. You can override this method in the subclasses.
[ "Initialize", "instance", "attributes", ".", "You", "can", "override", "this", "method", "in", "the", "subclasses", "." ]
c652b0ff1cca70158f8fc97d9210c1fa5961ac1c
https://github.com/seznam/shelter/blob/c652b0ff1cca70158f8fc97d9210c1fa5961ac1c/shelter/commands/runserver.py#L238-L245
train
41,017
fhcrc/taxtastic
taxtastic/refpkg.py
scratch_file
def scratch_file(unlink=True, **kwargs): """Create a temporary file and return its name. Additional arguments are passed to :class:`tempfile.NamedTemporaryFile` At the start of the with block a secure, temporary file is created and its name returned. At the end of the with block it is deleted. """ kwargs['delete'] = False tf = tempfile.NamedTemporaryFile(**kwargs) tf.close() try: yield tf.name finally: if unlink: os.unlink(tf.name)
python
def scratch_file(unlink=True, **kwargs): """Create a temporary file and return its name. Additional arguments are passed to :class:`tempfile.NamedTemporaryFile` At the start of the with block a secure, temporary file is created and its name returned. At the end of the with block it is deleted. """ kwargs['delete'] = False tf = tempfile.NamedTemporaryFile(**kwargs) tf.close() try: yield tf.name finally: if unlink: os.unlink(tf.name)
[ "def", "scratch_file", "(", "unlink", "=", "True", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'delete'", "]", "=", "False", "tf", "=", "tempfile", ".", "NamedTemporaryFile", "(", "*", "*", "kwargs", ")", "tf", ".", "close", "(", ")", "try", ...
Create a temporary file and return its name. Additional arguments are passed to :class:`tempfile.NamedTemporaryFile` At the start of the with block a secure, temporary file is created and its name returned. At the end of the with block it is deleted.
[ "Create", "a", "temporary", "file", "and", "return", "its", "name", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L66-L82
train
41,018
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.open
def open(self, name, *mode): """ Return an open file object for a file in the reference package. """ return self.file_factory(self.file_path(name), *mode)
python
def open(self, name, *mode): """ Return an open file object for a file in the reference package. """ return self.file_factory(self.file_path(name), *mode)
[ "def", "open", "(", "self", ",", "name", ",", "*", "mode", ")", ":", "return", "self", ".", "file_factory", "(", "self", ".", "file_path", "(", "name", ")", ",", "*", "mode", ")" ]
Return an open file object for a file in the reference package.
[ "Return", "an", "open", "file", "object", "for", "a", "file", "in", "the", "reference", "package", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L227-L231
train
41,019
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.open_resource
def open_resource(self, resource, *mode): """ Return an open file object for a particular named resource in this reference package. """ return self.open(self.resource_name(resource), *mode)
python
def open_resource(self, resource, *mode): """ Return an open file object for a particular named resource in this reference package. """ return self.open(self.resource_name(resource), *mode)
[ "def", "open_resource", "(", "self", ",", "resource", ",", "*", "mode", ")", ":", "return", "self", ".", "open", "(", "self", ".", "resource_name", "(", "resource", ")", ",", "*", "mode", ")" ]
Return an open file object for a particular named resource in this reference package.
[ "Return", "an", "open", "file", "object", "for", "a", "particular", "named", "resource", "in", "this", "reference", "package", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L240-L245
train
41,020
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.resource_name
def resource_name(self, resource): """ Return the name of the file within the reference package for a particular named resource. """ if not(resource in self.contents['files']): raise ValueError("No such resource %r in refpkg" % (resource,)) return self.contents['files'][resource]
python
def resource_name(self, resource): """ Return the name of the file within the reference package for a particular named resource. """ if not(resource in self.contents['files']): raise ValueError("No such resource %r in refpkg" % (resource,)) return self.contents['files'][resource]
[ "def", "resource_name", "(", "self", ",", "resource", ")", ":", "if", "not", "(", "resource", "in", "self", ".", "contents", "[", "'files'", "]", ")", ":", "raise", "ValueError", "(", "\"No such resource %r in refpkg\"", "%", "(", "resource", ",", ")", ")"...
Return the name of the file within the reference package for a particular named resource.
[ "Return", "the", "name", "of", "the", "file", "within", "the", "reference", "package", "for", "a", "particular", "named", "resource", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L247-L254
train
41,021
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.resource_md5
def resource_md5(self, resource): """Return the stored MD5 sum for a particular named resource.""" if not(resource in self.contents['md5']): raise ValueError("No such resource %r in refpkg" % (resource,)) return self.contents['md5'][resource]
python
def resource_md5(self, resource): """Return the stored MD5 sum for a particular named resource.""" if not(resource in self.contents['md5']): raise ValueError("No such resource %r in refpkg" % (resource,)) return self.contents['md5'][resource]
[ "def", "resource_md5", "(", "self", ",", "resource", ")", ":", "if", "not", "(", "resource", "in", "self", ".", "contents", "[", "'md5'", "]", ")", ":", "raise", "ValueError", "(", "\"No such resource %r in refpkg\"", "%", "(", "resource", ",", ")", ")", ...
Return the stored MD5 sum for a particular named resource.
[ "Return", "the", "stored", "MD5", "sum", "for", "a", "particular", "named", "resource", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L256-L260
train
41,022
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg._set_defaults
def _set_defaults(self): """ Set some default values in the manifest. This method should be called after loading from disk, but before checking the integrity of the reference package. """ self.contents.setdefault('log', []) self.contents.setdefault('rollback', None) self.contents.setdefault('rollforward', None)
python
def _set_defaults(self): """ Set some default values in the manifest. This method should be called after loading from disk, but before checking the integrity of the reference package. """ self.contents.setdefault('log', []) self.contents.setdefault('rollback', None) self.contents.setdefault('rollforward', None)
[ "def", "_set_defaults", "(", "self", ")", ":", "self", ".", "contents", ".", "setdefault", "(", "'log'", ",", "[", "]", ")", "self", ".", "contents", ".", "setdefault", "(", "'rollback'", ",", "None", ")", "self", ".", "contents", ".", "setdefault", "(...
Set some default values in the manifest. This method should be called after loading from disk, but before checking the integrity of the reference package.
[ "Set", "some", "default", "values", "in", "the", "manifest", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L277-L286
train
41,023
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg._sync_to_disk
def _sync_to_disk(self): """Write any changes made on Refpkg to disk. Other methods of Refpkg that alter the contents of the package will call this method themselves. Generally you should never have to call it by hand. The only exception would be if another program has changed the Refpkg on disk while your program is running and you want to force your version over it. Otherwise it should only be called by other methods of refpkg. """ with self.open_manifest('w') as h: json.dump(self.contents, h, indent=4) h.write('\n')
python
def _sync_to_disk(self): """Write any changes made on Refpkg to disk. Other methods of Refpkg that alter the contents of the package will call this method themselves. Generally you should never have to call it by hand. The only exception would be if another program has changed the Refpkg on disk while your program is running and you want to force your version over it. Otherwise it should only be called by other methods of refpkg. """ with self.open_manifest('w') as h: json.dump(self.contents, h, indent=4) h.write('\n')
[ "def", "_sync_to_disk", "(", "self", ")", ":", "with", "self", ".", "open_manifest", "(", "'w'", ")", "as", "h", ":", "json", ".", "dump", "(", "self", ".", "contents", ",", "h", ",", "indent", "=", "4", ")", "h", ".", "write", "(", "'\\n'", ")" ...
Write any changes made on Refpkg to disk. Other methods of Refpkg that alter the contents of the package will call this method themselves. Generally you should never have to call it by hand. The only exception would be if another program has changed the Refpkg on disk while your program is running and you want to force your version over it. Otherwise it should only be called by other methods of refpkg.
[ "Write", "any", "changes", "made", "on", "Refpkg", "to", "disk", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L288-L300
train
41,024
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg._sync_from_disk
def _sync_from_disk(self): """Read any changes made on disk to this Refpkg. This is necessary if other programs are making changes to the Refpkg on disk and your program must be synchronized to them. """ try: fobj = self.open_manifest('r') except IOError as e: if e.errno == errno.ENOENT: raise ValueError( "couldn't find manifest file in %s" % (self.path,)) elif e.errno == errno.ENOTDIR: raise ValueError("%s is not a directory" % (self.path,)) else: raise with fobj: self.contents = json.load(fobj) self._set_defaults() self._check_refpkg()
python
def _sync_from_disk(self): """Read any changes made on disk to this Refpkg. This is necessary if other programs are making changes to the Refpkg on disk and your program must be synchronized to them. """ try: fobj = self.open_manifest('r') except IOError as e: if e.errno == errno.ENOENT: raise ValueError( "couldn't find manifest file in %s" % (self.path,)) elif e.errno == errno.ENOTDIR: raise ValueError("%s is not a directory" % (self.path,)) else: raise with fobj: self.contents = json.load(fobj) self._set_defaults() self._check_refpkg()
[ "def", "_sync_from_disk", "(", "self", ")", ":", "try", ":", "fobj", "=", "self", ".", "open_manifest", "(", "'r'", ")", "except", "IOError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "raise", "ValueError", "(", "\"c...
Read any changes made on disk to this Refpkg. This is necessary if other programs are making changes to the Refpkg on disk and your program must be synchronized to them.
[ "Read", "any", "changes", "made", "on", "disk", "to", "this", "Refpkg", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L302-L323
train
41,025
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg._add_file
def _add_file(self, key, path): """Copy a file into the reference package.""" filename = os.path.basename(path) base, ext = os.path.splitext(filename) if os.path.exists(self.file_path(filename)): with tempfile.NamedTemporaryFile( dir=self.path, prefix=base, suffix=ext) as tf: filename = os.path.basename(tf.name) shutil.copyfile(path, self.file_path(filename)) self.contents['files'][key] = filename
python
def _add_file(self, key, path): """Copy a file into the reference package.""" filename = os.path.basename(path) base, ext = os.path.splitext(filename) if os.path.exists(self.file_path(filename)): with tempfile.NamedTemporaryFile( dir=self.path, prefix=base, suffix=ext) as tf: filename = os.path.basename(tf.name) shutil.copyfile(path, self.file_path(filename)) self.contents['files'][key] = filename
[ "def", "_add_file", "(", "self", ",", "key", ",", "path", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "os", ".", "path",...
Copy a file into the reference package.
[ "Copy", "a", "file", "into", "the", "reference", "package", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L325-L334
train
41,026
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.is_invalid
def is_invalid(self): """Check if this RefPkg is invalid. Valid means that it contains a properly named manifest, and each of the files described in the manifest exists and has the proper MD5 hashsum. If the Refpkg is valid, is_invalid returns False. Otherwise it returns a nonempty string describing the error. """ # Manifest file contains the proper keys for k in ['metadata', 'files', 'md5']: if not(k in self.contents): return "Manifest file missing key %s" % k if not(isinstance(self.contents[k], dict)): return "Key %s in manifest did not refer to a dictionary" % k if not('rollback' in self.contents): return "Manifest file missing key rollback" if not(isinstance(self.contents['rollback'], dict)) and self.contents[ "rollback"] is not None: return ("Key rollback in manifest did not refer to a " "dictionary or None, found %s") % str(self.contents['rollback']) if not('rollforward' in self.contents): return "Manifest file missing key rollforward" if self.contents['rollforward'] is not None: if not(isinstance(self.contents['rollforward'], list)): return "Key rollforward was not a list, found %s" % str( self.contents['rollforward']) elif len(self.contents['rollforward']) != 2: return "Key rollforward had wrong length, found %d" % \ len(self.contents['rollforward']) elif not is_string(self.contents['rollforward'][0]): print(type(self.contents['rollforward'][0])) return "Key rollforward's first entry was not a string, found %s" % \ str(self.contents['rollforward'][0]) elif not(isinstance(self.contents['rollforward'][1], dict)): return "Key rollforward's second entry was not a dict, found %s" % \ str(self.contents['rollforward'][1]) if not("log" in self.contents): return "Manifest file missing key 'log'" if not(isinstance(self.contents['log'], list)): return "Key 'log' in manifest did not refer to a list" # MD5 keys and filenames are in one to one correspondence if self.contents['files'].keys() != self.contents[ 'md5'].keys(): return ("Files and MD5 sums in manifest do not " 
"match (files: %s, MD5 sums: %s)") % \ (list(self.contents['files'].keys()), list(self.contents['md5'].keys())) # All files in the manifest exist and match the MD5 sums for key, filename in self.contents['files'].items(): # we don't need to explicitly check for existence; # calculate_resource_md5 will open the file for us. expected_md5 = self.resource_md5(key) found_md5 = self.calculate_resource_md5(key) if found_md5 != expected_md5: return ("File %s referred to by key %s did " "not match its MD5 sum (found: %s, expected %s)") % \ (filename, key, found_md5, expected_md5) return False
python
def is_invalid(self): """Check if this RefPkg is invalid. Valid means that it contains a properly named manifest, and each of the files described in the manifest exists and has the proper MD5 hashsum. If the Refpkg is valid, is_invalid returns False. Otherwise it returns a nonempty string describing the error. """ # Manifest file contains the proper keys for k in ['metadata', 'files', 'md5']: if not(k in self.contents): return "Manifest file missing key %s" % k if not(isinstance(self.contents[k], dict)): return "Key %s in manifest did not refer to a dictionary" % k if not('rollback' in self.contents): return "Manifest file missing key rollback" if not(isinstance(self.contents['rollback'], dict)) and self.contents[ "rollback"] is not None: return ("Key rollback in manifest did not refer to a " "dictionary or None, found %s") % str(self.contents['rollback']) if not('rollforward' in self.contents): return "Manifest file missing key rollforward" if self.contents['rollforward'] is not None: if not(isinstance(self.contents['rollforward'], list)): return "Key rollforward was not a list, found %s" % str( self.contents['rollforward']) elif len(self.contents['rollforward']) != 2: return "Key rollforward had wrong length, found %d" % \ len(self.contents['rollforward']) elif not is_string(self.contents['rollforward'][0]): print(type(self.contents['rollforward'][0])) return "Key rollforward's first entry was not a string, found %s" % \ str(self.contents['rollforward'][0]) elif not(isinstance(self.contents['rollforward'][1], dict)): return "Key rollforward's second entry was not a dict, found %s" % \ str(self.contents['rollforward'][1]) if not("log" in self.contents): return "Manifest file missing key 'log'" if not(isinstance(self.contents['log'], list)): return "Key 'log' in manifest did not refer to a list" # MD5 keys and filenames are in one to one correspondence if self.contents['files'].keys() != self.contents[ 'md5'].keys(): return ("Files and MD5 sums in manifest do not " 
"match (files: %s, MD5 sums: %s)") % \ (list(self.contents['files'].keys()), list(self.contents['md5'].keys())) # All files in the manifest exist and match the MD5 sums for key, filename in self.contents['files'].items(): # we don't need to explicitly check for existence; # calculate_resource_md5 will open the file for us. expected_md5 = self.resource_md5(key) found_md5 = self.calculate_resource_md5(key) if found_md5 != expected_md5: return ("File %s referred to by key %s did " "not match its MD5 sum (found: %s, expected %s)") % \ (filename, key, found_md5, expected_md5) return False
[ "def", "is_invalid", "(", "self", ")", ":", "# Manifest file contains the proper keys", "for", "k", "in", "[", "'metadata'", ",", "'files'", ",", "'md5'", "]", ":", "if", "not", "(", "k", "in", "self", ".", "contents", ")", ":", "return", "\"Manifest file mi...
Check if this RefPkg is invalid. Valid means that it contains a properly named manifest, and each of the files described in the manifest exists and has the proper MD5 hashsum. If the Refpkg is valid, is_invalid returns False. Otherwise it returns a nonempty string describing the error.
[ "Check", "if", "this", "RefPkg", "is", "invalid", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L369-L435
train
41,027
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.reroot
def reroot(self, rppr=None, pretend=False): """Reroot the phylogenetic tree. This operation calls ``rppr reroot`` to generate the rerooted tree, so you must have ``pplacer`` and its auxiliary tools ``rppr`` and ``guppy`` installed for it to work. You can specify the path to ``rppr`` by giving it as the *rppr* argument. If *pretend* is ``True``, the convexification is run, but the refpkg is not actually updated. """ with scratch_file(prefix='tree', suffix='.tre') as name: # Use a specific path to rppr, otherwise rely on $PATH subprocess.check_call([rppr or 'rppr', 'reroot', '-c', self.path, '-o', name]) if not(pretend): self.update_file('tree', name) self._log('Rerooting refpkg')
python
def reroot(self, rppr=None, pretend=False): """Reroot the phylogenetic tree. This operation calls ``rppr reroot`` to generate the rerooted tree, so you must have ``pplacer`` and its auxiliary tools ``rppr`` and ``guppy`` installed for it to work. You can specify the path to ``rppr`` by giving it as the *rppr* argument. If *pretend* is ``True``, the convexification is run, but the refpkg is not actually updated. """ with scratch_file(prefix='tree', suffix='.tre') as name: # Use a specific path to rppr, otherwise rely on $PATH subprocess.check_call([rppr or 'rppr', 'reroot', '-c', self.path, '-o', name]) if not(pretend): self.update_file('tree', name) self._log('Rerooting refpkg')
[ "def", "reroot", "(", "self", ",", "rppr", "=", "None", ",", "pretend", "=", "False", ")", ":", "with", "scratch_file", "(", "prefix", "=", "'tree'", ",", "suffix", "=", "'.tre'", ")", "as", "name", ":", "# Use a specific path to rppr, otherwise rely on $PATH"...
Reroot the phylogenetic tree. This operation calls ``rppr reroot`` to generate the rerooted tree, so you must have ``pplacer`` and its auxiliary tools ``rppr`` and ``guppy`` installed for it to work. You can specify the path to ``rppr`` by giving it as the *rppr* argument. If *pretend* is ``True``, the convexification is run, but the refpkg is not actually updated.
[ "Reroot", "the", "phylogenetic", "tree", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L483-L501
train
41,028
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.update_phylo_model
def update_phylo_model(self, stats_type, stats_file, frequency_type=None): """Parse a stats log and use it to update ``phylo_model``. ``pplacer`` expects its input to include the deatils of the phylogenetic model used for creating a tree in JSON format under the key ``phylo_model``, but no program actually outputs that format. This function takes a log generated by RAxML or FastTree, parses it, and inserts an appropriate JSON file into the refpkg. The first parameter must be 'RAxML', 'PhyML' or 'FastTree', depending on which program generated the log. It may also be None to attempt to guess which program generated the log. :param stats_type: Statistics file type. One of 'RAxML', 'FastTree', 'PhyML' :param stats_file: path to statistics/log file :param frequency_type: For ``stats_type == 'PhyML'``, amino acid alignments only: was the alignment inferred with ``model`` or ``empirical`` frequencies? """ if frequency_type not in (None, 'model', 'empirical'): raise ValueError( 'Unknown frequency type: "{0}"'.format(frequency_type)) if frequency_type and stats_type not in (None, 'PhyML'): raise ValueError('Frequency type should only be specified for ' 'PhyML alignments.') if stats_type is None: with open(stats_file) as fobj: for line in fobj: if line.startswith('FastTree'): stats_type = 'FastTree' break elif (line.startswith('This is RAxML') or line.startswith('You are using RAxML')): stats_type = 'RAxML' break elif 'PhyML' in line: stats_type = 'PhyML' break else: raise ValueError( "couldn't guess log type for %r" % (stats_file,)) if stats_type == 'RAxML': parser = utils.parse_raxml elif stats_type == 'FastTree': parser = utils.parse_fasttree elif stats_type == 'PhyML': parser = functools.partial(utils.parse_phyml, frequency_type=frequency_type) else: raise ValueError('invalid log type: %r' % (stats_type,)) with scratch_file(prefix='phylo_model', suffix='.json') as name: with open(name, 'w') as phylo_model, open(stats_file) as h: json.dump(parser(h), phylo_model, 
indent=4) self.update_file('phylo_model', name)
python
def update_phylo_model(self, stats_type, stats_file, frequency_type=None): """Parse a stats log and use it to update ``phylo_model``. ``pplacer`` expects its input to include the deatils of the phylogenetic model used for creating a tree in JSON format under the key ``phylo_model``, but no program actually outputs that format. This function takes a log generated by RAxML or FastTree, parses it, and inserts an appropriate JSON file into the refpkg. The first parameter must be 'RAxML', 'PhyML' or 'FastTree', depending on which program generated the log. It may also be None to attempt to guess which program generated the log. :param stats_type: Statistics file type. One of 'RAxML', 'FastTree', 'PhyML' :param stats_file: path to statistics/log file :param frequency_type: For ``stats_type == 'PhyML'``, amino acid alignments only: was the alignment inferred with ``model`` or ``empirical`` frequencies? """ if frequency_type not in (None, 'model', 'empirical'): raise ValueError( 'Unknown frequency type: "{0}"'.format(frequency_type)) if frequency_type and stats_type not in (None, 'PhyML'): raise ValueError('Frequency type should only be specified for ' 'PhyML alignments.') if stats_type is None: with open(stats_file) as fobj: for line in fobj: if line.startswith('FastTree'): stats_type = 'FastTree' break elif (line.startswith('This is RAxML') or line.startswith('You are using RAxML')): stats_type = 'RAxML' break elif 'PhyML' in line: stats_type = 'PhyML' break else: raise ValueError( "couldn't guess log type for %r" % (stats_file,)) if stats_type == 'RAxML': parser = utils.parse_raxml elif stats_type == 'FastTree': parser = utils.parse_fasttree elif stats_type == 'PhyML': parser = functools.partial(utils.parse_phyml, frequency_type=frequency_type) else: raise ValueError('invalid log type: %r' % (stats_type,)) with scratch_file(prefix='phylo_model', suffix='.json') as name: with open(name, 'w') as phylo_model, open(stats_file) as h: json.dump(parser(h), phylo_model, 
indent=4) self.update_file('phylo_model', name)
[ "def", "update_phylo_model", "(", "self", ",", "stats_type", ",", "stats_file", ",", "frequency_type", "=", "None", ")", ":", "if", "frequency_type", "not", "in", "(", "None", ",", "'model'", ",", "'empirical'", ")", ":", "raise", "ValueError", "(", "'Unknow...
Parse a stats log and use it to update ``phylo_model``. ``pplacer`` expects its input to include the deatils of the phylogenetic model used for creating a tree in JSON format under the key ``phylo_model``, but no program actually outputs that format. This function takes a log generated by RAxML or FastTree, parses it, and inserts an appropriate JSON file into the refpkg. The first parameter must be 'RAxML', 'PhyML' or 'FastTree', depending on which program generated the log. It may also be None to attempt to guess which program generated the log. :param stats_type: Statistics file type. One of 'RAxML', 'FastTree', 'PhyML' :param stats_file: path to statistics/log file :param frequency_type: For ``stats_type == 'PhyML'``, amino acid alignments only: was the alignment inferred with ``model`` or ``empirical`` frequencies?
[ "Parse", "a", "stats", "log", "and", "use", "it", "to", "update", "phylo_model", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L503-L562
train
41,029
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.rollback
def rollback(self): """Revert the previous modification to the refpkg. """ # This is slightly complicated because of Python's freakish # assignment semantics and because we don't store multiple # copies of the log. if self.contents['rollback'] is None: raise ValueError("No operation to roll back on refpkg") future_msg = self.contents['log'][0] rolledback_log = self.contents['log'][1:] rollforward = copy.deepcopy(self.contents) rollforward.pop('rollback') self.contents = self.contents['rollback'] self.contents['log'] = rolledback_log self.contents['rollforward'] = [future_msg, rollforward] self._sync_to_disk()
python
def rollback(self): """Revert the previous modification to the refpkg. """ # This is slightly complicated because of Python's freakish # assignment semantics and because we don't store multiple # copies of the log. if self.contents['rollback'] is None: raise ValueError("No operation to roll back on refpkg") future_msg = self.contents['log'][0] rolledback_log = self.contents['log'][1:] rollforward = copy.deepcopy(self.contents) rollforward.pop('rollback') self.contents = self.contents['rollback'] self.contents['log'] = rolledback_log self.contents['rollforward'] = [future_msg, rollforward] self._sync_to_disk()
[ "def", "rollback", "(", "self", ")", ":", "# This is slightly complicated because of Python's freakish", "# assignment semantics and because we don't store multiple", "# copies of the log.", "if", "self", ".", "contents", "[", "'rollback'", "]", "is", "None", ":", "raise", "V...
Revert the previous modification to the refpkg.
[ "Revert", "the", "previous", "modification", "to", "the", "refpkg", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L564-L579
train
41,030
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.rollforward
def rollforward(self): """Restore a reverted modification to the refpkg. """ if self.contents['rollforward'] is None: raise ValueError("No operation to roll forward on refpkg") new_log_message = self.contents['rollforward'][0] new_contents = self.contents['rollforward'][1] new_contents['log'] = [new_log_message] + self.contents.pop('log') self.contents['rollforward'] = None new_contents['rollback'] = copy.deepcopy(self.contents) new_contents['rollback'].pop('rollforward') self.contents = new_contents self._sync_to_disk()
python
def rollforward(self): """Restore a reverted modification to the refpkg. """ if self.contents['rollforward'] is None: raise ValueError("No operation to roll forward on refpkg") new_log_message = self.contents['rollforward'][0] new_contents = self.contents['rollforward'][1] new_contents['log'] = [new_log_message] + self.contents.pop('log') self.contents['rollforward'] = None new_contents['rollback'] = copy.deepcopy(self.contents) new_contents['rollback'].pop('rollforward') self.contents = new_contents self._sync_to_disk()
[ "def", "rollforward", "(", "self", ")", ":", "if", "self", ".", "contents", "[", "'rollforward'", "]", "is", "None", ":", "raise", "ValueError", "(", "\"No operation to roll forward on refpkg\"", ")", "new_log_message", "=", "self", ".", "contents", "[", "'rollf...
Restore a reverted modification to the refpkg.
[ "Restore", "a", "reverted", "modification", "to", "the", "refpkg", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L581-L593
train
41,031
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.strip
def strip(self): """Remove rollbacks, rollforwards, and all non-current files. When distributing a refpkg, you probably want to distribute as small a one as possible. strip removes everything from the refpkg which is not relevant to its current state. """ self._sync_from_disk() current_filenames = set(self.contents['files'].values()) all_filenames = set(os.listdir(self.path)) to_delete = all_filenames.difference(current_filenames) to_delete.discard('CONTENTS.json') for f in to_delete: self._delete_file(f) self.contents['rollback'] = None self.contents['rollforward'] = None self.contents['log'].insert( 0, 'Stripped refpkg (removed %d files)' % len(to_delete)) self._sync_to_disk()
python
def strip(self): """Remove rollbacks, rollforwards, and all non-current files. When distributing a refpkg, you probably want to distribute as small a one as possible. strip removes everything from the refpkg which is not relevant to its current state. """ self._sync_from_disk() current_filenames = set(self.contents['files'].values()) all_filenames = set(os.listdir(self.path)) to_delete = all_filenames.difference(current_filenames) to_delete.discard('CONTENTS.json') for f in to_delete: self._delete_file(f) self.contents['rollback'] = None self.contents['rollforward'] = None self.contents['log'].insert( 0, 'Stripped refpkg (removed %d files)' % len(to_delete)) self._sync_to_disk()
[ "def", "strip", "(", "self", ")", ":", "self", ".", "_sync_from_disk", "(", ")", "current_filenames", "=", "set", "(", "self", ".", "contents", "[", "'files'", "]", ".", "values", "(", ")", ")", "all_filenames", "=", "set", "(", "os", ".", "listdir", ...
Remove rollbacks, rollforwards, and all non-current files. When distributing a refpkg, you probably want to distribute as small a one as possible. strip removes everything from the refpkg which is not relevant to its current state.
[ "Remove", "rollbacks", "rollforwards", "and", "all", "non", "-", "current", "files", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L595-L613
train
41,032
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.start_transaction
def start_transaction(self): """Begin a transaction to group operations on the refpkg. All the operations until the next call to ``commit_transaction`` will be recorded as a single operation for rollback and rollforward, and recorded with a single line in the log. """ if self.current_transaction: raise ValueError("There is already a transaction going") else: initial_state = copy.deepcopy(self.contents) self.current_transaction = {'rollback': initial_state, 'log': '(Transaction left no log message)'}
python
def start_transaction(self): """Begin a transaction to group operations on the refpkg. All the operations until the next call to ``commit_transaction`` will be recorded as a single operation for rollback and rollforward, and recorded with a single line in the log. """ if self.current_transaction: raise ValueError("There is already a transaction going") else: initial_state = copy.deepcopy(self.contents) self.current_transaction = {'rollback': initial_state, 'log': '(Transaction left no log message)'}
[ "def", "start_transaction", "(", "self", ")", ":", "if", "self", ".", "current_transaction", ":", "raise", "ValueError", "(", "\"There is already a transaction going\"", ")", "else", ":", "initial_state", "=", "copy", ".", "deepcopy", "(", "self", ".", "contents",...
Begin a transaction to group operations on the refpkg. All the operations until the next call to ``commit_transaction`` will be recorded as a single operation for rollback and rollforward, and recorded with a single line in the log.
[ "Begin", "a", "transaction", "to", "group", "operations", "on", "the", "refpkg", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L615-L628
train
41,033
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.is_ill_formed
def is_ill_formed(self): """Stronger set of checks than is_invalid for Refpkg. Checks that FASTA, Stockholm, JSON, and CSV files under known keys are all valid as well as calling is_invalid. Returns either False or a string describing the error. """ m = self.is_invalid() if m: return m required_keys = ('aln_fasta', 'aln_sto', 'seq_info', 'tree', 'taxonomy', 'phylo_model') for k in required_keys: if k not in self.contents['files']: return "RefPkg has no key " + k # aln_fasta, seq_info, tree, and aln_sto must be valid FASTA, # CSV, Newick, and Stockholm files, respectively, and describe # the same sequences. with self.open_resource('aln_fasta') as f: firstline = f.readline() if firstline.startswith('>'): f.seek(0) else: return 'aln_fasta file is not valid FASTA.' fasta_names = {seq.id for seq in fastalite(f)} with self.open_resource('seq_info') as f: lines = list(csv.reader(f)) headers = set(lines[0]) # Check required headers for req_header in 'seqname', 'tax_id': if req_header not in headers: return "seq_info is missing {0}".format(req_header) lengths = {len(line) for line in lines} if len(lengths) > 1: return "some lines in seq_info differ in field cout" csv_names = {line[0] for line in lines[1:]} with self.open_resource('aln_sto') as f: try: sto_names = set(utils.parse_stockholm(f)) except ValueError: return 'aln_sto file is not valid Stockholm.' try: tree = dendropy.Tree.get( path=self.resource_path('tree'), schema='newick', case_sensitive_taxon_labels=True, preserve_underscores=True) tree_names = set(tree.taxon_namespace.labels()) except Exception: return 'tree file is not valid Newick.' d = fasta_names.symmetric_difference(sto_names) if len(d) != 0: return "Names in aln_fasta did not match aln_sto. Mismatches: " + \ ', '.join([str(x) for x in d]) d = fasta_names.symmetric_difference(csv_names) if len(d) != 0: return "Names in aln_fasta did not match seq_info. 
Mismatches: " + \ ', '.join([str(x) for x in d]) d = fasta_names.symmetric_difference(tree_names) if len(d) != 0: return "Names in aln_fasta did not match nodes in tree. Mismatches: " + \ ', '.join([str(x) for x in d]) # Next make sure that taxonomy is valid CSV, phylo_model is valid JSON with self.open_resource('taxonomy') as f: lines = list(csv.reader(f)) lengths = {len(line) for line in lines} if len(lengths) > 1: return ("Taxonomy is invalid: not all lines had " "the same number of fields.") # I don't try to check if the taxids match up to those # mentioned in aln_fasta, since that would make taxtastic # depend on RefsetInternalFasta in romperroom. with self.open_resource('phylo_model') as f: try: json.load(f) except ValueError: return "phylo_model is not valid JSON." return False
python
def is_ill_formed(self): """Stronger set of checks than is_invalid for Refpkg. Checks that FASTA, Stockholm, JSON, and CSV files under known keys are all valid as well as calling is_invalid. Returns either False or a string describing the error. """ m = self.is_invalid() if m: return m required_keys = ('aln_fasta', 'aln_sto', 'seq_info', 'tree', 'taxonomy', 'phylo_model') for k in required_keys: if k not in self.contents['files']: return "RefPkg has no key " + k # aln_fasta, seq_info, tree, and aln_sto must be valid FASTA, # CSV, Newick, and Stockholm files, respectively, and describe # the same sequences. with self.open_resource('aln_fasta') as f: firstline = f.readline() if firstline.startswith('>'): f.seek(0) else: return 'aln_fasta file is not valid FASTA.' fasta_names = {seq.id for seq in fastalite(f)} with self.open_resource('seq_info') as f: lines = list(csv.reader(f)) headers = set(lines[0]) # Check required headers for req_header in 'seqname', 'tax_id': if req_header not in headers: return "seq_info is missing {0}".format(req_header) lengths = {len(line) for line in lines} if len(lengths) > 1: return "some lines in seq_info differ in field cout" csv_names = {line[0] for line in lines[1:]} with self.open_resource('aln_sto') as f: try: sto_names = set(utils.parse_stockholm(f)) except ValueError: return 'aln_sto file is not valid Stockholm.' try: tree = dendropy.Tree.get( path=self.resource_path('tree'), schema='newick', case_sensitive_taxon_labels=True, preserve_underscores=True) tree_names = set(tree.taxon_namespace.labels()) except Exception: return 'tree file is not valid Newick.' d = fasta_names.symmetric_difference(sto_names) if len(d) != 0: return "Names in aln_fasta did not match aln_sto. Mismatches: " + \ ', '.join([str(x) for x in d]) d = fasta_names.symmetric_difference(csv_names) if len(d) != 0: return "Names in aln_fasta did not match seq_info. 
Mismatches: " + \ ', '.join([str(x) for x in d]) d = fasta_names.symmetric_difference(tree_names) if len(d) != 0: return "Names in aln_fasta did not match nodes in tree. Mismatches: " + \ ', '.join([str(x) for x in d]) # Next make sure that taxonomy is valid CSV, phylo_model is valid JSON with self.open_resource('taxonomy') as f: lines = list(csv.reader(f)) lengths = {len(line) for line in lines} if len(lengths) > 1: return ("Taxonomy is invalid: not all lines had " "the same number of fields.") # I don't try to check if the taxids match up to those # mentioned in aln_fasta, since that would make taxtastic # depend on RefsetInternalFasta in romperroom. with self.open_resource('phylo_model') as f: try: json.load(f) except ValueError: return "phylo_model is not valid JSON." return False
[ "def", "is_ill_formed", "(", "self", ")", ":", "m", "=", "self", ".", "is_invalid", "(", ")", "if", "m", ":", "return", "m", "required_keys", "=", "(", "'aln_fasta'", ",", "'aln_sto'", ",", "'seq_info'", ",", "'tree'", ",", "'taxonomy'", ",", "'phylo_mod...
Stronger set of checks than is_invalid for Refpkg. Checks that FASTA, Stockholm, JSON, and CSV files under known keys are all valid as well as calling is_invalid. Returns either False or a string describing the error.
[ "Stronger", "set", "of", "checks", "than", "is_invalid", "for", "Refpkg", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L641-L728
train
41,034
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.load_db
def load_db(self): """Load the taxonomy into a sqlite3 database. This will set ``self.db`` to a sqlite3 database which contains all of the taxonomic information in the reference package. """ db = taxdb.Taxdb() db.create_tables() reader = csv.DictReader(self.open_resource('taxonomy', 'rU')) db.insert_from_taxtable(lambda: reader._fieldnames, reader) curs = db.cursor() reader = csv.DictReader(self.open_resource('seq_info', 'rU')) curs.executemany("INSERT INTO sequences VALUES (?, ?)", ((row['seqname'], row['tax_id']) for row in reader)) db.commit() self.db = db
python
def load_db(self): """Load the taxonomy into a sqlite3 database. This will set ``self.db`` to a sqlite3 database which contains all of the taxonomic information in the reference package. """ db = taxdb.Taxdb() db.create_tables() reader = csv.DictReader(self.open_resource('taxonomy', 'rU')) db.insert_from_taxtable(lambda: reader._fieldnames, reader) curs = db.cursor() reader = csv.DictReader(self.open_resource('seq_info', 'rU')) curs.executemany("INSERT INTO sequences VALUES (?, ?)", ((row['seqname'], row['tax_id']) for row in reader)) db.commit() self.db = db
[ "def", "load_db", "(", "self", ")", ":", "db", "=", "taxdb", ".", "Taxdb", "(", ")", "db", ".", "create_tables", "(", ")", "reader", "=", "csv", ".", "DictReader", "(", "self", ".", "open_resource", "(", "'taxonomy'", ",", "'rU'", ")", ")", "db", "...
Load the taxonomy into a sqlite3 database. This will set ``self.db`` to a sqlite3 database which contains all of the taxonomic information in the reference package.
[ "Load", "the", "taxonomy", "into", "a", "sqlite3", "database", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L730-L748
train
41,035
fhcrc/taxtastic
taxtastic/refpkg.py
Refpkg.most_recent_common_ancestor
def most_recent_common_ancestor(self, *ts): """Find the MRCA of some tax_ids. Returns the MRCA of the specified tax_ids, or raises ``NoAncestor`` if no ancestor of the specified tax_ids could be found. """ if len(ts) > 200: res = self._large_mrca(ts) else: res = self._small_mrca(ts) if res: (res,), = res else: raise NoAncestor() return res
python
def most_recent_common_ancestor(self, *ts): """Find the MRCA of some tax_ids. Returns the MRCA of the specified tax_ids, or raises ``NoAncestor`` if no ancestor of the specified tax_ids could be found. """ if len(ts) > 200: res = self._large_mrca(ts) else: res = self._small_mrca(ts) if res: (res,), = res else: raise NoAncestor() return res
[ "def", "most_recent_common_ancestor", "(", "self", ",", "*", "ts", ")", ":", "if", "len", "(", "ts", ")", ">", "200", ":", "res", "=", "self", ".", "_large_mrca", "(", "ts", ")", "else", ":", "res", "=", "self", ".", "_small_mrca", "(", "ts", ")", ...
Find the MRCA of some tax_ids. Returns the MRCA of the specified tax_ids, or raises ``NoAncestor`` if no ancestor of the specified tax_ids could be found.
[ "Find", "the", "MRCA", "of", "some", "tax_ids", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L750-L765
train
41,036
seznam/shelter
shelter/main.py
main
def main(args=None): """ Run management command handled from command line. """ # Base command line parser. Help is not allowed because command # line is parsed in two stages - in the first stage is found setting # module of the application, in the second stage are found management # command's arguments. parser = ArgumentParser(add_help=False) parser.add_argument( '-s', '--settings', dest='settings', action='store', type=str, default=None, help=_('application settings module') ) # Get settings module try: settings = get_app_settings(parser, args) except ImportError as exc: parser.error(_("Invalid application settings module: {}").format(exc)) # Get management commands and add their arguments into command # line parser commands = get_management_commands(settings) subparsers = parser.add_subparsers( dest='action', help=_('specify action') ) for command_cls in six.itervalues(commands): subparser = subparsers.add_parser( command_cls.name, help=command_cls.help) for command_args, kwargs in command_cls.arguments: subparser.add_argument(*command_args, **kwargs) # Get config class and add its arguments into command line parser if settings: config_cls = get_config_class(settings) if not issubclass(config_cls, Config): raise TypeError( "Config class must be subclass of the " "shelter.core.config.Config") for config_args, kwargs in config_cls.arguments: parser.add_argument(*config_args, **kwargs) else: config_cls = Config # Add help argument and parse command line parser.add_argument( '-h', '--help', action='help', help=_('show this help message and exit') ) cmdline_args = parser.parse_args(args) if not cmdline_args.action: parser.error(_('No action')) # Run management command command_cls = commands[cmdline_args.action] if command_cls.settings_required and not settings: parser.error(_( "Settings module is not defined. You must either set " "'SHELTER_SETTINGS_MODULE' environment variable or " "'-s/--settings' command line argument." 
)) try: config = config_cls(settings, cmdline_args) except ImproperlyConfiguredError as exc: parser.error(str(exc)) command = command_cls(config) try: command() except Exception: traceback.print_exc(file=sys.stderr) sys.stderr.flush() if multiprocessing.active_children(): # If main process has children processes, exit immediately without # cleaning. It is a workaround, because parent process waits for # non-daemon children. os._exit(1) sys.exit(1) sys.exit(0)
python
def main(args=None): """ Run management command handled from command line. """ # Base command line parser. Help is not allowed because command # line is parsed in two stages - in the first stage is found setting # module of the application, in the second stage are found management # command's arguments. parser = ArgumentParser(add_help=False) parser.add_argument( '-s', '--settings', dest='settings', action='store', type=str, default=None, help=_('application settings module') ) # Get settings module try: settings = get_app_settings(parser, args) except ImportError as exc: parser.error(_("Invalid application settings module: {}").format(exc)) # Get management commands and add their arguments into command # line parser commands = get_management_commands(settings) subparsers = parser.add_subparsers( dest='action', help=_('specify action') ) for command_cls in six.itervalues(commands): subparser = subparsers.add_parser( command_cls.name, help=command_cls.help) for command_args, kwargs in command_cls.arguments: subparser.add_argument(*command_args, **kwargs) # Get config class and add its arguments into command line parser if settings: config_cls = get_config_class(settings) if not issubclass(config_cls, Config): raise TypeError( "Config class must be subclass of the " "shelter.core.config.Config") for config_args, kwargs in config_cls.arguments: parser.add_argument(*config_args, **kwargs) else: config_cls = Config # Add help argument and parse command line parser.add_argument( '-h', '--help', action='help', help=_('show this help message and exit') ) cmdline_args = parser.parse_args(args) if not cmdline_args.action: parser.error(_('No action')) # Run management command command_cls = commands[cmdline_args.action] if command_cls.settings_required and not settings: parser.error(_( "Settings module is not defined. You must either set " "'SHELTER_SETTINGS_MODULE' environment variable or " "'-s/--settings' command line argument." 
)) try: config = config_cls(settings, cmdline_args) except ImproperlyConfiguredError as exc: parser.error(str(exc)) command = command_cls(config) try: command() except Exception: traceback.print_exc(file=sys.stderr) sys.stderr.flush() if multiprocessing.active_children(): # If main process has children processes, exit immediately without # cleaning. It is a workaround, because parent process waits for # non-daemon children. os._exit(1) sys.exit(1) sys.exit(0)
[ "def", "main", "(", "args", "=", "None", ")", ":", "# Base command line parser. Help is not allowed because command", "# line is parsed in two stages - in the first stage is found setting", "# module of the application, in the second stage are found management", "# command's arguments.", "pa...
Run management command handled from command line.
[ "Run", "management", "command", "handled", "from", "command", "line", "." ]
c652b0ff1cca70158f8fc97d9210c1fa5961ac1c
https://github.com/seznam/shelter/blob/c652b0ff1cca70158f8fc97d9210c1fa5961ac1c/shelter/main.py#L71-L149
train
41,037
seequent/vectormath
vectormath/vector.py
BaseVector.as_length
def as_length(self, value): """Return a new vector scaled to given length""" new_vec = self.copy() new_vec.length = value return new_vec
python
def as_length(self, value): """Return a new vector scaled to given length""" new_vec = self.copy() new_vec.length = value return new_vec
[ "def", "as_length", "(", "self", ",", "value", ")", ":", "new_vec", "=", "self", ".", "copy", "(", ")", "new_vec", ".", "length", "=", "value", "return", "new_vec" ]
Return a new vector scaled to given length
[ "Return", "a", "new", "vector", "scaled", "to", "given", "length" ]
a2259fb82cf5a665170f50d216b11a738400d878
https://github.com/seequent/vectormath/blob/a2259fb82cf5a665170f50d216b11a738400d878/vectormath/vector.py#L88-L92
train
41,038
seequent/vectormath
vectormath/vector.py
BaseVector.as_percent
def as_percent(self, value): """Return a new vector scaled by given decimal percent""" new_vec = self.copy() new_vec.length = value * self.length return new_vec
python
def as_percent(self, value): """Return a new vector scaled by given decimal percent""" new_vec = self.copy() new_vec.length = value * self.length return new_vec
[ "def", "as_percent", "(", "self", ",", "value", ")", ":", "new_vec", "=", "self", ".", "copy", "(", ")", "new_vec", ".", "length", "=", "value", "*", "self", ".", "length", "return", "new_vec" ]
Return a new vector scaled by given decimal percent
[ "Return", "a", "new", "vector", "scaled", "by", "given", "decimal", "percent" ]
a2259fb82cf5a665170f50d216b11a738400d878
https://github.com/seequent/vectormath/blob/a2259fb82cf5a665170f50d216b11a738400d878/vectormath/vector.py#L94-L98
train
41,039
seequent/vectormath
vectormath/vector.py
BaseVector.angle
def angle(self, vec, unit='rad'): """Calculate the angle between two Vectors unit: unit for returned angle, either 'rad' or 'deg'. Defaults to 'rad' """ if not isinstance(vec, self.__class__): raise TypeError('Angle operand must be of class {}' .format(self.__class__.__name__)) if unit not in ['deg', 'rad']: raise ValueError('Only units of rad or deg are supported') denom = self.length * vec.length if denom == 0: raise ZeroDivisionError('Cannot calculate angle between ' 'zero-length vector(s)') ang = np.arccos(self.dot(vec) / denom) if unit == 'deg': ang = ang * 180 / np.pi return ang
python
def angle(self, vec, unit='rad'): """Calculate the angle between two Vectors unit: unit for returned angle, either 'rad' or 'deg'. Defaults to 'rad' """ if not isinstance(vec, self.__class__): raise TypeError('Angle operand must be of class {}' .format(self.__class__.__name__)) if unit not in ['deg', 'rad']: raise ValueError('Only units of rad or deg are supported') denom = self.length * vec.length if denom == 0: raise ZeroDivisionError('Cannot calculate angle between ' 'zero-length vector(s)') ang = np.arccos(self.dot(vec) / denom) if unit == 'deg': ang = ang * 180 / np.pi return ang
[ "def", "angle", "(", "self", ",", "vec", ",", "unit", "=", "'rad'", ")", ":", "if", "not", "isinstance", "(", "vec", ",", "self", ".", "__class__", ")", ":", "raise", "TypeError", "(", "'Angle operand must be of class {}'", ".", "format", "(", "self", "....
Calculate the angle between two Vectors unit: unit for returned angle, either 'rad' or 'deg'. Defaults to 'rad'
[ "Calculate", "the", "angle", "between", "two", "Vectors" ]
a2259fb82cf5a665170f50d216b11a738400d878
https://github.com/seequent/vectormath/blob/a2259fb82cf5a665170f50d216b11a738400d878/vectormath/vector.py#L123-L142
train
41,040
seequent/vectormath
vectormath/vector.py
BaseVectorArray.length
def length(self): """Array of vector lengths""" return np.sqrt(np.sum(self**2, axis=1)).view(np.ndarray)
python
def length(self): """Array of vector lengths""" return np.sqrt(np.sum(self**2, axis=1)).view(np.ndarray)
[ "def", "length", "(", "self", ")", ":", "return", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "self", "**", "2", ",", "axis", "=", "1", ")", ")", ".", "view", "(", "np", ".", "ndarray", ")" ]
Array of vector lengths
[ "Array", "of", "vector", "lengths" ]
a2259fb82cf5a665170f50d216b11a738400d878
https://github.com/seequent/vectormath/blob/a2259fb82cf5a665170f50d216b11a738400d878/vectormath/vector.py#L347-L349
train
41,041
seequent/vectormath
vectormath/vector.py
Vector3Array.cross
def cross(self, vec): """Cross product with another Vector3Array""" if not isinstance(vec, Vector3Array): raise TypeError('Cross product operand must be a Vector3Array') if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV: raise ValueError('Cross product operands must have the same ' 'number of elements.') return Vector3Array(np.cross(self, vec))
python
def cross(self, vec): """Cross product with another Vector3Array""" if not isinstance(vec, Vector3Array): raise TypeError('Cross product operand must be a Vector3Array') if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV: raise ValueError('Cross product operands must have the same ' 'number of elements.') return Vector3Array(np.cross(self, vec))
[ "def", "cross", "(", "self", ",", "vec", ")", ":", "if", "not", "isinstance", "(", "vec", ",", "Vector3Array", ")", ":", "raise", "TypeError", "(", "'Cross product operand must be a Vector3Array'", ")", "if", "self", ".", "nV", "!=", "1", "and", "vec", "."...
Cross product with another Vector3Array
[ "Cross", "product", "with", "another", "Vector3Array" ]
a2259fb82cf5a665170f50d216b11a738400d878
https://github.com/seequent/vectormath/blob/a2259fb82cf5a665170f50d216b11a738400d878/vectormath/vector.py#L487-L494
train
41,042
amicks/Speculator
speculator/features/RSI.py
RSI.eval_rs
def eval_rs(gains, losses): """ Evaluates the RS variable in RSI algorithm Args: gains: List of price gains. losses: List of prices losses. Returns: Float of average gains over average losses. """ # Number of days that the data was collected through count = len(gains) + len(losses) avg_gains = stats.avg(gains, count=count) if gains else 1 avg_losses = stats.avg(losses,count=count) if losses else 1 if avg_losses == 0: return avg_gains else: return avg_gains / avg_losses
python
def eval_rs(gains, losses): """ Evaluates the RS variable in RSI algorithm Args: gains: List of price gains. losses: List of prices losses. Returns: Float of average gains over average losses. """ # Number of days that the data was collected through count = len(gains) + len(losses) avg_gains = stats.avg(gains, count=count) if gains else 1 avg_losses = stats.avg(losses,count=count) if losses else 1 if avg_losses == 0: return avg_gains else: return avg_gains / avg_losses
[ "def", "eval_rs", "(", "gains", ",", "losses", ")", ":", "# Number of days that the data was collected through", "count", "=", "len", "(", "gains", ")", "+", "len", "(", "losses", ")", "avg_gains", "=", "stats", ".", "avg", "(", "gains", ",", "count", "=", ...
Evaluates the RS variable in RSI algorithm Args: gains: List of price gains. losses: List of prices losses. Returns: Float of average gains over average losses.
[ "Evaluates", "the", "RS", "variable", "in", "RSI", "algorithm" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/features/RSI.py#L26-L44
train
41,043
fhcrc/taxtastic
taxtastic/subcommands/info.py
action
def action(args): """ Show information about reference packages. """ log.info('loading reference package') pkg = refpkg.Refpkg(args.refpkg, create=False) with open(pkg.file_abspath('seq_info'), 'rU') as seq_info: seqinfo = list(csv.DictReader(seq_info)) snames = [row['seqname'] for row in seqinfo] if args.seq_names: print('\n'.join(snames)) elif args.tally: tally_taxa(pkg) elif args.lengths: print_lengths(pkg) else: print('number of sequences:', len(snames)) print('package components\n', '\n'.join(sorted(pkg.file_keys())))
python
def action(args): """ Show information about reference packages. """ log.info('loading reference package') pkg = refpkg.Refpkg(args.refpkg, create=False) with open(pkg.file_abspath('seq_info'), 'rU') as seq_info: seqinfo = list(csv.DictReader(seq_info)) snames = [row['seqname'] for row in seqinfo] if args.seq_names: print('\n'.join(snames)) elif args.tally: tally_taxa(pkg) elif args.lengths: print_lengths(pkg) else: print('number of sequences:', len(snames)) print('package components\n', '\n'.join(sorted(pkg.file_keys())))
[ "def", "action", "(", "args", ")", ":", "log", ".", "info", "(", "'loading reference package'", ")", "pkg", "=", "refpkg", ".", "Refpkg", "(", "args", ".", "refpkg", ",", "create", "=", "False", ")", "with", "open", "(", "pkg", ".", "file_abspath", "("...
Show information about reference packages.
[ "Show", "information", "about", "reference", "packages", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/subcommands/info.py#L68-L88
train
41,044
amicks/Speculator
speculator/utils/poloniex.py
json_to_url
def json_to_url(json, symbol): """ Converts a JSON to a URL by the Poloniex API Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. symbol: String of currency pair, like a ticker symbol. Returns: String URL to Poloniex API representing the given JSON. """ start = json[0]['date'] end = json[-1]['date'] diff = end - start # Get period by a ratio from calculated period to valid periods # Ratio closest to 1 is the period # Valid values: 300, 900, 1800, 7200, 14400, 86400 periods = [300, 900, 1800, 7200, 14400, 86400] diffs = {} for p in periods: diffs[p] = abs(1 - (p / (diff / len(json)))) # Get ratio period = min(diffs, key=diffs.get) # Find closest period url = ('https://poloniex.com/public?command' '=returnChartData&currencyPair={0}&start={1}' '&end={2}&period={3}').format(symbol, start, end, period) return url
python
def json_to_url(json, symbol): """ Converts a JSON to a URL by the Poloniex API Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. symbol: String of currency pair, like a ticker symbol. Returns: String URL to Poloniex API representing the given JSON. """ start = json[0]['date'] end = json[-1]['date'] diff = end - start # Get period by a ratio from calculated period to valid periods # Ratio closest to 1 is the period # Valid values: 300, 900, 1800, 7200, 14400, 86400 periods = [300, 900, 1800, 7200, 14400, 86400] diffs = {} for p in periods: diffs[p] = abs(1 - (p / (diff / len(json)))) # Get ratio period = min(diffs, key=diffs.get) # Find closest period url = ('https://poloniex.com/public?command' '=returnChartData&currencyPair={0}&start={1}' '&end={2}&period={3}').format(symbol, start, end, period) return url
[ "def", "json_to_url", "(", "json", ",", "symbol", ")", ":", "start", "=", "json", "[", "0", "]", "[", "'date'", "]", "end", "=", "json", "[", "-", "1", "]", "[", "'date'", "]", "diff", "=", "end", "-", "start", "# Get period by a ratio from calculated ...
Converts a JSON to a URL by the Poloniex API Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. symbol: String of currency pair, like a ticker symbol. Returns: String URL to Poloniex API representing the given JSON.
[ "Converts", "a", "JSON", "to", "a", "URL", "by", "the", "Poloniex", "API" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/utils/poloniex.py#L8-L37
train
41,045
amicks/Speculator
speculator/utils/poloniex.py
parse_changes
def parse_changes(json): """ Gets price changes from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. Returns: List of floats of price changes between entries in JSON. """ changes = [] dates = len(json) for date in range(1, dates): last_close = json[date - 1]['close'] now_close = json[date]['close'] changes.append(now_close - last_close) logger.debug('Market Changes (from JSON):\n{0}'.format(changes)) return changes
python
def parse_changes(json): """ Gets price changes from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. Returns: List of floats of price changes between entries in JSON. """ changes = [] dates = len(json) for date in range(1, dates): last_close = json[date - 1]['close'] now_close = json[date]['close'] changes.append(now_close - last_close) logger.debug('Market Changes (from JSON):\n{0}'.format(changes)) return changes
[ "def", "parse_changes", "(", "json", ")", ":", "changes", "=", "[", "]", "dates", "=", "len", "(", "json", ")", "for", "date", "in", "range", "(", "1", ",", "dates", ")", ":", "last_close", "=", "json", "[", "date", "-", "1", "]", "[", "'close'",...
Gets price changes from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. Returns: List of floats of price changes between entries in JSON.
[ "Gets", "price", "changes", "from", "JSON" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/utils/poloniex.py#L76-L93
train
41,046
amicks/Speculator
speculator/utils/poloniex.py
get_gains_losses
def get_gains_losses(changes): """ Categorizes changes into gains and losses Args: changes: List of floats of price changes between entries in JSON. Returns: Dict of changes with keys 'gains' and 'losses'. All values are positive. """ res = {'gains': [], 'losses': []} for change in changes: if change > 0: res['gains'].append(change) else: res['losses'].append(change * -1) logger.debug('Gains: {0}'.format(res['gains'])) logger.debug('Losses: {0}'.format(res['losses'])) return res
python
def get_gains_losses(changes): """ Categorizes changes into gains and losses Args: changes: List of floats of price changes between entries in JSON. Returns: Dict of changes with keys 'gains' and 'losses'. All values are positive. """ res = {'gains': [], 'losses': []} for change in changes: if change > 0: res['gains'].append(change) else: res['losses'].append(change * -1) logger.debug('Gains: {0}'.format(res['gains'])) logger.debug('Losses: {0}'.format(res['losses'])) return res
[ "def", "get_gains_losses", "(", "changes", ")", ":", "res", "=", "{", "'gains'", ":", "[", "]", ",", "'losses'", ":", "[", "]", "}", "for", "change", "in", "changes", ":", "if", "change", ">", "0", ":", "res", "[", "'gains'", "]", ".", "append", ...
Categorizes changes into gains and losses Args: changes: List of floats of price changes between entries in JSON. Returns: Dict of changes with keys 'gains' and 'losses'. All values are positive.
[ "Categorizes", "changes", "into", "gains", "and", "losses" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/utils/poloniex.py#L95-L113
train
41,047
amicks/Speculator
speculator/utils/poloniex.py
get_attribute
def get_attribute(json, attr): """ Gets the values of an attribute from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. attr: String of attribute in JSON file to collect. Returns: List of values of specified attribute from JSON """ res = [json[entry][attr] for entry, _ in enumerate(json)] logger.debug('{0}s (from JSON):\n{1}'.format(attr, res)) return res
python
def get_attribute(json, attr): """ Gets the values of an attribute from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. attr: String of attribute in JSON file to collect. Returns: List of values of specified attribute from JSON """ res = [json[entry][attr] for entry, _ in enumerate(json)] logger.debug('{0}s (from JSON):\n{1}'.format(attr, res)) return res
[ "def", "get_attribute", "(", "json", ",", "attr", ")", ":", "res", "=", "[", "json", "[", "entry", "]", "[", "attr", "]", "for", "entry", ",", "_", "in", "enumerate", "(", "json", ")", "]", "logger", ".", "debug", "(", "'{0}s (from JSON):\\n{1}'", "....
Gets the values of an attribute from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. attr: String of attribute in JSON file to collect. Returns: List of values of specified attribute from JSON
[ "Gets", "the", "values", "of", "an", "attribute", "from", "JSON" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/utils/poloniex.py#L115-L128
train
41,048
amicks/Speculator
speculator/utils/poloniex.py
get_json_shift
def get_json_shift(year, month, day, unit, count, period, symbol): """ Gets JSON from shifted date by the Poloniex API Args: year: Int between 1 and 9999. month: Int between 1 and 12. day: Int between 1 and 31. unit: String of time period unit for count argument. How far back to check historical market data. Valid values: 'hour', 'day', 'week', 'month', 'year' count: Int of units. How far back to check historical market data. period: Int defining width of each chart candlestick in seconds. symbol: String of currency pair, like a ticker symbol. Returns: JSON, list of dates where each entry is a dict of raw market data. """ epochs = date.get_end_start_epochs(year, month, day, 'last', unit, count) return chart_json(epochs['shifted'], epochs['initial'], period, symbol)[0]
python
def get_json_shift(year, month, day, unit, count, period, symbol): """ Gets JSON from shifted date by the Poloniex API Args: year: Int between 1 and 9999. month: Int between 1 and 12. day: Int between 1 and 31. unit: String of time period unit for count argument. How far back to check historical market data. Valid values: 'hour', 'day', 'week', 'month', 'year' count: Int of units. How far back to check historical market data. period: Int defining width of each chart candlestick in seconds. symbol: String of currency pair, like a ticker symbol. Returns: JSON, list of dates where each entry is a dict of raw market data. """ epochs = date.get_end_start_epochs(year, month, day, 'last', unit, count) return chart_json(epochs['shifted'], epochs['initial'], period, symbol)[0]
[ "def", "get_json_shift", "(", "year", ",", "month", ",", "day", ",", "unit", ",", "count", ",", "period", ",", "symbol", ")", ":", "epochs", "=", "date", ".", "get_end_start_epochs", "(", "year", ",", "month", ",", "day", ",", "'last'", ",", "unit", ...
Gets JSON from shifted date by the Poloniex API Args: year: Int between 1 and 9999. month: Int between 1 and 12. day: Int between 1 and 31. unit: String of time period unit for count argument. How far back to check historical market data. Valid values: 'hour', 'day', 'week', 'month', 'year' count: Int of units. How far back to check historical market data. period: Int defining width of each chart candlestick in seconds. symbol: String of currency pair, like a ticker symbol. Returns: JSON, list of dates where each entry is a dict of raw market data.
[ "Gets", "JSON", "from", "shifted", "date", "by", "the", "Poloniex", "API" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/utils/poloniex.py#L130-L149
train
41,049
fhcrc/taxtastic
taxtastic/subcommands/refpkg_intersection.py
filter_ranks
def filter_ranks(results): """ Find just the first rank for all the results for a given tax_id. """ for _, group in itertools.groupby(results, operator.itemgetter(0)): yield next(group)
python
def filter_ranks(results): """ Find just the first rank for all the results for a given tax_id. """ for _, group in itertools.groupby(results, operator.itemgetter(0)): yield next(group)
[ "def", "filter_ranks", "(", "results", ")", ":", "for", "_", ",", "group", "in", "itertools", ".", "groupby", "(", "results", ",", "operator", ".", "itemgetter", "(", "0", ")", ")", ":", "yield", "next", "(", "group", ")" ]
Find just the first rank for all the results for a given tax_id.
[ "Find", "just", "the", "first", "rank", "for", "all", "the", "results", "for", "a", "given", "tax_id", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/subcommands/refpkg_intersection.py#L49-L54
train
41,050
amicks/Speculator
speculator/features/SO.py
SO.eval_algorithm
def eval_algorithm(closing, low, high): """ Evaluates the SO algorithm Args: closing: Float of current closing price. low: Float of lowest low closing price throughout some duration. high: Float of highest high closing price throughout some duration. Returns: Float SO between 0 and 100. """ if high - low == 0: # High and low are the same, zero division error return 100 * (closing - low) else: return 100 * (closing - low) / (high - low)
python
def eval_algorithm(closing, low, high): """ Evaluates the SO algorithm Args: closing: Float of current closing price. low: Float of lowest low closing price throughout some duration. high: Float of highest high closing price throughout some duration. Returns: Float SO between 0 and 100. """ if high - low == 0: # High and low are the same, zero division error return 100 * (closing - low) else: return 100 * (closing - low) / (high - low)
[ "def", "eval_algorithm", "(", "closing", ",", "low", ",", "high", ")", ":", "if", "high", "-", "low", "==", "0", ":", "# High and low are the same, zero division error", "return", "100", "*", "(", "closing", "-", "low", ")", "else", ":", "return", "100", "...
Evaluates the SO algorithm Args: closing: Float of current closing price. low: Float of lowest low closing price throughout some duration. high: Float of highest high closing price throughout some duration. Returns: Float SO between 0 and 100.
[ "Evaluates", "the", "SO", "algorithm" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/features/SO.py#L17-L31
train
41,051
amicks/Speculator
speculator/utils/stats.py
avg
def avg(vals, count=None): """ Returns the average value Args: vals: List of numbers to calculate average from. count: Int of total count that vals was part of. Returns: Float average value throughout a count. """ sum = 0 for v in vals: sum += v if count is None: count = len(vals) return float(sum) / count
python
def avg(vals, count=None): """ Returns the average value Args: vals: List of numbers to calculate average from. count: Int of total count that vals was part of. Returns: Float average value throughout a count. """ sum = 0 for v in vals: sum += v if count is None: count = len(vals) return float(sum) / count
[ "def", "avg", "(", "vals", ",", "count", "=", "None", ")", ":", "sum", "=", "0", "for", "v", "in", "vals", ":", "sum", "+=", "v", "if", "count", "is", "None", ":", "count", "=", "len", "(", "vals", ")", "return", "float", "(", "sum", ")", "/"...
Returns the average value Args: vals: List of numbers to calculate average from. count: Int of total count that vals was part of. Returns: Float average value throughout a count.
[ "Returns", "the", "average", "value" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/utils/stats.py#L5-L20
train
41,052
fhcrc/taxtastic
taxtastic/ncbi.py
db_connect
def db_connect(engine, schema=None, clobber=False): """Create a connection object to a database. Attempt to establish a schema. If there are existing tables, delete them if clobber is True and return otherwise. Returns a sqlalchemy engine object. """ if schema is None: base = declarative_base() else: try: engine.execute(sqlalchemy.schema.CreateSchema(schema)) except sqlalchemy.exc.ProgrammingError as err: logging.warn(err) base = declarative_base(metadata=MetaData(schema=schema)) define_schema(base) if clobber: logging.info('Clobbering database tables') base.metadata.drop_all(bind=engine) logging.info('Creating database tables') base.metadata.create_all(bind=engine) return base
python
def db_connect(engine, schema=None, clobber=False): """Create a connection object to a database. Attempt to establish a schema. If there are existing tables, delete them if clobber is True and return otherwise. Returns a sqlalchemy engine object. """ if schema is None: base = declarative_base() else: try: engine.execute(sqlalchemy.schema.CreateSchema(schema)) except sqlalchemy.exc.ProgrammingError as err: logging.warn(err) base = declarative_base(metadata=MetaData(schema=schema)) define_schema(base) if clobber: logging.info('Clobbering database tables') base.metadata.drop_all(bind=engine) logging.info('Creating database tables') base.metadata.create_all(bind=engine) return base
[ "def", "db_connect", "(", "engine", ",", "schema", "=", "None", ",", "clobber", "=", "False", ")", ":", "if", "schema", "is", "None", ":", "base", "=", "declarative_base", "(", ")", "else", ":", "try", ":", "engine", ".", "execute", "(", "sqlalchemy", ...
Create a connection object to a database. Attempt to establish a schema. If there are existing tables, delete them if clobber is True and return otherwise. Returns a sqlalchemy engine object.
[ "Create", "a", "connection", "object", "to", "a", "database", ".", "Attempt", "to", "establish", "a", "schema", ".", "If", "there", "are", "existing", "tables", "delete", "them", "if", "clobber", "is", "True", "and", "return", "otherwise", ".", "Returns", ...
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L215-L240
train
41,053
fhcrc/taxtastic
taxtastic/ncbi.py
read_nodes
def read_nodes(rows, source_id=1): """ Return an iterator of rows ready to insert into table "nodes". * rows - iterator of lists (eg, output from read_archive or read_dmp) """ ncbi_keys = ['tax_id', 'parent_id', 'rank', 'embl_code', 'division_id'] extra_keys = ['source_id', 'is_valid'] is_valid = True ncbi_cols = len(ncbi_keys) rank = ncbi_keys.index('rank') parent_id = ncbi_keys.index('parent_id') # assumes the first row is the root row = next(rows) row[rank] = 'root' # parent must be None for termination of recursive CTE for # calculating lineages row[parent_id] = None rows = itertools.chain([row], rows) yield ncbi_keys + extra_keys for row in rows: # replace whitespace in "rank" with underscore row[rank] = '_'.join(row[rank].split()) # provide default values for source_id and is_valid yield row[:ncbi_cols] + [source_id, is_valid]
python
def read_nodes(rows, source_id=1): """ Return an iterator of rows ready to insert into table "nodes". * rows - iterator of lists (eg, output from read_archive or read_dmp) """ ncbi_keys = ['tax_id', 'parent_id', 'rank', 'embl_code', 'division_id'] extra_keys = ['source_id', 'is_valid'] is_valid = True ncbi_cols = len(ncbi_keys) rank = ncbi_keys.index('rank') parent_id = ncbi_keys.index('parent_id') # assumes the first row is the root row = next(rows) row[rank] = 'root' # parent must be None for termination of recursive CTE for # calculating lineages row[parent_id] = None rows = itertools.chain([row], rows) yield ncbi_keys + extra_keys for row in rows: # replace whitespace in "rank" with underscore row[rank] = '_'.join(row[rank].split()) # provide default values for source_id and is_valid yield row[:ncbi_cols] + [source_id, is_valid]
[ "def", "read_nodes", "(", "rows", ",", "source_id", "=", "1", ")", ":", "ncbi_keys", "=", "[", "'tax_id'", ",", "'parent_id'", ",", "'rank'", ",", "'embl_code'", ",", "'division_id'", "]", "extra_keys", "=", "[", "'source_id'", ",", "'is_valid'", "]", "is_...
Return an iterator of rows ready to insert into table "nodes". * rows - iterator of lists (eg, output from read_archive or read_dmp)
[ "Return", "an", "iterator", "of", "rows", "ready", "to", "insert", "into", "table", "nodes", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L250-L280
train
41,054
fhcrc/taxtastic
taxtastic/ncbi.py
fetch_data
def fetch_data(dest_dir='.', clobber=False, url=DATA_URL): """ Download data from NCBI required to generate local taxonomy database. Default url is ncbi.DATA_URL * dest_dir - directory in which to save output files (created if necessary). * clobber - don't download if False and target of url exists in dest_dir * url - url to archive; default is ncbi.DATA_URL Returns (fname, downloaded), where fname is the name of the downloaded zip archive, and downloaded is True if a new files was downloaded, false otherwise. see ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump_readme.txt """ dest_dir = os.path.abspath(dest_dir) try: os.mkdir(dest_dir) except OSError: pass fout = os.path.join(dest_dir, os.path.split(url)[-1]) if os.access(fout, os.F_OK) and not clobber: downloaded = False logging.info(fout + ' exists; not downloading') else: downloaded = True logging.info('downloading {} to {}'.format(url, fout)) request.urlretrieve(url, fout) return (fout, downloaded)
python
def fetch_data(dest_dir='.', clobber=False, url=DATA_URL): """ Download data from NCBI required to generate local taxonomy database. Default url is ncbi.DATA_URL * dest_dir - directory in which to save output files (created if necessary). * clobber - don't download if False and target of url exists in dest_dir * url - url to archive; default is ncbi.DATA_URL Returns (fname, downloaded), where fname is the name of the downloaded zip archive, and downloaded is True if a new files was downloaded, false otherwise. see ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump_readme.txt """ dest_dir = os.path.abspath(dest_dir) try: os.mkdir(dest_dir) except OSError: pass fout = os.path.join(dest_dir, os.path.split(url)[-1]) if os.access(fout, os.F_OK) and not clobber: downloaded = False logging.info(fout + ' exists; not downloading') else: downloaded = True logging.info('downloading {} to {}'.format(url, fout)) request.urlretrieve(url, fout) return (fout, downloaded)
[ "def", "fetch_data", "(", "dest_dir", "=", "'.'", ",", "clobber", "=", "False", ",", "url", "=", "DATA_URL", ")", ":", "dest_dir", "=", "os", ".", "path", ".", "abspath", "(", "dest_dir", ")", "try", ":", "os", ".", "mkdir", "(", "dest_dir", ")", "...
Download data from NCBI required to generate local taxonomy database. Default url is ncbi.DATA_URL * dest_dir - directory in which to save output files (created if necessary). * clobber - don't download if False and target of url exists in dest_dir * url - url to archive; default is ncbi.DATA_URL Returns (fname, downloaded), where fname is the name of the downloaded zip archive, and downloaded is True if a new files was downloaded, false otherwise. see ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump_readme.txt
[ "Download", "data", "from", "NCBI", "required", "to", "generate", "local", "taxonomy", "database", ".", "Default", "url", "is", "ncbi", ".", "DATA_URL" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L495-L527
train
41,055
fhcrc/taxtastic
taxtastic/ncbi.py
read_archive
def read_archive(archive, fname): """Return an iterator of unique rows from a zip archive. * archive - path to the zip archive. * fname - name of the compressed file within the archive. """ # Note that deduplication here is equivalent to an upsert/ignore, # but avoids requirement for a database-specific implementation. zfile = zipfile.ZipFile(archive) contents = zfile.open(fname, 'r') fobj = io.TextIOWrapper(contents) seen = set() for line in fobj: line = line.rstrip('\t|\n') if line not in seen: yield line.split('\t|\t') seen.add(line)
python
def read_archive(archive, fname): """Return an iterator of unique rows from a zip archive. * archive - path to the zip archive. * fname - name of the compressed file within the archive. """ # Note that deduplication here is equivalent to an upsert/ignore, # but avoids requirement for a database-specific implementation. zfile = zipfile.ZipFile(archive) contents = zfile.open(fname, 'r') fobj = io.TextIOWrapper(contents) seen = set() for line in fobj: line = line.rstrip('\t|\n') if line not in seen: yield line.split('\t|\t') seen.add(line)
[ "def", "read_archive", "(", "archive", ",", "fname", ")", ":", "# Note that deduplication here is equivalent to an upsert/ignore,", "# but avoids requirement for a database-specific implementation.", "zfile", "=", "zipfile", ".", "ZipFile", "(", "archive", ")", "contents", "=",...
Return an iterator of unique rows from a zip archive. * archive - path to the zip archive. * fname - name of the compressed file within the archive.
[ "Return", "an", "iterator", "of", "unique", "rows", "from", "a", "zip", "archive", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L530-L550
train
41,056
fhcrc/taxtastic
taxtastic/ncbi.py
NCBILoader.prepend_schema
def prepend_schema(self, name): """Prepend schema name to 'name' when a schema is specified """ return '.'.join([self.schema, name]) if self.schema else name
python
def prepend_schema(self, name): """Prepend schema name to 'name' when a schema is specified """ return '.'.join([self.schema, name]) if self.schema else name
[ "def", "prepend_schema", "(", "self", ",", "name", ")", ":", "return", "'.'", ".", "join", "(", "[", "self", ".", "schema", ",", "name", "]", ")", "if", "self", ".", "schema", "else", "name" ]
Prepend schema name to 'name' when a schema is specified
[ "Prepend", "schema", "name", "to", "name", "when", "a", "schema", "is", "specified" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L338-L342
train
41,057
fhcrc/taxtastic
taxtastic/ncbi.py
NCBILoader.load_table
def load_table(self, table, rows, colnames=None, limit=None): """Load 'rows' into table 'table'. If 'colnames' is not provided, the first element of 'rows' must provide column names. """ conn = self.engine.raw_connection() cur = conn.cursor() colnames = colnames or next(rows) cmd = 'INSERT INTO {table} ({colnames}) VALUES ({placeholders})'.format( table=self.tables[table], colnames=', '.join(colnames), placeholders=', '.join([self.placeholder] * len(colnames))) cur.executemany(cmd, itertools.islice(rows, limit)) conn.commit()
python
def load_table(self, table, rows, colnames=None, limit=None): """Load 'rows' into table 'table'. If 'colnames' is not provided, the first element of 'rows' must provide column names. """ conn = self.engine.raw_connection() cur = conn.cursor() colnames = colnames or next(rows) cmd = 'INSERT INTO {table} ({colnames}) VALUES ({placeholders})'.format( table=self.tables[table], colnames=', '.join(colnames), placeholders=', '.join([self.placeholder] * len(colnames))) cur.executemany(cmd, itertools.islice(rows, limit)) conn.commit()
[ "def", "load_table", "(", "self", ",", "table", ",", "rows", ",", "colnames", "=", "None", ",", "limit", "=", "None", ")", ":", "conn", "=", "self", ".", "engine", ".", "raw_connection", "(", ")", "cur", "=", "conn", ".", "cursor", "(", ")", "colna...
Load 'rows' into table 'table'. If 'colnames' is not provided, the first element of 'rows' must provide column names.
[ "Load", "rows", "into", "table", "table", ".", "If", "colnames", "is", "not", "provided", "the", "first", "element", "of", "rows", "must", "provide", "column", "names", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L344-L361
train
41,058
fhcrc/taxtastic
taxtastic/ncbi.py
NCBILoader.load_archive
def load_archive(self, archive): """Load data from the zip archive of the NCBI taxonomy. """ # source self.load_table( 'source', rows=[('ncbi', DATA_URL)], colnames=['name', 'description'], ) conn = self.engine.raw_connection() cur = conn.cursor() cmd = "select id from {source} where name = 'ncbi'".format(**self.tables) cur.execute(cmd) source_id = cur.fetchone()[0] # ranks log.info('loading ranks') self.load_table( 'ranks', rows=((rank, i) for i, rank in enumerate(RANKS)), colnames=['rank', 'height'], ) # nodes logging.info('loading nodes') nodes_rows = read_nodes( read_archive(archive, 'nodes.dmp'), source_id=source_id) self.load_table('nodes', rows=nodes_rows) # names logging.info('loading names') names_rows = read_names( read_archive(archive, 'names.dmp'), source_id=source_id) self.load_table('names', rows=names_rows) # merged logging.info('loading merged') merged_rows = read_merged(read_archive(archive, 'merged.dmp')) self.load_table('merged', rows=merged_rows)
python
def load_archive(self, archive): """Load data from the zip archive of the NCBI taxonomy. """ # source self.load_table( 'source', rows=[('ncbi', DATA_URL)], colnames=['name', 'description'], ) conn = self.engine.raw_connection() cur = conn.cursor() cmd = "select id from {source} where name = 'ncbi'".format(**self.tables) cur.execute(cmd) source_id = cur.fetchone()[0] # ranks log.info('loading ranks') self.load_table( 'ranks', rows=((rank, i) for i, rank in enumerate(RANKS)), colnames=['rank', 'height'], ) # nodes logging.info('loading nodes') nodes_rows = read_nodes( read_archive(archive, 'nodes.dmp'), source_id=source_id) self.load_table('nodes', rows=nodes_rows) # names logging.info('loading names') names_rows = read_names( read_archive(archive, 'names.dmp'), source_id=source_id) self.load_table('names', rows=names_rows) # merged logging.info('loading merged') merged_rows = read_merged(read_archive(archive, 'merged.dmp')) self.load_table('merged', rows=merged_rows)
[ "def", "load_archive", "(", "self", ",", "archive", ")", ":", "# source", "self", ".", "load_table", "(", "'source'", ",", "rows", "=", "[", "(", "'ncbi'", ",", "DATA_URL", ")", "]", ",", "colnames", "=", "[", "'name'", ",", "'description'", "]", ",", ...
Load data from the zip archive of the NCBI taxonomy.
[ "Load", "data", "from", "the", "zip", "archive", "of", "the", "NCBI", "taxonomy", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L363-L404
train
41,059
amicks/Speculator
speculator/utils/date.py
date_to_epoch
def date_to_epoch(year, month, day): """ Converts a date to epoch in UTC Args: year: int between 1 and 9999. month: int between 1 and 12. day: int between 1 and 31. Returns: Int epoch in UTC from date. """ return int(date_to_delorean(year, month, day).epoch)
python
def date_to_epoch(year, month, day): """ Converts a date to epoch in UTC Args: year: int between 1 and 9999. month: int between 1 and 12. day: int between 1 and 31. Returns: Int epoch in UTC from date. """ return int(date_to_delorean(year, month, day).epoch)
[ "def", "date_to_epoch", "(", "year", ",", "month", ",", "day", ")", ":", "return", "int", "(", "date_to_delorean", "(", "year", ",", "month", ",", "day", ")", ".", "epoch", ")" ]
Converts a date to epoch in UTC Args: year: int between 1 and 9999. month: int between 1 and 12. day: int between 1 and 31. Returns: Int epoch in UTC from date.
[ "Converts", "a", "date", "to", "epoch", "in", "UTC" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/utils/date.py#L17-L28
train
41,060
amicks/Speculator
speculator/utils/date.py
get_end_start_epochs
def get_end_start_epochs(year, month, day, direction, unit, count): """ Gets epoch from a start date and epoch from a shifted date Args: year: Int between 1 and 9999. month: Int between 1 and 12. day: Int between 1 and 31. direction: String to shift time forwards or backwards. Valid values: 'last', 'next'. unit: String of time period unit for count argument. How far back to check historical market data. Valid values: 'hour', 'day', 'week', 'month', 'year'. count: Int of units. How far back to check historical market data? Returns: Dict of int epochs in UTC with keys 'initial' and 'shifted' """ if year or month or day: # Date is specified if not year: year = 2017 if not month: month = 1 if not day: day = 1 initial_delorean = date_to_delorean(year, month, day) else: # Date is not specified, get current date count += 1 # Get another date because market is still open initial_delorean = now_delorean() initial_epoch = int(initial_delorean.epoch) shifted_epoch = shift_epoch(initial_delorean, direction, unit, count) return { 'initial': initial_epoch, 'shifted': shifted_epoch }
python
def get_end_start_epochs(year, month, day, direction, unit, count): """ Gets epoch from a start date and epoch from a shifted date Args: year: Int between 1 and 9999. month: Int between 1 and 12. day: Int between 1 and 31. direction: String to shift time forwards or backwards. Valid values: 'last', 'next'. unit: String of time period unit for count argument. How far back to check historical market data. Valid values: 'hour', 'day', 'week', 'month', 'year'. count: Int of units. How far back to check historical market data? Returns: Dict of int epochs in UTC with keys 'initial' and 'shifted' """ if year or month or day: # Date is specified if not year: year = 2017 if not month: month = 1 if not day: day = 1 initial_delorean = date_to_delorean(year, month, day) else: # Date is not specified, get current date count += 1 # Get another date because market is still open initial_delorean = now_delorean() initial_epoch = int(initial_delorean.epoch) shifted_epoch = shift_epoch(initial_delorean, direction, unit, count) return { 'initial': initial_epoch, 'shifted': shifted_epoch }
[ "def", "get_end_start_epochs", "(", "year", ",", "month", ",", "day", ",", "direction", ",", "unit", ",", "count", ")", ":", "if", "year", "or", "month", "or", "day", ":", "# Date is specified", "if", "not", "year", ":", "year", "=", "2017", "if", "not...
Gets epoch from a start date and epoch from a shifted date Args: year: Int between 1 and 9999. month: Int between 1 and 12. day: Int between 1 and 31. direction: String to shift time forwards or backwards. Valid values: 'last', 'next'. unit: String of time period unit for count argument. How far back to check historical market data. Valid values: 'hour', 'day', 'week', 'month', 'year'. count: Int of units. How far back to check historical market data? Returns: Dict of int epochs in UTC with keys 'initial' and 'shifted'
[ "Gets", "epoch", "from", "a", "start", "date", "and", "epoch", "from", "a", "shifted", "date" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/utils/date.py#L71-L103
train
41,061
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.add_child
def add_child(self, child): """ Add a child to this node. """ assert child != self child.parent = self child.ranks = self.ranks child.index = self.index assert child.tax_id not in self.index self.index[child.tax_id] = child self.children.add(child)
python
def add_child(self, child): """ Add a child to this node. """ assert child != self child.parent = self child.ranks = self.ranks child.index = self.index assert child.tax_id not in self.index self.index[child.tax_id] = child self.children.add(child)
[ "def", "add_child", "(", "self", ",", "child", ")", ":", "assert", "child", "!=", "self", "child", ".", "parent", "=", "self", "child", ".", "ranks", "=", "self", ".", "ranks", "child", ".", "index", "=", "self", ".", "index", "assert", "child", ".",...
Add a child to this node.
[ "Add", "a", "child", "to", "this", "node", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L46-L56
train
41,062
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.remove_child
def remove_child(self, child): """ Remove a child from this node. """ assert child in self.children self.children.remove(child) self.index.pop(child.tax_id) if child.parent is self: child.parent = None if child.index is self.index: child.index = None # Remove child subtree from index for n in child: if n is child: continue self.index.pop(n.tax_id) if n.index is self.index: n.index = None
python
def remove_child(self, child): """ Remove a child from this node. """ assert child in self.children self.children.remove(child) self.index.pop(child.tax_id) if child.parent is self: child.parent = None if child.index is self.index: child.index = None # Remove child subtree from index for n in child: if n is child: continue self.index.pop(n.tax_id) if n.index is self.index: n.index = None
[ "def", "remove_child", "(", "self", ",", "child", ")", ":", "assert", "child", "in", "self", ".", "children", "self", ".", "children", ".", "remove", "(", "child", ")", "self", ".", "index", ".", "pop", "(", "child", ".", "tax_id", ")", "if", "child"...
Remove a child from this node.
[ "Remove", "a", "child", "from", "this", "node", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L58-L76
train
41,063
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.drop
def drop(self): """ Remove this node from the taxonomy, maintaining child subtrees by adding them to the node's parent, and moving sequences at this node to the parent. Not valid for root node. """ if self.is_root: raise ValueError("Cannot drop root node!") parent = self.parent for child in self.children: child.parent = parent parent.children.add(child) self.children = set() parent.sequence_ids.update(self.sequence_ids) self.sequence_ids = set() parent.remove_child(self)
python
def drop(self): """ Remove this node from the taxonomy, maintaining child subtrees by adding them to the node's parent, and moving sequences at this node to the parent. Not valid for root node. """ if self.is_root: raise ValueError("Cannot drop root node!") parent = self.parent for child in self.children: child.parent = parent parent.children.add(child) self.children = set() parent.sequence_ids.update(self.sequence_ids) self.sequence_ids = set() parent.remove_child(self)
[ "def", "drop", "(", "self", ")", ":", "if", "self", ".", "is_root", ":", "raise", "ValueError", "(", "\"Cannot drop root node!\"", ")", "parent", "=", "self", ".", "parent", "for", "child", "in", "self", ".", "children", ":", "child", ".", "parent", "=",...
Remove this node from the taxonomy, maintaining child subtrees by adding them to the node's parent, and moving sequences at this node to the parent. Not valid for root node.
[ "Remove", "this", "node", "from", "the", "taxonomy", "maintaining", "child", "subtrees", "by", "adding", "them", "to", "the", "node", "s", "parent", "and", "moving", "sequences", "at", "this", "node", "to", "the", "parent", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L78-L98
train
41,064
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.prune_unrepresented
def prune_unrepresented(self): """ Remove nodes without sequences or children below this node. """ for node in self.depth_first_iter(self_first=False): if (not node.children and not node.sequence_ids and node is not self): node.parent.remove_child(node)
python
def prune_unrepresented(self): """ Remove nodes without sequences or children below this node. """ for node in self.depth_first_iter(self_first=False): if (not node.children and not node.sequence_ids and node is not self): node.parent.remove_child(node)
[ "def", "prune_unrepresented", "(", "self", ")", ":", "for", "node", "in", "self", ".", "depth_first_iter", "(", "self_first", "=", "False", ")", ":", "if", "(", "not", "node", ".", "children", "and", "not", "node", ".", "sequence_ids", "and", "node", "is...
Remove nodes without sequences or children below this node.
[ "Remove", "nodes", "without", "sequences", "or", "children", "below", "this", "node", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L100-L108
train
41,065
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.at_rank
def at_rank(self, rank): """ Find the node above this node at rank ``rank`` """ s = self while s: if s.rank == rank: return s s = s.parent raise ValueError("No node at rank {0} for {1}".format( rank, self.tax_id))
python
def at_rank(self, rank): """ Find the node above this node at rank ``rank`` """ s = self while s: if s.rank == rank: return s s = s.parent raise ValueError("No node at rank {0} for {1}".format( rank, self.tax_id))
[ "def", "at_rank", "(", "self", ",", "rank", ")", ":", "s", "=", "self", "while", "s", ":", "if", "s", ".", "rank", "==", "rank", ":", "return", "s", "s", "=", "s", ".", "parent", "raise", "ValueError", "(", "\"No node at rank {0} for {1}\"", ".", "fo...
Find the node above this node at rank ``rank``
[ "Find", "the", "node", "above", "this", "node", "at", "rank", "rank" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L118-L128
train
41,066
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.depth_first_iter
def depth_first_iter(self, self_first=True): """ Iterate over nodes below this node, optionally yielding children before self. """ if self_first: yield self for child in list(self.children): for i in child.depth_first_iter(self_first): yield i if not self_first: yield self
python
def depth_first_iter(self, self_first=True): """ Iterate over nodes below this node, optionally yielding children before self. """ if self_first: yield self for child in list(self.children): for i in child.depth_first_iter(self_first): yield i if not self_first: yield self
[ "def", "depth_first_iter", "(", "self", ",", "self_first", "=", "True", ")", ":", "if", "self_first", ":", "yield", "self", "for", "child", "in", "list", "(", "self", ".", "children", ")", ":", "for", "i", "in", "child", ".", "depth_first_iter", "(", "...
Iterate over nodes below this node, optionally yielding children before self.
[ "Iterate", "over", "nodes", "below", "this", "node", "optionally", "yielding", "children", "before", "self", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L130-L141
train
41,067
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.path
def path(self, tax_ids): """Get the node at the end of the path described by tax_ids.""" assert tax_ids[0] == self.tax_id if len(tax_ids) == 1: return self n = tax_ids[1] try: child = next(i for i in self.children if i.tax_id == n) except StopIteration: raise ValueError(n) return child.path(tax_ids[1:])
python
def path(self, tax_ids): """Get the node at the end of the path described by tax_ids.""" assert tax_ids[0] == self.tax_id if len(tax_ids) == 1: return self n = tax_ids[1] try: child = next(i for i in self.children if i.tax_id == n) except StopIteration: raise ValueError(n) return child.path(tax_ids[1:])
[ "def", "path", "(", "self", ",", "tax_ids", ")", ":", "assert", "tax_ids", "[", "0", "]", "==", "self", ".", "tax_id", "if", "len", "(", "tax_ids", ")", "==", "1", ":", "return", "self", "n", "=", "tax_ids", "[", "1", "]", "try", ":", "child", ...
Get the node at the end of the path described by tax_ids.
[ "Get", "the", "node", "at", "the", "end", "of", "the", "path", "described", "by", "tax_ids", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L160-L172
train
41,068
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.lineage
def lineage(self): """ Return all nodes between this node and the root, including this one. """ if not self.parent: return [self] else: L = self.parent.lineage() L.append(self) return L
python
def lineage(self): """ Return all nodes between this node and the root, including this one. """ if not self.parent: return [self] else: L = self.parent.lineage() L.append(self) return L
[ "def", "lineage", "(", "self", ")", ":", "if", "not", "self", ".", "parent", ":", "return", "[", "self", "]", "else", ":", "L", "=", "self", ".", "parent", ".", "lineage", "(", ")", "L", ".", "append", "(", "self", ")", "return", "L" ]
Return all nodes between this node and the root, including this one.
[ "Return", "all", "nodes", "between", "this", "node", "and", "the", "root", "including", "this", "one", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L180-L189
train
41,069
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.write_taxtable
def write_taxtable(self, out_fp, **kwargs): """ Write a taxtable for this node and all descendants, including the lineage leading to this node. """ ranks_represented = frozenset(i.rank for i in self) | \ frozenset(i.rank for i in self.lineage()) ranks = [i for i in self.ranks if i in ranks_represented] assert len(ranks_represented) == len(ranks) def node_record(node): parent_id = node.parent.tax_id if node.parent else node.tax_id d = {'tax_id': node.tax_id, 'tax_name': node.name, 'parent_id': parent_id, 'rank': node.rank} L = {i.rank: i.tax_id for i in node.lineage()} d.update(L) return d header = ['tax_id', 'parent_id', 'rank', 'tax_name'] + ranks w = csv.DictWriter(out_fp, header, quoting=csv.QUOTE_NONNUMERIC, lineterminator='\n') w.writeheader() # All nodes leading to this one for i in self.lineage()[:-1]: w.writerow(node_record(i)) w.writerows(node_record(i) for i in self)
python
def write_taxtable(self, out_fp, **kwargs): """ Write a taxtable for this node and all descendants, including the lineage leading to this node. """ ranks_represented = frozenset(i.rank for i in self) | \ frozenset(i.rank for i in self.lineage()) ranks = [i for i in self.ranks if i in ranks_represented] assert len(ranks_represented) == len(ranks) def node_record(node): parent_id = node.parent.tax_id if node.parent else node.tax_id d = {'tax_id': node.tax_id, 'tax_name': node.name, 'parent_id': parent_id, 'rank': node.rank} L = {i.rank: i.tax_id for i in node.lineage()} d.update(L) return d header = ['tax_id', 'parent_id', 'rank', 'tax_name'] + ranks w = csv.DictWriter(out_fp, header, quoting=csv.QUOTE_NONNUMERIC, lineterminator='\n') w.writeheader() # All nodes leading to this one for i in self.lineage()[:-1]: w.writerow(node_record(i)) w.writerows(node_record(i) for i in self)
[ "def", "write_taxtable", "(", "self", ",", "out_fp", ",", "*", "*", "kwargs", ")", ":", "ranks_represented", "=", "frozenset", "(", "i", ".", "rank", "for", "i", "in", "self", ")", "|", "frozenset", "(", "i", ".", "rank", "for", "i", "in", "self", ...
Write a taxtable for this node and all descendants, including the lineage leading to this node.
[ "Write", "a", "taxtable", "for", "this", "node", "and", "all", "descendants", "including", "the", "lineage", "leading", "to", "this", "node", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L199-L226
train
41,070
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.populate_from_seqinfo
def populate_from_seqinfo(self, seqinfo): """Populate sequence_ids below this node from a seqinfo file object.""" for row in csv.DictReader(seqinfo): node = self.index.get(row['tax_id']) if node: node.sequence_ids.add(row['seqname'])
python
def populate_from_seqinfo(self, seqinfo): """Populate sequence_ids below this node from a seqinfo file object.""" for row in csv.DictReader(seqinfo): node = self.index.get(row['tax_id']) if node: node.sequence_ids.add(row['seqname'])
[ "def", "populate_from_seqinfo", "(", "self", ",", "seqinfo", ")", ":", "for", "row", "in", "csv", ".", "DictReader", "(", "seqinfo", ")", ":", "node", "=", "self", ".", "index", ".", "get", "(", "row", "[", "'tax_id'", "]", ")", "if", "node", ":", ...
Populate sequence_ids below this node from a seqinfo file object.
[ "Populate", "sequence_ids", "below", "this", "node", "from", "a", "seqinfo", "file", "object", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L228-L233
train
41,071
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.collapse
def collapse(self, remove=False): """ Move all ``sequence_ids`` in the subtree below this node to this node. If ``remove`` is True, nodes below this one are deleted from the taxonomy. """ descendants = iter(self) # Skip this node assert next(descendants) is self for descendant in descendants: self.sequence_ids.update(descendant.sequence_ids) descendant.sequence_ids.clear() if remove: for node in self.children: self.remove_child(node)
python
def collapse(self, remove=False): """ Move all ``sequence_ids`` in the subtree below this node to this node. If ``remove`` is True, nodes below this one are deleted from the taxonomy. """ descendants = iter(self) # Skip this node assert next(descendants) is self for descendant in descendants: self.sequence_ids.update(descendant.sequence_ids) descendant.sequence_ids.clear() if remove: for node in self.children: self.remove_child(node)
[ "def", "collapse", "(", "self", ",", "remove", "=", "False", ")", ":", "descendants", "=", "iter", "(", "self", ")", "# Skip this node", "assert", "next", "(", "descendants", ")", "is", "self", "for", "descendant", "in", "descendants", ":", "self", ".", ...
Move all ``sequence_ids`` in the subtree below this node to this node. If ``remove`` is True, nodes below this one are deleted from the taxonomy.
[ "Move", "all", "sequence_ids", "in", "the", "subtree", "below", "this", "node", "to", "this", "node", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L235-L251
train
41,072
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.write_seqinfo
def write_seqinfo(self, out_fp, include_name=True): """ Write a simple seq_info file, suitable for use in taxtastic. Useful for printing out the results of collapsing tax nodes - super bare bones, just tax_id and seqname. If include_name is True, a column with the taxon name is included. """ header = ['seqname', 'tax_id'] if include_name: header.append('tax_name') w = csv.DictWriter(out_fp, header, quoting=csv.QUOTE_NONNUMERIC, lineterminator='\n', extrasaction='ignore') w.writeheader() rows = ({'seqname': seq_id, 'tax_id': node.tax_id, 'tax_name': node.name} for node in self for seq_id in node.sequence_ids) w.writerows(rows)
python
def write_seqinfo(self, out_fp, include_name=True): """ Write a simple seq_info file, suitable for use in taxtastic. Useful for printing out the results of collapsing tax nodes - super bare bones, just tax_id and seqname. If include_name is True, a column with the taxon name is included. """ header = ['seqname', 'tax_id'] if include_name: header.append('tax_name') w = csv.DictWriter(out_fp, header, quoting=csv.QUOTE_NONNUMERIC, lineterminator='\n', extrasaction='ignore') w.writeheader() rows = ({'seqname': seq_id, 'tax_id': node.tax_id, 'tax_name': node.name} for node in self for seq_id in node.sequence_ids) w.writerows(rows)
[ "def", "write_seqinfo", "(", "self", ",", "out_fp", ",", "include_name", "=", "True", ")", ":", "header", "=", "[", "'seqname'", ",", "'tax_id'", "]", "if", "include_name", ":", "header", ".", "append", "(", "'tax_name'", ")", "w", "=", "csv", ".", "Di...
Write a simple seq_info file, suitable for use in taxtastic. Useful for printing out the results of collapsing tax nodes - super bare bones, just tax_id and seqname. If include_name is True, a column with the taxon name is included.
[ "Write", "a", "simple", "seq_info", "file", "suitable", "for", "use", "in", "taxtastic", ".", "Useful", "for", "printing", "out", "the", "results", "of", "collapsing", "tax", "nodes", "-", "super", "bare", "bones", "just", "tax_id", "and", "seqname", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L253-L276
train
41,073
fhcrc/taxtastic
taxtastic/taxtable.py
TaxNode.from_taxtable
def from_taxtable(cls, taxtable_fp): """ Generate a node from an open handle to a taxtable, as generated by ``taxit taxtable`` """ r = csv.reader(taxtable_fp) headers = next(r) rows = (collections.OrderedDict(list(zip(headers, i))) for i in r) row = next(rows) root = cls(rank=row['rank'], tax_id=row[ 'tax_id'], name=row['tax_name']) path_root = headers.index('root') root.ranks = headers[path_root:] for row in rows: rank, tax_id, name = [ row[i] for i in ('rank', 'tax_id', 'tax_name')] path = [_f for _f in list(row.values())[path_root:] if _f] parent = root.path(path[:-1]) parent.add_child(cls(rank, tax_id, name=name)) return root
python
def from_taxtable(cls, taxtable_fp): """ Generate a node from an open handle to a taxtable, as generated by ``taxit taxtable`` """ r = csv.reader(taxtable_fp) headers = next(r) rows = (collections.OrderedDict(list(zip(headers, i))) for i in r) row = next(rows) root = cls(rank=row['rank'], tax_id=row[ 'tax_id'], name=row['tax_name']) path_root = headers.index('root') root.ranks = headers[path_root:] for row in rows: rank, tax_id, name = [ row[i] for i in ('rank', 'tax_id', 'tax_name')] path = [_f for _f in list(row.values())[path_root:] if _f] parent = root.path(path[:-1]) parent.add_child(cls(rank, tax_id, name=name)) return root
[ "def", "from_taxtable", "(", "cls", ",", "taxtable_fp", ")", ":", "r", "=", "csv", ".", "reader", "(", "taxtable_fp", ")", "headers", "=", "next", "(", "r", ")", "rows", "=", "(", "collections", ".", "OrderedDict", "(", "list", "(", "zip", "(", "head...
Generate a node from an open handle to a taxtable, as generated by ``taxit taxtable``
[ "Generate", "a", "node", "from", "an", "open", "handle", "to", "a", "taxtable", "as", "generated", "by", "taxit", "taxtable" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L279-L300
train
41,074
fhcrc/taxtastic
taxtastic/subcommands/taxids.py
get_children
def get_children(engine, parent_ids, rank='species', schema=None): """ Recursively fetch children of tax_ids in `parent_ids` until the rank of `rank` """ if not parent_ids: return [] nodes = schema + '.nodes' if schema else 'nodes' names = schema + '.names' if schema else 'names' cmd = ('select tax_id, tax_name, rank ' 'from {} join {} using (tax_id) ' 'where parent_id = :tax_id and is_primary').format(nodes, names) species = [] for parent_id in parent_ids: result = engine.execute(sqlalchemy.sql.text(cmd), tax_id=parent_id) keys = list(result.keys()) rows = [dict(list(zip(keys, row))) for row in result.fetchall()] for r in rows: if r['rank'] == rank and 'sp.' not in r['tax_name']: species.append(r) others = [r for r in rows if r['rank'] not in (rank, 'no_rank')] if others: _, s = get_children(engine, [r['tax_id'] for r in others]) species.extend(s) return keys, species
python
def get_children(engine, parent_ids, rank='species', schema=None): """ Recursively fetch children of tax_ids in `parent_ids` until the rank of `rank` """ if not parent_ids: return [] nodes = schema + '.nodes' if schema else 'nodes' names = schema + '.names' if schema else 'names' cmd = ('select tax_id, tax_name, rank ' 'from {} join {} using (tax_id) ' 'where parent_id = :tax_id and is_primary').format(nodes, names) species = [] for parent_id in parent_ids: result = engine.execute(sqlalchemy.sql.text(cmd), tax_id=parent_id) keys = list(result.keys()) rows = [dict(list(zip(keys, row))) for row in result.fetchall()] for r in rows: if r['rank'] == rank and 'sp.' not in r['tax_name']: species.append(r) others = [r for r in rows if r['rank'] not in (rank, 'no_rank')] if others: _, s = get_children(engine, [r['tax_id'] for r in others]) species.extend(s) return keys, species
[ "def", "get_children", "(", "engine", ",", "parent_ids", ",", "rank", "=", "'species'", ",", "schema", "=", "None", ")", ":", "if", "not", "parent_ids", ":", "return", "[", "]", "nodes", "=", "schema", "+", "'.nodes'", "if", "schema", "else", "'nodes'", ...
Recursively fetch children of tax_ids in `parent_ids` until the rank of `rank`
[ "Recursively", "fetch", "children", "of", "tax_ids", "in", "parent_ids", "until", "the", "rank", "of", "rank" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/subcommands/taxids.py#L33-L62
train
41,075
fhcrc/taxtastic
taxtastic/subcommands/update.py
action
def action(args): """Updates a Refpkg with new files. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on) and changes (a series of strings of the form 'key=file' giving the key to update in the refpkg and the file to store under that key)." """ log.info('loading reference package') pairs = [p.split('=', 1) for p in args.changes] if args.metadata: rp = refpkg.Refpkg(args.refpkg, create=False) rp.start_transaction() for key, value in pairs: rp.update_metadata(key, value) rp.commit_transaction('Updated metadata: ' + ', '.join(['%s=%s' % (a, b) for a, b in pairs])) else: for key, filename in pairs: if not(os.path.exists(filename)): print("No such file: %s" % filename) exit(1) rp = refpkg.Refpkg(args.refpkg, create=False) rp.start_transaction() for key, filename in pairs: if key == 'tree_stats': with warnings.catch_warnings(): warnings.simplefilter( "ignore", refpkg.DerivedFileNotUpdatedWarning) rp.update_file(key, os.path.abspath(filename)) # Trigger model update log.info('Updating phylo_model to match tree_stats') rp.update_phylo_model(args.stats_type, filename, args.frequency_type) else: rp.update_file(key, os.path.abspath(filename)) rp.commit_transaction('Updates files: ' + ', '.join(['%s=%s' % (a, b) for a, b in pairs])) return 0
python
def action(args): """Updates a Refpkg with new files. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on) and changes (a series of strings of the form 'key=file' giving the key to update in the refpkg and the file to store under that key)." """ log.info('loading reference package') pairs = [p.split('=', 1) for p in args.changes] if args.metadata: rp = refpkg.Refpkg(args.refpkg, create=False) rp.start_transaction() for key, value in pairs: rp.update_metadata(key, value) rp.commit_transaction('Updated metadata: ' + ', '.join(['%s=%s' % (a, b) for a, b in pairs])) else: for key, filename in pairs: if not(os.path.exists(filename)): print("No such file: %s" % filename) exit(1) rp = refpkg.Refpkg(args.refpkg, create=False) rp.start_transaction() for key, filename in pairs: if key == 'tree_stats': with warnings.catch_warnings(): warnings.simplefilter( "ignore", refpkg.DerivedFileNotUpdatedWarning) rp.update_file(key, os.path.abspath(filename)) # Trigger model update log.info('Updating phylo_model to match tree_stats') rp.update_phylo_model(args.stats_type, filename, args.frequency_type) else: rp.update_file(key, os.path.abspath(filename)) rp.commit_transaction('Updates files: ' + ', '.join(['%s=%s' % (a, b) for a, b in pairs])) return 0
[ "def", "action", "(", "args", ")", ":", "log", ".", "info", "(", "'loading reference package'", ")", "pairs", "=", "[", "p", ".", "split", "(", "'='", ",", "1", ")", "for", "p", "in", "args", ".", "changes", "]", "if", "args", ".", "metadata", ":",...
Updates a Refpkg with new files. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on) and changes (a series of strings of the form 'key=file' giving the key to update in the refpkg and the file to store under that key)."
[ "Updates", "a", "Refpkg", "with", "new", "files", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/subcommands/update.py#L82-L125
train
41,076
fhcrc/taxtastic
taxtastic/scripts/taxit.py
parse_arguments
def parse_arguments(argv): """Create the argument parser """ parser = argparse.ArgumentParser(description=DESCRIPTION) base_parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-V', '--version', action='version', version='taxit v' + version, help='Print the version number and exit') parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=1, help='Increase verbosity of screen output (eg, -v is verbose, ' '-vv more so)') parser.add_argument('-q', '--quiet', action='store_const', dest='verbosity', const=0, help='Suppress output') ########################## # Setup all sub-commands # ########################## subparsers = parser.add_subparsers(dest='subparser_name') # Begin help sub-command parser_help = subparsers.add_parser( 'help', help='Detailed help for actions using `help <action>`') parser_help.add_argument('action', nargs=1) # End help sub-command actions = {} for name, mod in subcommands.itermodules( os.path.split(subcommands.__file__)[0]): # set up subcommand help text. The first line of the dosctring # in the module is displayed as the help text in the # script-level help message (`script -h`). The entire # docstring is displayed in the help message for the # individual subcommand ((`script action -h`)). subparser = subparsers.add_parser( name, prog='taxit {}'.format(name), help=mod.__doc__.lstrip().split('\n', 1)[0], description=mod.__doc__, formatter_class=RawDescriptionHelpFormatter, parents=[base_parser]) mod.build_parser(subparser) actions[name] = mod.action # Determine we have called ourself (e.g. "help <action>") # Set arguments to display help if parameter is set # *or* # Set arguments to perform an action with any specified options. arguments = parser.parse_args(argv) # Determine which action is in play. action = arguments.subparser_name # Support help <action> by simply having this function call itself and # translate the arguments into something that argparse can work with. 
if action == 'help': return parse_arguments([str(arguments.action[0]), '-h']) return actions[action], arguments
python
def parse_arguments(argv): """Create the argument parser """ parser = argparse.ArgumentParser(description=DESCRIPTION) base_parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-V', '--version', action='version', version='taxit v' + version, help='Print the version number and exit') parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=1, help='Increase verbosity of screen output (eg, -v is verbose, ' '-vv more so)') parser.add_argument('-q', '--quiet', action='store_const', dest='verbosity', const=0, help='Suppress output') ########################## # Setup all sub-commands # ########################## subparsers = parser.add_subparsers(dest='subparser_name') # Begin help sub-command parser_help = subparsers.add_parser( 'help', help='Detailed help for actions using `help <action>`') parser_help.add_argument('action', nargs=1) # End help sub-command actions = {} for name, mod in subcommands.itermodules( os.path.split(subcommands.__file__)[0]): # set up subcommand help text. The first line of the dosctring # in the module is displayed as the help text in the # script-level help message (`script -h`). The entire # docstring is displayed in the help message for the # individual subcommand ((`script action -h`)). subparser = subparsers.add_parser( name, prog='taxit {}'.format(name), help=mod.__doc__.lstrip().split('\n', 1)[0], description=mod.__doc__, formatter_class=RawDescriptionHelpFormatter, parents=[base_parser]) mod.build_parser(subparser) actions[name] = mod.action # Determine we have called ourself (e.g. "help <action>") # Set arguments to display help if parameter is set # *or* # Set arguments to perform an action with any specified options. arguments = parser.parse_args(argv) # Determine which action is in play. action = arguments.subparser_name # Support help <action> by simply having this function call itself and # translate the arguments into something that argparse can work with. 
if action == 'help': return parse_arguments([str(arguments.action[0]), '-h']) return actions[action], arguments
[ "def", "parse_arguments", "(", "argv", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "DESCRIPTION", ")", "base_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "parser", ".", "add_argum...
Create the argument parser
[ "Create", "the", "argument", "parser" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/scripts/taxit.py#L56-L120
train
41,077
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy.execute
def execute(self, statements, exc=IntegrityError, rasie_as=ValueError): """Execute ``statements`` in a session, and perform a rollback on error. ``exc`` is a single exception object or a tuple of objects to be used in the except clause. The error message is re-raised as the exception specified by ``raise_as``. """ Session = sessionmaker(bind=self.engine) session = Session() try: for statement in statements: session.execute(statement) except exc as err: session.rollback() raise rasie_as(str(err)) else: session.commit() finally: session.close()
python
def execute(self, statements, exc=IntegrityError, rasie_as=ValueError): """Execute ``statements`` in a session, and perform a rollback on error. ``exc`` is a single exception object or a tuple of objects to be used in the except clause. The error message is re-raised as the exception specified by ``raise_as``. """ Session = sessionmaker(bind=self.engine) session = Session() try: for statement in statements: session.execute(statement) except exc as err: session.rollback() raise rasie_as(str(err)) else: session.commit() finally: session.close()
[ "def", "execute", "(", "self", ",", "statements", ",", "exc", "=", "IntegrityError", ",", "rasie_as", "=", "ValueError", ")", ":", "Session", "=", "sessionmaker", "(", "bind", "=", "self", ".", "engine", ")", "session", "=", "Session", "(", ")", "try", ...
Execute ``statements`` in a session, and perform a rollback on error. ``exc`` is a single exception object or a tuple of objects to be used in the except clause. The error message is re-raised as the exception specified by ``raise_as``.
[ "Execute", "statements", "in", "a", "session", "and", "perform", "a", "rollback", "on", "error", ".", "exc", "is", "a", "single", "exception", "object", "or", "a", "tuple", "of", "objects", "to", "be", "used", "in", "the", "except", "clause", ".", "The",...
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L114-L134
train
41,078
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy._node
def _node(self, tax_id): """ Returns parent_id, rank FIXME: expand return rank to include custom 'below' ranks built when get_lineage is caled """ s = select([self.nodes.c.parent_id, self.nodes.c.rank], self.nodes.c.tax_id == tax_id) res = s.execute() output = res.fetchone() if not output: msg = 'value "{}" not found in nodes.tax_id'.format(tax_id) raise ValueError(msg) else: return output
python
def _node(self, tax_id): """ Returns parent_id, rank FIXME: expand return rank to include custom 'below' ranks built when get_lineage is caled """ s = select([self.nodes.c.parent_id, self.nodes.c.rank], self.nodes.c.tax_id == tax_id) res = s.execute() output = res.fetchone() if not output: msg = 'value "{}" not found in nodes.tax_id'.format(tax_id) raise ValueError(msg) else: return output
[ "def", "_node", "(", "self", ",", "tax_id", ")", ":", "s", "=", "select", "(", "[", "self", ".", "nodes", ".", "c", ".", "parent_id", ",", "self", ".", "nodes", ".", "c", ".", "rank", "]", ",", "self", ".", "nodes", ".", "c", ".", "tax_id", "...
Returns parent_id, rank FIXME: expand return rank to include custom 'below' ranks built when get_lineage is caled
[ "Returns", "parent_id", "rank" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L136-L151
train
41,079
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy.primary_from_id
def primary_from_id(self, tax_id): """ Returns primary taxonomic name associated with tax_id """ s = select([self.names.c.tax_name], and_(self.names.c.tax_id == tax_id, self.names.c.is_primary)) res = s.execute() output = res.fetchone() if not output: msg = 'value "{}" not found in names.tax_id'.format(tax_id) raise ValueError(msg) else: return output[0]
python
def primary_from_id(self, tax_id): """ Returns primary taxonomic name associated with tax_id """ s = select([self.names.c.tax_name], and_(self.names.c.tax_id == tax_id, self.names.c.is_primary)) res = s.execute() output = res.fetchone() if not output: msg = 'value "{}" not found in names.tax_id'.format(tax_id) raise ValueError(msg) else: return output[0]
[ "def", "primary_from_id", "(", "self", ",", "tax_id", ")", ":", "s", "=", "select", "(", "[", "self", ".", "names", ".", "c", ".", "tax_name", "]", ",", "and_", "(", "self", ".", "names", ".", "c", ".", "tax_id", "==", "tax_id", ",", "self", ".",...
Returns primary taxonomic name associated with tax_id
[ "Returns", "primary", "taxonomic", "name", "associated", "with", "tax_id" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L153-L167
train
41,080
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy.primary_from_name
def primary_from_name(self, tax_name): """ Return tax_id and primary tax_name corresponding to tax_name. """ names = self.names s1 = select([names.c.tax_id, names.c.is_primary], names.c.tax_name == tax_name) log.debug(str(s1)) res = s1.execute().fetchone() if res: tax_id, is_primary = res else: msg = '"{}" not found in names.tax_names'.format(tax_name) raise ValueError(msg) if not is_primary: s2 = select([names.c.tax_name], and_(names.c.tax_id == tax_id, names.c.is_primary)) tax_name = s2.execute().fetchone()[0] return tax_id, tax_name, bool(is_primary)
python
def primary_from_name(self, tax_name): """ Return tax_id and primary tax_name corresponding to tax_name. """ names = self.names s1 = select([names.c.tax_id, names.c.is_primary], names.c.tax_name == tax_name) log.debug(str(s1)) res = s1.execute().fetchone() if res: tax_id, is_primary = res else: msg = '"{}" not found in names.tax_names'.format(tax_name) raise ValueError(msg) if not is_primary: s2 = select([names.c.tax_name], and_(names.c.tax_id == tax_id, names.c.is_primary)) tax_name = s2.execute().fetchone()[0] return tax_id, tax_name, bool(is_primary)
[ "def", "primary_from_name", "(", "self", ",", "tax_name", ")", ":", "names", "=", "self", ".", "names", "s1", "=", "select", "(", "[", "names", ".", "c", ".", "tax_id", ",", "names", ".", "c", ".", "is_primary", "]", ",", "names", ".", "c", ".", ...
Return tax_id and primary tax_name corresponding to tax_name.
[ "Return", "tax_id", "and", "primary", "tax_name", "corresponding", "to", "tax_name", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L169-L193
train
41,081
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy.lineage
def lineage(self, tax_id=None, tax_name=None): """Public method for returning a lineage; includes tax_name and rank """ if not bool(tax_id) ^ bool(tax_name): msg = 'Exactly one of tax_id and tax_name may be provided.' raise ValueError(msg) if tax_name: tax_id, primary_name, is_primary = self.primary_from_name(tax_name) else: primary_name = None # assumes stable ordering of lineage from root --> leaf lintups = self._get_lineage(tax_id) ldict = dict(lintups) ldict['tax_id'] = tax_id try: # parent is second to last element, except for root __, ldict['parent_id'] = lintups[-2] except IndexError: ldict['parent_id'] = None ldict['rank'], __ = lintups[-1] # this taxon is last element in lineage ldict['tax_name'] = primary_name or self.primary_from_id(tax_id) return ldict
python
def lineage(self, tax_id=None, tax_name=None): """Public method for returning a lineage; includes tax_name and rank """ if not bool(tax_id) ^ bool(tax_name): msg = 'Exactly one of tax_id and tax_name may be provided.' raise ValueError(msg) if tax_name: tax_id, primary_name, is_primary = self.primary_from_name(tax_name) else: primary_name = None # assumes stable ordering of lineage from root --> leaf lintups = self._get_lineage(tax_id) ldict = dict(lintups) ldict['tax_id'] = tax_id try: # parent is second to last element, except for root __, ldict['parent_id'] = lintups[-2] except IndexError: ldict['parent_id'] = None ldict['rank'], __ = lintups[-1] # this taxon is last element in lineage ldict['tax_name'] = primary_name or self.primary_from_id(tax_id) return ldict
[ "def", "lineage", "(", "self", ",", "tax_id", "=", "None", ",", "tax_name", "=", "None", ")", ":", "if", "not", "bool", "(", "tax_id", ")", "^", "bool", "(", "tax_name", ")", ":", "msg", "=", "'Exactly one of tax_id and tax_name may be provided.'", "raise", ...
Public method for returning a lineage; includes tax_name and rank
[ "Public", "method", "for", "returning", "a", "lineage", ";", "includes", "tax_name", "and", "rank" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L375-L403
train
41,082
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy.verify_rank_integrity
def verify_rank_integrity(self, tax_id, rank, parent_id, children): """Confirm that for each node the parent ranks and children ranks are coherent """ def _lower(n1, n2): return self.ranks.index(n1) < self.ranks.index(n2) if rank not in self.ranks: raise TaxonIntegrityError('rank "{}" is undefined'.format(rank)) parent_rank = self.rank(parent_id) # undefined ranks can be placed anywhere in a lineage if not _lower(rank, parent_rank) and rank != self.NO_RANK: msg = ('New node "{}", rank "{}" has same or ' 'higher rank than parent node "{}", rank "{}"') msg = msg.format(tax_id, rank, parent_id, parent_rank) raise TaxonIntegrityError(msg) for child in children: if not _lower(self.rank(child), rank): msg = 'Child node {} has same or lower rank as new node {}' msg = msg.format(tax_id, child) raise TaxonIntegrityError(msg) return True
python
def verify_rank_integrity(self, tax_id, rank, parent_id, children): """Confirm that for each node the parent ranks and children ranks are coherent """ def _lower(n1, n2): return self.ranks.index(n1) < self.ranks.index(n2) if rank not in self.ranks: raise TaxonIntegrityError('rank "{}" is undefined'.format(rank)) parent_rank = self.rank(parent_id) # undefined ranks can be placed anywhere in a lineage if not _lower(rank, parent_rank) and rank != self.NO_RANK: msg = ('New node "{}", rank "{}" has same or ' 'higher rank than parent node "{}", rank "{}"') msg = msg.format(tax_id, rank, parent_id, parent_rank) raise TaxonIntegrityError(msg) for child in children: if not _lower(self.rank(child), rank): msg = 'Child node {} has same or lower rank as new node {}' msg = msg.format(tax_id, child) raise TaxonIntegrityError(msg) return True
[ "def", "verify_rank_integrity", "(", "self", ",", "tax_id", ",", "rank", ",", "parent_id", ",", "children", ")", ":", "def", "_lower", "(", "n1", ",", "n2", ")", ":", "return", "self", ".", "ranks", ".", "index", "(", "n1", ")", "<", "self", ".", "...
Confirm that for each node the parent ranks and children ranks are coherent
[ "Confirm", "that", "for", "each", "node", "the", "parent", "ranks", "and", "children", "ranks", "are", "coherent" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L457-L481
train
41,083
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy.add_node
def add_node(self, tax_id, parent_id, rank, names, source_name, children=None, is_valid=True, execute=True, **ignored): """Add a node to the taxonomy. ``source_name`` is added to table "source" if necessary. """ if ignored: log.info('some arguments were ignored: {} '.format(str(ignored))) children = children or [] self.verify_rank_integrity(tax_id, rank, parent_id, children) source_id, __ = self.add_source(source_name) assert isinstance(is_valid, bool) statements = [] # add node statements.append( self.nodes.insert().values( tax_id=tax_id, parent_id=parent_id, rank=rank, source_id=source_id)) # add names. Since this is a new node, at least one name must # be provided; if only one is provided, it is the primary # name. If more than one is primary, an error will be raised # from add_names() if len(names) == 1: names[0]['is_primary'] = True else: primary_names = [n['tax_name'] for n in names if n.get('is_primary')] if len(primary_names) != 1: raise ValueError( '`is_primary` must be True for exactly one name in `names`') for namedict in names: namedict['source_id'] = source_id if 'source_name' in namedict: del namedict['source_name'] statements.extend(self.add_names(tax_id, names, execute=False)) # add children and update source_id for child in children: statements.append(self.nodes.update( whereclause=self.nodes.c.tax_id == child, values={'parent_id': tax_id, 'source_id': source_id})) if execute: self.execute(statements) else: return statements
python
def add_node(self, tax_id, parent_id, rank, names, source_name, children=None, is_valid=True, execute=True, **ignored): """Add a node to the taxonomy. ``source_name`` is added to table "source" if necessary. """ if ignored: log.info('some arguments were ignored: {} '.format(str(ignored))) children = children or [] self.verify_rank_integrity(tax_id, rank, parent_id, children) source_id, __ = self.add_source(source_name) assert isinstance(is_valid, bool) statements = [] # add node statements.append( self.nodes.insert().values( tax_id=tax_id, parent_id=parent_id, rank=rank, source_id=source_id)) # add names. Since this is a new node, at least one name must # be provided; if only one is provided, it is the primary # name. If more than one is primary, an error will be raised # from add_names() if len(names) == 1: names[0]['is_primary'] = True else: primary_names = [n['tax_name'] for n in names if n.get('is_primary')] if len(primary_names) != 1: raise ValueError( '`is_primary` must be True for exactly one name in `names`') for namedict in names: namedict['source_id'] = source_id if 'source_name' in namedict: del namedict['source_name'] statements.extend(self.add_names(tax_id, names, execute=False)) # add children and update source_id for child in children: statements.append(self.nodes.update( whereclause=self.nodes.c.tax_id == child, values={'parent_id': tax_id, 'source_id': source_id})) if execute: self.execute(statements) else: return statements
[ "def", "add_node", "(", "self", ",", "tax_id", ",", "parent_id", ",", "rank", ",", "names", ",", "source_name", ",", "children", "=", "None", ",", "is_valid", "=", "True", ",", "execute", "=", "True", ",", "*", "*", "ignored", ")", ":", "if", "ignore...
Add a node to the taxonomy. ``source_name`` is added to table "source" if necessary.
[ "Add", "a", "node", "to", "the", "taxonomy", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L487-L541
train
41,084
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy.add_names
def add_names(self, tax_id, names, execute=True): """Associate one or more names with ``tax_id``. ``names`` is a list of one or more dicts, with keys corresponding to the signature of ``self.add_name()`` (excluding ``execute``). """ primary_names = [n['tax_name'] for n in names if n.get('is_primary')] if len(primary_names) > 1: raise ValueError( '`is_primary` may be True for no more than one name in `names`') statements = [] for namevals in names: if 'tax_id' in namevals: del namevals['tax_id'] statements.extend( self.add_name(tax_id=tax_id, execute=False, **namevals)) if execute: self.execute(statements) else: return statements
python
def add_names(self, tax_id, names, execute=True): """Associate one or more names with ``tax_id``. ``names`` is a list of one or more dicts, with keys corresponding to the signature of ``self.add_name()`` (excluding ``execute``). """ primary_names = [n['tax_name'] for n in names if n.get('is_primary')] if len(primary_names) > 1: raise ValueError( '`is_primary` may be True for no more than one name in `names`') statements = [] for namevals in names: if 'tax_id' in namevals: del namevals['tax_id'] statements.extend( self.add_name(tax_id=tax_id, execute=False, **namevals)) if execute: self.execute(statements) else: return statements
[ "def", "add_names", "(", "self", ",", "tax_id", ",", "names", ",", "execute", "=", "True", ")", ":", "primary_names", "=", "[", "n", "[", "'tax_name'", "]", "for", "n", "in", "names", "if", "n", ".", "get", "(", "'is_primary'", ")", "]", "if", "len...
Associate one or more names with ``tax_id``. ``names`` is a list of one or more dicts, with keys corresponding to the signature of ``self.add_name()`` (excluding ``execute``).
[ "Associate", "one", "or", "more", "names", "with", "tax_id", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L650-L675
train
41,085
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy.tax_ids
def tax_ids(self): ''' Return all tax_ids in node table ''' fetch = select([self.nodes.c.tax_id]).execute().fetchall() ids = [t[0] for t in fetch] return ids
python
def tax_ids(self): ''' Return all tax_ids in node table ''' fetch = select([self.nodes.c.tax_id]).execute().fetchall() ids = [t[0] for t in fetch] return ids
[ "def", "tax_ids", "(", "self", ")", ":", "fetch", "=", "select", "(", "[", "self", ".", "nodes", ".", "c", ".", "tax_id", "]", ")", ".", "execute", "(", ")", ".", "fetchall", "(", ")", "ids", "=", "[", "t", "[", "0", "]", "for", "t", "in", ...
Return all tax_ids in node table
[ "Return", "all", "tax_ids", "in", "node", "table" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L716-L722
train
41,086
amicks/Speculator
api/helpers.py
validate_db
def validate_db(sqlalchemy_bind, is_enabled=ENABLE_DB): """ Checks if a DB is authorized and responding before executing the function """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): def is_db_responsive(): try: sqlalchemy_bind.session.query('1').first_or_404() except: return False else: return True if is_enabled and is_db_responsive(): return func(*args, **kwargs) else: abort(HTTP_CODES.UNAUTHORIZED) return wrapper return decorator
python
def validate_db(sqlalchemy_bind, is_enabled=ENABLE_DB): """ Checks if a DB is authorized and responding before executing the function """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): def is_db_responsive(): try: sqlalchemy_bind.session.query('1').first_or_404() except: return False else: return True if is_enabled and is_db_responsive(): return func(*args, **kwargs) else: abort(HTTP_CODES.UNAUTHORIZED) return wrapper return decorator
[ "def", "validate_db", "(", "sqlalchemy_bind", ",", "is_enabled", "=", "ENABLE_DB", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "is_...
Checks if a DB is authorized and responding before executing the function
[ "Checks", "if", "a", "DB", "is", "authorized", "and", "responding", "before", "executing", "the", "function" ]
f7d6590aded20b1e1b5df16a4b27228ee821c4ab
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/api/helpers.py#L17-L34
train
41,087
fhcrc/taxtastic
taxtastic/utils.py
get_new_nodes
def get_new_nodes(fname): """ Return an iterator of dicts given a .csv-format file. """ with open(fname, 'rU') as infile: infile = (line for line in infile if not line.startswith('#')) reader = list(csv.DictReader(infile)) rows = (d for d in reader if d['tax_id']) # for now, children are provided as a semicolon-delimited list # within a cell (yes, yuck). We need to convert thit into a list # if present. for d in rows: if 'children' in d: if d['children']: d['children'] = [x.strip() for x in d['children'].split(';')] else: del d['children'] yield d
python
def get_new_nodes(fname): """ Return an iterator of dicts given a .csv-format file. """ with open(fname, 'rU') as infile: infile = (line for line in infile if not line.startswith('#')) reader = list(csv.DictReader(infile)) rows = (d for d in reader if d['tax_id']) # for now, children are provided as a semicolon-delimited list # within a cell (yes, yuck). We need to convert thit into a list # if present. for d in rows: if 'children' in d: if d['children']: d['children'] = [x.strip() for x in d['children'].split(';')] else: del d['children'] yield d
[ "def", "get_new_nodes", "(", "fname", ")", ":", "with", "open", "(", "fname", ",", "'rU'", ")", "as", "infile", ":", "infile", "=", "(", "line", "for", "line", "in", "infile", "if", "not", "line", ".", "startswith", "(", "'#'", ")", ")", "reader", ...
Return an iterator of dicts given a .csv-format file.
[ "Return", "an", "iterator", "of", "dicts", "given", "a", ".", "csv", "-", "format", "file", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/utils.py#L29-L48
train
41,088
fhcrc/taxtastic
taxtastic/utils.py
parse_raxml
def parse_raxml(handle): """Parse RAxML's summary output. *handle* should be an open file handle containing the RAxML output. It is parsed and a dictionary returned. """ s = ''.join(handle.readlines()) result = {} try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s) try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s) result['empirical_frequencies'] = ( result['datatype'] != 'AA' or re.search('empirical base frequencies', s, re.IGNORECASE) is not None) try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s) rates = {} if result['datatype'] != 'AA': try_set_fields(rates, (r"rates\[0\] ac ag at cg ct gt: " r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) " r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"), s, hook=float) try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float) if len(rates) > 0: result['subs_rates'] = rates result['gamma'] = {'n_cats': 4} try_set_fields(result['gamma'], r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float) result['ras_model'] = 'gamma' return result
python
def parse_raxml(handle): """Parse RAxML's summary output. *handle* should be an open file handle containing the RAxML output. It is parsed and a dictionary returned. """ s = ''.join(handle.readlines()) result = {} try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s) try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s) result['empirical_frequencies'] = ( result['datatype'] != 'AA' or re.search('empirical base frequencies', s, re.IGNORECASE) is not None) try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s) rates = {} if result['datatype'] != 'AA': try_set_fields(rates, (r"rates\[0\] ac ag at cg ct gt: " r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) " r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"), s, hook=float) try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float) if len(rates) > 0: result['subs_rates'] = rates result['gamma'] = {'n_cats': 4} try_set_fields(result['gamma'], r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float) result['ras_model'] = 'gamma' return result
[ "def", "parse_raxml", "(", "handle", ")", ":", "s", "=", "''", ".", "join", "(", "handle", ".", "readlines", "(", ")", ")", "result", "=", "{", "}", "try_set_fields", "(", "result", ",", "r'(?P<program>RAxML version [0-9.]+)'", ",", "s", ")", "try_set_fiel...
Parse RAxML's summary output. *handle* should be an open file handle containing the RAxML output. It is parsed and a dictionary returned.
[ "Parse", "RAxML", "s", "summary", "output", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/utils.py#L76-L109
train
41,089
fhcrc/taxtastic
taxtastic/utils.py
parse_stockholm
def parse_stockholm(fobj): """Return a list of names from an Stockholm-format sequence alignment file. ``fobj`` is an open file or another object representing a sequence of lines. """ names = OrderedDict() found_eof = False for line in fobj: line = line.strip() if line == '//': found_eof = True elif line.startswith('#') or not line.strip(): continue else: name, __ = line.split(None, 1) names[name] = None if not found_eof: raise ValueError('Invalid Stockholm format: no file terminator') return list(names.keys())
python
def parse_stockholm(fobj): """Return a list of names from an Stockholm-format sequence alignment file. ``fobj`` is an open file or another object representing a sequence of lines. """ names = OrderedDict() found_eof = False for line in fobj: line = line.strip() if line == '//': found_eof = True elif line.startswith('#') or not line.strip(): continue else: name, __ = line.split(None, 1) names[name] = None if not found_eof: raise ValueError('Invalid Stockholm format: no file terminator') return list(names.keys())
[ "def", "parse_stockholm", "(", "fobj", ")", ":", "names", "=", "OrderedDict", "(", ")", "found_eof", "=", "False", "for", "line", "in", "fobj", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "==", "'//'", ":", "found_eof", "=", "True...
Return a list of names from an Stockholm-format sequence alignment file. ``fobj`` is an open file or another object representing a sequence of lines.
[ "Return", "a", "list", "of", "names", "from", "an", "Stockholm", "-", "format", "sequence", "alignment", "file", ".", "fobj", "is", "an", "open", "file", "or", "another", "object", "representing", "a", "sequence", "of", "lines", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/utils.py#L208-L231
train
41,090
fhcrc/taxtastic
taxtastic/utils.py
has_rppr
def has_rppr(rppr_name='rppr'): """ Check for rppr binary in path """ with open(os.devnull) as dn: try: subprocess.check_call([rppr_name], stdout=dn, stderr=dn) except OSError as e: if e.errno == os.errno.ENOENT: return False else: raise except subprocess.CalledProcessError as e: # rppr returns non-zero exit status with no arguments pass return True
python
def has_rppr(rppr_name='rppr'): """ Check for rppr binary in path """ with open(os.devnull) as dn: try: subprocess.check_call([rppr_name], stdout=dn, stderr=dn) except OSError as e: if e.errno == os.errno.ENOENT: return False else: raise except subprocess.CalledProcessError as e: # rppr returns non-zero exit status with no arguments pass return True
[ "def", "has_rppr", "(", "rppr_name", "=", "'rppr'", ")", ":", "with", "open", "(", "os", ".", "devnull", ")", "as", "dn", ":", "try", ":", "subprocess", ".", "check_call", "(", "[", "rppr_name", "]", ",", "stdout", "=", "dn", ",", "stderr", "=", "d...
Check for rppr binary in path
[ "Check", "for", "rppr", "binary", "in", "path" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/utils.py#L234-L249
train
41,091
fhcrc/taxtastic
taxtastic/utils.py
add_database_args
def add_database_args(parser): ''' Add a standard set of database arguments for argparse ''' parser.add_argument( 'url', nargs='?', default='sqlite:///ncbi_taxonomy.db', type=sqlite_default(), help=('Database string URI or filename. If no database scheme ' 'specified \"sqlite:///\" will be prepended. [%(default)s]')) db_parser = parser.add_argument_group(title='database options') # TODO: better description of what --schema does db_parser.add_argument( '--schema', help=('Name of SQL schema in database to query ' '(if database flavor supports this).')) return parser
python
def add_database_args(parser): ''' Add a standard set of database arguments for argparse ''' parser.add_argument( 'url', nargs='?', default='sqlite:///ncbi_taxonomy.db', type=sqlite_default(), help=('Database string URI or filename. If no database scheme ' 'specified \"sqlite:///\" will be prepended. [%(default)s]')) db_parser = parser.add_argument_group(title='database options') # TODO: better description of what --schema does db_parser.add_argument( '--schema', help=('Name of SQL schema in database to query ' '(if database flavor supports this).')) return parser
[ "def", "add_database_args", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'url'", ",", "nargs", "=", "'?'", ",", "default", "=", "'sqlite:///ncbi_taxonomy.db'", ",", "type", "=", "sqlite_default", "(", ")", ",", "help", "=", "(", "'Database s...
Add a standard set of database arguments for argparse
[ "Add", "a", "standard", "set", "of", "database", "arguments", "for", "argparse" ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/utils.py#L252-L271
train
41,092
fhcrc/taxtastic
taxtastic/utils.py
sqlite_default
def sqlite_default(): ''' Prepend default scheme if none is specified. This helps provides backwards compatibility with old versions of taxtastic where sqlite was the automatic default database. ''' def parse_url(url): # TODO: need separate option for a config file if url.endswith('.db') or url.endswith('.sqlite'): if not url.startswith('sqlite:///'): url = 'sqlite:///' + url elif url.endswith('.cfg') or url.endswith('.conf'): conf = configparser.SafeConfigParser(allow_no_value=True) conf.optionxform = str # options are case-sensitive conf.read(url) url = conf.get('sqlalchemy', 'url') return url return parse_url
python
def sqlite_default(): ''' Prepend default scheme if none is specified. This helps provides backwards compatibility with old versions of taxtastic where sqlite was the automatic default database. ''' def parse_url(url): # TODO: need separate option for a config file if url.endswith('.db') or url.endswith('.sqlite'): if not url.startswith('sqlite:///'): url = 'sqlite:///' + url elif url.endswith('.cfg') or url.endswith('.conf'): conf = configparser.SafeConfigParser(allow_no_value=True) conf.optionxform = str # options are case-sensitive conf.read(url) url = conf.get('sqlalchemy', 'url') return url return parse_url
[ "def", "sqlite_default", "(", ")", ":", "def", "parse_url", "(", "url", ")", ":", "# TODO: need separate option for a config file", "if", "url", ".", "endswith", "(", "'.db'", ")", "or", "url", ".", "endswith", "(", "'.sqlite'", ")", ":", "if", "not", "url",...
Prepend default scheme if none is specified. This helps provides backwards compatibility with old versions of taxtastic where sqlite was the automatic default database.
[ "Prepend", "default", "scheme", "if", "none", "is", "specified", ".", "This", "helps", "provides", "backwards", "compatibility", "with", "old", "versions", "of", "taxtastic", "where", "sqlite", "was", "the", "automatic", "default", "database", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/utils.py#L274-L292
train
41,093
fhcrc/taxtastic
taxtastic/subcommands/strip.py
action
def action(args): """Strips non-current files and rollback information from a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on). """ log.info('loading reference package') refpkg.Refpkg(args.refpkg, create=False).strip()
python
def action(args): """Strips non-current files and rollback information from a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on). """ log.info('loading reference package') refpkg.Refpkg(args.refpkg, create=False).strip()
[ "def", "action", "(", "args", ")", ":", "log", ".", "info", "(", "'loading reference package'", ")", "refpkg", ".", "Refpkg", "(", "args", ".", "refpkg", ",", "create", "=", "False", ")", ".", "strip", "(", ")" ]
Strips non-current files and rollback information from a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on).
[ "Strips", "non", "-", "current", "files", "and", "rollback", "information", "from", "a", "refpkg", "." ]
4e874b7f2cc146178828bfba386314f8c342722b
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/subcommands/strip.py#L37-L45
train
41,094
seznam/shelter
shelter/contrib/config/iniconfig.py
IniConfig.name
def name(self): """ Application name. It's used as a process name. """ try: return self.config_parser.get('application', 'name') except CONFIGPARSER_EXC: return super(IniConfig, self).name
python
def name(self): """ Application name. It's used as a process name. """ try: return self.config_parser.get('application', 'name') except CONFIGPARSER_EXC: return super(IniConfig, self).name
[ "def", "name", "(", "self", ")", ":", "try", ":", "return", "self", ".", "config_parser", ".", "get", "(", "'application'", ",", "'name'", ")", "except", "CONFIGPARSER_EXC", ":", "return", "super", "(", "IniConfig", ",", "self", ")", ".", "name" ]
Application name. It's used as a process name.
[ "Application", "name", ".", "It", "s", "used", "as", "a", "process", "name", "." ]
c652b0ff1cca70158f8fc97d9210c1fa5961ac1c
https://github.com/seznam/shelter/blob/c652b0ff1cca70158f8fc97d9210c1fa5961ac1c/shelter/contrib/config/iniconfig.py#L128-L135
train
41,095
sdispater/pytzdata
pytzdata/__init__.py
tz_file
def tz_file(name): """ Open a timezone file from the zoneinfo subdir for reading. :param name: The name of the timezone. :type name: str :rtype: file """ try: filepath = tz_path(name) return open(filepath, 'rb') except TimezoneNotFound: # http://bugs.launchpad.net/bugs/383171 - we avoid using this # unless absolutely necessary to help when a broken version of # pkg_resources is installed. try: from pkg_resources import resource_stream except ImportError: resource_stream = None if resource_stream is not None: try: return resource_stream(__name__, 'zoneinfo/' + name) except FileNotFoundError: return tz_path(name) raise
python
def tz_file(name): """ Open a timezone file from the zoneinfo subdir for reading. :param name: The name of the timezone. :type name: str :rtype: file """ try: filepath = tz_path(name) return open(filepath, 'rb') except TimezoneNotFound: # http://bugs.launchpad.net/bugs/383171 - we avoid using this # unless absolutely necessary to help when a broken version of # pkg_resources is installed. try: from pkg_resources import resource_stream except ImportError: resource_stream = None if resource_stream is not None: try: return resource_stream(__name__, 'zoneinfo/' + name) except FileNotFoundError: return tz_path(name) raise
[ "def", "tz_file", "(", "name", ")", ":", "try", ":", "filepath", "=", "tz_path", "(", "name", ")", "return", "open", "(", "filepath", ",", "'rb'", ")", "except", "TimezoneNotFound", ":", "# http://bugs.launchpad.net/bugs/383171 - we avoid using this", "# unless abso...
Open a timezone file from the zoneinfo subdir for reading. :param name: The name of the timezone. :type name: str :rtype: file
[ "Open", "a", "timezone", "file", "from", "the", "zoneinfo", "subdir", "for", "reading", "." ]
5707a44e425c0ab57cf9d1f6be83528accc31412
https://github.com/sdispater/pytzdata/blob/5707a44e425c0ab57cf9d1f6be83528accc31412/pytzdata/__init__.py#L22-L50
train
41,096
sdispater/pytzdata
pytzdata/__init__.py
tz_path
def tz_path(name): """ Return the path to a timezone file. :param name: The name of the timezone. :type name: str :rtype: str """ if not name: raise ValueError('Invalid timezone') name_parts = name.lstrip('/').split('/') for part in name_parts: if part == os.path.pardir or os.path.sep in part: raise ValueError('Bad path segment: %r' % part) filepath = os.path.join(_DIRECTORY, *name_parts) if not os.path.exists(filepath): raise TimezoneNotFound('Timezone {} not found at {}'.format(name, filepath)) return filepath
python
def tz_path(name): """ Return the path to a timezone file. :param name: The name of the timezone. :type name: str :rtype: str """ if not name: raise ValueError('Invalid timezone') name_parts = name.lstrip('/').split('/') for part in name_parts: if part == os.path.pardir or os.path.sep in part: raise ValueError('Bad path segment: %r' % part) filepath = os.path.join(_DIRECTORY, *name_parts) if not os.path.exists(filepath): raise TimezoneNotFound('Timezone {} not found at {}'.format(name, filepath)) return filepath
[ "def", "tz_path", "(", "name", ")", ":", "if", "not", "name", ":", "raise", "ValueError", "(", "'Invalid timezone'", ")", "name_parts", "=", "name", ".", "lstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "for", "part", "in", "name_parts", ":", ...
Return the path to a timezone file. :param name: The name of the timezone. :type name: str :rtype: str
[ "Return", "the", "path", "to", "a", "timezone", "file", "." ]
5707a44e425c0ab57cf9d1f6be83528accc31412
https://github.com/sdispater/pytzdata/blob/5707a44e425c0ab57cf9d1f6be83528accc31412/pytzdata/__init__.py#L53-L76
train
41,097
sdispater/pytzdata
pytzdata/__init__.py
get_timezones
def get_timezones(): """ Get the supported timezones. The list will be cached unless you set the "fresh" attribute to True. :param fresh: Whether to get a fresh list or not :type fresh: bool :rtype: tuple """ base_dir = _DIRECTORY zones = () for root, dirs, files in os.walk(base_dir): for basename in files: zone = os.path.join(root, basename) if os.path.isdir(zone): continue zone = os.path.relpath(zone, base_dir) with open(os.path.join(root, basename), 'rb') as fd: if fd.read(4) == b'TZif' and zone not in INVALID_ZONES: zones = zones + (zone,) return tuple(sorted(zones))
python
def get_timezones(): """ Get the supported timezones. The list will be cached unless you set the "fresh" attribute to True. :param fresh: Whether to get a fresh list or not :type fresh: bool :rtype: tuple """ base_dir = _DIRECTORY zones = () for root, dirs, files in os.walk(base_dir): for basename in files: zone = os.path.join(root, basename) if os.path.isdir(zone): continue zone = os.path.relpath(zone, base_dir) with open(os.path.join(root, basename), 'rb') as fd: if fd.read(4) == b'TZif' and zone not in INVALID_ZONES: zones = zones + (zone,) return tuple(sorted(zones))
[ "def", "get_timezones", "(", ")", ":", "base_dir", "=", "_DIRECTORY", "zones", "=", "(", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "base_dir", ")", ":", "for", "basename", "in", "files", ":", "zone", "=", "os", "....
Get the supported timezones. The list will be cached unless you set the "fresh" attribute to True. :param fresh: Whether to get a fresh list or not :type fresh: bool :rtype: tuple
[ "Get", "the", "supported", "timezones", "." ]
5707a44e425c0ab57cf9d1f6be83528accc31412
https://github.com/sdispater/pytzdata/blob/5707a44e425c0ab57cf9d1f6be83528accc31412/pytzdata/__init__.py#L88-L114
train
41,098
pinax/pinax-points
pinax/points/models.py
award_points
def award_points(target, key, reason="", source=None): """ Awards target the point value for key. If key is an integer then it's a one off assignment and should be interpreted as the actual point value. """ point_value, points = get_points(key) if not ALLOW_NEGATIVE_TOTALS: total = points_awarded(target) if total + points < 0: reason = reason + "(floored from {0} to 0)".format(points) points = -total apv = AwardedPointValue(points=points, value=point_value, reason=reason) if isinstance(target, get_user_model()): apv.target_user = target lookup_params = { "target_user": target } else: apv.target_object = target lookup_params = { "target_content_type": apv.target_content_type, "target_object_id": apv.target_object_id, } if source is not None: if isinstance(source, get_user_model()): apv.source_user = source else: apv.source_object = source apv.save() if not TargetStat.update_points(points, lookup_params): try: sid = transaction.savepoint() TargetStat._default_manager.create( **dict(lookup_params, points=points) ) transaction.savepoint_commit(sid) except IntegrityError: transaction.savepoint_rollback(sid) TargetStat.update_points(points, lookup_params) signals.points_awarded.send( sender=target.__class__, target=target, key=key, points=points, source=source ) new_points = points_awarded(target) old_points = new_points - points TargetStat.update_positions((old_points, new_points)) return apv
python
def award_points(target, key, reason="", source=None): """ Awards target the point value for key. If key is an integer then it's a one off assignment and should be interpreted as the actual point value. """ point_value, points = get_points(key) if not ALLOW_NEGATIVE_TOTALS: total = points_awarded(target) if total + points < 0: reason = reason + "(floored from {0} to 0)".format(points) points = -total apv = AwardedPointValue(points=points, value=point_value, reason=reason) if isinstance(target, get_user_model()): apv.target_user = target lookup_params = { "target_user": target } else: apv.target_object = target lookup_params = { "target_content_type": apv.target_content_type, "target_object_id": apv.target_object_id, } if source is not None: if isinstance(source, get_user_model()): apv.source_user = source else: apv.source_object = source apv.save() if not TargetStat.update_points(points, lookup_params): try: sid = transaction.savepoint() TargetStat._default_manager.create( **dict(lookup_params, points=points) ) transaction.savepoint_commit(sid) except IntegrityError: transaction.savepoint_rollback(sid) TargetStat.update_points(points, lookup_params) signals.points_awarded.send( sender=target.__class__, target=target, key=key, points=points, source=source ) new_points = points_awarded(target) old_points = new_points - points TargetStat.update_positions((old_points, new_points)) return apv
[ "def", "award_points", "(", "target", ",", "key", ",", "reason", "=", "\"\"", ",", "source", "=", "None", ")", ":", "point_value", ",", "points", "=", "get_points", "(", "key", ")", "if", "not", "ALLOW_NEGATIVE_TOTALS", ":", "total", "=", "points_awarded",...
Awards target the point value for key. If key is an integer then it's a one off assignment and should be interpreted as the actual point value.
[ "Awards", "target", "the", "point", "value", "for", "key", ".", "If", "key", "is", "an", "integer", "then", "it", "s", "a", "one", "off", "assignment", "and", "should", "be", "interpreted", "as", "the", "actual", "point", "value", "." ]
c8490f847d0572943029ff4718d67094c04fadc9
https://github.com/pinax/pinax-points/blob/c8490f847d0572943029ff4718d67094c04fadc9/pinax/points/models.py#L167-L225
train
41,099