Dataset schema (one record per function):
  repo              string, 7-54 chars    (GitHub repository)
  path              string, 4-192 chars   (file path within the repo)
  url               string, 87-284 chars  (permalink to the source lines)
  code              string, 78-104k chars (function source)
  code_tokens       list (tokenized source)
  docstring         string, 1-46.9k chars
  docstring_tokens  list (tokenized docstring)
  language          categorical string, 1 distinct value
  partition         categorical string, 3 distinct values
UDST/urbansim
urbansim/developer/developer.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L23-L44
def _max_form(f, colname):
    """
    Assumes dataframe with hierarchical columns with first index
    equal to the use and second index equal to the attribute.

    e.g. f.columns equal to::

        mixedoffice   building_cost
                      building_revenue
                      building_size
                      max_profit
                      max_profit_far
                      total_cost
        industrial    building_cost
                      building_revenue
                      building_size
                      max_profit
                      max_profit_far
                      total_cost
    """
    df = f.stack(level=0)[[colname]].stack().unstack(level=1).reset_index(level=1, drop=True)
    return df.idxmax(axis=1)
[ "def", "_max_form", "(", "f", ",", "colname", ")", ":", "df", "=", "f", ".", "stack", "(", "level", "=", "0", ")", "[", "[", "colname", "]", "]", ".", "stack", "(", ")", ".", "unstack", "(", "level", "=", "1", ")", ".", "reset_index", "(", "level", "=", "1", ",", "drop", "=", "True", ")", "return", "df", ".", "idxmax", "(", "axis", "=", "1", ")" ]
Assumes dataframe with hierarchical columns with first index equal to the use and second index equal to the attribute. e.g. f.columns equal to:: mixedoffice building_cost building_revenue building_size max_profit max_profit_far total_cost industrial building_cost building_revenue building_size max_profit max_profit_far total_cost
[ "Assumes", "dataframe", "with", "hierarchical", "columns", "with", "first", "index", "equal", "to", "the", "use", "and", "second", "index", "equal", "to", "the", "attribute", "." ]
python
train
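The stack/unstack pivot inside _max_form is easy to miss in one line. Below is a minimal sketch of it on an invented two-use frame (column names follow the docstring's example; the numbers are made up, not urbansim data; recent pandas may warn about stack's future behavior):

    import pandas as pd

    # Toy frame: rows are parcels, columns are (use, attribute) pairs.
    cols = pd.MultiIndex.from_product(
        [["mixedoffice", "industrial"], ["max_profit", "total_cost"]])
    f = pd.DataFrame([[10.0, 5.0, 7.0, 4.0],
                      [2.0, 6.0, 9.0, 1.0]],
                     columns=cols)

    # The pivot yields rows=parcels, columns=uses, values=max_profit,
    # so idxmax(axis=1) names the most profitable use per parcel.
    print(_max_form(f, "max_profit"))
    # 0    mixedoffice   (10.0 > 7.0)
    # 1     industrial   ( 9.0 > 2.0)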
geertj/pyskiplist
pyskiplist/skiplist.py
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L341-L343
def values(self, start=None, stop=None):
    """Like :meth:`items` but returns only the values."""
    return (item[1] for item in self.items(start, stop))
[ "def", "values", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "return", "(", "item", "[", "1", "]", "for", "item", "in", "self", ".", "items", "(", "start", ",", "stop", ")", ")" ]
Like :meth:`items` but returns only the values.
[ "Like", ":", "meth", ":", "items", "but", "returns", "only", "the", "values", "." ]
python
train
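Since values() is just a projection of items(), a short usage sketch follows; it assumes pyskiplist's documented SkipList.insert(key, value) API, and the printed output is the expected one, not a captured run:

    from pyskiplist import SkipList

    sl = SkipList()
    for key, value in [("b", 2), ("a", 1), ("c", 3)]:
        sl.insert(key, value)   # pairs are kept sorted by key

    # values() keeps only item[1] from each (key, value) pair.
    print(list(sl.values()))    # expected: [1, 2, 3]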
edx/ease
ease/create.py
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/create.py#L90-L134
def create_generic(numeric_values, textual_values, target, algorithm=util_functions.AlgorithmTypes.regression):
    """
    Creates a model from a generic list of numeric values and text values
    numeric_values - A list of lists that are the predictors
    textual_values - A list of lists that are the predictors
    (each item in textual_values corresponds to the similarly indexed counterpart in numeric_values)
    target - The variable that we are trying to predict.  A list of integers.
    algorithm - the type of algorithm that will be used
    """
    algorithm = select_algorithm(target)
    #Initialize a result dictionary to return.
    results = {'errors': [], 'success': False, 'cv_kappa': 0, 'cv_mean_absolute_error': 0,
               'feature_ext': "", 'classifier': "", 'algorithm': algorithm}

    if len(numeric_values) != len(textual_values) or len(numeric_values) != len(target):
        msg = "Target, numeric features, and text features must all be the same length."
        results['errors'].append(msg)
        log.exception(msg)
        return results

    try:
        #Initialize a predictor set object that encapsulates all of the text and numeric predictors
        pset = predictor_set.PredictorSet(essaytype="train")
        for i in xrange(0, len(numeric_values)):
            pset.add_row(numeric_values[i], textual_values[i], target[i])
    except:
        msg = "predictor set creation failed."
        results['errors'].append(msg)
        log.exception(msg)

    try:
        #Extract all features and then train a classifier with the features
        feature_ext, classifier, cv_error_results = model_creator.extract_features_and_generate_model_predictors(pset, algorithm)
        results['cv_kappa'] = cv_error_results['kappa']
        results['cv_mean_absolute_error'] = cv_error_results['mae']
        results['feature_ext'] = feature_ext
        results['classifier'] = classifier
        results['success'] = True
    except:
        msg = "feature extraction and model creation failed."
        results['errors'].append(msg)
        log.exception(msg)

    return results
[ "def", "create_generic", "(", "numeric_values", ",", "textual_values", ",", "target", ",", "algorithm", "=", "util_functions", ".", "AlgorithmTypes", ".", "regression", ")", ":", "algorithm", "=", "select_algorithm", "(", "target", ")", "#Initialize a result dictionary to return.", "results", "=", "{", "'errors'", ":", "[", "]", ",", "'success'", ":", "False", ",", "'cv_kappa'", ":", "0", ",", "'cv_mean_absolute_error'", ":", "0", ",", "'feature_ext'", ":", "\"\"", ",", "'classifier'", ":", "\"\"", ",", "'algorithm'", ":", "algorithm", "}", "if", "len", "(", "numeric_values", ")", "!=", "len", "(", "textual_values", ")", "or", "len", "(", "numeric_values", ")", "!=", "len", "(", "target", ")", ":", "msg", "=", "\"Target, numeric features, and text features must all be the same length.\"", "results", "[", "'errors'", "]", ".", "append", "(", "msg", ")", "log", ".", "exception", "(", "msg", ")", "return", "results", "try", ":", "#Initialize a predictor set object that encapsulates all of the text and numeric predictors", "pset", "=", "predictor_set", ".", "PredictorSet", "(", "essaytype", "=", "\"train\"", ")", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "numeric_values", ")", ")", ":", "pset", ".", "add_row", "(", "numeric_values", "[", "i", "]", ",", "textual_values", "[", "i", "]", ",", "target", "[", "i", "]", ")", "except", ":", "msg", "=", "\"predictor set creation failed.\"", "results", "[", "'errors'", "]", ".", "append", "(", "msg", ")", "log", ".", "exception", "(", "msg", ")", "try", ":", "#Extract all features and then train a classifier with the features", "feature_ext", ",", "classifier", ",", "cv_error_results", "=", "model_creator", ".", "extract_features_and_generate_model_predictors", "(", "pset", ",", "algorithm", ")", "results", "[", "'cv_kappa'", "]", "=", "cv_error_results", "[", "'kappa'", "]", "results", "[", "'cv_mean_absolute_error'", "]", "=", "cv_error_results", "[", "'mae'", "]", "results", "[", "'feature_ext'", "]", "=", "feature_ext", "results", "[", "'classifier'", "]", "=", "classifier", "results", "[", "'success'", "]", "=", "True", "except", ":", "msg", "=", "\"feature extraction and model creation failed.\"", "results", "[", "'errors'", "]", ".", "append", "(", "msg", ")", "log", ".", "exception", "(", "msg", ")", "return", "results" ]
Creates a model from a generic list of numeric values and text values numeric_values - A list of lists that are the predictors textual_values - A list of lists that are the predictors (each item in textual_values corresponds to the similarly indexed counterpart in numeric_values) target - The variable that we are trying to predict. A list of integers. algorithm - the type of algorithm that will be used
[ "Creates", "a", "model", "from", "a", "generic", "list", "numeric", "values", "and", "text", "values", "numeric_values", "-", "A", "list", "of", "lists", "that", "are", "the", "predictors", "textual_values", "-", "A", "list", "of", "lists", "that", "are", "the", "predictors", "(", "each", "item", "in", "textual_values", "corresponds", "to", "the", "similarly", "indexed", "counterpart", "in", "numeric_values", ")", "target", "-", "The", "variable", "that", "we", "are", "trying", "to", "predict", ".", "A", "list", "of", "integers", ".", "algorithm", "-", "the", "type", "of", "algorithm", "that", "will", "be", "used" ]
python
valid
jxtech/wechatpy
wechatpy/pay/api/withhold.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/pay/api/withhold.py#L158-L174
def query_order(self, transaction_id=None, out_trade_no=None):
    """
    Query order API

    :param transaction_id: One of the two is required. The WeChat order
        number; used preferentially when provided.
    :param out_trade_no: One of the two is required. The merchant
        system's internal order number; must be passed when
        transaction_id is not provided.
    :return: the returned result data
    """
    if not transaction_id and not out_trade_no:
        raise ValueError("transaction_id and out_trade_no must be a choice.")
    data = {
        "appid": self.appid,
        "mch_id": self.mch_id,
        "transaction_id": transaction_id,
        "out_trade_no": out_trade_no,
    }
    return self._post("pay/paporderquery", data=data)
[ "def", "query_order", "(", "self", ",", "transaction_id", "=", "None", ",", "out_trade_no", "=", "None", ")", ":", "if", "not", "transaction_id", "and", "not", "out_trade_no", ":", "raise", "ValueError", "(", "\"transaction_id and out_trade_no must be a choice.\"", ")", "data", "=", "{", "\"appid\"", ":", "self", ".", "appid", ",", "\"mch_id\"", ":", "self", ".", "mch_id", ",", "\"transaction_id\"", ":", "transaction_id", ",", "\"out_trade_no\"", ":", "out_trade_no", ",", "}", "return", "self", ".", "_post", "(", "\"pay/paporderquery\"", ",", "data", "=", "data", ")" ]
Query order API :param transaction_id: One of the two is required. The WeChat order number; used preferentially when provided. :param out_trade_no: One of the two is required. The merchant system's internal order number; must be passed when transaction_id is not provided. :return: the returned result data
[ "查询订单", "api" ]
python
train
riga/tfdeploy
tfdeploy.py
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L863-L877
def print_tensor(td_tensor, indent="|   ", max_depth=-1, depth=0):
    """ print_tensor(td_tensor, indent="    ", max_depth=-1)
    Prints the dependency graph of a :py:class:`Tensor` *td_tensor*, where each new level is
    indented by *indent*. When *max_depth* is positive, the graph is truncated at that depth,
    where each tensor and each op count as a level.
    """
    offset = depth * indent
    line = "td tensor: %s" % td_tensor.name
    if td_tensor.value is not None:
        line += " (%s)" % (",".join(str(i) for i in td_tensor.value.shape),)
    print(offset + line)

    if td_tensor.op and (max_depth < 0 or max_depth > depth):
        print_op(td_tensor.op, indent=indent, max_depth=max_depth, depth=depth + 1)
[ "def", "print_tensor", "(", "td_tensor", ",", "indent", "=", "\"| \"", ",", "max_depth", "=", "-", "1", ",", "depth", "=", "0", ")", ":", "offset", "=", "depth", "*", "indent", "line", "=", "\"td tensor: %s\"", "%", "td_tensor", ".", "name", "if", "td_tensor", ".", "value", "is", "not", "None", ":", "line", "+=", "\" (%s)\"", "%", "(", "\",\"", ".", "join", "(", "str", "(", "i", ")", "for", "i", "in", "td_tensor", ".", "value", ".", "shape", ")", ",", ")", "print", "(", "offset", "+", "line", ")", "if", "td_tensor", ".", "op", "and", "(", "max_depth", "<", "0", "or", "max_depth", ">", "depth", ")", ":", "print_op", "(", "td_tensor", ".", "op", ",", "indent", "=", "indent", ",", "max_depth", "=", "max_depth", ",", "depth", "=", "depth", "+", "1", ")" ]
print_tensor(td_tensor, indent=" ", max_depth=-1) Prints the dependency graph of a :py:class:`Tensor` *td_tensor*, where each new level is indented by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each tensor and each op count as a level.
[ "print_tensor", "(", "td_tensor", "indent", "=", "max_depth", "=", "-", "1", ")", "Prints", "the", "dependency", "graph", "of", "a", ":", "py", ":", "class", ":", "Tensor", "*", "td_tensor", "*", "where", "each", "new", "level", "is", "indented", "by", "*", "indent", "*", ".", "When", "*", "max_depth", "*", "is", "positive", "the", "graph", "is", "truncated", "at", "that", "depth", "where", "each", "tensor", "and", "each", "op", "count", "as", "a", "level", "." ]
python
train
mcs07/ChemDataExtractor
chemdataextractor/cli/cem.py
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/cem.py#L31-L44
def train_crf(ctx, input, output, clusters):
    """Train CRF CEM recognizer."""
    click.echo('chemdataextractor.crf.train')
    sentences = []
    for line in input:
        sentence = []
        for t in line.split():
            token, tag, iob = t.rsplit('/', 2)
            sentence.append(((token, tag), iob))
        if sentence:
            sentences.append(sentence)
    tagger = CrfCemTagger(clusters=clusters)
    tagger.train(sentences, output)
[ "def", "train_crf", "(", "ctx", ",", "input", ",", "output", ",", "clusters", ")", ":", "click", ".", "echo", "(", "'chemdataextractor.crf.train'", ")", "sentences", "=", "[", "]", "for", "line", "in", "input", ":", "sentence", "=", "[", "]", "for", "t", "in", "line", ".", "split", "(", ")", ":", "token", ",", "tag", ",", "iob", "=", "t", ".", "rsplit", "(", "'/'", ",", "2", ")", "sentence", ".", "append", "(", "(", "(", "token", ",", "tag", ")", ",", "iob", ")", ")", "if", "sentence", ":", "sentences", ".", "append", "(", "sentence", ")", "tagger", "=", "CrfCemTagger", "(", "clusters", "=", "clusters", ")", "tagger", ".", "train", "(", "sentences", ",", "output", ")" ]
Train CRF CEM recognizer.
[ "Train", "CRF", "CEM", "recognizer", "." ]
python
train
GNS3/gns3-server
gns3server/controller/__init__.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/__init__.py#L634-L643
def instance():
    """
    Singleton to return only one instance of Controller.

    :returns: instance of Controller
    """
    if not hasattr(Controller, '_instance') or Controller._instance is None:
        Controller._instance = Controller()
    return Controller._instance
[ "def", "instance", "(", ")", ":", "if", "not", "hasattr", "(", "Controller", ",", "'_instance'", ")", "or", "Controller", ".", "_instance", "is", "None", ":", "Controller", ".", "_instance", "=", "Controller", "(", ")", "return", "Controller", ".", "_instance" ]
Singleton to return only one instance of Controller. :returns: instance of Controller
[ "Singleton", "to", "return", "only", "on", "instance", "of", "Controller", "." ]
python
train
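The method above is the usual lazy module-level singleton. Here is a self-contained sketch of the same idiom with a hypothetical Service class (not the GNS3 Controller itself):

    class Service:
        """Hypothetical class using the same lazy-singleton idiom."""
        _instance = None

        @staticmethod
        def instance():
            # Create on first access, then hand back the cached object.
            if Service._instance is None:
                Service._instance = Service()
            return Service._instance

    assert Service.instance() is Service.instance()  # one shared object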
EpistasisLab/tpot
tpot/operator_utils.py
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/operator_utils.py#L138-L303
def TPOTOperatorClassFactory(opsourse, opdict, BaseClass=Operator, ArgBaseClass=ARGType, verbose=0):
    """Dynamically create operator class.

    Parameters
    ----------
    opsourse: string
        operator source in config dictionary (key)
    opdict: dictionary
        operator params in config dictionary (value)
    regression: bool
        True if it can be used in TPOTRegressor
    classification: bool
        True if it can be used in TPOTClassifier
    BaseClass: Class
        inherited BaseClass for operator
    ArgBaseClass: Class
        inherited BaseClass for parameter
    verbose: int, optional (default: 0)
        How much information TPOT communicates while it's running.
        0 = none, 1 = minimal, 2 = high, 3 = all.
        if verbose > 2 then ImportError will raise during initialization

    Returns
    -------
    op_class: Class
        a new class for an operator
    arg_types: list
        a list of parameter classes

    """
    class_profile = {}
    dep_op_list = {}  # list of nested estimator/callable function
    dep_op_type = {}  # type of nested estimator/callable function
    import_str, op_str, op_obj = source_decode(opsourse, verbose=verbose)

    if not op_obj:
        return None, None
    else:
        # define if the operator can be the root of a pipeline
        if issubclass(op_obj, ClassifierMixin):
            class_profile['root'] = True
            optype = "Classifier"
        elif issubclass(op_obj, RegressorMixin):
            class_profile['root'] = True
            optype = "Regressor"
        if issubclass(op_obj, TransformerMixin):
            optype = "Transformer"
        if issubclass(op_obj, SelectorMixin):
            optype = "Selector"

        @classmethod
        def op_type(cls):
            """Return the operator type.

            Possible values:
                "Classifier", "Regressor", "Selector", "Transformer"
            """
            return optype

        class_profile['type'] = op_type
        class_profile['sklearn_class'] = op_obj
        import_hash = {}
        import_hash[import_str] = [op_str]
        arg_types = []

        for pname in sorted(opdict.keys()):
            prange = opdict[pname]
            if not isinstance(prange, dict):
                classname = '{}__{}'.format(op_str, pname)
                arg_types.append(ARGTypeClassFactory(classname, prange, ArgBaseClass))
            else:
                for dkey, dval in prange.items():
                    dep_import_str, dep_op_str, dep_op_obj = source_decode(dkey, verbose=verbose)
                    if dep_import_str in import_hash:
                        import_hash[import_str].append(dep_op_str)
                    else:
                        import_hash[dep_import_str] = [dep_op_str]
                    dep_op_list[pname] = dep_op_str
                    dep_op_type[pname] = dep_op_obj
                    if dval:
                        for dpname in sorted(dval.keys()):
                            dprange = dval[dpname]
                            classname = '{}__{}__{}'.format(op_str, dep_op_str, dpname)
                            arg_types.append(ARGTypeClassFactory(classname, dprange, ArgBaseClass))

        class_profile['arg_types'] = tuple(arg_types)
        class_profile['import_hash'] = import_hash
        class_profile['dep_op_list'] = dep_op_list
        class_profile['dep_op_type'] = dep_op_type

        @classmethod
        def parameter_types(cls):
            """Return the argument and return types of an operator.

            Parameters
            ----------
            None

            Returns
            -------
            parameter_types: tuple
                Tuple of the DEAP parameter types and the DEAP return type for the
                operator

            """
            return ([np.ndarray] + arg_types, np.ndarray)  # (input types, return types)

        class_profile['parameter_types'] = parameter_types

        @classmethod
        def export(cls, *args):
            """Represent the operator as a string so that it can be exported to a file.

            Parameters
            ----------
            args
                Arbitrary arguments to be passed to the operator

            Returns
            -------
            export_string: str
                String representation of the sklearn class with its parameters in
                the format:
                SklearnClassName(param1="val1", param2=val2)

            """
            op_arguments = []

            if dep_op_list:
                dep_op_arguments = {}
                for dep_op_str in dep_op_list.values():
                    dep_op_arguments[dep_op_str] = []

            for arg_class, arg_value in zip(arg_types, args):
                aname_split = arg_class.__name__.split('__')
                if isinstance(arg_value, str):
                    arg_value = '"{}"'.format(arg_value)
                if len(aname_split) == 2:  # simple parameter
                    op_arguments.append("{}={}".format(aname_split[-1], arg_value))
                # Parameter of internal operator as a parameter in the
                # operator, usually in Selector
                else:
                    dep_op_arguments[aname_split[1]].append("{}={}".format(aname_split[-1], arg_value))

            tmp_op_args = []
            if dep_op_list:
                # To make sure the initial operator is the first parameter, just
                # for better presentation
                for dep_op_pname, dep_op_str in dep_op_list.items():
                    arg_value = dep_op_str  # a callable function, e.g. a scoring function
                    doptype = dep_op_type[dep_op_pname]
                    if inspect.isclass(doptype):  # an estimator
                        if issubclass(doptype, BaseEstimator) or \
                                issubclass(doptype, ClassifierMixin) or \
                                issubclass(doptype, RegressorMixin) or \
                                issubclass(doptype, TransformerMixin):
                            arg_value = "{}({})".format(dep_op_str, ", ".join(dep_op_arguments[dep_op_str]))
                    tmp_op_args.append("{}={}".format(dep_op_pname, arg_value))
            op_arguments = tmp_op_args + op_arguments

            return "{}({})".format(op_obj.__name__, ", ".join(op_arguments))

        class_profile['export'] = export

        op_classname = 'TPOT_{}'.format(op_str)
        op_class = type(op_classname, (BaseClass,), class_profile)
        op_class.__name__ = op_str
        return op_class, arg_types
[ "def", "TPOTOperatorClassFactory", "(", "opsourse", ",", "opdict", ",", "BaseClass", "=", "Operator", ",", "ArgBaseClass", "=", "ARGType", ",", "verbose", "=", "0", ")", ":", "class_profile", "=", "{", "}", "dep_op_list", "=", "{", "}", "# list of nested estimator/callable function", "dep_op_type", "=", "{", "}", "# type of nested estimator/callable function", "import_str", ",", "op_str", ",", "op_obj", "=", "source_decode", "(", "opsourse", ",", "verbose", "=", "verbose", ")", "if", "not", "op_obj", ":", "return", "None", ",", "None", "else", ":", "# define if the operator can be the root of a pipeline", "if", "issubclass", "(", "op_obj", ",", "ClassifierMixin", ")", ":", "class_profile", "[", "'root'", "]", "=", "True", "optype", "=", "\"Classifier\"", "elif", "issubclass", "(", "op_obj", ",", "RegressorMixin", ")", ":", "class_profile", "[", "'root'", "]", "=", "True", "optype", "=", "\"Regressor\"", "if", "issubclass", "(", "op_obj", ",", "TransformerMixin", ")", ":", "optype", "=", "\"Transformer\"", "if", "issubclass", "(", "op_obj", ",", "SelectorMixin", ")", ":", "optype", "=", "\"Selector\"", "@", "classmethod", "def", "op_type", "(", "cls", ")", ":", "\"\"\"Return the operator type.\n\n Possible values:\n \"Classifier\", \"Regressor\", \"Selector\", \"Transformer\"\n \"\"\"", "return", "optype", "class_profile", "[", "'type'", "]", "=", "op_type", "class_profile", "[", "'sklearn_class'", "]", "=", "op_obj", "import_hash", "=", "{", "}", "import_hash", "[", "import_str", "]", "=", "[", "op_str", "]", "arg_types", "=", "[", "]", "for", "pname", "in", "sorted", "(", "opdict", ".", "keys", "(", ")", ")", ":", "prange", "=", "opdict", "[", "pname", "]", "if", "not", "isinstance", "(", "prange", ",", "dict", ")", ":", "classname", "=", "'{}__{}'", ".", "format", "(", "op_str", ",", "pname", ")", "arg_types", ".", "append", "(", "ARGTypeClassFactory", "(", "classname", ",", "prange", ",", "ArgBaseClass", ")", ")", "else", ":", "for", "dkey", ",", "dval", "in", "prange", ".", "items", "(", ")", ":", "dep_import_str", ",", "dep_op_str", ",", "dep_op_obj", "=", "source_decode", "(", "dkey", ",", "verbose", "=", "verbose", ")", "if", "dep_import_str", "in", "import_hash", ":", "import_hash", "[", "import_str", "]", ".", "append", "(", "dep_op_str", ")", "else", ":", "import_hash", "[", "dep_import_str", "]", "=", "[", "dep_op_str", "]", "dep_op_list", "[", "pname", "]", "=", "dep_op_str", "dep_op_type", "[", "pname", "]", "=", "dep_op_obj", "if", "dval", ":", "for", "dpname", "in", "sorted", "(", "dval", ".", "keys", "(", ")", ")", ":", "dprange", "=", "dval", "[", "dpname", "]", "classname", "=", "'{}__{}__{}'", ".", "format", "(", "op_str", ",", "dep_op_str", ",", "dpname", ")", "arg_types", ".", "append", "(", "ARGTypeClassFactory", "(", "classname", ",", "dprange", ",", "ArgBaseClass", ")", ")", "class_profile", "[", "'arg_types'", "]", "=", "tuple", "(", "arg_types", ")", "class_profile", "[", "'import_hash'", "]", "=", "import_hash", "class_profile", "[", "'dep_op_list'", "]", "=", "dep_op_list", "class_profile", "[", "'dep_op_type'", "]", "=", "dep_op_type", "@", "classmethod", "def", "parameter_types", "(", "cls", ")", ":", "\"\"\"Return the argument and return types of an operator.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n parameter_types: tuple\n Tuple of the DEAP parameter types and the DEAP return type for the\n operator\n\n \"\"\"", "return", "(", "[", "np", ".", "ndarray", "]", "+", "arg_types", ",", "np", ".", "ndarray", ")", "# (input types, return 
types)", "class_profile", "[", "'parameter_types'", "]", "=", "parameter_types", "@", "classmethod", "def", "export", "(", "cls", ",", "*", "args", ")", ":", "\"\"\"Represent the operator as a string so that it can be exported to a file.\n\n Parameters\n ----------\n args\n Arbitrary arguments to be passed to the operator\n\n Returns\n -------\n export_string: str\n String representation of the sklearn class with its parameters in\n the format:\n SklearnClassName(param1=\"val1\", param2=val2)\n\n \"\"\"", "op_arguments", "=", "[", "]", "if", "dep_op_list", ":", "dep_op_arguments", "=", "{", "}", "for", "dep_op_str", "in", "dep_op_list", ".", "values", "(", ")", ":", "dep_op_arguments", "[", "dep_op_str", "]", "=", "[", "]", "for", "arg_class", ",", "arg_value", "in", "zip", "(", "arg_types", ",", "args", ")", ":", "aname_split", "=", "arg_class", ".", "__name__", ".", "split", "(", "'__'", ")", "if", "isinstance", "(", "arg_value", ",", "str", ")", ":", "arg_value", "=", "'\\\"{}\\\"'", ".", "format", "(", "arg_value", ")", "if", "len", "(", "aname_split", ")", "==", "2", ":", "# simple parameter", "op_arguments", ".", "append", "(", "\"{}={}\"", ".", "format", "(", "aname_split", "[", "-", "1", "]", ",", "arg_value", ")", ")", "# Parameter of internal operator as a parameter in the", "# operator, usually in Selector", "else", ":", "dep_op_arguments", "[", "aname_split", "[", "1", "]", "]", ".", "append", "(", "\"{}={}\"", ".", "format", "(", "aname_split", "[", "-", "1", "]", ",", "arg_value", ")", ")", "tmp_op_args", "=", "[", "]", "if", "dep_op_list", ":", "# To make sure the inital operators is the first parameter just", "# for better persentation", "for", "dep_op_pname", ",", "dep_op_str", "in", "dep_op_list", ".", "items", "(", ")", ":", "arg_value", "=", "dep_op_str", "# a callable function, e.g scoring function", "doptype", "=", "dep_op_type", "[", "dep_op_pname", "]", "if", "inspect", ".", "isclass", "(", "doptype", ")", ":", "# a estimator", "if", "issubclass", "(", "doptype", ",", "BaseEstimator", ")", "or", "issubclass", "(", "doptype", ",", "ClassifierMixin", ")", "or", "issubclass", "(", "doptype", ",", "RegressorMixin", ")", "or", "issubclass", "(", "doptype", ",", "TransformerMixin", ")", ":", "arg_value", "=", "\"{}({})\"", ".", "format", "(", "dep_op_str", ",", "\", \"", ".", "join", "(", "dep_op_arguments", "[", "dep_op_str", "]", ")", ")", "tmp_op_args", ".", "append", "(", "\"{}={}\"", ".", "format", "(", "dep_op_pname", ",", "arg_value", ")", ")", "op_arguments", "=", "tmp_op_args", "+", "op_arguments", "return", "\"{}({})\"", ".", "format", "(", "op_obj", ".", "__name__", ",", "\", \"", ".", "join", "(", "op_arguments", ")", ")", "class_profile", "[", "'export'", "]", "=", "export", "op_classname", "=", "'TPOT_{}'", ".", "format", "(", "op_str", ")", "op_class", "=", "type", "(", "op_classname", ",", "(", "BaseClass", ",", ")", ",", "class_profile", ")", "op_class", ".", "__name__", "=", "op_str", "return", "op_class", ",", "arg_types" ]
Dynamically create operator class. Parameters ---------- opsourse: string operator source in config dictionary (key) opdict: dictionary operator params in config dictionary (value) regression: bool True if it can be used in TPOTRegressor classification: bool True if it can be used in TPOTClassifier BaseClass: Class inherited BaseClass for operator ArgBaseClass: Class inherited BaseClass for parameter verbose: int, optional (default: 0) How much information TPOT communicates while it's running. 0 = none, 1 = minimal, 2 = high, 3 = all. if verbose > 2 then ImportError will raise during initialization Returns ------- op_class: Class a new class for an operator arg_types: list a list of parameter classes
[ "Dynamically", "create", "operator", "class", "." ]
python
train
nanoporetech/ont_fast5_api
ont_fast5_api/fast5_file.py
https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L725-L733
def _get_attributes(self, path):
    """
    :param path: filepath within fast5
    :return: dictionary of attributes found at ``path``
    :rtype: dict
    """
    path_grp = self.handle[path]
    path_attr = path_grp.attrs
    return dict(path_attr)
[ "def", "_get_attributes", "(", "self", ",", "path", ")", ":", "path_grp", "=", "self", ".", "handle", "[", "path", "]", "path_attr", "=", "path_grp", ".", "attrs", "return", "dict", "(", "path_attr", ")" ]
:param path: filepath within fast5 :return: dictionary of attributes found at ``path`` :rtype: dict
[ ":", "param", "path", ":", "filepath", "within", "fast5", ":", "return", ":", "dictionary", "of", "attributes", "found", "at", "path", ":", "rtype", "dict" ]
python
train
victorlei/smop
smop/parse.py
https://github.com/victorlei/smop/blob/bdad96b715d1dd75ce8ab4724f76b9b1bb1f61cd/smop/parse.py#L811-L816
def p_unwind(p):
    """
    unwind : UNWIND_PROTECT stmt_list UNWIND_PROTECT_CLEANUP stmt_list END_UNWIND_PROTECT
    """
    p[0] = node.try_catch(try_stmt=p[2],
                          catch_stmt=node.expr_list(),
                          finally_stmt=p[4])
[ "def", "p_unwind", "(", "p", ")", ":", "p", "[", "0", "]", "=", "node", ".", "try_catch", "(", "try_stmt", "=", "p", "[", "2", "]", ",", "catch_stmt", "=", "node", ".", "expr_list", "(", ")", ",", "finally_stmt", "=", "p", "[", "4", "]", ")" ]
unwind : UNWIND_PROTECT stmt_list UNWIND_PROTECT_CLEANUP stmt_list END_UNWIND_PROTECT
[ "unwind", ":", "UNWIND_PROTECT", "stmt_list", "UNWIND_PROTECT_CLEANUP", "stmt_list", "END_UNWIND_PROTECT" ]
python
train
reingart/gui2py
gui/tools/designer.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/tools/designer.py#L267-L301
def mouse_up(self, evt, wx_obj=None):
    "Release the selected object (pass a wx_obj if the event was captured)"
    self.resizing = False
    if self.current:
        wx_obj = self.current
        if self.parent.wx_obj.HasCapture():
            self.parent.wx_obj.ReleaseMouse()
        self.current = None
    if self.overlay:
        # When the mouse is released we reset the overlay and it
        # restores the former content to the window.
        dc = wx.ClientDC(wx_obj)
        odc = wx.DCOverlay(self.overlay, dc)
        odc.Clear()
        del odc
        self.overlay.Reset()
        self.overlay = None
        pos = evt.GetPosition()
        # convert to relative client coordinates of the container:
        if evt.GetEventObject() != wx_obj:
            pos = evt.GetEventObject().ClientToScreen(pos)  # frame
            pos = wx_obj.ScreenToClient(pos)                # panel
        # finish the multiple selection using the mouse:
        rect = wx.RectPP(self.pos, pos)
        for obj in wx_obj.obj:
            # only check child controls (not menubar/statusbar)
            if isinstance(obj, Control):
                obj_rect = obj.wx_obj.GetRect()
                if rect.ContainsRect(obj_rect):
                    self.select(obj, keep_selection=True)
        self.pos = None
    if self.inspector and wx_obj:
        self.inspector.inspect(wx_obj.obj)
    if DEBUG: print "SELECTION", self.selection
[ "def", "mouse_up", "(", "self", ",", "evt", ",", "wx_obj", "=", "None", ")", ":", "self", ".", "resizing", "=", "False", "if", "self", ".", "current", ":", "wx_obj", "=", "self", ".", "current", "if", "self", ".", "parent", ".", "wx_obj", ".", "HasCapture", "(", ")", ":", "self", ".", "parent", ".", "wx_obj", ".", "ReleaseMouse", "(", ")", "self", ".", "current", "=", "None", "if", "self", ".", "overlay", ":", "# When the mouse is released we reset the overlay and it ", "# restores the former content to the window. ", "dc", "=", "wx", ".", "ClientDC", "(", "wx_obj", ")", "odc", "=", "wx", ".", "DCOverlay", "(", "self", ".", "overlay", ",", "dc", ")", "odc", ".", "Clear", "(", ")", "del", "odc", "self", ".", "overlay", ".", "Reset", "(", ")", "self", ".", "overlay", "=", "None", "pos", "=", "evt", ".", "GetPosition", "(", ")", "# convert to relative client coordinates of the container:", "if", "evt", ".", "GetEventObject", "(", ")", "!=", "wx_obj", ":", "pos", "=", "evt", ".", "GetEventObject", "(", ")", ".", "ClientToScreen", "(", "pos", ")", "# frame", "pos", "=", "wx_obj", ".", "ScreenToClient", "(", "pos", ")", "# panel", "# finish the multiple selection using the mouse:", "rect", "=", "wx", ".", "RectPP", "(", "self", ".", "pos", ",", "pos", ")", "for", "obj", "in", "wx_obj", ".", "obj", ":", "# only check child controls (not menubar/statusbar)", "if", "isinstance", "(", "obj", ",", "Control", ")", ":", "obj_rect", "=", "obj", ".", "wx_obj", ".", "GetRect", "(", ")", "if", "rect", ".", "ContainsRect", "(", "obj_rect", ")", ":", "self", ".", "select", "(", "obj", ",", "keep_selection", "=", "True", ")", "self", ".", "pos", "=", "None", "if", "self", ".", "inspector", "and", "wx_obj", ":", "self", ".", "inspector", ".", "inspect", "(", "wx_obj", ".", "obj", ")", "if", "DEBUG", ":", "print", "\"SELECTION\"", ",", "self", ".", "selection" ]
Release the selected object (pass a wx_obj if the event was captured)
[ "Release", "the", "selected", "object", "(", "pass", "a", "wx_obj", "if", "the", "event", "was", "captured", ")" ]
python
test
spyder-ide/spyder
spyder/plugins/projects/widgets/explorer.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/widgets/explorer.py#L72-L82
def dragMoveEvent(self, event):
    """Reimplement Qt method"""
    index = self.indexAt(event.pos())
    if index:
        dst = self.get_filename(index)
        if osp.isdir(dst):
            event.acceptProposedAction()
        else:
            event.ignore()
    else:
        event.ignore()
[ "def", "dragMoveEvent", "(", "self", ",", "event", ")", ":", "index", "=", "self", ".", "indexAt", "(", "event", ".", "pos", "(", ")", ")", "if", "index", ":", "dst", "=", "self", ".", "get_filename", "(", "index", ")", "if", "osp", ".", "isdir", "(", "dst", ")", ":", "event", ".", "acceptProposedAction", "(", ")", "else", ":", "event", ".", "ignore", "(", ")", "else", ":", "event", ".", "ignore", "(", ")" ]
Reimplement Qt method
[ "Reimplement", "Qt", "method" ]
python
train
inasafe/inasafe
safe/impact_function/impact_function.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/impact_function/impact_function.py#L335-L450
def is_equal(self, other):
    """Equality checker with message

    :param other: Other Impact Function to be compared.
    :type other: ImpactFunction

    :returns: True if both are the same IF, otherwise False and the message.
    :rtype: bool, str
    """
    properties = [
        'debug_mode',
        'use_rounding',
        'requested_extent',
        'crs',
        'analysis_extent',
        'datastore',
        'name',
        'title',
        'start_datetime',
        'end_datetime',
        'duration',
        'earthquake_function',
        # 'performance_log',  # I don't think we need this one
        'hazard',
        'exposure',
        'aggregation',
        # Output layers on a new IF object will have different provenance
        # data from the one in the original IF.
        # 'impact',
        # 'exposure_summary',
        # 'aggregate_hazard_impacted',
        # 'aggregation_summary',
        # 'analysis_impacted',
        # 'exposure_summary_table',
        'profiling',
    ]
    for if_property in properties:
        # Skip if it's debug mode for profiling
        if self.debug_mode:
            if if_property == 'profiling':
                continue
        try:
            property_a = getattr(self, if_property)
            property_b = getattr(other, if_property)
            if not isinstance(property_a, type(property_b)):
                message = (
                    'Different type of property %s.\nA: %s\nB: %s' % (
                        if_property, type(property_a), type(property_b)))
                return False, message
            if isinstance(property_a, QgsMapLayer):
                if byteify(property_a.keywords) != byteify(
                        property_b.keywords):
                    message = (
                        'Keyword Layer is not equal is %s' % if_property)
                    return False, message
                if isinstance(property_a, QgsVectorLayer):
                    fields_a = [f.name() for f in property_a.fields()]
                    fields_b = [f.name() for f in property_b.fields()]
                    if fields_a != fields_b:
                        message = (
                            'Layer fields is not equal for %s' % if_property)
                        return False, message
                    if (property_a.featureCount()
                            != property_b.featureCount()):
                        message = (
                            'Feature count is not equal for %s' % if_property)
                        return False, message
            elif isinstance(property_a, QgsGeometry):
                if not property_a.equals(property_b):
                    string_a = property_a.asWkt()
                    string_b = property_b.asWkt()
                    message = (
                        '[Non Layer] The not equal property is %s.\n'
                        'A: %s\nB: %s' % (if_property, string_a, string_b))
                    return False, message
            elif isinstance(property_a, DataStore):
                if property_a.uri_path != property_b.uri_path:
                    string_a = property_a.uri_path
                    string_b = property_b.uri_path
                    message = (
                        '[Non Layer] The not equal property is %s.\n'
                        'A: %s\nB: %s' % (if_property, string_a, string_b))
                    return False, message
            else:
                if property_a != property_b:
                    string_a = property_a
                    string_b = property_b
                    message = (
                        '[Non Layer] The not equal property is %s.\n'
                        'A: %s\nB: %s' % (if_property, string_a, string_b))
                    return False, message
        except AttributeError as e:
            message = (
                'Property %s is not found. The exception is %s' % (
                    if_property, e))
            return False, message
        except IndexError as e:
            if if_property == 'impact':
                continue
            else:
                message = (
                    'Property %s is out of index. The exception is %s' % (
                        if_property, e))
                return False, message
        except Exception as e:
            message = (
                'Error on %s with error message %s' % (if_property, e))
            return False, message
    return True, ''
[ "def", "is_equal", "(", "self", ",", "other", ")", ":", "properties", "=", "[", "'debug_mode'", ",", "'use_rounding'", ",", "'requested_extent'", ",", "'crs'", ",", "'analysis_extent'", ",", "'datastore'", ",", "'name'", ",", "'title'", ",", "'start_datetime'", ",", "'end_datetime'", ",", "'duration'", ",", "'earthquake_function'", ",", "# 'performance_log', # I don't think need we need this one", "'hazard'", ",", "'exposure'", ",", "'aggregation'", ",", "# Output layers on new IF object will have a different provenance", "# data with the one from original IF.", "# 'impact',", "# 'exposure_summary',", "# 'aggregate_hazard_impacted',", "# 'aggregation_summary',", "# 'analysis_impacted',", "# 'exposure_summary_table',", "'profiling'", ",", "]", "for", "if_property", "in", "properties", ":", "# Skip if it's debug mode for profiling", "if", "self", ".", "debug_mode", ":", "if", "if_property", "==", "'profiling'", ":", "continue", "try", ":", "property_a", "=", "getattr", "(", "self", ",", "if_property", ")", "property_b", "=", "getattr", "(", "other", ",", "if_property", ")", "if", "not", "isinstance", "(", "property_a", ",", "type", "(", "property_b", ")", ")", ":", "message", "=", "(", "'Different type of property %s.\\nA: %s\\nB: %s'", "%", "(", "if_property", ",", "type", "(", "property_a", ")", ",", "type", "(", "property_b", ")", ")", ")", "return", "False", ",", "message", "if", "isinstance", "(", "property_a", ",", "QgsMapLayer", ")", ":", "if", "byteify", "(", "property_a", ".", "keywords", ")", "!=", "byteify", "(", "property_b", ".", "keywords", ")", ":", "message", "=", "(", "'Keyword Layer is not equal is %s'", "%", "if_property", ")", "return", "False", ",", "message", "if", "isinstance", "(", "property_a", ",", "QgsVectorLayer", ")", ":", "fields_a", "=", "[", "f", ".", "name", "(", ")", "for", "f", "in", "property_a", ".", "fields", "(", ")", "]", "fields_b", "=", "[", "f", ".", "name", "(", ")", "for", "f", "in", "property_b", ".", "fields", "(", ")", "]", "if", "fields_a", "!=", "fields_b", ":", "message", "=", "(", "'Layer fields is not equal for %s'", "%", "if_property", ")", "return", "False", ",", "message", "if", "(", "property_a", ".", "featureCount", "(", ")", "!=", "property_b", ".", "featureCount", "(", ")", ")", ":", "message", "=", "(", "'Feature count is not equal for %s'", "%", "if_property", ")", "return", "False", ",", "message", "elif", "isinstance", "(", "property_a", ",", "QgsGeometry", ")", ":", "if", "not", "property_a", ".", "equals", "(", "property_b", ")", ":", "string_a", "=", "property_a", ".", "asWkt", "(", ")", "string_b", "=", "property_b", ".", "asWkt", "(", ")", "message", "=", "(", "'[Non Layer] The not equal property is %s.\\n'", "'A: %s\\nB: %s'", "%", "(", "if_property", ",", "string_a", ",", "string_b", ")", ")", "return", "False", ",", "message", "elif", "isinstance", "(", "property_a", ",", "DataStore", ")", ":", "if", "property_a", ".", "uri_path", "!=", "property_b", ".", "uri_path", ":", "string_a", "=", "property_a", ".", "uri_path", "string_b", "=", "property_b", ".", "uri_path", "message", "=", "(", "'[Non Layer] The not equal property is %s.\\n'", "'A: %s\\nB: %s'", "%", "(", "if_property", ",", "string_a", ",", "string_b", ")", ")", "return", "False", ",", "message", "else", ":", "if", "property_a", "!=", "property_b", ":", "string_a", "=", "property_a", "string_b", "=", "property_b", "message", "=", "(", "'[Non Layer] The not equal property is %s.\\n'", "'A: %s\\nB: %s'", "%", "(", "if_property", ",", "string_a", ",", 
"string_b", ")", ")", "return", "False", ",", "message", "except", "AttributeError", "as", "e", ":", "message", "=", "(", "'Property %s is not found. The exception is %s'", "%", "(", "if_property", ",", "e", ")", ")", "return", "False", ",", "message", "except", "IndexError", "as", "e", ":", "if", "if_property", "==", "'impact'", ":", "continue", "else", ":", "message", "=", "(", "'Property %s is out of index. The exception is %s'", "%", "(", "if_property", ",", "e", ")", ")", "return", "False", ",", "message", "except", "Exception", "as", "e", ":", "message", "=", "(", "'Error on %s with error message %s'", "%", "(", "if_property", ",", "e", ")", ")", "return", "False", ",", "message", "return", "True", ",", "''" ]
Equality checker with message :param other: Other Impact Function to be compared. :type other: ImpactFunction :returns: True if both are the same IF, otherwise False and the message. :rtype: bool, str
[ "Equality", "checker", "with", "message" ]
python
train
pyecore/pyecoregen
pyecoregen/ecore.py
https://github.com/pyecore/pyecoregen/blob/8c7a792f46d7d94e5d13e00e2967dd237351a4cf/pyecoregen/ecore.py#L30-L35
def folder_path_for_package(cls, package: ecore.EPackage):
    """Returns path to folder holding generated artifact for given element."""
    parent = package.eContainer()
    if parent:
        return os.path.join(cls.folder_path_for_package(parent), package.name)
    return package.name
[ "def", "folder_path_for_package", "(", "cls", ",", "package", ":", "ecore", ".", "EPackage", ")", ":", "parent", "=", "package", ".", "eContainer", "(", ")", "if", "parent", ":", "return", "os", ".", "path", ".", "join", "(", "cls", ".", "folder_path_for_package", "(", "parent", ")", ",", "package", ".", "name", ")", "return", "package", ".", "name" ]
Returns path to folder holding generated artifact for given element.
[ "Returns", "path", "to", "folder", "holding", "generated", "artifact", "for", "given", "element", "." ]
python
train
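The recursion above bottoms out at the root package, so names are joined root-first. A standalone sketch of the same idea, with a hypothetical namedtuple standing in for an EPackage (real EPackages expose eContainer() rather than a parent attribute):

    import os
    from collections import namedtuple

    # Stand-in for an EPackage: just a name plus a parent link.
    Pkg = namedtuple("Pkg", ["name", "parent"])

    def folder_path(pkg):
        # Recurse up the parent chain so the root package name comes first.
        if pkg.parent:
            return os.path.join(folder_path(pkg.parent), pkg.name)
        return pkg.name

    root = Pkg("library", None)
    sub = Pkg("books", root)
    print(folder_path(sub))   # library/books (library\books on Windows)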
Qiskit/qiskit-terra
qiskit/quantum_info/operators/channel/kraus.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/kraus.py#L250-L265
def power(self, n):
    """The matrix power of the channel.

    Args:
        n (int): compute the matrix power of the superoperator matrix.

    Returns:
        Kraus: the matrix power of the SuperOp converted to a Kraus channel.

    Raises:
        QiskitError: if the input and output dimensions of the
            QuantumChannel are not equal, or the power is not an integer.
    """
    if n > 0:
        return super().power(n)
    return Kraus(SuperOp(self).power(n))
[ "def", "power", "(", "self", ",", "n", ")", ":", "if", "n", ">", "0", ":", "return", "super", "(", ")", ".", "power", "(", "n", ")", "return", "Kraus", "(", "SuperOp", "(", "self", ")", ".", "power", "(", "n", ")", ")" ]
The matrix power of the channel. Args: n (int): compute the matrix power of the superoperator matrix. Returns: Kraus: the matrix power of the SuperOp converted to a Kraus channel. Raises: QiskitError: if the input and output dimensions of the QuantumChannel are not equal, or the power is not an integer.
[ "The", "matrix", "power", "of", "the", "channel", "." ]
python
test
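For positive n the method above defers to the parent class; per its docstring, the result is the matrix power of the superoperator matrix. A plain NumPy illustration of that idea on an invented 2x2 matrix (not the Qiskit API):

    import numpy as np

    # Hypothetical superoperator matrix; applying the channel n times
    # corresponds to the n-th matrix power of this matrix.
    s = np.array([[0.9, 0.1],
                  [0.1, 0.9]])
    print(np.linalg.matrix_power(s, 3))   # channel composed with itself 3 times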
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L3222-L3230
def names(self):
    """Set of the variable names in this list"""
    ret = set()
    for arr in self:
        if isinstance(arr, InteractiveList):
            ret.update(arr.names)
        else:
            ret.add(arr.name)
    return ret
[ "def", "names", "(", "self", ")", ":", "ret", "=", "set", "(", ")", "for", "arr", "in", "self", ":", "if", "isinstance", "(", "arr", ",", "InteractiveList", ")", ":", "ret", ".", "update", "(", "arr", ".", "names", ")", "else", ":", "ret", ".", "add", "(", "arr", ".", "name", ")", "return", "ret" ]
Set of the variable names in this list
[ "Set", "of", "the", "variable", "in", "this", "list" ]
python
train
ansible/ansible-runner
ansible_runner/runner_config.py
https://github.com/ansible/ansible-runner/blob/8ce485480a5d0b602428d9d64a752e06fb46cdb8/ansible_runner/runner_config.py#L206-L276
def prepare_env(self):
    """
    Manages reading environment metadata files under ``private_data_dir`` and merging/updating
    with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and
    use them easily
    """
    try:
        passwords = self.loader.load_file('env/passwords', Mapping)
        self.expect_passwords = {
            re.compile(pattern, re.M): password
            for pattern, password in iteritems(passwords)
        }
    except ConfigurationError:
        output.debug('Not loading passwords')
        self.expect_passwords = dict()
    self.expect_passwords[pexpect.TIMEOUT] = None
    self.expect_passwords[pexpect.EOF] = None

    try:
        # seed env with existing shell env
        self.env = os.environ.copy()
        envvars = self.loader.load_file('env/envvars', Mapping)
        if envvars:
            self.env.update({k: six.text_type(v) for k, v in envvars.items()})
        if self.envvars and isinstance(self.envvars, dict):
            self.env.update({k: six.text_type(v) for k, v in self.envvars.items()})
    except ConfigurationError:
        output.debug("Not loading environment vars")
        # Still need to pass default environment to pexpect
        self.env = os.environ.copy()

    try:
        self.settings = self.loader.load_file('env/settings', Mapping)
    except ConfigurationError:
        output.debug("Not loading settings")
        self.settings = dict()

    try:
        self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
    except ConfigurationError:
        output.debug("Not loading ssh key")
        self.ssh_key_data = None

    self.idle_timeout = self.settings.get('idle_timeout', None)
    self.job_timeout = self.settings.get('job_timeout', None)
    self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
    self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
    self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)
    self.process_isolation_path = self.settings.get('process_isolation_path', self.process_isolation_path)
    self.process_isolation_hide_paths = self.settings.get('process_isolation_hide_paths', self.process_isolation_hide_paths)
    self.process_isolation_show_paths = self.settings.get('process_isolation_show_paths', self.process_isolation_show_paths)
    self.process_isolation_ro_paths = self.settings.get('process_isolation_ro_paths', self.process_isolation_ro_paths)
    self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
    self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)
    self.directory_isolation_cleanup = bool(self.settings.get('directory_isolation_cleanup', True))

    if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(self.project_dir):
        self.cwd = self.private_data_dir
    else:
        if self.directory_isolation_path is not None:
            self.cwd = self.directory_isolation_path
        else:
            self.cwd = self.project_dir

    if 'fact_cache' in self.settings:
        if 'fact_cache_type' in self.settings:
            if self.settings['fact_cache_type'] == 'jsonfile':
                self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
        else:
            self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
[ "def", "prepare_env", "(", "self", ")", ":", "try", ":", "passwords", "=", "self", ".", "loader", ".", "load_file", "(", "'env/passwords'", ",", "Mapping", ")", "self", ".", "expect_passwords", "=", "{", "re", ".", "compile", "(", "pattern", ",", "re", ".", "M", ")", ":", "password", "for", "pattern", ",", "password", "in", "iteritems", "(", "passwords", ")", "}", "except", "ConfigurationError", ":", "output", ".", "debug", "(", "'Not loading passwords'", ")", "self", ".", "expect_passwords", "=", "dict", "(", ")", "self", ".", "expect_passwords", "[", "pexpect", ".", "TIMEOUT", "]", "=", "None", "self", ".", "expect_passwords", "[", "pexpect", ".", "EOF", "]", "=", "None", "try", ":", "# seed env with existing shell env", "self", ".", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "envvars", "=", "self", ".", "loader", ".", "load_file", "(", "'env/envvars'", ",", "Mapping", ")", "if", "envvars", ":", "self", ".", "env", ".", "update", "(", "{", "k", ":", "six", ".", "text_type", "(", "v", ")", "for", "k", ",", "v", "in", "envvars", ".", "items", "(", ")", "}", ")", "if", "self", ".", "envvars", "and", "isinstance", "(", "self", ".", "envvars", ",", "dict", ")", ":", "self", ".", "env", ".", "update", "(", "{", "k", ":", "six", ".", "text_type", "(", "v", ")", "for", "k", ",", "v", "in", "self", ".", "envvars", ".", "items", "(", ")", "}", ")", "except", "ConfigurationError", ":", "output", ".", "debug", "(", "\"Not loading environment vars\"", ")", "# Still need to pass default environment to pexpect", "self", ".", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "try", ":", "self", ".", "settings", "=", "self", ".", "loader", ".", "load_file", "(", "'env/settings'", ",", "Mapping", ")", "except", "ConfigurationError", ":", "output", ".", "debug", "(", "\"Not loading settings\"", ")", "self", ".", "settings", "=", "dict", "(", ")", "try", ":", "self", ".", "ssh_key_data", "=", "self", ".", "loader", ".", "load_file", "(", "'env/ssh_key'", ",", "string_types", ")", "except", "ConfigurationError", ":", "output", ".", "debug", "(", "\"Not loading ssh key\"", ")", "self", ".", "ssh_key_data", "=", "None", "self", ".", "idle_timeout", "=", "self", ".", "settings", ".", "get", "(", "'idle_timeout'", ",", "None", ")", "self", ".", "job_timeout", "=", "self", ".", "settings", ".", "get", "(", "'job_timeout'", ",", "None", ")", "self", ".", "pexpect_timeout", "=", "self", ".", "settings", ".", "get", "(", "'pexpect_timeout'", ",", "5", ")", "self", ".", "process_isolation", "=", "self", ".", "settings", ".", "get", "(", "'process_isolation'", ",", "self", ".", "process_isolation", ")", "self", ".", "process_isolation_executable", "=", "self", ".", "settings", ".", "get", "(", "'process_isolation_executable'", ",", "self", ".", "process_isolation_executable", ")", "self", ".", "process_isolation_path", "=", "self", ".", "settings", ".", "get", "(", "'process_isolation_path'", ",", "self", ".", "process_isolation_path", ")", "self", ".", "process_isolation_hide_paths", "=", "self", ".", "settings", ".", "get", "(", "'process_isolation_hide_paths'", ",", "self", ".", "process_isolation_hide_paths", ")", "self", ".", "process_isolation_show_paths", "=", "self", ".", "settings", ".", "get", "(", "'process_isolation_show_paths'", ",", "self", ".", "process_isolation_show_paths", ")", "self", ".", "process_isolation_ro_paths", "=", "self", ".", "settings", ".", "get", "(", "'process_isolation_ro_paths'", ",", "self", ".", "process_isolation_ro_paths", ")", "self", ".", 
"pexpect_use_poll", "=", "self", ".", "settings", ".", "get", "(", "'pexpect_use_poll'", ",", "True", ")", "self", ".", "suppress_ansible_output", "=", "self", ".", "settings", ".", "get", "(", "'suppress_ansible_output'", ",", "self", ".", "quiet", ")", "self", ".", "directory_isolation_cleanup", "=", "bool", "(", "self", ".", "settings", ".", "get", "(", "'directory_isolation_cleanup'", ",", "True", ")", ")", "if", "'AD_HOC_COMMAND_ID'", "in", "self", ".", "env", "or", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "project_dir", ")", ":", "self", ".", "cwd", "=", "self", ".", "private_data_dir", "else", ":", "if", "self", ".", "directory_isolation_path", "is", "not", "None", ":", "self", ".", "cwd", "=", "self", ".", "directory_isolation_path", "else", ":", "self", ".", "cwd", "=", "self", ".", "project_dir", "if", "'fact_cache'", "in", "self", ".", "settings", ":", "if", "'fact_cache_type'", "in", "self", ".", "settings", ":", "if", "self", ".", "settings", "[", "'fact_cache_type'", "]", "==", "'jsonfile'", ":", "self", ".", "fact_cache", "=", "os", ".", "path", ".", "join", "(", "self", ".", "artifact_dir", ",", "self", ".", "settings", "[", "'fact_cache'", "]", ")", "else", ":", "self", ".", "fact_cache", "=", "os", ".", "path", ".", "join", "(", "self", ".", "artifact_dir", ",", "self", ".", "settings", "[", "'fact_cache'", "]", ")" ]
Manages reading environment metadata files under ``private_data_dir`` and merging/updating with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
[ "Manages", "reading", "environment", "metadata", "files", "under", "private_data_dir", "and", "merging", "/", "updating", "with", "existing", "values", "so", "the", ":", "py", ":", "class", ":", "ansible_runner", ".", "runner", ".", "Runner", "object", "can", "read", "and", "use", "them", "easily" ]
python
train
StellarCN/py-stellar-base
stellar_base/stellarxdr/xdrgen.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/stellarxdr/xdrgen.py#L706-L713
def p_version_def(t):
    """version_def : VERSION ID LBRACE procedure_def procedure_def_list RBRACE EQUALS constant SEMI"""
    global name_dict
    id = t[2]
    value = t[8]
    lineno = t.lineno(1)
    if id_unique(id, 'version', lineno):
        name_dict[id] = const_info(id, value, lineno)
[ "def", "p_version_def", "(", "t", ")", ":", "global", "name_dict", "id", "=", "t", "[", "2", "]", "value", "=", "t", "[", "8", "]", "lineno", "=", "t", ".", "lineno", "(", "1", ")", "if", "id_unique", "(", "id", ",", "'version'", ",", "lineno", ")", ":", "name_dict", "[", "id", "]", "=", "const_info", "(", "id", ",", "value", ",", "lineno", ")" ]
version_def : VERSION ID LBRACE procedure_def procedure_def_list RBRACE EQUALS constant SEMI
[ "version_def", ":", "VERSION", "ID", "LBRACE", "procedure_def", "procedure_def_list", "RBRACE", "EQUALS", "constant", "SEMI" ]
python
train
markovmodel/PyEMMA
pyemma/thermo/util/util.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/thermo/util/util.py#L401-L416
def assign_unbiased_state_label(memm_list, unbiased_state):
    r"""
    Sets the msm label for the given list of estimated MEMM objects.

    Parameters
    ----------
    memm_list : list of estimated MEMM objects
        The MEMM objects which shall have the msm label set.
    unbiased_state : int or None
        Index of the unbiased thermodynamic state (if present).
    """
    if unbiased_state is None:
        return
    for memm in memm_list:
        assert 0 <= unbiased_state < len(memm.models), "invalid state: " + str(unbiased_state)
        memm._unbiased_state = unbiased_state
[ "def", "assign_unbiased_state_label", "(", "memm_list", ",", "unbiased_state", ")", ":", "if", "unbiased_state", "is", "None", ":", "return", "for", "memm", "in", "memm_list", ":", "assert", "0", "<=", "unbiased_state", "<", "len", "(", "memm", ".", "models", ")", ",", "\"invalid state: \"", "+", "str", "(", "unbiased_state", ")", "memm", ".", "_unbiased_state", "=", "unbiased_state" ]
r""" Sets the msm label for the given list of estimated MEMM objects. Parameters ---------- memm_list : list of estimated MEMM objects The MEMM objects which shall have the msm label set. unbiased_state : int or None Index of the unbiased thermodynamic state (if present).
[ "r", "Sets", "the", "msm", "label", "for", "the", "given", "list", "of", "estimated", "MEMM", "objects", "." ]
python
train
PythonCharmers/python-future
src/future/types/newrange.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/types/newrange.py#L90-L100
def index(self, value):
    """Return the 0-based position of integer `value` in
    the sequence this range represents."""
    try:
        diff = value - self._start
    except TypeError:
        raise ValueError('%r is not in range' % value)
    quotient, remainder = divmod(diff, self._step)
    if remainder == 0 and 0 <= quotient < self._len:
        return abs(quotient)
    raise ValueError('%r is not in range' % value)
[ "def", "index", "(", "self", ",", "value", ")", ":", "try", ":", "diff", "=", "value", "-", "self", ".", "_start", "except", "TypeError", ":", "raise", "ValueError", "(", "'%r is not in range'", "%", "value", ")", "quotient", ",", "remainder", "=", "divmod", "(", "diff", ",", "self", ".", "_step", ")", "if", "remainder", "==", "0", "and", "0", "<=", "quotient", "<", "self", ".", "_len", ":", "return", "abs", "(", "quotient", ")", "raise", "ValueError", "(", "'%r is not in range'", "%", "value", ")" ]
Return the 0-based position of integer `value` in the sequence this range represents.
[ "Return", "the", "0", "-", "based", "position", "of", "integer", "value", "in", "the", "sequence", "this", "range", "represents", "." ]
python
train
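The divmod test above is the whole membership check: value is in the range iff its offset from start is a whole number of steps and that step count lands inside the length. Spelled out against a concrete stdlib range (values invented for illustration):

    # Models range(10, 25, 3) == [10, 13, 16, 19, 22]
    start, step, length = 10, 3, 5
    value = 19
    quotient, remainder = divmod(value - start, step)   # (3, 0)
    assert remainder == 0 and 0 <= quotient < length
    assert range(10, 25, 3).index(19) == quotient == 3  # stdlib agrees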
greenelab/PathCORE-T
pathcore/network.py
https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L458-L501
def to_dataframe(self, drop_weights_below=0, whitelist=None):
    """
    Conversion of the network to a pandas.DataFrame.

    Parameters
    -----------
    drop_weights_below : int (default=0)
        specify an edge weight threshold - remove all edges with weight
        below this value
    whitelist : [set|list](tup(int, int))|None (default=None)
        option to pass in a set/list of edge ids (tup(int, int)) that
        should be kept in the resulting dataframe

    Returns
    -----------
    pandas.DataFrame
        a pandas.DataFrame containing the network edge information.
        columns = [pw0, pw1, weight].
        an additional "features" column is returned if this network is
        not an aggregate of multiple networks.
    """
    network_df_cols = ["pw0", "pw1", "weight"]
    if self.features:
        network_df_cols.append("features")
    network_df = pd.DataFrame(columns=network_df_cols)
    idx = 0
    edge_pathways = set()
    for (v0, v1), edge_obj in self.edges.items():
        if (edge_obj.weight > drop_weights_below and
                (whitelist is None or (v0, v1) in whitelist)):
            row = [self.__getitem__(v0), self.__getitem__(v1), edge_obj.weight]
            edge_pathways.add(v0)
            edge_pathways.add(v1)
            if self.features:
                features = edge_obj.features_to_string()
                row.append(features)
            network_df.loc[idx] = row
            idx += 1  # faster to append by index.
    network_df = network_df.sort_values(by=["weight"], ascending=False)
    print("The pathway co-occurrence network "
          "contains {0} pathways.".format(len(edge_pathways)))
    return network_df
[ "def", "to_dataframe", "(", "self", ",", "drop_weights_below", "=", "0", ",", "whitelist", "=", "None", ")", ":", "network_df_cols", "=", "[", "\"pw0\"", ",", "\"pw1\"", ",", "\"weight\"", "]", "if", "self", ".", "features", ":", "network_df_cols", ".", "append", "(", "\"features\"", ")", "network_df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "network_df_cols", ")", "idx", "=", "0", "edge_pathways", "=", "set", "(", ")", "for", "(", "v0", ",", "v1", ")", ",", "edge_obj", "in", "self", ".", "edges", ".", "items", "(", ")", ":", "if", "(", "edge_obj", ".", "weight", ">", "drop_weights_below", "and", "(", "whitelist", "is", "None", "or", "(", "v0", ",", "v1", ")", "in", "whitelist", ")", ")", ":", "row", "=", "[", "self", ".", "__getitem__", "(", "v0", ")", ",", "self", ".", "__getitem__", "(", "v1", ")", ",", "edge_obj", ".", "weight", "]", "edge_pathways", ".", "add", "(", "v0", ")", "edge_pathways", ".", "add", "(", "v1", ")", "if", "self", ".", "features", ":", "features", "=", "edge_obj", ".", "features_to_string", "(", ")", "row", ".", "append", "(", "features", ")", "network_df", ".", "loc", "[", "idx", "]", "=", "row", "idx", "+=", "1", "# faster to append by index.", "network_df", "=", "network_df", ".", "sort_values", "(", "by", "=", "[", "\"weight\"", "]", ",", "ascending", "=", "False", ")", "print", "(", "\"The pathway co-occurrence network \"", "\"contains {0} pathways.\"", ".", "format", "(", "len", "(", "edge_pathways", ")", ")", ")", "return", "network_df" ]
Conversion of the network to a pandas.DataFrame. Parameters ----------- drop_weights_below : int (default=0) specify an edge weight threshold - remove all edges with weight below this value whitelist : [set|list](tup(int, int))|None (default=None) option to pass in a set/list of edge ids (tup(int, int)) that should be kept in the resulting dataframe Returns ----------- pandas.DataFrame a pandas.DataFrame containing the network edge information. columns = [pw0, pw1, weight]. an additional "features" column is returned if this network is not an aggregate of multiple networks.
[ "Conversion", "of", "the", "network", "to", "a", "pandas", ".", "DataFrame", "." ]
python
train
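The frame this method returns is plain pandas, so downstream slicing needs no PathCORE machinery. A sketch with a hand-built stand-in frame (the column names come from the docstring; the values are invented):

import pandas as pd

df = pd.DataFrame({'pw0': ['pA', 'pB'], 'pw1': ['pC', 'pD'], 'weight': [5, 1]})
# Keep only the heavier edges, mirroring what drop_weights_below does upstream.
strong = df[df['weight'] > 2].sort_values(by=['weight'], ascending=False)
print(strong)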
gabstopper/smc-python
smc/base/collection.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/base/collection.py#L472-L522
def filter(self, *filter, **kw): # @ReservedAssignment """ Filter results for specific element type. keyword arguments can be used to specify a match against the elements attribute directly. It's important to note that if the search filter contains a / or -, the SMC will only search the name and comment fields. Otherwise other key fields of an element are searched. In addition, SMC searches are a 'contains' search meaning you may return more results than wanted. Use a key word argument to specify the elements attribute and value expected. :: >>> list(Router.objects.filter('10.10.10.1')) [Router(name=Router-110.10.10.10), Router(name=Router-10.10.10.10), Router(name=Router-10.10.10.1)] >>> list(Router.objects.filter(address='10.10.10.1')) [Router(name=Router-10.10.10.1)] :param str filter: any parameter to attempt to match on. For example, if this is a service, you could match on service name 'http' or ports of interest, '80'. :param bool exact_match: Can be passed as a keyword arg. Specifies whether the match needs to be exact or not (default: False) :param bool case_sensitive: Can be passed as a keyword arg. Specifies whether the match is case sensitive or not. (default: True) :param kw: keyword args can specify an attribute=value to use as an exact match against the elements attribute. :return: :class:`.ElementCollection` """ iexact = None if filter: _filter = filter[0] exact_match = kw.pop('exact_match', False) case_sensitive = kw.pop('case_sensitive', True) if kw: _, value = next(iter(kw.items())) _filter = value iexact = kw # Only strip metachars from network and address range if not exact_match and self._params.get('filter_context', {})\ in ('network', 'address_range', 'network_elements'): _filter = _strip_metachars(_filter) return self._clone( filter=_filter, iexact=iexact, exact_match=exact_match, case_sensitive=case_sensitive)
[ "def", "filter", "(", "self", ",", "*", "filter", ",", "*", "*", "kw", ")", ":", "# @ReservedAssignment", "iexact", "=", "None", "if", "filter", ":", "_filter", "=", "filter", "[", "0", "]", "exact_match", "=", "kw", ".", "pop", "(", "'exact_match'", ",", "False", ")", "case_sensitive", "=", "kw", ".", "pop", "(", "'case_sensitive'", ",", "True", ")", "if", "kw", ":", "_", ",", "value", "=", "next", "(", "iter", "(", "kw", ".", "items", "(", ")", ")", ")", "_filter", "=", "value", "iexact", "=", "kw", "# Only strip metachars from network and address range", "if", "not", "exact_match", "and", "self", ".", "_params", ".", "get", "(", "'filter_context'", ",", "{", "}", ")", "in", "(", "'network'", ",", "'address_range'", ",", "'network_elements'", ")", ":", "_filter", "=", "_strip_metachars", "(", "_filter", ")", "return", "self", ".", "_clone", "(", "filter", "=", "_filter", ",", "iexact", "=", "iexact", ",", "exact_match", "=", "exact_match", ",", "case_sensitive", "=", "case_sensitive", ")" ]
Filter results for specific element type. keyword arguments can be used to specify a match against the elements attribute directly. It's important to note that if the search filter contains a / or -, the SMC will only search the name and comment fields. Otherwise other key fields of an element are searched. In addition, SMC searches are a 'contains' search meaning you may return more results than wanted. Use a key word argument to specify the elements attribute and value expected. :: >>> list(Router.objects.filter('10.10.10.1')) [Router(name=Router-110.10.10.10), Router(name=Router-10.10.10.10), Router(name=Router-10.10.10.1)] >>> list(Router.objects.filter(address='10.10.10.1')) [Router(name=Router-10.10.10.1)] :param str filter: any parameter to attempt to match on. For example, if this is a service, you could match on service name 'http' or ports of interest, '80'. :param bool exact_match: Can be passed as a keyword arg. Specifies whether the match needs to be exact or not (default: False) :param bool case_sensitive: Can be passed as a keyword arg. Specifies whether the match is case sensitive or not. (default: True) :param kw: keyword args can specify an attribute=value to use as an exact match against the elements attribute. :return: :class:`.ElementCollection`
[ "Filter", "results", "for", "specific", "element", "type", ".", "keyword", "arguments", "can", "be", "used", "to", "specify", "a", "match", "against", "the", "elements", "attribute", "directly", ".", "It", "s", "important", "to", "note", "that", "if", "the", "search", "filter", "contains", "a", "/", "or", "-", "the", "SMC", "will", "only", "search", "the", "name", "and", "comment", "fields", ".", "Otherwise", "other", "key", "fields", "of", "an", "element", "are", "searched", ".", "In", "addition", "SMC", "searches", "are", "a", "contains", "search", "meaning", "you", "may", "return", "more", "results", "than", "wanted", ".", "Use", "a", "key", "word", "argument", "to", "specify", "the", "elements", "attribute", "and", "value", "expected", ".", "::", ">>>", "list", "(", "Router", ".", "objects", ".", "filter", "(", "10", ".", "10", ".", "10", ".", "1", "))", "[", "Router", "(", "name", "=", "Router", "-", "110", ".", "10", ".", "10", ".", "10", ")", "Router", "(", "name", "=", "Router", "-", "10", ".", "10", ".", "10", ".", "10", ")", "Router", "(", "name", "=", "Router", "-", "10", ".", "10", ".", "10", ".", "1", ")", "]", ">>>", "list", "(", "Router", ".", "objects", ".", "filter", "(", "address", "=", "10", ".", "10", ".", "10", ".", "1", "))", "[", "Router", "(", "name", "=", "Router", "-", "10", ".", "10", ".", "10", ".", "1", ")", "]", ":", "param", "str", "filter", ":", "any", "parameter", "to", "attempt", "to", "match", "on", ".", "For", "example", "if", "this", "is", "a", "service", "you", "could", "match", "on", "service", "name", "http", "or", "ports", "of", "interest", "80", ".", ":", "param", "bool", "exact_match", ":", "Can", "be", "passed", "as", "a", "keyword", "arg", ".", "Specifies", "whether", "the", "match", "needs", "to", "be", "exact", "or", "not", "(", "default", ":", "False", ")", ":", "param", "bool", "case_sensitive", ":", "Can", "be", "passed", "as", "a", "keyword", "arg", ".", "Specifies", "whether", "the", "match", "is", "case", "sensitive", "or", "not", ".", "(", "default", ":", "True", ")", ":", "param", "kw", ":", "keyword", "args", "can", "specify", "an", "attribute", "=", "value", "to", "use", "as", "an", "exact", "match", "against", "the", "elements", "attribute", ".", ":", "return", ":", ":", "class", ":", ".", "ElementCollection" ]
python
train
portfoliome/postpy
postpy/admin.py
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L77-L87
def reset(db_name): """Reset database.""" conn = psycopg2.connect(database='postgres') db = Database(db_name) conn.autocommit = True with conn.cursor() as cursor: cursor.execute(db.drop_statement()) cursor.execute(db.create_statement()) conn.close()
[ "def", "reset", "(", "db_name", ")", ":", "conn", "=", "psycopg2", ".", "connect", "(", "database", "=", "'postgres'", ")", "db", "=", "Database", "(", "db_name", ")", "conn", ".", "autocommit", "=", "True", "with", "conn", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "db", ".", "drop_statement", "(", ")", ")", "cursor", ".", "execute", "(", "db", ".", "create_statement", "(", ")", ")", "conn", ".", "close", "(", ")" ]
Reset database.
[ "Reset", "database", "." ]
python
train
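The same drop-and-recreate pattern can be sketched without postpy's Database helper; the database name is a placeholder and a reachable local PostgreSQL server is assumed:

import psycopg2

conn = psycopg2.connect(database='postgres')
conn.autocommit = True  # DROP/CREATE DATABASE cannot run inside a transaction
with conn.cursor() as cursor:
    cursor.execute('DROP DATABASE IF EXISTS mydb')
    cursor.execute('CREATE DATABASE mydb')
conn.close()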
undertheseanlp/languageflow
languageflow/evaluation/__init__.py
https://github.com/undertheseanlp/languageflow/blob/1436e0bf72803e02ccf727f41e8fc85ba167d9fe/languageflow/evaluation/__init__.py#L3-L24
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None): """pretty print for confusion matrixes""" columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length empty_cell = " " * columnwidth # Print header print(" " + empty_cell, end=" ") for label in labels: print("%{0}s".format(columnwidth) % label, end=" ") print() # Print rows for i, label1 in enumerate(labels): print(" %{0}s".format(columnwidth) % label1, end=" ") for j in range(len(labels)): cell = "%{0}.1f".format(columnwidth) % cm[i, j] if hide_zeroes: cell = cell if float(cm[i, j]) != 0 else empty_cell if hide_diagonal: cell = cell if i != j else empty_cell if hide_threshold: cell = cell if cm[i, j] > hide_threshold else empty_cell print(cell, end=" ") print()
[ "def", "print_cm", "(", "cm", ",", "labels", ",", "hide_zeroes", "=", "False", ",", "hide_diagonal", "=", "False", ",", "hide_threshold", "=", "None", ")", ":", "columnwidth", "=", "max", "(", "[", "len", "(", "x", ")", "for", "x", "in", "labels", "]", "+", "[", "5", "]", ")", "# 5 is value length", "empty_cell", "=", "\" \"", "*", "columnwidth", "# Print header", "print", "(", "\" \"", "+", "empty_cell", ",", "end", "=", "\" \"", ")", "for", "label", "in", "labels", ":", "print", "(", "\"%{0}s\"", ".", "format", "(", "columnwidth", ")", "%", "label", ",", "end", "=", "\" \"", ")", "print", "(", ")", "# Print rows", "for", "i", ",", "label1", "in", "enumerate", "(", "labels", ")", ":", "print", "(", "\" %{0}s\"", ".", "format", "(", "columnwidth", ")", "%", "label1", ",", "end", "=", "\" \"", ")", "for", "j", "in", "range", "(", "len", "(", "labels", ")", ")", ":", "cell", "=", "\"%{0}.1f\"", ".", "format", "(", "columnwidth", ")", "%", "cm", "[", "i", ",", "j", "]", "if", "hide_zeroes", ":", "cell", "=", "cell", "if", "float", "(", "cm", "[", "i", ",", "j", "]", ")", "!=", "0", "else", "empty_cell", "if", "hide_diagonal", ":", "cell", "=", "cell", "if", "i", "!=", "j", "else", "empty_cell", "if", "hide_threshold", ":", "cell", "=", "cell", "if", "cm", "[", "i", ",", "j", "]", ">", "hide_threshold", "else", "empty_cell", "print", "(", "cell", ",", "end", "=", "\" \"", ")", "print", "(", ")" ]
pretty print for confusion matrices
[ "pretty", "print", "for", "confusion", "matrices" ]
python
valid
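Because the implementation indexes the matrix as cm[i, j], a 2-D numpy array works directly as input. A small usage sketch, assuming print_cm from the module above is in scope:

import numpy as np

cm = np.array([[13.0, 2.0],
               [0.0, 9.0]])
print_cm(cm, labels=['cat', 'dog'], hide_zeroes=True)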
apache/incubator-heron
heron/tools/explorer/src/python/logicalplan.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/explorer/src/python/logicalplan.py#L101-L107
def filter_spouts(table, header): """ filter to keep spouts """ spouts_info = [] for row in table: if row[0] == 'spout': spouts_info.append(row) return spouts_info, header
[ "def", "filter_spouts", "(", "table", ",", "header", ")", ":", "spouts_info", "=", "[", "]", "for", "row", "in", "table", ":", "if", "row", "[", "0", "]", "==", "'spout'", ":", "spouts_info", ".", "append", "(", "row", ")", "return", "spouts_info", ",", "header" ]
filter to keep spouts
[ "filter", "to", "keep", "spouts" ]
python
valid
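A minimal usage sketch, assuming filter_spouts from the module above is in scope; by convention column 0 of each row holds the component type:

table = [['spout', 'word', 2],
         ['bolt', 'count', 3]]
spouts, header = filter_spouts(table, ['type', 'name', 'parallelism'])
print(spouts)  # [['spout', 'word', 2]]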
ejeschke/ginga
ginga/examples/reference-viewer/MyGlobalPlugin.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/examples/reference-viewer/MyGlobalPlugin.py#L43-L106
def build_gui(self, container): """ This method is called when the plugin is invoked. It builds the GUI used by the plugin into the widget layout passed as ``container``. This method could be called several times if the plugin is opened and closed. The method may be omitted if there is no GUI for the plugin. This specific example uses the GUI widget set agnostic wrappers to build the GUI, but you can also just as easily use explicit toolkit calls here if you only want to support one widget set. """ top = Widgets.VBox() top.set_border_width(4) # this is a little trick for making plugins that work either in # a vertical or horizontal orientation. It returns a box container, # a scroll widget and an orientation ('vertical', 'horizontal') vbox, sw, orientation = Widgets.get_oriented_box(container) vbox.set_border_width(4) vbox.set_spacing(2) # Take a text widget to show some instructions self.msg_font = self.fv.get_font("sans", 12) tw = Widgets.TextArea(wrap=True, editable=False) tw.set_font(self.msg_font) self.tw = tw # Frame for instructions and add the text widget with another # blank widget to stretch as needed to fill emp fr = Widgets.Frame("Status") fr.set_widget(tw) vbox.add_widget(fr, stretch=0) # Add a spacer to stretch the rest of the way to the end of the # plugin space spacer = Widgets.Label('') vbox.add_widget(spacer, stretch=1) # scroll bars will allow lots of content to be accessed top.add_widget(sw, stretch=1) # A button box that is always visible at the bottom btns = Widgets.HBox() btns.set_spacing(3) # Add a close button for the convenience of the user btn = Widgets.Button("Close") btn.add_callback('activated', lambda w: self.close()) btns.add_widget(btn, stretch=0) btns.add_widget(Widgets.Label(''), stretch=1) top.add_widget(btns, stretch=0) # Add our GUI to the container container.add_widget(top, stretch=1) # NOTE: if you are building a GUI using a specific widget toolkit # (e.g. Qt) GUI calls, you need to extract the widget or layout # from the non-toolkit specific container wrapper and call on that # to pack your widget, e.g.: #cw = container.get_widget() #cw.addWidget(widget, stretch=1) self.gui_up = True
[ "def", "build_gui", "(", "self", ",", "container", ")", ":", "top", "=", "Widgets", ".", "VBox", "(", ")", "top", ".", "set_border_width", "(", "4", ")", "# this is a little trick for making plugins that work either in", "# a vertical or horizontal orientation. It returns a box container,", "# a scroll widget and an orientation ('vertical', 'horizontal')", "vbox", ",", "sw", ",", "orientation", "=", "Widgets", ".", "get_oriented_box", "(", "container", ")", "vbox", ".", "set_border_width", "(", "4", ")", "vbox", ".", "set_spacing", "(", "2", ")", "# Take a text widget to show some instructions", "self", ".", "msg_font", "=", "self", ".", "fv", ".", "get_font", "(", "\"sans\"", ",", "12", ")", "tw", "=", "Widgets", ".", "TextArea", "(", "wrap", "=", "True", ",", "editable", "=", "False", ")", "tw", ".", "set_font", "(", "self", ".", "msg_font", ")", "self", ".", "tw", "=", "tw", "# Frame for instructions and add the text widget with another", "# blank widget to stretch as needed to fill emp", "fr", "=", "Widgets", ".", "Frame", "(", "\"Status\"", ")", "fr", ".", "set_widget", "(", "tw", ")", "vbox", ".", "add_widget", "(", "fr", ",", "stretch", "=", "0", ")", "# Add a spacer to stretch the rest of the way to the end of the", "# plugin space", "spacer", "=", "Widgets", ".", "Label", "(", "''", ")", "vbox", ".", "add_widget", "(", "spacer", ",", "stretch", "=", "1", ")", "# scroll bars will allow lots of content to be accessed", "top", ".", "add_widget", "(", "sw", ",", "stretch", "=", "1", ")", "# A button box that is always visible at the bottom", "btns", "=", "Widgets", ".", "HBox", "(", ")", "btns", ".", "set_spacing", "(", "3", ")", "# Add a close button for the convenience of the user", "btn", "=", "Widgets", ".", "Button", "(", "\"Close\"", ")", "btn", ".", "add_callback", "(", "'activated'", ",", "lambda", "w", ":", "self", ".", "close", "(", ")", ")", "btns", ".", "add_widget", "(", "btn", ",", "stretch", "=", "0", ")", "btns", ".", "add_widget", "(", "Widgets", ".", "Label", "(", "''", ")", ",", "stretch", "=", "1", ")", "top", ".", "add_widget", "(", "btns", ",", "stretch", "=", "0", ")", "# Add our GUI to the container", "container", ".", "add_widget", "(", "top", ",", "stretch", "=", "1", ")", "# NOTE: if you are building a GUI using a specific widget toolkit", "# (e.g. Qt) GUI calls, you need to extract the widget or layout", "# from the non-toolkit specific container wrapper and call on that", "# to pack your widget, e.g.:", "#cw = container.get_widget()", "#cw.addWidget(widget, stretch=1)", "self", ".", "gui_up", "=", "True" ]
This method is called when the plugin is invoked. It builds the GUI used by the plugin into the widget layout passed as ``container``. This method could be called several times if the plugin is opened and closed. The method may be omitted if there is no GUI for the plugin. This specific example uses the GUI widget set agnostic wrappers to build the GUI, but you can also just as easily use explicit toolkit calls here if you only want to support one widget set.
[ "This", "method", "is", "called", "when", "the", "plugin", "is", "invoked", ".", "It", "builds", "the", "GUI", "used", "by", "the", "plugin", "into", "the", "widget", "layout", "passed", "as", "container", ".", "This", "method", "could", "be", "called", "several", "times", "if", "the", "plugin", "is", "opened", "and", "closed", ".", "The", "method", "may", "be", "omitted", "if", "there", "is", "no", "GUI", "for", "the", "plugin", "." ]
python
train
consbio/parserutils
parserutils/urls.py
https://github.com/consbio/parserutils/blob/f13f80db99ed43479336b116e38512e3566e4623/parserutils/urls.py#L24-L35
def get_base_url(url, include_path=False): """ :return: the url without the query or fragment segments """ if not url: return None parts = _urlsplit(url) base_url = _urlunsplit(( parts.scheme, parts.netloc, (parts.path if include_path else ''), None, None )) return base_url if base_url.endswith('/') else base_url + '/'
[ "def", "get_base_url", "(", "url", ",", "include_path", "=", "False", ")", ":", "if", "not", "url", ":", "return", "None", "parts", "=", "_urlsplit", "(", "url", ")", "base_url", "=", "_urlunsplit", "(", "(", "parts", ".", "scheme", ",", "parts", ".", "netloc", ",", "(", "parts", ".", "path", "if", "include_path", "else", "''", ")", ",", "None", ",", "None", ")", ")", "return", "base_url", "if", "base_url", ".", "endswith", "(", "'/'", ")", "else", "base_url", "+", "'/'" ]
:return: the url without the query or fragment segments
[ ":", "return", ":", "the", "url", "without", "the", "query", "or", "fragment", "segments" ]
python
train
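The module aliases the stdlib functions as _urlsplit/_urlunsplit, so the same behaviour can be rebuilt directly on urllib.parse as a self-contained sketch:

from urllib.parse import urlsplit, urlunsplit

def base_url(url, include_path=False):
    if not url:
        return None
    parts = urlsplit(url)
    base = urlunsplit((parts.scheme, parts.netloc,
                       parts.path if include_path else '', None, None))
    return base if base.endswith('/') else base + '/'

print(base_url('https://example.com/a/b?q=1'))                     # https://example.com/
print(base_url('https://example.com/a/b?q=1', include_path=True))  # https://example.com/a/b/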
flatangle/flatlib
flatlib/ephem/tools.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/ephem/tools.py#L41-L51
def isDiurnal(jd, lat, lon): """ Returns true if the sun is above the horizon of a given date and location. """ sun = swe.sweObject(const.SUN, jd) mc = swe.sweHousesLon(jd, lat, lon, const.HOUSES_DEFAULT)[1][1] ra, decl = utils.eqCoords(sun['lon'], sun['lat']) mcRA, _ = utils.eqCoords(mc, 0.0) return utils.isAboveHorizon(ra, decl, mcRA, lat)
[ "def", "isDiurnal", "(", "jd", ",", "lat", ",", "lon", ")", ":", "sun", "=", "swe", ".", "sweObject", "(", "const", ".", "SUN", ",", "jd", ")", "mc", "=", "swe", ".", "sweHousesLon", "(", "jd", ",", "lat", ",", "lon", ",", "const", ".", "HOUSES_DEFAULT", ")", "[", "1", "]", "[", "1", "]", "ra", ",", "decl", "=", "utils", ".", "eqCoords", "(", "sun", "[", "'lon'", "]", ",", "sun", "[", "'lat'", "]", ")", "mcRA", ",", "_", "=", "utils", ".", "eqCoords", "(", "mc", ",", "0.0", ")", "return", "utils", ".", "isAboveHorizon", "(", "ra", ",", "decl", ",", "mcRA", ",", "lat", ")" ]
Returns true if the sun is above the horizon for a given date and location.
[ "Returns", "true", "if", "the", "sun", "is", "above", "the", "horizon", "for", "a", "given", "date", "and", "location", "." ]
python
train
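A hypothetical call, assuming flatlib and its Swiss Ephemeris data are installed; the Julian day and the coordinates (degrees, roughly Lisbon) are placeholders:

from flatlib.ephem import tools

print(tools.isDiurnal(2458849.5, 38.72, -9.14))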
ralphje/imagemounter
imagemounter/cli/shell.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/cli/shell.py#L68-L79
def default(self, line): """Overriding default to get access to any argparse commands we have specified.""" if any((line.startswith(x) for x in self.argparse_names())): try: args = self.argparser.parse_args(shlex.split(line)) except Exception: # intentionally catches also other errors in argparser pass else: args.func(args) else: cmd.Cmd.default(self, line)
[ "def", "default", "(", "self", ",", "line", ")", ":", "if", "any", "(", "(", "line", ".", "startswith", "(", "x", ")", "for", "x", "in", "self", ".", "argparse_names", "(", ")", ")", ")", ":", "try", ":", "args", "=", "self", ".", "argparser", ".", "parse_args", "(", "shlex", ".", "split", "(", "line", ")", ")", "except", "Exception", ":", "# intentionally catches also other errors in argparser", "pass", "else", ":", "args", ".", "func", "(", "args", ")", "else", ":", "cmd", ".", "Cmd", ".", "default", "(", "self", ",", "line", ")" ]
Overriding default to get access to any argparse commands we have specified.
[ "Overriding", "default", "to", "get", "access", "to", "any", "argparse", "commands", "we", "have", "specified", "." ]
python
train
awslabs/aws-sam-cli
samcli/commands/publish/command.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/publish/command.py#L55-L83
def do_cli(ctx, template, semantic_version): """Publish the application based on command line inputs.""" try: template_data = get_template_data(template) except ValueError as ex: click.secho("Publish Failed", fg='red') raise UserException(str(ex)) # Override SemanticVersion in template metadata when provided in command input if semantic_version and SERVERLESS_REPO_APPLICATION in template_data.get(METADATA, {}): template_data.get(METADATA).get(SERVERLESS_REPO_APPLICATION)[SEMANTIC_VERSION] = semantic_version try: publish_output = publish_application(template_data) click.secho("Publish Succeeded", fg="green") click.secho(_gen_success_message(publish_output)) except InvalidS3UriError: click.secho("Publish Failed", fg='red') raise UserException( "Your SAM template contains invalid S3 URIs. Please make sure that you have uploaded application " "artifacts to S3 by packaging the template. See more details in {}".format(SAM_PACKAGE_DOC)) except ServerlessRepoError as ex: click.secho("Publish Failed", fg='red') LOG.debug("Failed to publish application to serverlessrepo", exc_info=True) error_msg = '{}\nPlease follow the instructions in {}'.format(str(ex), SAM_PUBLISH_DOC) raise UserException(error_msg) application_id = publish_output.get('application_id') _print_console_link(ctx.region, application_id)
[ "def", "do_cli", "(", "ctx", ",", "template", ",", "semantic_version", ")", ":", "try", ":", "template_data", "=", "get_template_data", "(", "template", ")", "except", "ValueError", "as", "ex", ":", "click", ".", "secho", "(", "\"Publish Failed\"", ",", "fg", "=", "'red'", ")", "raise", "UserException", "(", "str", "(", "ex", ")", ")", "# Override SemanticVersion in template metadata when provided in command input", "if", "semantic_version", "and", "SERVERLESS_REPO_APPLICATION", "in", "template_data", ".", "get", "(", "METADATA", ",", "{", "}", ")", ":", "template_data", ".", "get", "(", "METADATA", ")", ".", "get", "(", "SERVERLESS_REPO_APPLICATION", ")", "[", "SEMANTIC_VERSION", "]", "=", "semantic_version", "try", ":", "publish_output", "=", "publish_application", "(", "template_data", ")", "click", ".", "secho", "(", "\"Publish Succeeded\"", ",", "fg", "=", "\"green\"", ")", "click", ".", "secho", "(", "_gen_success_message", "(", "publish_output", ")", ")", "except", "InvalidS3UriError", ":", "click", ".", "secho", "(", "\"Publish Failed\"", ",", "fg", "=", "'red'", ")", "raise", "UserException", "(", "\"Your SAM template contains invalid S3 URIs. Please make sure that you have uploaded application \"", "\"artifacts to S3 by packaging the template. See more details in {}\"", ".", "format", "(", "SAM_PACKAGE_DOC", ")", ")", "except", "ServerlessRepoError", "as", "ex", ":", "click", ".", "secho", "(", "\"Publish Failed\"", ",", "fg", "=", "'red'", ")", "LOG", ".", "debug", "(", "\"Failed to publish application to serverlessrepo\"", ",", "exc_info", "=", "True", ")", "error_msg", "=", "'{}\\nPlease follow the instructions in {}'", ".", "format", "(", "str", "(", "ex", ")", ",", "SAM_PUBLISH_DOC", ")", "raise", "UserException", "(", "error_msg", ")", "application_id", "=", "publish_output", ".", "get", "(", "'application_id'", ")", "_print_console_link", "(", "ctx", ".", "region", ",", "application_id", ")" ]
Publish the application based on command line inputs.
[ "Publish", "the", "application", "based", "on", "command", "line", "inputs", "." ]
python
train
pypa/pipenv
pipenv/vendor/dotenv/cli.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/dotenv/cli.py#L59-L66
def get(ctx, key): '''Retrieve the value for the given key.''' file = ctx.obj['FILE'] stored_value = get_key(file, key) if stored_value: click.echo('%s=%s' % (key, stored_value)) else: exit(1)
[ "def", "get", "(", "ctx", ",", "key", ")", ":", "file", "=", "ctx", ".", "obj", "[", "'FILE'", "]", "stored_value", "=", "get_key", "(", "file", ",", "key", ")", "if", "stored_value", ":", "click", ".", "echo", "(", "'%s=%s'", "%", "(", "key", ",", "stored_value", ")", ")", "else", ":", "exit", "(", "1", ")" ]
Retrieve the value for the given key.
[ "Retrieve", "the", "value", "for", "the", "given", "key", "." ]
python
train
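This command is a thin wrapper over python-dotenv's get_key; the equivalent lookup from Python, with a placeholder file path and key name:

from dotenv import get_key

print(get_key('.env', 'SECRET_TOKEN'))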
IDSIA/sacred
sacred/experiment.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/experiment.py#L390-L406
def log_scalar(self, name, value, step=None): """ Add a new measurement. The measurement will be processed by the MongoDB* observer during a heartbeat event. Other observers are not yet supported. :param name: The name of the metric, e.g. training.loss :param value: The measured value :param step: The step number (integer), e.g. the iteration number If not specified, an internal counter for each metric is used, incremented by one. """ # Method added in change https://github.com/chovanecm/sacred/issues/4 # The same as Run.log_scalar return self.current_run.log_scalar(name, value, step)
[ "def", "log_scalar", "(", "self", ",", "name", ",", "value", ",", "step", "=", "None", ")", ":", "# Method added in change https://github.com/chovanecm/sacred/issues/4", "# The same as Run.log_scalar", "return", "self", ".", "current_run", ".", "log_scalar", "(", "name", ",", "value", ",", "step", ")" ]
Add a new measurement. The measurement will be processed by the MongoDB* observer during a heartbeat event. Other observers are not yet supported. :param name: The name of the metric, e.g. training.loss :param value: The measured value :param step: The step number (integer), e.g. the iteration number If not specified, an internal counter for each metric is used, incremented by one.
[ "Add", "a", "new", "measurement", "." ]
python
train
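A hedged sketch of calling the metrics API from inside an experiment's main function; the metric name follows the training.loss convention given in the docstring:

from sacred import Experiment

ex = Experiment('demo')

@ex.main
def run():
    for step, loss in enumerate([0.9, 0.5, 0.2]):
        ex.log_scalar('training.loss', loss, step)

ex.run()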
dpgaspar/Flask-AppBuilder
flask_appbuilder/api/__init__.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/api/__init__.py#L640-L649
def _label_columns_json(self, cols=None): """ Prepares dict with labels to be JSON serializable """ ret = {} cols = cols or [] d = {k: v for (k, v) in self.label_columns.items() if k in cols} for key, value in d.items(): ret[key] = as_unicode(_(value).encode("UTF-8")) return ret
[ "def", "_label_columns_json", "(", "self", ",", "cols", "=", "None", ")", ":", "ret", "=", "{", "}", "cols", "=", "cols", "or", "[", "]", "d", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "self", ".", "label_columns", ".", "items", "(", ")", "if", "k", "in", "cols", "}", "for", "key", ",", "value", "in", "d", ".", "items", "(", ")", ":", "ret", "[", "key", "]", "=", "as_unicode", "(", "_", "(", "value", ")", ".", "encode", "(", "\"UTF-8\"", ")", ")", "return", "ret" ]
Prepares dict with labels to be JSON serializable
[ "Prepares", "dict", "with", "labels", "to", "be", "JSON", "serializable" ]
python
train
inasafe/inasafe
safe/utilities/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/utilities.py#L65-L111
def get_error_message(exception, context=None, suggestion=None): """Convert exception into an ErrorMessage containing a stack trace. :param exception: Exception object. :type exception: Exception :param context: Optional context message. :type context: str :param suggestion: Optional suggestion. :type suggestion: str .. see also:: https://github.com/inasafe/inasafe/issues/577 :returns: An error message with stack trace info suitable for display. :rtype: ErrorMessage """ name, trace = humanise_exception(exception) problem = m.Message(name) if exception is None or exception == '': problem.append = m.Text(tr('No details provided')) else: if hasattr(exception, 'message') and \ isinstance(exception.message, Message): problem.append = m.Text(str(exception.message.message)) else: problem.append = m.Text(str(exception)) suggestion = suggestion if suggestion is None and hasattr(exception, 'suggestion'): suggestion = exception.suggestion error_message = ErrorMessage( problem, detail=context, suggestion=suggestion, traceback=trace ) args = exception.args for arg in args: error_message.details.append(arg) return error_message
[ "def", "get_error_message", "(", "exception", ",", "context", "=", "None", ",", "suggestion", "=", "None", ")", ":", "name", ",", "trace", "=", "humanise_exception", "(", "exception", ")", "problem", "=", "m", ".", "Message", "(", "name", ")", "if", "exception", "is", "None", "or", "exception", "==", "''", ":", "problem", ".", "append", "=", "m", ".", "Text", "(", "tr", "(", "'No details provided'", ")", ")", "else", ":", "if", "hasattr", "(", "exception", ",", "'message'", ")", "and", "isinstance", "(", "exception", ".", "message", ",", "Message", ")", ":", "problem", ".", "append", "=", "m", ".", "Text", "(", "str", "(", "exception", ".", "message", ".", "message", ")", ")", "else", ":", "problem", ".", "append", "=", "m", ".", "Text", "(", "str", "(", "exception", ")", ")", "suggestion", "=", "suggestion", "if", "suggestion", "is", "None", "and", "hasattr", "(", "exception", ",", "'suggestion'", ")", ":", "suggestion", "=", "exception", ".", "suggestion", "error_message", "=", "ErrorMessage", "(", "problem", ",", "detail", "=", "context", ",", "suggestion", "=", "suggestion", ",", "traceback", "=", "trace", ")", "args", "=", "exception", ".", "args", "for", "arg", "in", "args", ":", "error_message", ".", "details", ".", "append", "(", "arg", ")", "return", "error_message" ]
Convert exception into an ErrorMessage containing a stack trace. :param exception: Exception object. :type exception: Exception :param context: Optional context message. :type context: str :param suggestion: Optional suggestion. :type suggestion: str .. see also:: https://github.com/inasafe/inasafe/issues/577 :returns: An error message with stack trace info suitable for display. :rtype: ErrorMessage
[ "Convert", "exception", "into", "an", "ErrorMessage", "containing", "a", "stack", "trace", "." ]
python
train
tanghaibao/jcvi
jcvi/graphics/graph.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/graph.py#L18-L22
def make_sequence(seq, name="S"): """ Make unique nodes for sequence graph. """ return ["{}_{}_{}".format(name, i, x) for i, x in enumerate(seq)]
[ "def", "make_sequence", "(", "seq", ",", "name", "=", "\"S\"", ")", ":", "return", "[", "\"{}_{}_{}\"", ".", "format", "(", "name", ",", "i", ",", "x", ")", "for", "i", ",", "x", "in", "enumerate", "(", "seq", ")", "]" ]
Make unique nodes for sequence graph.
[ "Make", "unique", "nodes", "for", "sequence", "graph", "." ]
python
train
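The node ids embed the name, the position, and the base, which is what makes them unique. Assuming make_sequence from the module above is in scope:

print(make_sequence('ACG', name='S'))
# ['S_0_A', 'S_1_C', 'S_2_G']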
lwcook/horsetail-matching
horsetailmatching/densitymatching.py
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/densitymatching.py#L152-L165
def getPDF(self): '''Function that gets vectors of the pdf and target at the last design evaluated. :return: tuple of q values, pdf values, target values ''' if hasattr(self, '_qplot'): return self._qplot, self._hplot, self._tplot else: raise ValueError('''The metric has not been evaluated at any design point so the PDF cannot get obtained''')
[ "def", "getPDF", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_qplot'", ")", ":", "return", "self", ".", "_qplot", ",", "self", ".", "_hplot", ",", "self", ".", "_tplot", "else", ":", "raise", "ValueError", "(", "'''The metric has not been evaluated at any\n design point so the PDF cannot get obtained'''", ")" ]
Function that gets vectors of the pdf and target at the last design evaluated. :return: tuple of q values, pdf values, target values
[ "Function", "that", "gets", "vectors", "of", "the", "pdf", "and", "target", "at", "the", "last", "design", "evaluated", "." ]
python
train
bmuller/kademlia
kademlia/network.py
https://github.com/bmuller/kademlia/blob/4a8d445c9ee8f3ca10f56107e4445daed4933c8a/kademlia/network.py#L214-L225
def load_state(cls, fname): """ Load the state of this node (the alpha/ksize/id/immediate neighbors) from a cache file with the given fname. """ log.info("Loading state from %s", fname) with open(fname, 'rb') as file: data = pickle.load(file) svr = Server(data['ksize'], data['alpha'], data['id']) if data['neighbors']: svr.bootstrap(data['neighbors']) return svr
[ "def", "load_state", "(", "cls", ",", "fname", ")", ":", "log", ".", "info", "(", "\"Loading state from %s\"", ",", "fname", ")", "with", "open", "(", "fname", ",", "'rb'", ")", "as", "file", ":", "data", "=", "pickle", ".", "load", "(", "file", ")", "svr", "=", "Server", "(", "data", "[", "'ksize'", "]", ",", "data", "[", "'alpha'", "]", ",", "data", "[", "'id'", "]", ")", "if", "data", "[", "'neighbors'", "]", ":", "svr", ".", "bootstrap", "(", "data", "[", "'neighbors'", "]", ")", "return", "svr" ]
Load the state of this node (the alpha/ksize/id/immediate neighbors) from a cache file with the given fname.
[ "Load", "the", "state", "of", "this", "node", "(", "the", "alpha", "/", "ksize", "/", "id", "/", "immediate", "neighbors", ")", "from", "a", "cache", "file", "with", "the", "given", "fname", "." ]
python
train
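A hedged sketch of restoring a node, assuming a cache file previously written by the matching save_state exists; the file name is a placeholder:

from kademlia.network import Server

server = Server.load_state('node.state')  # also re-bootstraps saved neighbors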
AtomHash/evernode
evernode/classes/base_response.py
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/base_response.py#L35-L39
def message(self, message=None): """ Set response message """ if message is not None: self.response_model.message = message return self.response_model.message
[ "def", "message", "(", "self", ",", "message", "=", "None", ")", ":", "if", "message", "is", "not", "None", ":", "self", ".", "response_model", ".", "message", "=", "message", "return", "self", ".", "response_model", ".", "message" ]
Set and return the response message
[ "Set", "and", "return", "the", "response", "message" ]
python
train
bwohlberg/sporco
sporco/admm/ccmod.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/ccmod.py#L895-L946
def ConvCnstrMOD(*args, **kwargs): """A wrapper function that dynamically defines a class derived from one of the implementations of the Convolutional Constrained MOD problems, and returns an object instantiated with the provided parameters. The wrapper is designed to allow the appropriate object to be created by calling this function using the same syntax as would be used if it were a class. The specific implementation is selected by use of an additional keyword argument 'method'. Valid values are: - ``'ism'`` : Use the implementation defined in :class:`.ConvCnstrMOD_IterSM`. This method works well for a small number of training images, but is very slow for larger training sets. - ``'cg'`` : Use the implementation defined in :class:`.ConvCnstrMOD_CG`. This method is slower than ``'ism'`` for small training sets, but has better run time scaling as the training set grows. - ``'cns'`` : Use the implementation defined in :class:`.ConvCnstrMOD_Consensus`. This method is the best choice for large training sets. The default value is ``'cns'``. """ # Extract method selection argument or set default if 'method' in kwargs: method = kwargs['method'] del kwargs['method'] else: method = 'cns' # Assign base class depending on method selection argument if method == 'ism': base = ConvCnstrMOD_IterSM elif method == 'cg': base = ConvCnstrMOD_CG elif method == 'cns': base = ConvCnstrMOD_Consensus else: raise ValueError('Unknown ConvCnstrMOD solver method %s' % method) # Nested class with dynamically determined inheritance class ConvCnstrMOD(base): def __init__(self, *args, **kwargs): super(ConvCnstrMOD, self).__init__(*args, **kwargs) # Allow pickling of objects of type ConvCnstrMOD _fix_dynamic_class_lookup(ConvCnstrMOD, method) # Return object of the nested class type return ConvCnstrMOD(*args, **kwargs)
[ "def", "ConvCnstrMOD", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Extract method selection argument or set default", "if", "'method'", "in", "kwargs", ":", "method", "=", "kwargs", "[", "'method'", "]", "del", "kwargs", "[", "'method'", "]", "else", ":", "method", "=", "'cns'", "# Assign base class depending on method selection argument", "if", "method", "==", "'ism'", ":", "base", "=", "ConvCnstrMOD_IterSM", "elif", "method", "==", "'cg'", ":", "base", "=", "ConvCnstrMOD_CG", "elif", "method", "==", "'cns'", ":", "base", "=", "ConvCnstrMOD_Consensus", "else", ":", "raise", "ValueError", "(", "'Unknown ConvCnstrMOD solver method %s'", "%", "method", ")", "# Nested class with dynamically determined inheritance", "class", "ConvCnstrMOD", "(", "base", ")", ":", "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "ConvCnstrMOD", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# Allow pickling of objects of type ConvCnstrMOD", "_fix_dynamic_class_lookup", "(", "ConvCnstrMOD", ",", "method", ")", "# Return object of the nested class type", "return", "ConvCnstrMOD", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
A wrapper function that dynamically defines a class derived from one of the implementations of the Convolutional Constrained MOD problems, and returns an object instantiated with the provided parameters. The wrapper is designed to allow the appropriate object to be created by calling this function using the same syntax as would be used if it were a class. The specific implementation is selected by use of an additional keyword argument 'method'. Valid values are: - ``'ism'`` : Use the implementation defined in :class:`.ConvCnstrMOD_IterSM`. This method works well for a small number of training images, but is very slow for larger training sets. - ``'cg'`` : Use the implementation defined in :class:`.ConvCnstrMOD_CG`. This method is slower than ``'ism'`` for small training sets, but has better run time scaling as the training set grows. - ``'cns'`` : Use the implementation defined in :class:`.ConvCnstrMOD_Consensus`. This method is the best choice for large training sets. The default value is ``'cns'``.
[ "A", "wrapper", "function", "that", "dynamically", "defines", "a", "class", "derived", "from", "one", "of", "the", "implementations", "of", "the", "Convolutional", "Constrained", "MOD", "problems", "and", "returns", "an", "object", "instantiated", "with", "the", "provided", "parameters", ".", "The", "wrapper", "is", "designed", "to", "allow", "the", "appropriate", "object", "to", "be", "created", "by", "calling", "this", "function", "using", "the", "same", "syntax", "as", "would", "be", "used", "if", "it", "were", "a", "class", ".", "The", "specific", "implementation", "is", "selected", "by", "use", "of", "an", "additional", "keyword", "argument", "method", ".", "Valid", "values", "are", ":" ]
python
train
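The dispatch pattern itself (pop a 'method' keyword, pick a base, derive a class dynamically) can be sketched standalone with stand-in base classes, so it runs without sporco:

def make_solver(*args, **kwargs):
    method = kwargs.pop('method', 'cns')  # extract selection or use default
    bases = {'ism': object, 'cg': object, 'cns': object}  # stand-in classes
    if method not in bases:
        raise ValueError('Unknown solver method %s' % method)

    class Solver(bases[method]):  # dynamically determined inheritance
        pass

    return Solver(*args, **kwargs)

print(type(make_solver(method='cg')))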
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/pyapisummary.py
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L244-L265
def _format_import_example(self, task_class): """Generate nodes that show a code sample demonstrating how to import the task class. Parameters ---------- task_class : ``lsst.pipe.base.Task``-type The Task class. Returns ------- nodes : `list` of docutils nodes Docutils nodes showing a class import statement. """ code = 'from {0.__module__} import {0.__name__}'.format(task_class) # This is a bare-bones version of what Sphinx's code-block directive # does. The 'language' attr triggers the pygments treatment. literal_node = nodes.literal_block(code, code) literal_node['language'] = 'py' return [literal_node]
[ "def", "_format_import_example", "(", "self", ",", "task_class", ")", ":", "code", "=", "'from {0.__module__} import {0.__name__}'", ".", "format", "(", "task_class", ")", "# This is a bare-bones version of what Sphinx's code-block directive", "# does. The 'language' attr triggers the pygments treatment.", "literal_node", "=", "nodes", ".", "literal_block", "(", "code", ",", "code", ")", "literal_node", "[", "'language'", "]", "=", "'py'", "return", "[", "literal_node", "]" ]
Generate nodes that show a code sample demonstrating how to import the task class. Parameters ---------- task_class : ``lsst.pipe.base.Task``-type The Task class. Returns ------- nodes : `list` of docutils nodes Docutils nodes showing a class import statement.
[ "Generate", "nodes", "that", "show", "a", "code", "sample", "demonstrating", "how", "to", "import", "the", "task", "class", "." ]
python
train
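The bare-bones docutils construction the comment describes, as a standalone sketch; the import statement being rendered is a placeholder:

from docutils import nodes

code = 'from mypkg.tasks import MyTask'
literal_node = nodes.literal_block(code, code)
literal_node['language'] = 'py'  # triggers the pygments treatment in Sphinx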
jhuapl-boss/intern
intern/remote/boss/remote.py
https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/remote/boss/remote.py#L573-L593
def add_user( self, user, first_name=None, last_name=None, email=None, password=None ): """ Add a new user. Args: user (string): User name. first_name (optional[string]): User's first name. Defaults to None. last_name (optional[string]): User's last name. Defaults to None. email: (optional[string]): User's email address. Defaults to None. password: (optional[string]): User's password. Defaults to None. Raises: requests.HTTPError on failure. """ self.project_service.set_auth(self._token_project) self.project_service.add_user( user, first_name, last_name, email, password)
[ "def", "add_user", "(", "self", ",", "user", ",", "first_name", "=", "None", ",", "last_name", "=", "None", ",", "email", "=", "None", ",", "password", "=", "None", ")", ":", "self", ".", "project_service", ".", "set_auth", "(", "self", ".", "_token_project", ")", "self", ".", "project_service", ".", "add_user", "(", "user", ",", "first_name", ",", "last_name", ",", "email", ",", "password", ")" ]
Add a new user. Args: user (string): User name. first_name (optional[string]): User's first name. Defaults to None. last_name (optional[string]): User's last name. Defaults to None. email: (optional[string]): User's email address. Defaults to None. password: (optional[string]): User's password. Defaults to None. Raises: requests.HTTPError on failure.
[ "Add", "a", "new", "user", "." ]
python
train
mattrobenolt/django-sudo
sudo/utils.py
https://github.com/mattrobenolt/django-sudo/blob/089e21a88bc3ebf9d76ea706f26707d2e4f3f729/sudo/utils.py#L70-L87
def is_safe_url(url, host=None): """ Return ``True`` if the url is a safe redirection (i.e. it doesn't point to a different host and uses a safe scheme). Always returns ``False`` on an empty url. """ if url is not None: url = url.strip() if not url: return False if six.PY2: # pragma: nocover try: url = force_text(url) except UnicodeDecodeError: return False # Chrome treats \ completely as / in paths but it could be part of some # basic auth credentials so we need to check both URLs. return _is_safe_url(url, host) and _is_safe_url(url.replace('\\', '/'), host)
[ "def", "is_safe_url", "(", "url", ",", "host", "=", "None", ")", ":", "if", "url", "is", "not", "None", ":", "url", "=", "url", ".", "strip", "(", ")", "if", "not", "url", ":", "return", "False", "if", "six", ".", "PY2", ":", "# pragma: nocover", "try", ":", "url", "=", "force_text", "(", "url", ")", "except", "UnicodeDecodeError", ":", "return", "False", "# Chrome treats \\ completely as / in paths but it could be part of some", "# basic auth credentials so we need to check both URLs.", "return", "_is_safe_url", "(", "url", ",", "host", ")", "and", "_is_safe_url", "(", "url", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", ",", "host", ")" ]
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to a different host and uses a safe scheme). Always returns ``False`` on an empty url.
[ "Return", "True", "if", "the", "url", "is", "a", "safe", "redirection", "(", "i", ".", "e", ".", "it", "doesn", "t", "point", "to", "a", "different", "host", "and", "uses", "a", "safe", "scheme", ")", ".", "Always", "returns", "False", "on", "an", "empty", "url", "." ]
python
train
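Assuming is_safe_url is imported from sudo.utils, a same-host path passes while protocol-relative and backslash-style redirects do not (the backslash check exists because browsers treat \ as / in paths):

from sudo.utils import is_safe_url

print(is_safe_url('/accounts/profile/', host='example.com'))  # True
print(is_safe_url('//evil.com/steal', host='example.com'))    # False
print(is_safe_url('\\\\evil.com', host='example.com'))        # False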
python-diamond/Diamond
src/diamond/handler/signalfx.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/signalfx.py#L71-L84
def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(SignalfxHandler, self).get_default_config_help() config.update({ 'url': 'Where to send metrics', 'batch': 'How many to store before sending', 'filter_metrics_regex': 'Comma separated collector:regex filters', 'auth_token': 'Org API token to use when sending metrics', }) return config
[ "def", "get_default_config_help", "(", "self", ")", ":", "config", "=", "super", "(", "SignalfxHandler", ",", "self", ")", ".", "get_default_config_help", "(", ")", "config", ".", "update", "(", "{", "'url'", ":", "'Where to send metrics'", ",", "'batch'", ":", "'How many to store before sending'", ",", "'filter_metrics_regex'", ":", "'Comma separated collector:regex filters'", ",", "'auth_token'", ":", "'Org API token to use when sending metrics'", ",", "}", ")", "return", "config" ]
Returns the help text for the configuration options for this handler
[ "Returns", "the", "help", "text", "for", "the", "configuration", "options", "for", "this", "handler" ]
python
train
icgood/pysasl
pysasl/__init__.py
https://github.com/icgood/pysasl/blob/241bdd349577cc99f05c4239755c307e6a46018c/pysasl/__init__.py#L326-L338
def plaintext(cls): """Uses only authentication mechanisms that provide the credentials in un-hashed form, typically meaning :attr:`~pysasl.AuthenticationCredentials.has_secret` is True. Returns: A new :class:`SASLAuth` object. """ builtin_mechs = cls._get_builtin_mechanisms() plaintext_mechs = [mech for _, mech in builtin_mechs.items() if mech.insecure and mech.priority is not None] return SASLAuth(plaintext_mechs)
[ "def", "plaintext", "(", "cls", ")", ":", "builtin_mechs", "=", "cls", ".", "_get_builtin_mechanisms", "(", ")", "plaintext_mechs", "=", "[", "mech", "for", "_", ",", "mech", "in", "builtin_mechs", ".", "items", "(", ")", "if", "mech", ".", "insecure", "and", "mech", ".", "priority", "is", "not", "None", "]", "return", "SASLAuth", "(", "plaintext_mechs", ")" ]
Uses only authentication mechanisms that provide the credentials in un-hashed form, typically meaning :attr:`~pysasl.AuthenticationCredentials.has_secret` is True. Returns: A new :class:`SASLAuth` object.
[ "Uses", "only", "authentication", "mechanisms", "that", "provide", "the", "credentials", "in", "un", "-", "hashed", "form", "typically", "meaning", ":", "attr", ":", "~pysasl", ".", "AuthenticationCredentials", ".", "has_secret", "is", "True", "." ]
python
train
newville/wxmplot
wxmplot/config.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/config.py#L741-L756
def set_logscale(self, xscale='linear', yscale='linear', delay_draw=False): "set log or linear scale for x, y axis" self.xscale = xscale self.yscale = yscale for axes in self.canvas.figure.get_axes(): try: axes.set_yscale(yscale, basey=10) except: axes.set_yscale('linear') try: axes.set_xscale(xscale, basex=10) except: axes.set_xscale('linear') if not delay_draw: self.process_data()
[ "def", "set_logscale", "(", "self", ",", "xscale", "=", "'linear'", ",", "yscale", "=", "'linear'", ",", "delay_draw", "=", "False", ")", ":", "self", ".", "xscale", "=", "xscale", "self", ".", "yscale", "=", "yscale", "for", "axes", "in", "self", ".", "canvas", ".", "figure", ".", "get_axes", "(", ")", ":", "try", ":", "axes", ".", "set_yscale", "(", "yscale", ",", "basey", "=", "10", ")", "except", ":", "axes", ".", "set_yscale", "(", "'linear'", ")", "try", ":", "axes", ".", "set_xscale", "(", "xscale", ",", "basex", "=", "10", ")", "except", ":", "axes", ".", "set_xscale", "(", "'linear'", ")", "if", "not", "delay_draw", ":", "self", ".", "process_data", "(", ")" ]
set log or linear scale for x, y axis
[ "set", "log", "or", "linear", "scale", "for", "x", "y", "axis" ]
python
train
klavinslab/coral
coral/analysis/_sequence/melting_temp.py
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequence/melting_temp.py#L22-L144
def tm(seq, dna_conc=50, salt_conc=50, parameters='cloning'):
    '''Calculate nearest-neighbor melting temperature (Tm).

    :param seq: Sequence for which to calculate the tm.
    :type seq: coral.DNA
    :param dna_conc: DNA concentration in nM.
    :type dna_conc: float
    :param salt_conc: Salt concentration in mM.
    :type salt_conc: float
    :param parameters: Nearest-neighbor parameter set. Available options:
                       'breslauer': Breslauer86 parameters
                       'sugimoto': Sugimoto96 parameters
                       'santalucia96': SantaLucia96 parameters
                       'santalucia98': SantaLucia98 parameters
                       'cloning': breslauer without corrections
                       'cloning_sl98': santalucia98 fit to 'cloning'
    :type parameters: str
    :returns: Melting temperature (Tm) in °C.
    :rtype: float
    :raises: ValueError if parameter argument is invalid.

    '''
    if parameters == 'breslauer':
        params = tm_params.BRESLAUER
    elif parameters == 'sugimoto':
        params = tm_params.SUGIMOTO
    elif parameters == 'santalucia96':
        params = tm_params.SANTALUCIA96
    elif parameters == 'santalucia98' or parameters == 'cloning_sl98':
        params = tm_params.SANTALUCIA98
    elif parameters == 'cloning':
        params = tm_params.CLONING
    else:
        raise ValueError('Unsupported parameter set.')
    # Thermodynamic parameters
    pars = {'delta_h': params['delta_h'], 'delta_s': params['delta_s']}
    pars_error = {'delta_h': params['delta_h_err'],
                  'delta_s': params['delta_s_err']}
    # Error corrections - done first for use of reverse_complement parameters
    if parameters == 'breslauer':
        deltas = breslauer_corrections(seq, pars_error)
    elif parameters == 'sugimoto':
        deltas = breslauer_corrections(seq, pars_error)
    elif parameters == 'santalucia96':
        deltas = breslauer_corrections(seq, pars_error)
    elif parameters == 'santalucia98' or parameters == 'cloning_sl98':
        deltas = santalucia98_corrections(seq, pars_error)
    elif parameters == 'cloning':
        deltas = breslauer_corrections(seq, pars_error)
        deltas[0] += 3.4
        deltas[1] += 12.4
    # Sum up the nearest-neighbor enthalpy and entropy
    seq = str(seq).upper()
    # TODO: catch more cases when alphabets expand
    if 'N' in seq:
        raise ValueError('Can\'t calculate Tm of an N base.')
    new_delt = _pair_deltas(seq, pars)
    deltas[0] += new_delt[0]
    deltas[1] += new_delt[1]
    # Unit corrections
    salt_conc /= 1e3
    dna_conc /= 1e9
    deltas[0] *= 1e3
    # Universal gas constant (R)
    R = 1.9872
    # Supposedly this is what dnamate does, but the output doesn't match theirs
    # melt = (-deltas[0] / (-deltas[1] + R * log(dna_conc / 4.0))) +
    # 16.6 * log(salt_conc) - 273.15
    # return melt
    # Overall equation is supposedly:
    # sum{dH}/(sum{dS} + R ln(dna_conc/b)) - 273.15
    # with salt corrections for the whole term (or for santalucia98,
    # salt corrections added to the dS term.
    # So far, implementing this as described does not give results that match
    # any calculator but Biopython's
    if parameters == 'breslauer' or parameters == 'cloning':
        numerator = -deltas[0]
        # Modified dna_conc denominator
        denominator = (-deltas[1]) + R * log(dna_conc / 16.0)
        # Modified Schildkraut-Lifson equation adjustment
        salt_adjustment = 16.6 * log(salt_conc) / log(10.0)
        melt = numerator / denominator + salt_adjustment - 273.15
    elif parameters == 'santalucia98' or parameters == 'cloning_sl98':
        # TODO: dna_conc should be divided by 2.0 when dna_conc >> template
        # (like PCR)
        numerator = -deltas[0]
        # SantaLucia 98 salt correction
        salt_adjustment = 0.368 * (len(seq) - 1) * log(salt_conc)
        denominator = -deltas[1] + salt_adjustment + R * log(dna_conc / 4.0)
        melt = -deltas[0] / denominator - 273.15
    elif parameters == 'santalucia96':
        # TODO: find a way to test whether the code below matches another
        # algorithm. It appears to be correct, but need to test it.
        numerator = -deltas[0]
        denominator = -deltas[1] + R * log(dna_conc / 4.0)
        # SantaLucia 96 salt correction
        salt_adjustment = 12.5 * log10(salt_conc)
        melt = numerator / denominator + salt_adjustment - 273.15
    elif parameters == 'sugimoto':
        # TODO: the stuff below is untested and probably wrong
        numerator = -deltas[0]
        denominator = -deltas[1] + R * log(dna_conc / 4.0)
        # Sugimoto parameters were fit holding salt concentration constant
        # Salt correction can be chosen / ignored? Remove sugimoto set since
        # it's so similar to santalucia98?
        salt_correction = 16.6 * log10(salt_conc)
        melt = numerator / denominator + salt_correction - 273.15
    if parameters == 'cloning_sl98':
        # Corrections to make santalucia98 method approximate cloning method.
        # May be even better for cloning with Phusion than 'cloning' method
        melt *= 1.27329212575
        melt += -2.55585450119
    return melt
[ "def", "tm", "(", "seq", ",", "dna_conc", "=", "50", ",", "salt_conc", "=", "50", ",", "parameters", "=", "'cloning'", ")", ":", "if", "parameters", "==", "'breslauer'", ":", "params", "=", "tm_params", ".", "BRESLAUER", "elif", "parameters", "==", "'sugimoto'", ":", "params", "=", "tm_params", ".", "SUGIMOTO", "elif", "parameters", "==", "'santalucia96'", ":", "params", "=", "tm_params", ".", "SANTALUCIA96", "elif", "parameters", "==", "'santalucia98'", "or", "parameters", "==", "'cloning_sl98'", ":", "params", "=", "tm_params", ".", "SANTALUCIA98", "elif", "parameters", "==", "'cloning'", ":", "params", "=", "tm_params", ".", "CLONING", "else", ":", "raise", "ValueError", "(", "'Unsupported parameter set.'", ")", "# Thermodynamic parameters", "pars", "=", "{", "'delta_h'", ":", "params", "[", "'delta_h'", "]", ",", "'delta_s'", ":", "params", "[", "'delta_s'", "]", "}", "pars_error", "=", "{", "'delta_h'", ":", "params", "[", "'delta_h_err'", "]", ",", "'delta_s'", ":", "params", "[", "'delta_s_err'", "]", "}", "# Error corrections - done first for use of reverse_complement parameters", "if", "parameters", "==", "'breslauer'", ":", "deltas", "=", "breslauer_corrections", "(", "seq", ",", "pars_error", ")", "elif", "parameters", "==", "'sugimoto'", ":", "deltas", "=", "breslauer_corrections", "(", "seq", ",", "pars_error", ")", "elif", "parameters", "==", "'santalucia96'", ":", "deltas", "=", "breslauer_corrections", "(", "seq", ",", "pars_error", ")", "elif", "parameters", "==", "'santalucia98'", "or", "parameters", "==", "'cloning_sl98'", ":", "deltas", "=", "santalucia98_corrections", "(", "seq", ",", "pars_error", ")", "elif", "parameters", "==", "'cloning'", ":", "deltas", "=", "breslauer_corrections", "(", "seq", ",", "pars_error", ")", "deltas", "[", "0", "]", "+=", "3.4", "deltas", "[", "1", "]", "+=", "12.4", "# Sum up the nearest-neighbor enthalpy and entropy", "seq", "=", "str", "(", "seq", ")", ".", "upper", "(", ")", "# TODO: catch more cases when alphabets expand", "if", "'N'", "in", "seq", ":", "raise", "ValueError", "(", "'Can\\'t calculate Tm of an N base.'", ")", "new_delt", "=", "_pair_deltas", "(", "seq", ",", "pars", ")", "deltas", "[", "0", "]", "+=", "new_delt", "[", "0", "]", "deltas", "[", "1", "]", "+=", "new_delt", "[", "1", "]", "# Unit corrections", "salt_conc", "/=", "1e3", "dna_conc", "/=", "1e9", "deltas", "[", "0", "]", "*=", "1e3", "# Universal gas constant (R)", "R", "=", "1.9872", "# Supposedly this is what dnamate does, but the output doesn't match theirs", "# melt = (-deltas[0] / (-deltas[1] + R * log(dna_conc / 4.0))) +", "# 16.6 * log(salt_conc) - 273.15", "# return melt", "# Overall equation is supposedly:", "# sum{dH}/(sum{dS} + R ln(dna_conc/b)) - 273.15", "# with salt corrections for the whole term (or for santalucia98,", "# salt corrections added to the dS term.", "# So far, implementing this as described does not give results that match", "# any calculator but Biopython's", "if", "parameters", "==", "'breslauer'", "or", "parameters", "==", "'cloning'", ":", "numerator", "=", "-", "deltas", "[", "0", "]", "# Modified dna_conc denominator", "denominator", "=", "(", "-", "deltas", "[", "1", "]", ")", "+", "R", "*", "log", "(", "dna_conc", "/", "16.0", ")", "# Modified Schildkraut-Lifson equation adjustment", "salt_adjustment", "=", "16.6", "*", "log", "(", "salt_conc", ")", "/", "log", "(", "10.0", ")", "melt", "=", "numerator", "/", "denominator", "+", "salt_adjustment", "-", "273.15", "elif", "parameters", "==", "'santalucia98'", "or", 
"'cloning_sl98'", ":", "# TODO: dna_conc should be divided by 2.0 when dna_conc >> template", "# (like PCR)", "numerator", "=", "-", "deltas", "[", "0", "]", "# SantaLucia 98 salt correction", "salt_adjustment", "=", "0.368", "*", "(", "len", "(", "seq", ")", "-", "1", ")", "*", "log", "(", "salt_conc", ")", "denominator", "=", "-", "deltas", "[", "1", "]", "+", "salt_adjustment", "+", "R", "*", "log", "(", "dna_conc", "/", "4.0", ")", "melt", "=", "-", "deltas", "[", "0", "]", "/", "denominator", "-", "273.15", "elif", "parameters", "==", "'santalucia96'", ":", "# TODO: find a way to test whether the code below matches another", "# algorithm. It appears to be correct, but need to test it.", "numerator", "=", "-", "deltas", "[", "0", "]", "denominator", "=", "-", "deltas", "[", "1", "]", "+", "R", "*", "log", "(", "dna_conc", "/", "4.0", ")", "# SantaLucia 96 salt correction", "salt_adjustment", "=", "12.5", "*", "log10", "(", "salt_conc", ")", "melt", "=", "numerator", "/", "denominator", "+", "salt_adjustment", "-", "273.15", "elif", "parameters", "==", "'sugimoto'", ":", "# TODO: the stuff below is untested and probably wrong", "numerator", "=", "-", "deltas", "[", "0", "]", "denominator", "=", "-", "deltas", "[", "1", "]", "+", "R", "*", "log", "(", "dna_conc", "/", "4.0", ")", "# Sugimoto parameters were fit holding salt concentration constant", "# Salt correction can be chosen / ignored? Remove sugimoto set since", "# it's so similar to santalucia98?", "salt_correction", "=", "16.6", "*", "log10", "(", "salt_conc", ")", "melt", "=", "numerator", "/", "denominator", "+", "salt_correction", "-", "273.15", "if", "parameters", "==", "'cloning_sl98'", ":", "# Corrections to make santalucia98 method approximate cloning method.", "# May be even better for cloning with Phusion than 'cloning' method", "melt", "*=", "1.27329212575", "melt", "+=", "-", "2.55585450119", "return", "melt" ]
Calculate nearest-neighbor melting temperature (Tm). :param seq: Sequence for which to calculate the tm. :type seq: coral.DNA :param dna_conc: DNA concentration in nM. :type dna_conc: float :param salt_conc: Salt concentration in mM. :type salt_conc: float :param parameters: Nearest-neighbor parameter set. Available options: 'breslauer': Breslauer86 parameters 'sugimoto': Sugimoto96 parameters 'santalucia96': SantaLucia96 parameters 'santalucia98': SantaLucia98 parameters 'cloning': breslauer without corrections 'cloning_sl98': santalucia98 fit to 'cloning' :type parameters: str :returns: Melting temperature (Tm) in °C. :rtype: float :raises: ValueError if parameter argument is invalid.
[ "Calculate", "nearest", "-", "neighbor", "melting", "temperature", "(", "Tm", ")", "." ]
python
train
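A quick self-contained sketch of the core equation the `tm` record above implements: Tm = dH / (dS + R * ln(dna_conc / 4)) - 273.15, plus a log10 salt term. The two-entry parameter table below is invented for illustration; it is not coral's real Breslauer/SantaLucia data.

from math import log, log10

# Toy nearest-neighbor table: pair -> (delta_H in cal/mol, delta_S in cal/mol/K).
# Values are made up for demonstration only.
TOY_PARAMS = {'AA': (-9100.0, -24.0), 'AT': (-8600.0, -23.9)}

def toy_tm(seq, dna_conc=50e-9, salt_conc=50e-3):
    delta_h = delta_s = 0.0
    for i in range(len(seq) - 1):
        dh, ds = TOY_PARAMS[seq[i:i + 2]]
        delta_h += dh
        delta_s += ds
    R = 1.9872  # universal gas constant in cal/(mol*K), as in the record above
    melt = delta_h / (delta_s + R * log(dna_conc / 4.0)) - 273.15
    return melt + 16.6 * log10(salt_conc)  # Schildkraut-Lifson-style salt term

print(round(toy_tm('AAAT'), 2))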
eraclitux/ipcampy
ipcampy/foscam.py
https://github.com/eraclitux/ipcampy/blob/bffd1c4df9006705cffa5b83a090b0db90cbcbcf/ipcampy/foscam.py#L55-L69
def move(self, pos): """Move cam to given preset position. pos - must be within 1 to 16. Returns: CamException in case of errors, "ok" otherwise.""" try: payload = {"address":self.address, "user": self.user, "pwd": self.pswd, "pos": map_position(pos)} resp = requests.get( "http://{address}/decoder_control.cgi?command={pos}&user={user}&pwd={pwd}".format(**payload) ) except KeyError: raise CamException("Position must be within 1 to 16.") if resp.status_code != 200: raise CamException("Unauthorized. Wrong user or password.") return "ok"
[ "def", "move", "(", "self", ",", "pos", ")", ":", "try", ":", "payload", "=", "{", "\"address\"", ":", "self", ".", "address", ",", "\"user\"", ":", "self", ".", "user", ",", "\"pwd\"", ":", "self", ".", "pswd", ",", "\"pos\"", ":", "map_position", "(", "pos", ")", "}", "resp", "=", "requests", ".", "get", "(", "\"http://{address}/decoder_control.cgi?command={pos}&user={user}&pwd={pwd}\"", ".", "format", "(", "*", "*", "payload", ")", ")", "except", "KeyError", ":", "raise", "CamException", "(", "\"Position must be within 1 to 16.\"", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "raise", "CamException", "(", "\"Unauthorized. Wrong user or password.\"", ")", "return", "\"ok\"" ]
Move cam to given preset position.
pos - must be within 1 to 16.
Raises CamException in case of errors; returns "ok" otherwise.
[ "Move", "cam", "to", "given", "preset", "position", ".", "pos", "-", "must", "be", "within", "1", "to", "16", ".", "Returns", ":", "CamException", "in", "case", "of", "errors", "ok", "otherwise", "." ]
python
train
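A hypothetical sketch of the two pieces `move()` relies on: the preset-to-command mapping and the decoder_control URL. The mapping below (preset n -> command 29 + 2n, i.e. 31, 33, 35, ...) is an assumption about Foscam's CGI codes, not something stated in this record.

def map_position(pos):
    # Assumed Foscam "go to preset" codes; raises KeyError outside 1..16,
    # which move() converts into a CamException.
    commands = {n: 29 + 2 * n for n in range(1, 17)}
    return commands[pos]

payload = {"address": "192.168.1.10:80", "user": "admin",
           "pwd": "secret", "pos": map_position(3)}
url = ("http://{address}/decoder_control.cgi"
       "?command={pos}&user={user}&pwd={pwd}".format(**payload))
print(url)  # the GET request move() would issue via requests.get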
bokeh/bokeh
bokeh/application/handlers/code_runner.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/application/handlers/code_runner.py#L158-L196
def run(self, module, post_check): ''' Execute the configured source code in a module and run any post checks. Args: module (Module) : a module to execute the configured code in. post_check(callable) : a function that can raise an exception if expected post-conditions are not met after code execution. ''' try: # Simulate the sys.path behaviour decribed here: # # https://docs.python.org/2/library/sys.html#sys.path _cwd = os.getcwd() _sys_path = list(sys.path) _sys_argv = list(sys.argv) sys.path.insert(0, os.path.dirname(self._path)) sys.argv = [os.path.basename(self._path)] + self._argv exec(self._code, module.__dict__) post_check() except Exception as e: self._failed = True self._error_detail = traceback.format_exc() _exc_type, _exc_value, exc_traceback = sys.exc_info() filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1] self._error = "%s\nFile \"%s\", line %d, in %s:\n%s" % (str(e), os.path.basename(filename), line_number, func, txt) finally: # undo sys.path, CWD fixups os.chdir(_cwd) sys.path = _sys_path sys.argv = _sys_argv self.ran = True
[ "def", "run", "(", "self", ",", "module", ",", "post_check", ")", ":", "try", ":", "# Simulate the sys.path behaviour decribed here:", "#", "# https://docs.python.org/2/library/sys.html#sys.path", "_cwd", "=", "os", ".", "getcwd", "(", ")", "_sys_path", "=", "list", "(", "sys", ".", "path", ")", "_sys_argv", "=", "list", "(", "sys", ".", "argv", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "os", ".", "path", ".", "dirname", "(", "self", ".", "_path", ")", ")", "sys", ".", "argv", "=", "[", "os", ".", "path", ".", "basename", "(", "self", ".", "_path", ")", "]", "+", "self", ".", "_argv", "exec", "(", "self", ".", "_code", ",", "module", ".", "__dict__", ")", "post_check", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "_failed", "=", "True", "self", ".", "_error_detail", "=", "traceback", ".", "format_exc", "(", ")", "_exc_type", ",", "_exc_value", ",", "exc_traceback", "=", "sys", ".", "exc_info", "(", ")", "filename", ",", "line_number", ",", "func", ",", "txt", "=", "traceback", ".", "extract_tb", "(", "exc_traceback", ")", "[", "-", "1", "]", "self", ".", "_error", "=", "\"%s\\nFile \\\"%s\\\", line %d, in %s:\\n%s\"", "%", "(", "str", "(", "e", ")", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "line_number", ",", "func", ",", "txt", ")", "finally", ":", "# undo sys.path, CWD fixups", "os", ".", "chdir", "(", "_cwd", ")", "sys", ".", "path", "=", "_sys_path", "sys", ".", "argv", "=", "_sys_argv", "self", ".", "ran", "=", "True" ]
Execute the configured source code in a module and run any post checks. Args: module (Module) : a module to execute the configured code in. post_check(callable) : a function that can raise an exception if expected post-conditions are not met after code execution.
[ "Execute", "the", "configured", "source", "code", "in", "a", "module", "and", "run", "any", "post", "checks", "." ]
python
train
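The interesting part of `CodeRunner.run` above is the save/exec/restore dance around `sys.path`, `sys.argv`, and the working directory. A minimal standalone sketch of that pattern (not Bokeh's actual class):

import os
import sys
from types import ModuleType

def run_code_isolated(code, path, argv=()):
    # Save interpreter state, simulate "python path/to/app.py", restore.
    _cwd, _sys_path, _sys_argv = os.getcwd(), list(sys.path), list(sys.argv)
    module = ModuleType("app_module")
    try:
        sys.path.insert(0, os.path.dirname(path))
        sys.argv = [os.path.basename(path)] + list(argv)
        exec(code, module.__dict__)
    finally:
        os.chdir(_cwd)
        sys.path = _sys_path
        sys.argv = _sys_argv
    return module

mod = run_code_isolated("x = 1 + 1", "/tmp/app.py")
print(mod.x)  # 2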
emdb-empiar/ahds
ahds/grammar.py
https://github.com/emdb-empiar/ahds/blob/6a752f6806d4f62155cd2e1194de8aabe7195e0f/ahds/grammar.py#L307-L316
def get_parsed_data(fn, *args, **kwargs): """All above functions as a single function :param str fn: file name :return list parsed_data: structured metadata """ file_format = detect_format(fn, *args, **kwargs) data = get_header(fn, file_format, *args, **kwargs) parsed_data = parse_header(data, *args, **kwargs) return parsed_data
[ "def", "get_parsed_data", "(", "fn", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "file_format", "=", "detect_format", "(", "fn", ",", "*", "args", ",", "*", "*", "kwargs", ")", "data", "=", "get_header", "(", "fn", ",", "file_format", ",", "*", "args", ",", "*", "*", "kwargs", ")", "parsed_data", "=", "parse_header", "(", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "parsed_data" ]
Runs all of the above functions as a single function
:param str fn: file name
:return list parsed_data: structured metadata
[ "All", "above", "functions", "as", "a", "single", "function", ":", "param", "str", "fn", ":", "file", "name", ":", "return", "list", "parsed_data", ":", "structured", "metadata" ]
python
train
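Assuming the package is installed and the import path matches this record's module layout, usage is a single call. `header.am` is a placeholder AmiraMesh file name, not a file shipped with ahds.

from ahds.grammar import get_parsed_data  # import path assumed from this record

parsed_data = get_parsed_data("header.am")  # placeholder file name
for block in parsed_data:
    print(block)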
sernst/cauldron
cauldron/templating.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/templating.py#L148-L165
def render_template(template_name: str, **kwargs): """ Renders the template file with the given filename from within Cauldron's template environment folder. :param template_name: The filename of the template to render. Any path elements should be relative to Cauldron's root template folder. :param kwargs: Any elements passed to Jinja2 for rendering the template :return: The rendered string """ return get_environment().get_template(template_name).render( cauldron_template_uid=make_template_uid(), **kwargs )
[ "def", "render_template", "(", "template_name", ":", "str", ",", "*", "*", "kwargs", ")", ":", "return", "get_environment", "(", ")", ".", "get_template", "(", "template_name", ")", ".", "render", "(", "cauldron_template_uid", "=", "make_template_uid", "(", ")", ",", "*", "*", "kwargs", ")" ]
Renders the template file with the given filename from within Cauldron's template environment folder. :param template_name: The filename of the template to render. Any path elements should be relative to Cauldron's root template folder. :param kwargs: Any elements passed to Jinja2 for rendering the template :return: The rendered string
[ "Renders", "the", "template", "file", "with", "the", "given", "filename", "from", "within", "Cauldron", "s", "template", "environment", "folder", "." ]
python
train
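A standalone sketch of what `get_environment()` plus `render_template()` amount to, using a jinja2 DictLoader in place of Cauldron's template folder and a fixed string in place of `make_template_uid()`:

from jinja2 import DictLoader, Environment

env = Environment(loader=DictLoader({
    "greeting.html": '<p id="{{ cauldron_template_uid }}">Hello {{ name }}</p>',
}))

def render(template_name, **kwargs):
    # Same shape as render_template(); the uid is a stand-in value.
    return env.get_template(template_name).render(
        cauldron_template_uid="uid-123", **kwargs)

print(render("greeting.html", name="World"))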
apache/airflow
airflow/utils/dag_processing.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L1126-L1227
def heartbeat(self): """ This should be periodically called by the manager loop. This method will kick off new processes to process DAG definition files and read the results from the finished processors. :return: a list of SimpleDags that were produced by processors that have finished since the last time this was called :rtype: list[airflow.utils.dag_processing.SimpleDag] """ finished_processors = {} """:type : dict[unicode, AbstractDagFileProcessor]""" running_processors = {} """:type : dict[unicode, AbstractDagFileProcessor]""" for file_path, processor in self._processors.items(): if processor.done: self.log.debug("Processor for %s finished", file_path) now = timezone.utcnow() finished_processors[file_path] = processor self._last_runtime[file_path] = (now - processor.start_time).total_seconds() self._last_finish_time[file_path] = now self._run_count[file_path] += 1 else: running_processors[file_path] = processor self._processors = running_processors self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism) self.log.debug("%s file paths queued for processing", len(self._file_path_queue)) # Collect all the DAGs that were found in the processed files simple_dags = [] for file_path, processor in finished_processors.items(): if processor.result is None: self.log.warning( "Processor for %s exited with return code %s.", processor.file_path, processor.exit_code ) else: for simple_dag in processor.result: simple_dags.append(simple_dag) # Generate more file paths to process if we processed all the files # already. if len(self._file_path_queue) == 0: # If the file path is already being processed, or if a file was # processed recently, wait until the next batch file_paths_in_progress = self._processors.keys() now = timezone.utcnow() file_paths_recently_processed = [] for file_path in self._file_paths: last_finish_time = self.get_last_finish_time(file_path) if (last_finish_time is not None and (now - last_finish_time).total_seconds() < self._file_process_interval): file_paths_recently_processed.append(file_path) files_paths_at_run_limit = [file_path for file_path, num_runs in self._run_count.items() if num_runs == self._max_runs] files_paths_to_queue = list(set(self._file_paths) - set(file_paths_in_progress) - set(file_paths_recently_processed) - set(files_paths_at_run_limit)) for file_path, processor in self._processors.items(): self.log.debug( "File path %s is still being processed (started: %s)", processor.file_path, processor.start_time.isoformat() ) self.log.debug( "Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue) ) self._file_path_queue.extend(files_paths_to_queue) zombies = self._find_zombies() # Start more processors if we have enough slots and files to process while (self._parallelism - len(self._processors) > 0 and len(self._file_path_queue) > 0): file_path = self._file_path_queue.pop(0) processor = self._processor_factory(file_path, zombies) processor.start() self.log.debug( "Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path ) self._processors[file_path] = processor # Update heartbeat count. self._run_count[self._heart_beat_key] += 1 return simple_dags
[ "def", "heartbeat", "(", "self", ")", ":", "finished_processors", "=", "{", "}", "\"\"\":type : dict[unicode, AbstractDagFileProcessor]\"\"\"", "running_processors", "=", "{", "}", "\"\"\":type : dict[unicode, AbstractDagFileProcessor]\"\"\"", "for", "file_path", ",", "processor", "in", "self", ".", "_processors", ".", "items", "(", ")", ":", "if", "processor", ".", "done", ":", "self", ".", "log", ".", "debug", "(", "\"Processor for %s finished\"", ",", "file_path", ")", "now", "=", "timezone", ".", "utcnow", "(", ")", "finished_processors", "[", "file_path", "]", "=", "processor", "self", ".", "_last_runtime", "[", "file_path", "]", "=", "(", "now", "-", "processor", ".", "start_time", ")", ".", "total_seconds", "(", ")", "self", ".", "_last_finish_time", "[", "file_path", "]", "=", "now", "self", ".", "_run_count", "[", "file_path", "]", "+=", "1", "else", ":", "running_processors", "[", "file_path", "]", "=", "processor", "self", ".", "_processors", "=", "running_processors", "self", ".", "log", ".", "debug", "(", "\"%s/%s DAG parsing processes running\"", ",", "len", "(", "self", ".", "_processors", ")", ",", "self", ".", "_parallelism", ")", "self", ".", "log", ".", "debug", "(", "\"%s file paths queued for processing\"", ",", "len", "(", "self", ".", "_file_path_queue", ")", ")", "# Collect all the DAGs that were found in the processed files", "simple_dags", "=", "[", "]", "for", "file_path", ",", "processor", "in", "finished_processors", ".", "items", "(", ")", ":", "if", "processor", ".", "result", "is", "None", ":", "self", ".", "log", ".", "warning", "(", "\"Processor for %s exited with return code %s.\"", ",", "processor", ".", "file_path", ",", "processor", ".", "exit_code", ")", "else", ":", "for", "simple_dag", "in", "processor", ".", "result", ":", "simple_dags", ".", "append", "(", "simple_dag", ")", "# Generate more file paths to process if we processed all the files", "# already.", "if", "len", "(", "self", ".", "_file_path_queue", ")", "==", "0", ":", "# If the file path is already being processed, or if a file was", "# processed recently, wait until the next batch", "file_paths_in_progress", "=", "self", ".", "_processors", ".", "keys", "(", ")", "now", "=", "timezone", ".", "utcnow", "(", ")", "file_paths_recently_processed", "=", "[", "]", "for", "file_path", "in", "self", ".", "_file_paths", ":", "last_finish_time", "=", "self", ".", "get_last_finish_time", "(", "file_path", ")", "if", "(", "last_finish_time", "is", "not", "None", "and", "(", "now", "-", "last_finish_time", ")", ".", "total_seconds", "(", ")", "<", "self", ".", "_file_process_interval", ")", ":", "file_paths_recently_processed", ".", "append", "(", "file_path", ")", "files_paths_at_run_limit", "=", "[", "file_path", "for", "file_path", ",", "num_runs", "in", "self", ".", "_run_count", ".", "items", "(", ")", "if", "num_runs", "==", "self", ".", "_max_runs", "]", "files_paths_to_queue", "=", "list", "(", "set", "(", "self", ".", "_file_paths", ")", "-", "set", "(", "file_paths_in_progress", ")", "-", "set", "(", "file_paths_recently_processed", ")", "-", "set", "(", "files_paths_at_run_limit", ")", ")", "for", "file_path", ",", "processor", "in", "self", ".", "_processors", ".", "items", "(", ")", ":", "self", ".", "log", ".", "debug", "(", "\"File path %s is still being processed (started: %s)\"", ",", "processor", ".", "file_path", ",", "processor", ".", "start_time", ".", "isoformat", "(", ")", ")", "self", ".", "log", ".", "debug", "(", "\"Queuing the following files for processing:\\n\\t%s\"", 
",", "\"\\n\\t\"", ".", "join", "(", "files_paths_to_queue", ")", ")", "self", ".", "_file_path_queue", ".", "extend", "(", "files_paths_to_queue", ")", "zombies", "=", "self", ".", "_find_zombies", "(", ")", "# Start more processors if we have enough slots and files to process", "while", "(", "self", ".", "_parallelism", "-", "len", "(", "self", ".", "_processors", ")", ">", "0", "and", "len", "(", "self", ".", "_file_path_queue", ")", ">", "0", ")", ":", "file_path", "=", "self", ".", "_file_path_queue", ".", "pop", "(", "0", ")", "processor", "=", "self", ".", "_processor_factory", "(", "file_path", ",", "zombies", ")", "processor", ".", "start", "(", ")", "self", ".", "log", ".", "debug", "(", "\"Started a process (PID: %s) to generate tasks for %s\"", ",", "processor", ".", "pid", ",", "file_path", ")", "self", ".", "_processors", "[", "file_path", "]", "=", "processor", "# Update heartbeat count.", "self", ".", "_run_count", "[", "self", ".", "_heart_beat_key", "]", "+=", "1", "return", "simple_dags" ]
This should be periodically called by the manager loop. This method will kick off new processes to process DAG definition files and read the results from the finished processors. :return: a list of SimpleDags that were produced by processors that have finished since the last time this was called :rtype: list[airflow.utils.dag_processing.SimpleDag]
[ "This", "should", "be", "periodically", "called", "by", "the", "manager", "loop", ".", "This", "method", "will", "kick", "off", "new", "processes", "to", "process", "DAG", "definition", "files", "and", "read", "the", "results", "from", "the", "finished", "processors", "." ]
python
test
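The queue-refill rule buried in `heartbeat()` is: re-queue a file only if it is not currently being processed, has not finished recently, and has not hit the max run count. A reduced sketch of just that rule, with plain float timestamps instead of timezone-aware datetimes:

def files_to_queue(all_paths, in_progress, last_finish, run_count,
                   process_interval, max_runs, now):
    recent = {p for p in all_paths
              if p in last_finish and now - last_finish[p] < process_interval}
    at_limit = {p for p, n in run_count.items() if n == max_runs}
    return list(set(all_paths) - set(in_progress) - recent - at_limit)

print(files_to_queue(
    all_paths=["a.py", "b.py", "c.py"],
    in_progress=["a.py"],
    last_finish={"b.py": 100.0},
    run_count={"c.py": 3},
    process_interval=30, max_runs=3, now=200.0))  # -> ['b.py']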
eqcorrscan/EQcorrscan
eqcorrscan/utils/clustering.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L840-L906
def space_cluster(catalog, d_thresh, show=True): """ Cluster a catalog by distance only. Will compute the matrix of physical distances between events and utilize the :mod:`scipy.clustering.hierarchy` module to perform the clustering. :type catalog: obspy.core.event.Catalog :param catalog: Catalog of events to clustered :type d_thresh: float :param d_thresh: Maximum inter-event distance threshold :returns: list of :class:`obspy.core.event.Catalog` objects :rtype: list >>> from eqcorrscan.utils.clustering import space_cluster >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> client = Client("NCEDC") >>> starttime = UTCDateTime("2002-01-01") >>> endtime = UTCDateTime("2002-02-01") >>> cat = client.get_events(starttime=starttime, endtime=endtime, ... minmagnitude=2) >>> groups = space_cluster(catalog=cat, d_thresh=2, show=False) >>> from eqcorrscan.utils.clustering import space_cluster >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> client = Client("https://earthquake.usgs.gov") >>> starttime = UTCDateTime("2002-01-01") >>> endtime = UTCDateTime("2002-02-01") >>> cat = client.get_events(starttime=starttime, endtime=endtime, ... minmagnitude=6) >>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False) """ # Compute the distance matrix and linkage dist_mat = dist_mat_km(catalog) dist_vec = squareform(dist_mat) Z = linkage(dist_vec, method='average') # Cluster the linkage using the given threshold as the cutoff indices = fcluster(Z, t=d_thresh, criterion='distance') group_ids = list(set(indices)) indices = [(indices[i], i) for i in range(len(indices))] if show: # Plot the dendrogram...if it's not way too huge dendrogram(Z, color_threshold=d_thresh, distance_sort='ascending') plt.show() # Sort by group id indices.sort(key=lambda tup: tup[0]) groups = [] for group_id in group_ids: group = Catalog() for ind in indices: if ind[0] == group_id: group.append(catalog[ind[1]]) elif ind[0] > group_id: # Because we have sorted by group id, when the index is greater # than the group_id we can break the inner loop. # Patch applied by CJC 05/11/2015 groups.append(group) break groups.append(group) return groups
[ "def", "space_cluster", "(", "catalog", ",", "d_thresh", ",", "show", "=", "True", ")", ":", "# Compute the distance matrix and linkage", "dist_mat", "=", "dist_mat_km", "(", "catalog", ")", "dist_vec", "=", "squareform", "(", "dist_mat", ")", "Z", "=", "linkage", "(", "dist_vec", ",", "method", "=", "'average'", ")", "# Cluster the linkage using the given threshold as the cutoff", "indices", "=", "fcluster", "(", "Z", ",", "t", "=", "d_thresh", ",", "criterion", "=", "'distance'", ")", "group_ids", "=", "list", "(", "set", "(", "indices", ")", ")", "indices", "=", "[", "(", "indices", "[", "i", "]", ",", "i", ")", "for", "i", "in", "range", "(", "len", "(", "indices", ")", ")", "]", "if", "show", ":", "# Plot the dendrogram...if it's not way too huge", "dendrogram", "(", "Z", ",", "color_threshold", "=", "d_thresh", ",", "distance_sort", "=", "'ascending'", ")", "plt", ".", "show", "(", ")", "# Sort by group id", "indices", ".", "sort", "(", "key", "=", "lambda", "tup", ":", "tup", "[", "0", "]", ")", "groups", "=", "[", "]", "for", "group_id", "in", "group_ids", ":", "group", "=", "Catalog", "(", ")", "for", "ind", "in", "indices", ":", "if", "ind", "[", "0", "]", "==", "group_id", ":", "group", ".", "append", "(", "catalog", "[", "ind", "[", "1", "]", "]", ")", "elif", "ind", "[", "0", "]", ">", "group_id", ":", "# Because we have sorted by group id, when the index is greater", "# than the group_id we can break the inner loop.", "# Patch applied by CJC 05/11/2015", "groups", ".", "append", "(", "group", ")", "break", "groups", ".", "append", "(", "group", ")", "return", "groups" ]
Cluster a catalog by distance only. Will compute the matrix of physical distances between events and utilize the :mod:`scipy.clustering.hierarchy` module to perform the clustering. :type catalog: obspy.core.event.Catalog :param catalog: Catalog of events to be clustered :type d_thresh: float :param d_thresh: Maximum inter-event distance threshold :returns: list of :class:`obspy.core.event.Catalog` objects :rtype: list >>> from eqcorrscan.utils.clustering import space_cluster >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> client = Client("NCEDC") >>> starttime = UTCDateTime("2002-01-01") >>> endtime = UTCDateTime("2002-02-01") >>> cat = client.get_events(starttime=starttime, endtime=endtime, ... minmagnitude=2) >>> groups = space_cluster(catalog=cat, d_thresh=2, show=False) >>> from eqcorrscan.utils.clustering import space_cluster >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> client = Client("https://earthquake.usgs.gov") >>> starttime = UTCDateTime("2002-01-01") >>> endtime = UTCDateTime("2002-02-01") >>> cat = client.get_events(starttime=starttime, endtime=endtime, ... minmagnitude=6) >>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False)
[ "Cluster", "a", "catalog", "by", "distance", "only", "." ]
python
train
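The clustering itself is plain scipy. A minimal sketch of the same linkage/fcluster/grouping steps on 2-D points, standing in for the obspy Catalog and great-circle distance matrix:

import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import pdist

points = np.array([[0.0, 0.0], [0.5, 0.1], [10.0, 10.0], [10.2, 9.9]])
Z = linkage(pdist(points), method='average')
indices = fcluster(Z, t=2.0, criterion='distance')

groups = {}
for i, group_id in enumerate(indices):
    groups.setdefault(int(group_id), []).append(i)
print(groups)  # two spatial clusters, e.g. {1: [0, 1], 2: [2, 3]}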
pyvisa/pyvisa
pyvisa/shell.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/shell.py#L231-L261
def do_timeout(self, args): """Get or set timeout (in ms) for resource in use. Get timeout: timeout Set timeout: timeout <mstimeout> """ if not self.current: print('There are no resources in use. Use the command "open".') return args = args.strip() if not args: try: print('Timeout: {}ms'.format(self.current.timeout)) except Exception as e: print(e) else: args = args.split(' ') try: self.current.timeout = float(args[0]) print('Done') except Exception as e: print(e)
[ "def", "do_timeout", "(", "self", ",", "args", ")", ":", "if", "not", "self", ".", "current", ":", "print", "(", "'There are no resources in use. Use the command \"open\".'", ")", "return", "args", "=", "args", ".", "strip", "(", ")", "if", "not", "args", ":", "try", ":", "print", "(", "'Timeout: {}ms'", ".", "format", "(", "self", ".", "current", ".", "timeout", ")", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "else", ":", "args", "=", "args", ".", "split", "(", "' '", ")", "try", ":", "self", ".", "current", ".", "timeout", "=", "float", "(", "args", "[", "0", "]", ")", "print", "(", "'Done'", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")" ]
Get or set timeout (in ms) for resource in use. Get timeout: timeout Set timeout: timeout <mstimeout>
[ "Get", "or", "set", "timeout", "(", "in", "ms", ")", "for", "resource", "in", "use", "." ]
python
train
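The get-or-set command shape is reusable with Python's cmd module. A self-contained sketch where a dict stands in for the open VISA resource:

import cmd

class MiniShell(cmd.Cmd):
    current = None  # would be a pyvisa resource in the real shell

    def do_timeout(self, args):
        """timeout [mstimeout] -- get or set the timeout in ms."""
        if self.current is None:
            print('There are no resources in use. Use the command "open".')
            return
        args = args.strip()
        if not args:
            print('Timeout: {}ms'.format(self.current['timeout']))
            return
        try:
            self.current['timeout'] = float(args.split(' ')[0])
            print('Done')
        except ValueError as e:
            print(e)

shell = MiniShell()
shell.current = {'timeout': 2000.0}
shell.onecmd('timeout 5000')  # Done
shell.onecmd('timeout')       # Timeout: 5000.0ms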
dslackw/slpkg
slpkg/repoinfo.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/repoinfo.py#L107-L126
def repository_data(self, repo): """ Grap data packages """ sum_pkgs, size, unsize, last_upd = 0, [], [], "" for line in (Utils().read_file( self.meta.lib_path + repo + "_repo/PACKAGES.TXT").splitlines()): if line.startswith("PACKAGES.TXT;"): last_upd = line[14:].strip() if line.startswith("PACKAGE NAME:"): sum_pkgs += 1 if line.startswith("PACKAGE SIZE (compressed): "): size.append(line[28:-2].strip()) if line.startswith("PACKAGE SIZE (uncompressed): "): unsize.append(line[30:-2].strip()) if repo in ["salix", "slackl"]: log = Utils().read_file( self.meta.log_path + "{0}/ChangeLog.txt".format(repo)) last_upd = log.split("\n", 1)[0] return [sum_pkgs, size, unsize, last_upd]
[ "def", "repository_data", "(", "self", ",", "repo", ")", ":", "sum_pkgs", ",", "size", ",", "unsize", ",", "last_upd", "=", "0", ",", "[", "]", ",", "[", "]", ",", "\"\"", "for", "line", "in", "(", "Utils", "(", ")", ".", "read_file", "(", "self", ".", "meta", ".", "lib_path", "+", "repo", "+", "\"_repo/PACKAGES.TXT\"", ")", ".", "splitlines", "(", ")", ")", ":", "if", "line", ".", "startswith", "(", "\"PACKAGES.TXT;\"", ")", ":", "last_upd", "=", "line", "[", "14", ":", "]", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "\"PACKAGE NAME:\"", ")", ":", "sum_pkgs", "+=", "1", "if", "line", ".", "startswith", "(", "\"PACKAGE SIZE (compressed): \"", ")", ":", "size", ".", "append", "(", "line", "[", "28", ":", "-", "2", "]", ".", "strip", "(", ")", ")", "if", "line", ".", "startswith", "(", "\"PACKAGE SIZE (uncompressed): \"", ")", ":", "unsize", ".", "append", "(", "line", "[", "30", ":", "-", "2", "]", ".", "strip", "(", ")", ")", "if", "repo", "in", "[", "\"salix\"", ",", "\"slackl\"", "]", ":", "log", "=", "Utils", "(", ")", ".", "read_file", "(", "self", ".", "meta", ".", "log_path", "+", "\"{0}/ChangeLog.txt\"", ".", "format", "(", "repo", ")", ")", "last_upd", "=", "log", ".", "split", "(", "\"\\n\"", ",", "1", ")", "[", "0", "]", "return", "[", "sum_pkgs", ",", "size", ",", "unsize", ",", "last_upd", "]" ]
Grab data packages
[ "Grap", "data", "packages" ]
python
train
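The fixed-offset slicing in `repository_data` is easiest to see against a concrete PACKAGES.TXT snippet. A standalone sketch fed from a made-up string instead of `self.meta.lib_path`:

SAMPLE = """PACKAGES.TXT;  Mon Jan  1 00:00:00 UTC 2018
PACKAGE NAME:  foo-1.0-x86_64-1.txz
PACKAGE SIZE (compressed):  120 K
PACKAGE SIZE (uncompressed):  480 K
PACKAGE NAME:  bar-2.0-x86_64-1.txz
PACKAGE SIZE (compressed):  60 K
PACKAGE SIZE (uncompressed):  200 K
"""

def repo_stats(text):
    sum_pkgs, size, unsize, last_upd = 0, [], [], ""
    for line in text.splitlines():
        if line.startswith("PACKAGES.TXT;"):
            last_upd = line[14:].strip()
        if line.startswith("PACKAGE NAME:"):
            sum_pkgs += 1
        if line.startswith("PACKAGE SIZE (compressed): "):
            size.append(line[28:-2].strip())   # slice off prefix and " K"
        if line.startswith("PACKAGE SIZE (uncompressed): "):
            unsize.append(line[30:-2].strip())
    return [sum_pkgs, size, unsize, last_upd]

print(repo_stats(SAMPLE))
# [2, ['120', '60'], ['480', '200'], 'Mon Jan  1 00:00:00 UTC 2018']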
chriso/gauged
gauged/drivers/sqlite.py
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/sqlite.py#L90-L105
def replace_blocks(self, blocks): """Replace multiple blocks. blocks must be a list of tuples where each tuple consists of (namespace, offset, key, data, flags)""" start = 0 bulk_insert = self.bulk_insert blocks_len = len(blocks) select = 'SELECT ?,?,?,?,?' query = 'REPLACE INTO gauged_data (namespace, offset, `key`, ' \ 'data, flags) ' execute = self.cursor.execute while start < blocks_len: rows = blocks[start:start+bulk_insert] params = [param for params in rows for param in params] insert = (select + ' UNION ') * (len(rows) - 1) + select execute(query + insert, params) start += bulk_insert
[ "def", "replace_blocks", "(", "self", ",", "blocks", ")", ":", "start", "=", "0", "bulk_insert", "=", "self", ".", "bulk_insert", "blocks_len", "=", "len", "(", "blocks", ")", "select", "=", "'SELECT ?,?,?,?,?'", "query", "=", "'REPLACE INTO gauged_data (namespace, offset, `key`, '", "'data, flags) '", "execute", "=", "self", ".", "cursor", ".", "execute", "while", "start", "<", "blocks_len", ":", "rows", "=", "blocks", "[", "start", ":", "start", "+", "bulk_insert", "]", "params", "=", "[", "param", "for", "params", "in", "rows", "for", "param", "in", "params", "]", "insert", "=", "(", "select", "+", "' UNION '", ")", "*", "(", "len", "(", "rows", ")", "-", "1", ")", "+", "select", "execute", "(", "query", "+", "insert", ",", "params", ")", "start", "+=", "bulk_insert" ]
Replace multiple blocks. blocks must be a list of tuples where each tuple consists of (namespace, offset, key, data, flags)
[ "Replace", "multiple", "blocks", ".", "blocks", "must", "be", "a", "list", "of", "tuples", "where", "each", "tuple", "consists", "of", "(", "namespace", "offset", "key", "data", "flags", ")" ]
python
train
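The `SELECT ? UNION SELECT ?` trick above predates multi-row VALUES support in old SQLite. A runnable sketch of the same chunked REPLACE, small enough to verify that a duplicate key really is replaced:

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE gauged_data ('
            'namespace INT, offset INT, key TEXT, data BLOB, flags INT, '
            'PRIMARY KEY (namespace, offset, key))')

def replace_blocks(blocks, bulk_insert=2):
    query = 'REPLACE INTO gauged_data (namespace, offset, key, data, flags) '
    select = 'SELECT ?,?,?,?,?'
    for start in range(0, len(blocks), bulk_insert):
        rows = blocks[start:start + bulk_insert]
        params = [p for row in rows for p in row]
        cur.execute(query + (select + ' UNION ') * (len(rows) - 1) + select,
                    params)

replace_blocks([(1, 0, 'a', b'x', 0), (1, 1, 'b', b'y', 0),
                (1, 0, 'a', b'z', 0)])
print(cur.execute('SELECT offset, key, data FROM gauged_data '
                  'ORDER BY offset').fetchall())
# [(0, 'a', b'z'), (1, 'b', b'y')] -- the duplicate (1, 0, 'a') was replaced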
Fizzadar/pyinfra
pyinfra/api/state.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/state.py#L400-L409
def is_host_in_limit(self, host): ''' Returns a boolean indicating if the host is within the current state limit. ''' limit_hosts = self.limit_hosts if not isinstance(limit_hosts, list): return True return host in limit_hosts
[ "def", "is_host_in_limit", "(", "self", ",", "host", ")", ":", "limit_hosts", "=", "self", ".", "limit_hosts", "if", "not", "isinstance", "(", "limit_hosts", ",", "list", ")", ":", "return", "True", "return", "host", "in", "limit_hosts" ]
Returns a boolean indicating if the host is within the current state limit.
[ "Returns", "a", "boolean", "indicating", "if", "the", "host", "is", "within", "the", "current", "state", "limit", "." ]
python
train
openego/eDisGo
edisgo/grid/network.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L2996-L3209
def save(self, directory, parameters='all'): """ Saves results to disk. Depending on which results are selected and if they exist, the following directories and files are created: * `powerflow_results` directory * `voltages_pu.csv` See :py:attr:`~pfa_v_mag_pu` for more information. * `currents.csv` See :func:`~i_res` for more information. * `active_powers.csv` See :py:attr:`~pfa_p` for more information. * `reactive_powers.csv` See :py:attr:`~pfa_q` for more information. * `apparent_powers.csv` See :func:`~s_res` for more information. * `grid_losses.csv` See :py:attr:`~grid_losses` for more information. * `hv_mv_exchanges.csv` See :py:attr:`~hv_mv_exchanges` for more information. * `pypsa_network` directory See :py:func:`pypsa.Network.export_to_csv_folder` * `grid_expansion_results` directory * `grid_expansion_costs.csv` See :py:attr:`~grid_expansion_costs` for more information. * `equipment_changes.csv` See :py:attr:`~equipment_changes` for more information. * `unresolved_issues.csv` See :py:attr:`~unresolved_issues` for more information. * `curtailment_results` directory Files depend on curtailment specifications. There will be one file for each curtailment specification, that is for every key in :py:attr:`~curtailment` dictionary. * `storage_integration_results` directory * `storages.csv` See :func:`~storages` for more information. Parameters ---------- directory : :obj:`str` Directory to save the results in. parameters : :obj:`str` or :obj:`list` of :obj:`str` Specifies which results will be saved. By default all results are saved. To only save certain results set `parameters` to one of the following options or choose several options by providing a list: * 'pypsa_network' * 'powerflow_results' * 'grid_expansion_results' * 'curtailment_results' * 'storage_integration_results' """ def _save_power_flow_results(target_dir): if self.pfa_v_mag_pu is not None: # create directory os.makedirs(target_dir, exist_ok=True) # voltage self.pfa_v_mag_pu.to_csv( os.path.join(target_dir, 'voltages_pu.csv')) # current self.i_res.to_csv( os.path.join(target_dir, 'currents.csv')) # active power self.pfa_p.to_csv( os.path.join(target_dir, 'active_powers.csv')) # reactive power self.pfa_q.to_csv( os.path.join(target_dir, 'reactive_powers.csv')) # apparent power self.s_res().to_csv( os.path.join(target_dir, 'apparent_powers.csv')) # grid losses self.grid_losses.to_csv( os.path.join(target_dir, 'grid_losses.csv')) # grid exchanges self.hv_mv_exchanges.to_csv(os.path.join( target_dir, 'hv_mv_exchanges.csv')) def _save_pypsa_network(target_dir): if self.network.pypsa: # create directory os.makedirs(target_dir, exist_ok=True) self.network.pypsa.export_to_csv_folder(target_dir) def _save_grid_expansion_results(target_dir): if self.grid_expansion_costs is not None: # create directory os.makedirs(target_dir, exist_ok=True) # grid expansion costs self.grid_expansion_costs.to_csv(os.path.join( target_dir, 'grid_expansion_costs.csv')) # unresolved issues pd.DataFrame(self.unresolved_issues).to_csv(os.path.join( target_dir, 'unresolved_issues.csv')) # equipment changes self.equipment_changes.to_csv(os.path.join( target_dir, 'equipment_changes.csv')) def _save_curtailment_results(target_dir): if self.curtailment is not None: # create directory os.makedirs(target_dir, exist_ok=True) for key, curtailment_df in self.curtailment.items(): if type(key) == tuple: type_prefix = '-'.join([key[0], str(key[1])]) elif type(key) == str: type_prefix = key else: raise KeyError("Unknown key type {} for key {}".format( type(key), key)) 
filename = os.path.join( target_dir, '{}.csv'.format(type_prefix)) curtailment_df.to_csv(filename, index_label=type_prefix) def _save_storage_integration_results(target_dir): storages = self.storages if not storages.empty: # create directory os.makedirs(target_dir, exist_ok=True) # general storage information storages.to_csv(os.path.join(target_dir, 'storages.csv')) # storages time series ts_p, ts_q = self.storages_timeseries() ts_p.to_csv(os.path.join( target_dir, 'storages_active_power.csv')) ts_q.to_csv(os.path.join( target_dir, 'storages_reactive_power.csv')) if not self.storages_costs_reduction is None: self.storages_costs_reduction.to_csv( os.path.join(target_dir, 'storages_costs_reduction.csv')) # dictionary with function to call to save each parameter func_dict = { 'powerflow_results': _save_power_flow_results, 'pypsa_network': _save_pypsa_network, 'grid_expansion_results': _save_grid_expansion_results, 'curtailment_results': _save_curtailment_results, 'storage_integration_results': _save_storage_integration_results } # if string is given convert to list if isinstance(parameters, str): if parameters == 'all': parameters = ['powerflow_results', 'pypsa_network', 'grid_expansion_results', 'curtailment_results', 'storage_integration_results'] else: parameters = [parameters] # save each parameter for parameter in parameters: try: func_dict[parameter](os.path.join(directory, parameter)) except KeyError: message = "Invalid input {} for `parameters` when saving " \ "results. Must be any or a list of the following: " \ "'pypsa_network', 'powerflow_results', " \ "'grid_expansion_results', 'curtailment_results', " \ "'storage_integration_results'.".format(parameter) logger.error(message) raise KeyError(message) except: raise # save measures pd.DataFrame(data={'measure': self.measures}).to_csv( os.path.join(directory, 'measures.csv')) # save configs with open(os.path.join(directory, 'configs.csv'), 'w') as f: writer = csv.writer(f) rows = [ ['{}'.format(key)] + [value for item in values.items() for value in item] for key, values in self.network.config._data.items()] writer.writerows(rows)
[ "def", "save", "(", "self", ",", "directory", ",", "parameters", "=", "'all'", ")", ":", "def", "_save_power_flow_results", "(", "target_dir", ")", ":", "if", "self", ".", "pfa_v_mag_pu", "is", "not", "None", ":", "# create directory", "os", ".", "makedirs", "(", "target_dir", ",", "exist_ok", "=", "True", ")", "# voltage", "self", ".", "pfa_v_mag_pu", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'voltages_pu.csv'", ")", ")", "# current", "self", ".", "i_res", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'currents.csv'", ")", ")", "# active power", "self", ".", "pfa_p", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'active_powers.csv'", ")", ")", "# reactive power", "self", ".", "pfa_q", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'reactive_powers.csv'", ")", ")", "# apparent power", "self", ".", "s_res", "(", ")", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'apparent_powers.csv'", ")", ")", "# grid losses", "self", ".", "grid_losses", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'grid_losses.csv'", ")", ")", "# grid exchanges", "self", ".", "hv_mv_exchanges", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'hv_mv_exchanges.csv'", ")", ")", "def", "_save_pypsa_network", "(", "target_dir", ")", ":", "if", "self", ".", "network", ".", "pypsa", ":", "# create directory", "os", ".", "makedirs", "(", "target_dir", ",", "exist_ok", "=", "True", ")", "self", ".", "network", ".", "pypsa", ".", "export_to_csv_folder", "(", "target_dir", ")", "def", "_save_grid_expansion_results", "(", "target_dir", ")", ":", "if", "self", ".", "grid_expansion_costs", "is", "not", "None", ":", "# create directory", "os", ".", "makedirs", "(", "target_dir", ",", "exist_ok", "=", "True", ")", "# grid expansion costs", "self", ".", "grid_expansion_costs", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'grid_expansion_costs.csv'", ")", ")", "# unresolved issues", "pd", ".", "DataFrame", "(", "self", ".", "unresolved_issues", ")", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'unresolved_issues.csv'", ")", ")", "# equipment changes", "self", ".", "equipment_changes", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'equipment_changes.csv'", ")", ")", "def", "_save_curtailment_results", "(", "target_dir", ")", ":", "if", "self", ".", "curtailment", "is", "not", "None", ":", "# create directory", "os", ".", "makedirs", "(", "target_dir", ",", "exist_ok", "=", "True", ")", "for", "key", ",", "curtailment_df", "in", "self", ".", "curtailment", ".", "items", "(", ")", ":", "if", "type", "(", "key", ")", "==", "tuple", ":", "type_prefix", "=", "'-'", ".", "join", "(", "[", "key", "[", "0", "]", ",", "str", "(", "key", "[", "1", "]", ")", "]", ")", "elif", "type", "(", "key", ")", "==", "str", ":", "type_prefix", "=", "key", "else", ":", "raise", "KeyError", "(", "\"Unknown key type {} for key {}\"", ".", "format", "(", "type", "(", "key", ")", ",", "key", ")", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'{}.csv'", ".", "format", "(", "type_prefix", ")", ")", "curtailment_df", ".", "to_csv", "(", "filename", ",", "index_label", "=", "type_prefix", ")", "def", "_save_storage_integration_results", "(", "target_dir", ")", ":", "storages", "=", "self", ".", "storages", "if", "not", 
"storages", ".", "empty", ":", "# create directory", "os", ".", "makedirs", "(", "target_dir", ",", "exist_ok", "=", "True", ")", "# general storage information", "storages", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'storages.csv'", ")", ")", "# storages time series", "ts_p", ",", "ts_q", "=", "self", ".", "storages_timeseries", "(", ")", "ts_p", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'storages_active_power.csv'", ")", ")", "ts_q", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'storages_reactive_power.csv'", ")", ")", "if", "not", "self", ".", "storages_costs_reduction", "is", "None", ":", "self", ".", "storages_costs_reduction", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "target_dir", ",", "'storages_costs_reduction.csv'", ")", ")", "# dictionary with function to call to save each parameter", "func_dict", "=", "{", "'powerflow_results'", ":", "_save_power_flow_results", ",", "'pypsa_network'", ":", "_save_pypsa_network", ",", "'grid_expansion_results'", ":", "_save_grid_expansion_results", ",", "'curtailment_results'", ":", "_save_curtailment_results", ",", "'storage_integration_results'", ":", "_save_storage_integration_results", "}", "# if string is given convert to list", "if", "isinstance", "(", "parameters", ",", "str", ")", ":", "if", "parameters", "==", "'all'", ":", "parameters", "=", "[", "'powerflow_results'", ",", "'pypsa_network'", ",", "'grid_expansion_results'", ",", "'curtailment_results'", ",", "'storage_integration_results'", "]", "else", ":", "parameters", "=", "[", "parameters", "]", "# save each parameter", "for", "parameter", "in", "parameters", ":", "try", ":", "func_dict", "[", "parameter", "]", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "parameter", ")", ")", "except", "KeyError", ":", "message", "=", "\"Invalid input {} for `parameters` when saving \"", "\"results. Must be any or a list of the following: \"", "\"'pypsa_network', 'powerflow_results', \"", "\"'grid_expansion_results', 'curtailment_results', \"", "\"'storage_integration_results'.\"", ".", "format", "(", "parameter", ")", "logger", ".", "error", "(", "message", ")", "raise", "KeyError", "(", "message", ")", "except", ":", "raise", "# save measures", "pd", ".", "DataFrame", "(", "data", "=", "{", "'measure'", ":", "self", ".", "measures", "}", ")", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "'measures.csv'", ")", ")", "# save configs", "with", "open", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "'configs.csv'", ")", ",", "'w'", ")", "as", "f", ":", "writer", "=", "csv", ".", "writer", "(", "f", ")", "rows", "=", "[", "[", "'{}'", ".", "format", "(", "key", ")", "]", "+", "[", "value", "for", "item", "in", "values", ".", "items", "(", ")", "for", "value", "in", "item", "]", "for", "key", ",", "values", "in", "self", ".", "network", ".", "config", ".", "_data", ".", "items", "(", ")", "]", "writer", ".", "writerows", "(", "rows", ")" ]
Saves results to disk. Depending on which results are selected and if they exist, the following directories and files are created: * `powerflow_results` directory * `voltages_pu.csv` See :py:attr:`~pfa_v_mag_pu` for more information. * `currents.csv` See :func:`~i_res` for more information. * `active_powers.csv` See :py:attr:`~pfa_p` for more information. * `reactive_powers.csv` See :py:attr:`~pfa_q` for more information. * `apparent_powers.csv` See :func:`~s_res` for more information. * `grid_losses.csv` See :py:attr:`~grid_losses` for more information. * `hv_mv_exchanges.csv` See :py:attr:`~hv_mv_exchanges` for more information. * `pypsa_network` directory See :py:func:`pypsa.Network.export_to_csv_folder` * `grid_expansion_results` directory * `grid_expansion_costs.csv` See :py:attr:`~grid_expansion_costs` for more information. * `equipment_changes.csv` See :py:attr:`~equipment_changes` for more information. * `unresolved_issues.csv` See :py:attr:`~unresolved_issues` for more information. * `curtailment_results` directory Files depend on curtailment specifications. There will be one file for each curtailment specification, that is for every key in :py:attr:`~curtailment` dictionary. * `storage_integration_results` directory * `storages.csv` See :func:`~storages` for more information. Parameters ---------- directory : :obj:`str` Directory to save the results in. parameters : :obj:`str` or :obj:`list` of :obj:`str` Specifies which results will be saved. By default all results are saved. To only save certain results set `parameters` to one of the following options or choose several options by providing a list: * 'pypsa_network' * 'powerflow_results' * 'grid_expansion_results' * 'curtailment_results' * 'storage_integration_results'
[ "Saves", "results", "to", "disk", "." ]
python
train
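Stripped of the eDisGo specifics, `Results.save` is a dispatch-dict pattern: map each parameter name to a writer function, then loop. A minimal sketch with two dummy writers:

import os
import tempfile

def _save_powerflow(target_dir):
    os.makedirs(target_dir, exist_ok=True)
    with open(os.path.join(target_dir, 'voltages_pu.csv'), 'w') as f:
        f.write('timestamp,bus_1\n')

def _save_curtailment(target_dir):
    os.makedirs(target_dir, exist_ok=True)

func_dict = {
    'powerflow_results': _save_powerflow,
    'curtailment_results': _save_curtailment,
}

def save(directory, parameters='all'):
    if isinstance(parameters, str):
        parameters = list(func_dict) if parameters == 'all' else [parameters]
    for parameter in parameters:
        try:
            func_dict[parameter](os.path.join(directory, parameter))
        except KeyError:
            raise KeyError('Invalid input {} for `parameters`.'.format(parameter))

out = tempfile.mkdtemp()
save(out)
print(sorted(os.listdir(out)))  # ['curtailment_results', 'powerflow_results']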
mardix/pylot
pylot/utils.py
https://github.com/mardix/pylot/blob/506a33a56ebdfc0925b94015e8cf98ccb16a143c/pylot/utils.py#L264-L274
def bg_thread(func): """ A threading decorator :param func: :return: """ @functools.wraps(func) def wrapper(*args, **kwargs): p = threading.Thread(target=func, args=args, kwargs=kwargs) p.start() return wrapper
[ "def", "bg_thread", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "p", "=", "threading", ".", "Thread", "(", "target", "=", "func", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "p", ".", "start", "(", ")", "return", "wrapper" ]
A threading decorator :param func: :return:
[ "A", "threading", "decorator", ":", "param", "func", ":", ":", "return", ":" ]
python
train
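One usage caveat worth showing for the decorator above: because the wrapper returns None, callers cannot retrieve the function's result or join the spawned thread. A self-contained demo:

import functools
import threading
import time

def bg_thread(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        p = threading.Thread(target=func, args=args, kwargs=kwargs)
        p.start()
    return wrapper

@bg_thread
def slow_task(name):
    time.sleep(0.1)
    print('done:', name)

print(slow_task('emails'))  # None -- returns immediately, no handle to join
time.sleep(0.2)             # give the background thread time to finish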
peri-source/peri
peri/logger.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/logger.py#L138-L155
def set_verbosity(self, verbosity='vvv', handlers=None): """ Set the verbosity level of a certain log handler or of all handlers. Parameters ---------- verbosity : 'v' to 'vvvvv' the level of verbosity, more v's is more verbose handlers : string, or list of strings handler names can be found in ``peri.logger.types.keys()`` Current set is:: ['console-bw', 'console-color', 'rotating-log'] """ self.verbosity = sanitize(verbosity) self.set_level(v2l[verbosity], handlers=handlers) self.set_formatter(v2f[verbosity], handlers=handlers)
[ "def", "set_verbosity", "(", "self", ",", "verbosity", "=", "'vvv'", ",", "handlers", "=", "None", ")", ":", "self", ".", "verbosity", "=", "sanitize", "(", "verbosity", ")", "self", ".", "set_level", "(", "v2l", "[", "verbosity", "]", ",", "handlers", "=", "handlers", ")", "self", ".", "set_formatter", "(", "v2f", "[", "verbosity", "]", ",", "handlers", "=", "handlers", ")" ]
Set the verbosity level of a certain log handler or of all handlers. Parameters ---------- verbosity : 'v' to 'vvvvv' the level of verbosity, more v's is more verbose handlers : string, or list of strings handler names can be found in ``peri.logger.types.keys()`` Current set is:: ['console-bw', 'console-color', 'rotating-log']
[ "Set", "the", "verbosity", "level", "of", "a", "certain", "log", "handler", "or", "of", "all", "handlers", "." ]
python
valid
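A standalone sketch of the v-to-level/formatter mapping that `set_verbosity` drives; the tables below are toy stand-ins, not peri's real v2l/v2f values:

import logging

V2L = {'v': logging.ERROR, 'vv': logging.WARNING, 'vvv': logging.INFO,
       'vvvv': logging.DEBUG, 'vvvvv': logging.DEBUG}
V2F = {'v': '%(message)s', 'vv': '%(message)s',
       'vvv': '%(levelname)s %(message)s',
       'vvvv': '%(name)s %(levelname)s %(message)s',
       'vvvvv': '%(asctime)s %(name)s %(levelname)s %(message)s'}

def set_verbosity(logger, verbosity='vvv'):
    level, fmt = V2L[verbosity], logging.Formatter(V2F[verbosity])
    logger.setLevel(level)
    for handler in logger.handlers:
        handler.setLevel(level)
        handler.setFormatter(fmt)

log = logging.getLogger('demo')
log.addHandler(logging.StreamHandler())
set_verbosity(log, 'vvvv')
log.debug('visible at vvvv')  # more v's, more (and more detailed) output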
datastax/python-driver
cassandra/io/twistedreactor.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/io/twistedreactor.py#L93-L99
def clientConnectionFailed(self, connector, reason): """ Overridden twisted callback which is called when the connection attempt fails. """ log.debug("Connect failed: %s", reason) self.conn.defunct(reason.value)
[ "def", "clientConnectionFailed", "(", "self", ",", "connector", ",", "reason", ")", ":", "log", ".", "debug", "(", "\"Connect failed: %s\"", ",", "reason", ")", "self", ".", "conn", ".", "defunct", "(", "reason", ".", "value", ")" ]
Overridden twisted callback which is called when the connection attempt fails.
[ "Overridden", "twisted", "callback", "which", "is", "called", "when", "the", "connection", "attempt", "fails", "." ]
python
train
cknoll/ipydex
ipydex/core.py
https://github.com/cknoll/ipydex/blob/ca528ce4c97ee934e48efbd5983c1b7cd88bec9d/ipydex/core.py#L396-L405
def color_exepthook(pdb=0, mode=2): """ Make tracebacks after exceptions colored, verbose, and/or call pdb (python cmd line debugger) at the place where the exception occurs """ modus = ['Plain', 'Context', 'Verbose'][mode] # select the mode sys.excepthook = ultratb.FormattedTB(mode=modus, color_scheme='Linux', call_pdb=pdb)
[ "def", "color_exepthook", "(", "pdb", "=", "0", ",", "mode", "=", "2", ")", ":", "modus", "=", "[", "'Plain'", ",", "'Context'", ",", "'Verbose'", "]", "[", "mode", "]", "# select the mode", "sys", ".", "excepthook", "=", "ultratb", ".", "FormattedTB", "(", "mode", "=", "modus", ",", "color_scheme", "=", "'Linux'", ",", "call_pdb", "=", "pdb", ")" ]
Make tracebacks after exceptions colored, verbose, and/or call pdb (python cmd line debugger) at the place where the exception occurs
[ "Make", "tracebacks", "after", "exceptions", "colored", "verbose", "and", "/", "or", "call", "pdb", "(", "python", "cmd", "line", "debugger", ")", "at", "the", "place", "where", "the", "exception", "occurs" ]
python
train
timothyb0912/pylogit
pylogit/bootstrap.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L737-L788
def calc_abc_interval(self, conf_percentage, init_vals, epsilon=0.001, **fit_kwargs): """ Calculates Approximate Bootstrap Confidence Intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. init_vals : 1D ndarray. The initial values used to estimate the one's choice model. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be close to zero. Default == sys.float_info.epsilon. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- None. Will store the ABC intervals as `self.abc_interval`. """ print("Calculating Approximate Bootstrap Confidence (ABC) Intervals") print(time.strftime("%a %m-%d-%Y %I:%M%p")) sys.stdout.flush() # Get the alpha % that corresponds to the given confidence percentage. alpha = bc.get_alpha_from_conf_percentage(conf_percentage) # Create the column names for the dataframe of confidence intervals single_column_names =\ ['{:.3g}%'.format(alpha / 2.0), '{:.3g}%'.format(100 - alpha / 2.0)] # Calculate the ABC confidence intervals conf_intervals =\ abc.calc_abc_interval(self.model_obj, self.mle_params.values, init_vals, conf_percentage, epsilon=epsilon, **fit_kwargs) # Store the ABC confidence intervals self.abc_interval = pd.DataFrame(conf_intervals.T, index=self.mle_params.index, columns=single_column_names) return None
[ "def", "calc_abc_interval", "(", "self", ",", "conf_percentage", ",", "init_vals", ",", "epsilon", "=", "0.001", ",", "*", "*", "fit_kwargs", ")", ":", "print", "(", "\"Calculating Approximate Bootstrap Confidence (ABC) Intervals\"", ")", "print", "(", "time", ".", "strftime", "(", "\"%a %m-%d-%Y %I:%M%p\"", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# Get the alpha % that corresponds to the given confidence percentage.", "alpha", "=", "bc", ".", "get_alpha_from_conf_percentage", "(", "conf_percentage", ")", "# Create the column names for the dataframe of confidence intervals", "single_column_names", "=", "[", "'{:.3g}%'", ".", "format", "(", "alpha", "/", "2.0", ")", ",", "'{:.3g}%'", ".", "format", "(", "100", "-", "alpha", "/", "2.0", ")", "]", "# Calculate the ABC confidence intervals", "conf_intervals", "=", "abc", ".", "calc_abc_interval", "(", "self", ".", "model_obj", ",", "self", ".", "mle_params", ".", "values", ",", "init_vals", ",", "conf_percentage", ",", "epsilon", "=", "epsilon", ",", "*", "*", "fit_kwargs", ")", "# Store the ABC confidence intervals", "self", ".", "abc_interval", "=", "pd", ".", "DataFrame", "(", "conf_intervals", ".", "T", ",", "index", "=", "self", ".", "mle_params", ".", "index", ",", "columns", "=", "single_column_names", ")", "return", "None" ]
Calculates Approximate Bootstrap Confidence Intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. init_vals : 1D ndarray. The initial values used to estimate one's choice model. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be close to zero. Default == sys.float_info.epsilon. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- None. Will store the ABC intervals as `self.abc_interval`.
[ "Calculates", "Approximate", "Bootstrap", "Confidence", "Intervals", "for", "one", "s", "model", "." ]
python
train
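The bookkeeping above is: alpha = 100 - conf_percentage, which gives the two percentile column labels. A sketch of that plus, as a stand-in for the ABC machinery itself, plain percentile endpoints over fake bootstrap replicates. The get_alpha definition is my assumption about `bc.get_alpha_from_conf_percentage`, not pylogit's verified code.

import numpy as np

def get_alpha_from_conf_percentage(conf_percentage):
    return 100.0 - conf_percentage  # assumed behavior, e.g. 95 -> 5

def interval_columns(conf_percentage):
    alpha = get_alpha_from_conf_percentage(conf_percentage)
    return ['{:.3g}%'.format(alpha / 2.0),
            '{:.3g}%'.format(100 - alpha / 2.0)]

rng = np.random.default_rng(0)
replicates = rng.normal(loc=1.5, scale=0.2, size=1000)  # fake bootstrap draws
lo, hi = np.percentile(replicates, [2.5, 97.5])
print(interval_columns(95), (round(lo, 3), round(hi, 3)))
# ['2.5%', '97.5%'] with endpoints near 1.1 and 1.9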
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L165-L185
def _remove_overlaps(in_file, out_dir, data): """Remove regions that overlap with next region, these result in issues with PureCN. """ out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: prev_line = None for line in in_handle: if prev_line: pchrom, pstart, pend = prev_line.split("\t", 4)[:3] cchrom, cstart, cend = line.split("\t", 4)[:3] # Skip if chromosomes match and end overlaps start if pchrom == cchrom and int(pend) > int(cstart): pass else: out_handle.write(prev_line) prev_line = line out_handle.write(prev_line) return out_file
[ "def", "_remove_overlaps", "(", "in_file", ",", "out_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s-nooverlaps%s\"", "%", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", "(", "in_file", ")", ")", ")", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "in_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "with", "open", "(", "tx_out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "prev_line", "=", "None", "for", "line", "in", "in_handle", ":", "if", "prev_line", ":", "pchrom", ",", "pstart", ",", "pend", "=", "prev_line", ".", "split", "(", "\"\\t\"", ",", "4", ")", "[", ":", "3", "]", "cchrom", ",", "cstart", ",", "cend", "=", "line", ".", "split", "(", "\"\\t\"", ",", "4", ")", "[", ":", "3", "]", "# Skip if chromosomes match and end overlaps start", "if", "pchrom", "==", "cchrom", "and", "int", "(", "pend", ")", ">", "int", "(", "cstart", ")", ":", "pass", "else", ":", "out_handle", ".", "write", "(", "prev_line", ")", "prev_line", "=", "line", "out_handle", ".", "write", "(", "prev_line", ")", "return", "out_file" ]
Remove regions that overlap with the next region; these result in issues with PureCN.
[ "Remove", "regions", "that", "overlap", "with", "next", "region", "these", "result", "in", "issues", "with", "PureCN", "." ]
python
train
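The filter logic reduces to: drop a line when its end runs past the start of the next line on the same chromosome. A standalone sketch over an in-memory BED-like list:

LINES = [
    "chr1\t100\t200\tr1\n",
    "chr1\t150\t250\tr2\n",  # starts before the previous end, so r1 is dropped
    "chr1\t300\t400\tr3\n",
]

def remove_overlaps(lines):
    out, prev_line = [], None
    for line in lines:
        if prev_line:
            pchrom, pstart, pend = prev_line.split("\t", 4)[:3]
            cchrom, cstart, cend = line.split("\t", 4)[:3]
            if not (pchrom == cchrom and int(pend) > int(cstart)):
                out.append(prev_line)
        prev_line = line
    out.append(prev_line)
    return out

print("".join(remove_overlaps(LINES)), end="")  # r2 and r3 survive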
Chilipp/model-organization
model_organization/config.py
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/config.py#L748-L755
def save(self): """ Save the entire configuration files """ self.projects.save() self.experiments.save() safe_dump(self.global_config, self._globals_file, default_flow_style=False)
[ "def", "save", "(", "self", ")", ":", "self", ".", "projects", ".", "save", "(", ")", "self", ".", "experiments", ".", "save", "(", ")", "safe_dump", "(", "self", ".", "global_config", ",", "self", ".", "_globals_file", ",", "default_flow_style", "=", "False", ")" ]
Save all of the configuration files
[ "Save", "the", "entire", "configuration", "files" ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/collection.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/collection.py#L1342-L1353
def _count(self, cmd, collation=None): """Internal count helper.""" with self._socket_for_reads() as (sock_info, slave_ok): res = self._command( sock_info, cmd, slave_ok, allowable_errors=["ns missing"], codec_options=self.__write_response_codec_options, read_concern=self.read_concern, collation=collation) if res.get("errmsg", "") == "ns missing": return 0 return int(res["n"])
[ "def", "_count", "(", "self", ",", "cmd", ",", "collation", "=", "None", ")", ":", "with", "self", ".", "_socket_for_reads", "(", ")", "as", "(", "sock_info", ",", "slave_ok", ")", ":", "res", "=", "self", ".", "_command", "(", "sock_info", ",", "cmd", ",", "slave_ok", ",", "allowable_errors", "=", "[", "\"ns missing\"", "]", ",", "codec_options", "=", "self", ".", "__write_response_codec_options", ",", "read_concern", "=", "self", ".", "read_concern", ",", "collation", "=", "collation", ")", "if", "res", ".", "get", "(", "\"errmsg\"", ",", "\"\"", ")", "==", "\"ns missing\"", ":", "return", "0", "return", "int", "(", "res", "[", "\"n\"", "]", ")" ]
Internal count helper.
[ "Internal", "count", "helper", "." ]
python
train
tanghaibao/goatools
goatools/anno/opts.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/opts.py#L65-L73
def getfnc_qual_ev(self):
        """Keep annotation if it passes potentially modified selection."""
        fnc_key = (
            self.nd_not2desc[(self._keep_nd, self._keep_not)],
            self.incexc2num[(
                self.include_evcodes is not None,
                self.exclude_evcodes is not None)],
        )
        return self.param2fnc[fnc_key]
[ "def", "getfnc_qual_ev", "(", "self", ")", ":", "fnc_key", "=", "(", "self", ".", "nd_not2desc", "[", "(", "self", ".", "_keep_nd", ",", "self", ".", "_keep_not", ")", "]", ",", "self", ".", "incexc2num", "[", "(", "self", ".", "include_evcodes", "is", "not", "None", ",", "self", ".", "exclude_evcodes", "is", "not", "None", ")", "]", ",", ")", "return", "self", ".", "param2fnc", "[", "fnc_key", "]" ]
Keep annotation if it passes potentially modified selection.
[ "Keep", "annotation", "if", "it", "passes", "potentially", "modified", "selection", "." ]
python
train
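The lookup in `getfnc_qual_ev` is a dispatch-table pattern: boolean options are folded into a tuple key that selects a pre-built filter function. A standalone sketch of that pattern with invented table entries and evidence codes:

def _keep_all(ev):
    # accept every evidence code
    return True

def _keep_included(ev, include=("EXP", "IDA")):
    # accept only explicitly included evidence codes
    return ev in include

# Mirrors param2fnc: key = (nd/not handling, include/exclude mode)
param2fnc = {
    ("all", 0): _keep_all,
    ("all", 1): _keep_included,
}
incexc2num = {(False, False): 0, (True, False): 1}

include_evcodes = ("EXP", "IDA")
fnc = param2fnc[("all", incexc2num[(include_evcodes is not None, False)])]
print(fnc("EXP"), fnc("IEA"))  # True False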
darkfeline/animanager
animanager/db/query/update.py
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/update.py#L98-L109
def bump(db, aid): """Bump anime regular episode count.""" anime = lookup(db, aid) if anime.complete: return episode = anime.watched_episodes + 1 with db: set_watched(db, aid, get_eptype(db, 'regular').id, episode) set_status( db, aid, anime.enddate and episode >= anime.episodecount, episode)
[ "def", "bump", "(", "db", ",", "aid", ")", ":", "anime", "=", "lookup", "(", "db", ",", "aid", ")", "if", "anime", ".", "complete", ":", "return", "episode", "=", "anime", ".", "watched_episodes", "+", "1", "with", "db", ":", "set_watched", "(", "db", ",", "aid", ",", "get_eptype", "(", "db", ",", "'regular'", ")", ".", "id", ",", "episode", ")", "set_status", "(", "db", ",", "aid", ",", "anime", ".", "enddate", "and", "episode", ">=", "anime", ".", "episodecount", ",", "episode", ")" ]
Bump anime regular episode count.
[ "Bump", "anime", "regular", "episode", "count", "." ]
python
train
HazyResearch/pdftotree
pdftotree/utils/pdf/layout_utils.py
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L217-L273
def recursive_xy_divide(elems, avg_font_size):
    """
    Recursively group/divide the document by white stripes
    by projecting elements onto alternating axes as intervals.

    avg_font_size: the minimum gap size between elements below
    which we consider interval continuous.
    """
    log = logging.getLogger(__name__)
    log.info(avg_font_size)
    objects = list(elems.mentions)
    objects.extend(elems.segments)

    bboxes = []
    # A tree that is a list of its children
    # bboxes can be recursively reconstructed from
    # the leaves
    def divide(objs, bbox, h_split=True, is_single=False):
        """
        Recursive wrapper for splitting a list of objects
        with bounding boxes.
        h_split: whether to split along y axis, otherwise
        we split along x axis.
        """
        if not objs:
            return []
        # range start/end indices
        axis = 1 if h_split else 0
        intervals, groups = project_onto(objs, axis, avg_font_size)

        # base case where we can not actually divide
        single_child = len(groups) == 1

        # Can not divide in both X and Y, stop
        if is_single and single_child:
            bboxes.append(bbox)
            return objs
        else:
            children = []
            for interval, group in zip(intervals, groups):
                # Create the bbox for the subgroup
                sub_bbox = np.array(bbox)
                sub_bbox[[axis, axis + 2]] = interval
                # Append the sub-document tree
                child = divide(group, sub_bbox, not h_split, single_child)
                children.append(child)
            return children

    full_page_bbox = (0, 0, elems.layout.width, elems.layout.height)
    # Filter out invalid objects
    objects = [o for o in objects if inside(full_page_bbox, o.bbox)]
    log.info("avg_font_size for dividing %s", avg_font_size)
    tree = divide(objects, full_page_bbox) if objects else []
    return bboxes, tree
[ "def", "recursive_xy_divide", "(", "elems", ",", "avg_font_size", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "__name__", ")", "log", ".", "info", "(", "avg_font_size", ")", "objects", "=", "list", "(", "elems", ".", "mentions", ")", "objects", ".", "extend", "(", "elems", ".", "segments", ")", "bboxes", "=", "[", "]", "# A tree that is a list of its children", "# bboxes can be recursively reconstructed from", "# the leaves", "def", "divide", "(", "objs", ",", "bbox", ",", "h_split", "=", "True", ",", "is_single", "=", "False", ")", ":", "\"\"\"\n    Recursive wrapper for splitting a list of objects\n    with bounding boxes.\n    h_split: whether to split along y axis, otherwise\n    we split along x axis.\n    \"\"\"", "if", "not", "objs", ":", "return", "[", "]", "# range start/end indices", "axis", "=", "1", "if", "h_split", "else", "0", "intervals", ",", "groups", "=", "project_onto", "(", "objs", ",", "axis", ",", "avg_font_size", ")", "# base case where we can not actually divide", "single_child", "=", "len", "(", "groups", ")", "==", "1", "# Can not divide in both X and Y, stop", "if", "is_single", "and", "single_child", ":", "bboxes", ".", "append", "(", "bbox", ")", "return", "objs", "else", ":", "children", "=", "[", "]", "for", "interval", ",", "group", "in", "zip", "(", "intervals", ",", "groups", ")", ":", "# Create the bbox for the subgroup", "sub_bbox", "=", "np", ".", "array", "(", "bbox", ")", "sub_bbox", "[", "[", "axis", ",", "axis", "+", "2", "]", "]", "=", "interval", "# Append the sub-document tree", "child", "=", "divide", "(", "group", ",", "sub_bbox", ",", "not", "h_split", ",", "single_child", ")", "children", ".", "append", "(", "child", ")", "return", "children", "full_page_bbox", "=", "(", "0", ",", "0", ",", "elems", ".", "layout", ".", "width", ",", "elems", ".", "layout", ".", "height", ")", "# Filter out invalid objects", "objects", "=", "[", "o", "for", "o", "in", "objects", "if", "inside", "(", "full_page_bbox", ",", "o", ".", "bbox", ")", "]", "log", ".", "info", "(", "\"avg_font_size for dividing %s\"", ",", "avg_font_size", ")", "tree", "=", "divide", "(", "objects", ",", "full_page_bbox", ")", "if", "objects", "else", "[", "]", "return", "bboxes", ",", "tree" ]
Recursively group/divide the document by white stripes by projecting elements onto alternating axes as intervals. avg_font_size: the minimum gap size between elements below which we consider interval continuous.
[ "Recursively", "group", "/", "divide", "the", "document", "by", "white", "stripes", "by", "projecting", "elements", "onto", "alternating", "axes", "as", "intervals", "." ]
python
train
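`project_onto` is not included in the record, but the grouping it has to perform can be sketched as 1D interval merging: boxes whose gap along the chosen axis is smaller than `avg_font_size` end up in one group. A plausible reimplementation with invented sample boxes:

import numpy as np

def project_onto_1d(boxes, axis, min_gap):
    # Sort boxes by start coordinate on `axis`, then merge any box whose
    # gap to the previous interval is below `min_gap` into that interval.
    order = np.argsort([b[axis] for b in boxes])
    intervals, groups = [], []
    for i in order:
        start, end = boxes[i][axis], boxes[i][axis + 2]
        if intervals and start - intervals[-1][1] < min_gap:
            intervals[-1][1] = max(intervals[-1][1], end)
            groups[-1].append(boxes[i])
        else:
            intervals.append([start, end])
            groups.append([boxes[i]])
    return intervals, groups

boxes = [(0, 0, 10, 5), (12, 0, 20, 5), (80, 0, 95, 5)]  # (x0, y0, x1, y1)
print(project_onto_1d(boxes, 0, min_gap=8)[0])  # [[0, 20], [80, 95]]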
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L412-L432
def _work(self): """Process the information regarding the available ports.""" if self._refresh_cache: # Inconsistent cache might cause exceptions. For example, # if a port has been removed, it will be known in the next # loop. Using the old switch port can cause exceptions. LOG.debug("Refreshing os_win caches...") self._utils.update_cache() self._refresh_cache = False if self._bound_ports or self._unbound_ports: eventlet.spawn_n(self._notify_plugin_on_port_updates) # notify plugin about port deltas if self._added_ports: LOG.debug("Agent loop has new devices!") self._treat_devices_added() if self._removed_ports: LOG.debug("Agent loop has lost devices...") self._treat_devices_removed()
[ "def", "_work", "(", "self", ")", ":", "if", "self", ".", "_refresh_cache", ":", "# Inconsistent cache might cause exceptions. For example,", "# if a port has been removed, it will be known in the next", "# loop. Using the old switch port can cause exceptions.", "LOG", ".", "debug", "(", "\"Refreshing os_win caches...\"", ")", "self", ".", "_utils", ".", "update_cache", "(", ")", "self", ".", "_refresh_cache", "=", "False", "if", "self", ".", "_bound_ports", "or", "self", ".", "_unbound_ports", ":", "eventlet", ".", "spawn_n", "(", "self", ".", "_notify_plugin_on_port_updates", ")", "# notify plugin about port deltas", "if", "self", ".", "_added_ports", ":", "LOG", ".", "debug", "(", "\"Agent loop has new devices!\"", ")", "self", ".", "_treat_devices_added", "(", ")", "if", "self", ".", "_removed_ports", ":", "LOG", ".", "debug", "(", "\"Agent loop has lost devices...\"", ")", "self", ".", "_treat_devices_removed", "(", ")" ]
Process the information regarding the available ports.
[ "Process", "the", "information", "regarding", "the", "available", "ports", "." ]
python
train
pallets/werkzeug
src/werkzeug/_reloader.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/_reloader.py#L153-L178
def restart_with_reloader(self): """Spawn a new Python interpreter with the same arguments as this one, but running the reloader thread. """ while 1: _log("info", " * Restarting with %s" % self.name) args = _get_args_for_reloading() # a weird bug on windows. sometimes unicode strings end up in the # environment and subprocess.call does not like this, encode them # to latin1 and continue. if os.name == "nt" and PY2: new_environ = {} for key, value in iteritems(os.environ): if isinstance(key, text_type): key = key.encode("iso-8859-1") if isinstance(value, text_type): value = value.encode("iso-8859-1") new_environ[key] = value else: new_environ = os.environ.copy() new_environ["WERKZEUG_RUN_MAIN"] = "true" exit_code = subprocess.call(args, env=new_environ, close_fds=False) if exit_code != 3: return exit_code
[ "def", "restart_with_reloader", "(", "self", ")", ":", "while", "1", ":", "_log", "(", "\"info\"", ",", "\" * Restarting with %s\"", "%", "self", ".", "name", ")", "args", "=", "_get_args_for_reloading", "(", ")", "# a weird bug on windows. sometimes unicode strings end up in the", "# environment and subprocess.call does not like this, encode them", "# to latin1 and continue.", "if", "os", ".", "name", "==", "\"nt\"", "and", "PY2", ":", "new_environ", "=", "{", "}", "for", "key", ",", "value", "in", "iteritems", "(", "os", ".", "environ", ")", ":", "if", "isinstance", "(", "key", ",", "text_type", ")", ":", "key", "=", "key", ".", "encode", "(", "\"iso-8859-1\"", ")", "if", "isinstance", "(", "value", ",", "text_type", ")", ":", "value", "=", "value", ".", "encode", "(", "\"iso-8859-1\"", ")", "new_environ", "[", "key", "]", "=", "value", "else", ":", "new_environ", "=", "os", ".", "environ", ".", "copy", "(", ")", "new_environ", "[", "\"WERKZEUG_RUN_MAIN\"", "]", "=", "\"true\"", "exit_code", "=", "subprocess", ".", "call", "(", "args", ",", "env", "=", "new_environ", ",", "close_fds", "=", "False", ")", "if", "exit_code", "!=", "3", ":", "return", "exit_code" ]
Spawn a new Python interpreter with the same arguments as this one, but running the reloader thread.
[ "Spawn", "a", "new", "Python", "interpreter", "with", "the", "same", "arguments", "as", "this", "one", "but", "running", "the", "reloader", "thread", "." ]
python
train
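The restart loop above rests on one convention: a child process exits with status 3 to request a restart, and any other status ends the loop. A minimal standalone sketch of that protocol (the wrapper name is invented):

import os
import subprocess

def run_with_restarts(argv):
    # Re-run the script until it exits with something other than 3.
    env = os.environ.copy()
    env["WERKZEUG_RUN_MAIN"] = "true"   # marks the child process
    while True:
        exit_code = subprocess.call(argv, env=env, close_fds=False)
        if exit_code != 3:
            return exit_code

# A child that wants to be restarted would simply call: sys.exit(3)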
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/tango_master/app/sdp_master_device.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_master/app/sdp_master_device.py#L118-L126
def _get_service_state(service_id: str): """Get the Service state object for the specified id.""" LOG.debug('Getting state of service %s', service_id) services = get_service_id_list() service_ids = [s for s in services if service_id in s] if len(service_ids) != 1: return 'Service not found! services = {}'.format(str(services)) subsystem, name, version = service_ids[0].split(':') return ServiceState(subsystem, name, version)
[ "def", "_get_service_state", "(", "service_id", ":", "str", ")", ":", "LOG", ".", "debug", "(", "'Getting state of service %s'", ",", "service_id", ")", "services", "=", "get_service_id_list", "(", ")", "service_ids", "=", "[", "s", "for", "s", "in", "services", "if", "service_id", "in", "s", "]", "if", "len", "(", "service_ids", ")", "!=", "1", ":", "return", "'Service not found! services = {}'", ".", "format", "(", "str", "(", "services", ")", ")", "subsystem", ",", "name", ",", "version", "=", "service_ids", "[", "0", "]", ".", "split", "(", "':'", ")", "return", "ServiceState", "(", "subsystem", ",", "name", ",", "version", ")" ]
Get the Service state object for the specified id.
[ "Get", "the", "Service", "state", "object", "for", "the", "specified", "id", "." ]
python
train
IdentityPython/pysaml2
src/saml2/validate.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/validate.py#L310-L334
def validate_value_type(value, spec): """ c_value_type = {'base': 'string', 'enumeration': ['Permit', 'Deny', 'Indeterminate']} {'member': 'anyURI', 'base': 'list'} {'base': 'anyURI'} {'base': 'NCName'} {'base': 'string'} """ if "maxlen" in spec: return len(value) <= int(spec["maxlen"]) if spec["base"] == "string": if "enumeration" in spec: if value not in spec["enumeration"]: raise NotValid("value not in enumeration") else: return valid_string(value) elif spec["base"] == "list": # comma separated list of values for val in [v.strip() for v in value.split(",")]: valid(spec["member"], val) else: return valid(spec["base"], value) return True
[ "def", "validate_value_type", "(", "value", ",", "spec", ")", ":", "if", "\"maxlen\"", "in", "spec", ":", "return", "len", "(", "value", ")", "<=", "int", "(", "spec", "[", "\"maxlen\"", "]", ")", "if", "spec", "[", "\"base\"", "]", "==", "\"string\"", ":", "if", "\"enumeration\"", "in", "spec", ":", "if", "value", "not", "in", "spec", "[", "\"enumeration\"", "]", ":", "raise", "NotValid", "(", "\"value not in enumeration\"", ")", "else", ":", "return", "valid_string", "(", "value", ")", "elif", "spec", "[", "\"base\"", "]", "==", "\"list\"", ":", "# comma separated list of values", "for", "val", "in", "[", "v", ".", "strip", "(", ")", "for", "v", "in", "value", ".", "split", "(", "\",\"", ")", "]", ":", "valid", "(", "spec", "[", "\"member\"", "]", ",", "val", ")", "else", ":", "return", "valid", "(", "spec", "[", "\"base\"", "]", ",", "value", ")", "return", "True" ]
c_value_type = {'base': 'string', 'enumeration': ['Permit', 'Deny', 'Indeterminate']} {'member': 'anyURI', 'base': 'list'} {'base': 'anyURI'} {'base': 'NCName'} {'base': 'string'}
[ "c_value_type", "=", "{", "base", ":", "string", "enumeration", ":", "[", "Permit", "Deny", "Indeterminate", "]", "}", "{", "member", ":", "anyURI", "base", ":", "list", "}", "{", "base", ":", "anyURI", "}", "{", "base", ":", "NCName", "}", "{", "base", ":", "string", "}" ]
python
train
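Assuming `validate_value_type` and `NotValid` import from `saml2.validate` as the record's path suggests, the spec dicts quoted in the docstring drive the function like this:

from saml2.validate import validate_value_type, NotValid

spec = {'base': 'string',
        'enumeration': ['Permit', 'Deny', 'Indeterminate']}

print(validate_value_type('Permit', spec))   # True
try:
    validate_value_type('Allow', spec)
except NotValid as err:
    print(err)                               # value not in enumeration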
FreshXOpenSource/wallaby-frontend-qt
wallaby/frontends/qt/reactor/qt4reactor.py
https://github.com/FreshXOpenSource/wallaby-frontend-qt/blob/eee70d0ec4ce34827f62a1654e28dbff8a8afb1a/wallaby/frontends/qt/reactor/qt4reactor.py#L171-L180
def _remove(self, xer, primary):
        """
        Private method for removing a descriptor from the event loop.

        It does the inverse job of _add, and also adds a check in case
        the fd has gone away.
        """
        if xer in primary:
            notifier = primary.pop(xer)
            notifier.shutdown()
[ "def", "_remove", "(", "self", ",", "xer", ",", "primary", ")", ":", "if", "xer", "in", "primary", ":", "notifier", "=", "primary", ".", "pop", "(", "xer", ")", "notifier", ".", "shutdown", "(", ")" ]
Private method for removing a descriptor from the event loop. It does the inverse job of _add, and also adds a check in case the fd has gone away.
[ "Private", "method", "for", "removing", "a", "descriptor", "from", "the", "event", "loop", "." ]
python
train
charnley/rmsd
rmsd/calculate_rmsd.py
https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L273-L319
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord): """ Re-orders the input atom list and xyz coordinates by atom type and then by distance of each atom from the centroid. Parameters ---------- atoms : array (N,1) matrix, where N is points holding the atoms' names coord : array (N,D) matrix, where N is points and D is dimension Returns ------- atoms_reordered : array (N,1) matrix, where N is points holding the ordered atoms' names coords_reordered : array (N,D) matrix, where N is points and D is dimension (rows re-ordered) """ # Find unique atoms unique_atoms = np.unique(p_atoms) # generate full view from q shape to fill in atom view on the fly view_reorder = np.zeros(q_atoms.shape, dtype=int) for atom in unique_atoms: p_atom_idx, = np.where(p_atoms == atom) q_atom_idx, = np.where(q_atoms == atom) A_coord = p_coord[p_atom_idx] B_coord = q_coord[q_atom_idx] # Calculate distance from each atom to centroid A_norms = np.linalg.norm(A_coord, axis=1) B_norms = np.linalg.norm(B_coord, axis=1) reorder_indices_A = np.argsort(A_norms) reorder_indices_B = np.argsort(B_norms) # Project the order of P onto Q translator = np.argsort(reorder_indices_A) view = reorder_indices_B[translator] view_reorder[p_atom_idx] = q_atom_idx[view] return view_reorder
[ "def", "reorder_distance", "(", "p_atoms", ",", "q_atoms", ",", "p_coord", ",", "q_coord", ")", ":", "# Find unique atoms", "unique_atoms", "=", "np", ".", "unique", "(", "p_atoms", ")", "# generate full view from q shape to fill in atom view on the fly", "view_reorder", "=", "np", ".", "zeros", "(", "q_atoms", ".", "shape", ",", "dtype", "=", "int", ")", "for", "atom", "in", "unique_atoms", ":", "p_atom_idx", ",", "=", "np", ".", "where", "(", "p_atoms", "==", "atom", ")", "q_atom_idx", ",", "=", "np", ".", "where", "(", "q_atoms", "==", "atom", ")", "A_coord", "=", "p_coord", "[", "p_atom_idx", "]", "B_coord", "=", "q_coord", "[", "q_atom_idx", "]", "# Calculate distance from each atom to centroid", "A_norms", "=", "np", ".", "linalg", ".", "norm", "(", "A_coord", ",", "axis", "=", "1", ")", "B_norms", "=", "np", ".", "linalg", ".", "norm", "(", "B_coord", ",", "axis", "=", "1", ")", "reorder_indices_A", "=", "np", ".", "argsort", "(", "A_norms", ")", "reorder_indices_B", "=", "np", ".", "argsort", "(", "B_norms", ")", "# Project the order of P onto Q", "translator", "=", "np", ".", "argsort", "(", "reorder_indices_A", ")", "view", "=", "reorder_indices_B", "[", "translator", "]", "view_reorder", "[", "p_atom_idx", "]", "=", "q_atom_idx", "[", "view", "]", "return", "view_reorder" ]
Re-orders the input atom list and xyz coordinates by atom type and then by distance of each atom from the centroid. Parameters ---------- atoms : array (N,1) matrix, where N is points holding the atoms' names coord : array (N,D) matrix, where N is points and D is dimension Returns ------- atoms_reordered : array (N,1) matrix, where N is points holding the ordered atoms' names coords_reordered : array (N,D) matrix, where N is points and D is dimension (rows re-ordered)
[ "Re", "-", "orders", "the", "input", "atom", "list", "and", "xyz", "coordinates", "by", "atom", "type", "and", "then", "by", "distance", "of", "each", "atom", "from", "the", "centroid", "." ]
python
train
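The pairing trick above deserves a small numeric demonstration: `argsort(argsort(A))` gives each element of A its rank, and that rank indexes Q's sorted order, so the i-th closest atom in P is matched with the i-th closest in Q. The norms below are invented:

import numpy as np

A_norms = np.array([3.0, 1.0, 2.0])   # distances to centroid in P
B_norms = np.array([2.5, 3.5, 0.5])   # distances to centroid in Q

reorder_A = np.argsort(A_norms)               # [1, 2, 0]
reorder_B = np.argsort(B_norms)               # [2, 0, 1]
view = reorder_B[np.argsort(reorder_A)]
print(view)  # [1 2 0]: P's farthest atom (index 0) maps to Q's farthest (index 1)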
project-rig/rig
rig/machine_control/boot.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/boot.py#L62-L164
def boot(hostname, boot_port=consts.BOOT_PORT, scamp_binary=None, sark_struct=None, boot_delay=0.05, post_boot_delay=2.0, sv_overrides=dict(), **kwargs): """Boot a SpiNNaker machine of the given size. Parameters ---------- hostname : str Hostname or IP address of the SpiNNaker chip to use to boot the system. boot_port : int The port number to sent boot packets to. scamp_binary : filename or None Filename of the binary to boot the machine with or None to use the SC&MP binary bundled with Rig. sark_struct : filename or None The 'sark.struct' file which defines the datastructures or None to use the one bundled with Rig. boot_delay : float Number of seconds to pause between sending boot data packets. post_boot_delay : float Number of seconds to wait after sending last piece of boot data to give SC&MP time to re-initialise the Ethernet interface. Note that this does *not* wait for the system to fully boot. sv_overrides : {name: value, ...} Values used to override the defaults in the 'sv' struct defined in the struct file. Notes ----- The constants `rig.machine_control.boot.spinX_boot_options` provide boot parameters for specific SpiNNaker board revisions, for example:: boot("board1", **spin3_boot_options) Will boot the Spin3 board connected with hostname "board1". Returns ------- {struct_name: :py:class:`~rig.machine_control.struct_file.Struct`} Layout of structs in memory. """ # Get the boot data if not specified. scamp_binary = (scamp_binary if scamp_binary is not None else pkg_resources.resource_filename("rig", "boot/scamp.boot")) sark_struct = (sark_struct if sark_struct is not None else pkg_resources.resource_filename("rig", "boot/sark.struct")) with open(scamp_binary, "rb") as f: boot_data = f.read() # Read the struct file and modify the "sv" struct to contain the # configuration values and write this into the boot data. with open(sark_struct, "rb") as f: struct_data = f.read() structs = struct_file.read_struct_file(struct_data) sv = structs[b"sv"] sv_overrides.update(kwargs) # Allow non-explicit keyword arguments for SV sv.update_default_values(**sv_overrides) sv.update_default_values(unix_time=int(time.time()), boot_sig=int(time.time()), root_chip=1) struct_packed = sv.pack() assert len(struct_packed) >= 128 # Otherwise shoving this data in is nasty buf = bytearray(boot_data) buf[BOOT_DATA_OFFSET:BOOT_DATA_OFFSET+BOOT_DATA_LENGTH] = \ struct_packed[:BOOT_DATA_LENGTH] assert len(buf) < DTCM_SIZE # Assert that we fit in DTCM boot_data = bytes(buf) # Create a socket to communicate with the board sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.connect((hostname, boot_port)) # Transmit the boot data as a series of SDP packets. First determine # how many blocks must be sent and transmit that, then transmit each # block. n_blocks = (len(buf) + BOOT_BYTE_SIZE - 1) // BOOT_BYTE_SIZE assert n_blocks <= BOOT_MAX_BLOCKS boot_packet(sock, BootCommand.start, arg3=n_blocks - 1) time.sleep(boot_delay) block = 0 while len(boot_data) > 0: # Get the data to transmit data, boot_data = (boot_data[:BOOT_BYTE_SIZE], boot_data[BOOT_BYTE_SIZE:]) # Transmit, delay and increment the block count a1 = ((BOOT_WORD_SIZE - 1) << 8) | block boot_packet(sock, BootCommand.send_block, a1, data=data) time.sleep(boot_delay) block += 1 # Send the END command boot_packet(sock, BootCommand.end, 1) # Close the socket and give time to boot sock.close() time.sleep(post_boot_delay) return structs
[ "def", "boot", "(", "hostname", ",", "boot_port", "=", "consts", ".", "BOOT_PORT", ",", "scamp_binary", "=", "None", ",", "sark_struct", "=", "None", ",", "boot_delay", "=", "0.05", ",", "post_boot_delay", "=", "2.0", ",", "sv_overrides", "=", "dict", "(", ")", ",", "*", "*", "kwargs", ")", ":", "# Get the boot data if not specified.", "scamp_binary", "=", "(", "scamp_binary", "if", "scamp_binary", "is", "not", "None", "else", "pkg_resources", ".", "resource_filename", "(", "\"rig\"", ",", "\"boot/scamp.boot\"", ")", ")", "sark_struct", "=", "(", "sark_struct", "if", "sark_struct", "is", "not", "None", "else", "pkg_resources", ".", "resource_filename", "(", "\"rig\"", ",", "\"boot/sark.struct\"", ")", ")", "with", "open", "(", "scamp_binary", ",", "\"rb\"", ")", "as", "f", ":", "boot_data", "=", "f", ".", "read", "(", ")", "# Read the struct file and modify the \"sv\" struct to contain the", "# configuration values and write this into the boot data.", "with", "open", "(", "sark_struct", ",", "\"rb\"", ")", "as", "f", ":", "struct_data", "=", "f", ".", "read", "(", ")", "structs", "=", "struct_file", ".", "read_struct_file", "(", "struct_data", ")", "sv", "=", "structs", "[", "b\"sv\"", "]", "sv_overrides", ".", "update", "(", "kwargs", ")", "# Allow non-explicit keyword arguments for SV", "sv", ".", "update_default_values", "(", "*", "*", "sv_overrides", ")", "sv", ".", "update_default_values", "(", "unix_time", "=", "int", "(", "time", ".", "time", "(", ")", ")", ",", "boot_sig", "=", "int", "(", "time", ".", "time", "(", ")", ")", ",", "root_chip", "=", "1", ")", "struct_packed", "=", "sv", ".", "pack", "(", ")", "assert", "len", "(", "struct_packed", ")", ">=", "128", "# Otherwise shoving this data in is nasty", "buf", "=", "bytearray", "(", "boot_data", ")", "buf", "[", "BOOT_DATA_OFFSET", ":", "BOOT_DATA_OFFSET", "+", "BOOT_DATA_LENGTH", "]", "=", "struct_packed", "[", ":", "BOOT_DATA_LENGTH", "]", "assert", "len", "(", "buf", ")", "<", "DTCM_SIZE", "# Assert that we fit in DTCM", "boot_data", "=", "bytes", "(", "buf", ")", "# Create a socket to communicate with the board", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "sock", ".", "connect", "(", "(", "hostname", ",", "boot_port", ")", ")", "# Transmit the boot data as a series of SDP packets. 
First determine", "# how many blocks must be sent and transmit that, then transmit each", "# block.", "n_blocks", "=", "(", "len", "(", "buf", ")", "+", "BOOT_BYTE_SIZE", "-", "1", ")", "//", "BOOT_BYTE_SIZE", "assert", "n_blocks", "<=", "BOOT_MAX_BLOCKS", "boot_packet", "(", "sock", ",", "BootCommand", ".", "start", ",", "arg3", "=", "n_blocks", "-", "1", ")", "time", ".", "sleep", "(", "boot_delay", ")", "block", "=", "0", "while", "len", "(", "boot_data", ")", ">", "0", ":", "# Get the data to transmit", "data", ",", "boot_data", "=", "(", "boot_data", "[", ":", "BOOT_BYTE_SIZE", "]", ",", "boot_data", "[", "BOOT_BYTE_SIZE", ":", "]", ")", "# Transmit, delay and increment the block count", "a1", "=", "(", "(", "BOOT_WORD_SIZE", "-", "1", ")", "<<", "8", ")", "|", "block", "boot_packet", "(", "sock", ",", "BootCommand", ".", "send_block", ",", "a1", ",", "data", "=", "data", ")", "time", ".", "sleep", "(", "boot_delay", ")", "block", "+=", "1", "# Send the END command", "boot_packet", "(", "sock", ",", "BootCommand", ".", "end", ",", "1", ")", "# Close the socket and give time to boot", "sock", ".", "close", "(", ")", "time", ".", "sleep", "(", "post_boot_delay", ")", "return", "structs" ]
Boot a SpiNNaker machine of the given size. Parameters ---------- hostname : str Hostname or IP address of the SpiNNaker chip to use to boot the system. boot_port : int The port number to sent boot packets to. scamp_binary : filename or None Filename of the binary to boot the machine with or None to use the SC&MP binary bundled with Rig. sark_struct : filename or None The 'sark.struct' file which defines the datastructures or None to use the one bundled with Rig. boot_delay : float Number of seconds to pause between sending boot data packets. post_boot_delay : float Number of seconds to wait after sending last piece of boot data to give SC&MP time to re-initialise the Ethernet interface. Note that this does *not* wait for the system to fully boot. sv_overrides : {name: value, ...} Values used to override the defaults in the 'sv' struct defined in the struct file. Notes ----- The constants `rig.machine_control.boot.spinX_boot_options` provide boot parameters for specific SpiNNaker board revisions, for example:: boot("board1", **spin3_boot_options) Will boot the Spin3 board connected with hostname "board1". Returns ------- {struct_name: :py:class:`~rig.machine_control.struct_file.Struct`} Layout of structs in memory.
[ "Boot", "a", "SpiNNaker", "machine", "of", "the", "given", "size", "." ]
python
train
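The transmit loop's block arithmetic is easy to isolate: the ceiling division announces a block count, and the slicing loop must then produce exactly that many chunks. A standalone sketch with an illustrative block size (the real SC&MP constant may differ):

BOOT_BYTE_SIZE = 1024  # illustrative, not the actual protocol value

def chunk_boot_data(boot_data):
    # Ceiling division, exactly as in the boot loop above.
    n_blocks = (len(boot_data) + BOOT_BYTE_SIZE - 1) // BOOT_BYTE_SIZE
    blocks = []
    while boot_data:
        blocks.append(boot_data[:BOOT_BYTE_SIZE])
        boot_data = boot_data[BOOT_BYTE_SIZE:]
    assert len(blocks) == n_blocks
    return blocks

print(len(chunk_boot_data(b"\x00" * 2500)))  # 3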
inasafe/inasafe
safe/gui/tools/help/osm_downloader_help.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/help/osm_downloader_help.py#L43-L133
def content(): """Helper method that returns just the content. This method was added so that the text could be reused in the dock_help module. .. versionadded:: 3.2.2 :returns: A message object without brand element. :rtype: safe.messaging.message.Message """ message = m.Message() paragraph = m.Paragraph( m.Image( 'file:///%s/img/screenshots/' 'osm-downloader-screenshot.png' % resources_path()), style_class='text-center' ) message.add(paragraph) body = tr( 'This tool will fetch building (\'structure\') or road (' '\'highway\') data from the OpenStreetMap project for you. ' 'The downloaded data will have InaSAFE keywords defined and a ' 'default QGIS style applied. To use this tool effectively:' ) tips = m.BulletedList() tips.add(tr( 'Your current extent, when opening this window, will be used to ' 'determine the area for which you want data to be retrieved. ' 'You can interactively select the area by using the ' '\'select on map\' button - which will temporarily hide this ' 'window and allow you to drag a rectangle on the map. After you ' 'have finished dragging the rectangle, this window will ' 'reappear.')) tips.add(tr( 'Check the output directory is correct. Note that the saved ' 'dataset will be named after the type of data being downloaded ' 'e.g. roads.shp or buildings.shp (and associated files).' )) tips.add(tr( 'By default simple file names will be used (e.g. roads.shp, ' 'buildings.shp). If you wish you can specify a prefix to ' 'add in front of this default name. For example using a prefix ' 'of \'padang-\' will cause the downloaded files to be saved as ' '\'padang-roads.shp\' and \'padang-buildings.shp\'. Note that ' 'the only allowed prefix characters are A-Z, a-z, 0-9 and the ' 'characters \'-\' and \'_\'. You can leave this blank if you ' 'prefer.' )) tips.add(tr( 'If a dataset already exists in the output directory it will be ' 'overwritten.' )) tips.add(tr( 'This tool requires a working internet connection and fetching ' 'buildings or roads will consume your bandwidth.')) tips.add(m.Link( 'http://www.openstreetmap.org/copyright', text=tr( 'Downloaded data is copyright OpenStreetMap contributors ' '(click for more info).') )) message.add(m.Paragraph(body)) message.add(tips) message.add(m.Paragraph( # format 'When the __Political boundaries__' for proper i18n tr('When the %s ' 'box in the Feature types menu is ticked, the Political boundary ' 'options panel will be enabled. The panel lets you select which ' 'admin level you wish to download. The admin levels are country ' 'specific. When you select an admin level, the local name for ' 'that admin level will be shown. You can change which country ' 'is used for the admin level description using the country drop ' 'down menu. The country will be automatically set to coincide ' 'with the view extent if a matching country can be found.') % ( m.ImportantText(tr('Political boundaries')).to_html(), ))) message.add(m.Paragraph( m.ImportantText(tr('Note: ')), tr( 'We have only provide presets for a subset of the available ' 'countries. If you want to know what the levels are for your ' 'country, please check on the following web page: '), m.Link( 'http://wiki.openstreetmap.org/wiki/Tag:boundary%3Dadministrative', text=tr( 'List of OSM Admin Boundary definitions ')))) return message
[ "def", "content", "(", ")", ":", "message", "=", "m", ".", "Message", "(", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "m", ".", "Image", "(", "'file:///%s/img/screenshots/'", "'osm-downloader-screenshot.png'", "%", "resources_path", "(", ")", ")", ",", "style_class", "=", "'text-center'", ")", "message", ".", "add", "(", "paragraph", ")", "body", "=", "tr", "(", "'This tool will fetch building (\\'structure\\') or road ('", "'\\'highway\\') data from the OpenStreetMap project for you. '", "'The downloaded data will have InaSAFE keywords defined and a '", "'default QGIS style applied. To use this tool effectively:'", ")", "tips", "=", "m", ".", "BulletedList", "(", ")", "tips", ".", "add", "(", "tr", "(", "'Your current extent, when opening this window, will be used to '", "'determine the area for which you want data to be retrieved. '", "'You can interactively select the area by using the '", "'\\'select on map\\' button - which will temporarily hide this '", "'window and allow you to drag a rectangle on the map. After you '", "'have finished dragging the rectangle, this window will '", "'reappear.'", ")", ")", "tips", ".", "add", "(", "tr", "(", "'Check the output directory is correct. Note that the saved '", "'dataset will be named after the type of data being downloaded '", "'e.g. roads.shp or buildings.shp (and associated files).'", ")", ")", "tips", ".", "add", "(", "tr", "(", "'By default simple file names will be used (e.g. roads.shp, '", "'buildings.shp). If you wish you can specify a prefix to '", "'add in front of this default name. For example using a prefix '", "'of \\'padang-\\' will cause the downloaded files to be saved as '", "'\\'padang-roads.shp\\' and \\'padang-buildings.shp\\'. Note that '", "'the only allowed prefix characters are A-Z, a-z, 0-9 and the '", "'characters \\'-\\' and \\'_\\'. You can leave this blank if you '", "'prefer.'", ")", ")", "tips", ".", "add", "(", "tr", "(", "'If a dataset already exists in the output directory it will be '", "'overwritten.'", ")", ")", "tips", ".", "add", "(", "tr", "(", "'This tool requires a working internet connection and fetching '", "'buildings or roads will consume your bandwidth.'", ")", ")", "tips", ".", "add", "(", "m", ".", "Link", "(", "'http://www.openstreetmap.org/copyright'", ",", "text", "=", "tr", "(", "'Downloaded data is copyright OpenStreetMap contributors '", "'(click for more info).'", ")", ")", ")", "message", ".", "add", "(", "m", ".", "Paragraph", "(", "body", ")", ")", "message", ".", "add", "(", "tips", ")", "message", ".", "add", "(", "m", ".", "Paragraph", "(", "# format 'When the __Political boundaries__' for proper i18n", "tr", "(", "'When the %s '", "'box in the Feature types menu is ticked, the Political boundary '", "'options panel will be enabled. The panel lets you select which '", "'admin level you wish to download. The admin levels are country '", "'specific. When you select an admin level, the local name for '", "'that admin level will be shown. You can change which country '", "'is used for the admin level description using the country drop '", "'down menu. 
The country will be automatically set to coincide '", "'with the view extent if a matching country can be found.'", ")", "%", "(", "m", ".", "ImportantText", "(", "tr", "(", "'Political boundaries'", ")", ")", ".", "to_html", "(", ")", ",", ")", ")", ")", "message", ".", "add", "(", "m", ".", "Paragraph", "(", "m", ".", "ImportantText", "(", "tr", "(", "'Note: '", ")", ")", ",", "tr", "(", "'We have only provide presets for a subset of the available '", "'countries. If you want to know what the levels are for your '", "'country, please check on the following web page: '", ")", ",", "m", ".", "Link", "(", "'http://wiki.openstreetmap.org/wiki/Tag:boundary%3Dadministrative'", ",", "text", "=", "tr", "(", "'List of OSM Admin Boundary definitions '", ")", ")", ")", ")", "return", "message" ]
Helper method that returns just the content. This method was added so that the text could be reused in the dock_help module. .. versionadded:: 3.2.2 :returns: A message object without brand element. :rtype: safe.messaging.message.Message
[ "Helper", "method", "that", "returns", "just", "the", "content", "." ]
python
train
pallets/werkzeug
src/werkzeug/formparser.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/formparser.py#L197-L206
def parse_from_environ(self, environ): """Parses the information from the environment as form data. :param environ: the WSGI environment to be used for parsing. :return: A tuple in the form ``(stream, form, files)``. """ content_type = environ.get("CONTENT_TYPE", "") content_length = get_content_length(environ) mimetype, options = parse_options_header(content_type) return self.parse(get_input_stream(environ), mimetype, content_length, options)
[ "def", "parse_from_environ", "(", "self", ",", "environ", ")", ":", "content_type", "=", "environ", ".", "get", "(", "\"CONTENT_TYPE\"", ",", "\"\"", ")", "content_length", "=", "get_content_length", "(", "environ", ")", "mimetype", ",", "options", "=", "parse_options_header", "(", "content_type", ")", "return", "self", ".", "parse", "(", "get_input_stream", "(", "environ", ")", ",", "mimetype", ",", "content_length", ",", "options", ")" ]
Parses the information from the environment as form data. :param environ: the WSGI environment to be used for parsing. :return: A tuple in the form ``(stream, form, files)``.
[ "Parses", "the", "information", "from", "the", "environment", "as", "form", "data", "." ]
python
train
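The `parse_options_header` call above splits `CONTENT_TYPE` into a mimetype plus an options dict; for a multipart request it behaves roughly like this (the boundary value is invented):

from werkzeug.http import parse_options_header

mimetype, options = parse_options_header(
    "multipart/form-data; boundary=AaB03x")
print(mimetype)             # multipart/form-data
print(options["boundary"])  # AaB03x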
MisterY/gnucash-portfolio
gnucash_portfolio/accounts.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/accounts.py#L258-L261
def get_aggregate_by_id(self, account_id: str) -> AccountAggregate: """ Returns the aggregate for the given id """ account = self.get_by_id(account_id) return self.get_account_aggregate(account)
[ "def", "get_aggregate_by_id", "(", "self", ",", "account_id", ":", "str", ")", "->", "AccountAggregate", ":", "account", "=", "self", ".", "get_by_id", "(", "account_id", ")", "return", "self", ".", "get_account_aggregate", "(", "account", ")" ]
Returns the aggregate for the given id
[ "Returns", "the", "aggregate", "for", "the", "given", "id" ]
python
train
trailofbits/manticore
manticore/native/manticore.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/manticore.py#L138-L154
def _hook_callback(self, state, pc, instruction): 'Invoke all registered generic hooks' # Ignore symbolic pc. # TODO(yan): Should we ask the solver if any of the hooks are possible, # and execute those that are? if issymbolic(pc): return # Invoke all pc-specific hooks for cb in self._hooks.get(pc, []): cb(state) # Invoke all pc-agnostic hooks for cb in self._hooks.get(None, []): cb(state)
[ "def", "_hook_callback", "(", "self", ",", "state", ",", "pc", ",", "instruction", ")", ":", "# Ignore symbolic pc.", "# TODO(yan): Should we ask the solver if any of the hooks are possible,", "# and execute those that are?", "if", "issymbolic", "(", "pc", ")", ":", "return", "# Invoke all pc-specific hooks", "for", "cb", "in", "self", ".", "_hooks", ".", "get", "(", "pc", ",", "[", "]", ")", ":", "cb", "(", "state", ")", "# Invoke all pc-agnostic hooks", "for", "cb", "in", "self", ".", "_hooks", ".", "get", "(", "None", ",", "[", "]", ")", ":", "cb", "(", "state", ")" ]
Invoke all registered generic hooks
[ "Invoke", "all", "registered", "generic", "hooks" ]
python
valid
jaybaird/python-bloomfilter
pybloom/pybloom.py
https://github.com/jaybaird/python-bloomfilter/blob/2bbe01ad49965bf759e31781e6820408068862ac/pybloom/pybloom.py#L388-L410
def tofile(self, f): """Serialize this ScalableBloomFilter into the file-object `f'.""" f.write(pack(self.FILE_FMT, self.scale, self.ratio, self.initial_capacity, self.error_rate)) # Write #-of-filters f.write(pack(b'<l', len(self.filters))) if len(self.filters) > 0: # Then each filter directly, with a header describing # their lengths. headerpos = f.tell() headerfmt = b'<' + b'Q'*(len(self.filters)) f.write(b'.' * calcsize(headerfmt)) filter_sizes = [] for filter in self.filters: begin = f.tell() filter.tofile(f) filter_sizes.append(f.tell() - begin) f.seek(headerpos) f.write(pack(headerfmt, *filter_sizes))
[ "def", "tofile", "(", "self", ",", "f", ")", ":", "f", ".", "write", "(", "pack", "(", "self", ".", "FILE_FMT", ",", "self", ".", "scale", ",", "self", ".", "ratio", ",", "self", ".", "initial_capacity", ",", "self", ".", "error_rate", ")", ")", "# Write #-of-filters", "f", ".", "write", "(", "pack", "(", "b'<l'", ",", "len", "(", "self", ".", "filters", ")", ")", ")", "if", "len", "(", "self", ".", "filters", ")", ">", "0", ":", "# Then each filter directly, with a header describing", "# their lengths.", "headerpos", "=", "f", ".", "tell", "(", ")", "headerfmt", "=", "b'<'", "+", "b'Q'", "*", "(", "len", "(", "self", ".", "filters", ")", ")", "f", ".", "write", "(", "b'.'", "*", "calcsize", "(", "headerfmt", ")", ")", "filter_sizes", "=", "[", "]", "for", "filter", "in", "self", ".", "filters", ":", "begin", "=", "f", ".", "tell", "(", ")", "filter", ".", "tofile", "(", "f", ")", "filter_sizes", ".", "append", "(", "f", ".", "tell", "(", ")", "-", "begin", ")", "f", ".", "seek", "(", "headerpos", ")", "f", ".", "write", "(", "pack", "(", "headerfmt", ",", "*", "filter_sizes", ")", ")" ]
Serialize this ScalableBloomFilter into the file-object `f'.
[ "Serialize", "this", "ScalableBloomFilter", "into", "the", "file", "-", "object", "f", "." ]
python
train
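The serializer above uses a reserve-then-backpatch pattern: placeholder bytes stand in for the length header, and once the payload sizes are known the file position rewinds to overwrite them. The same pattern on an in-memory buffer, with invented payloads:

import io
from struct import pack, calcsize

f = io.BytesIO()
payloads = [b"abc", b"de"]

headerpos = f.tell()
headerfmt = b"<" + b"Q" * len(payloads)
f.write(b"." * calcsize(headerfmt))   # placeholder header

sizes = []
for payload in payloads:
    begin = f.tell()
    f.write(payload)
    sizes.append(f.tell() - begin)    # record each payload's length

f.seek(headerpos)
f.write(pack(headerfmt, *sizes))      # backpatch the real lengths
print(sizes)  # [3, 2]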
nickmckay/LiPD-utilities
Python/lipd/misc.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L534-L554
def prompt_protocol():
    """
    Prompt user if they would like to save pickle file as a dictionary or an object.

    :return str: Answer
    """
    stop = 3
    ans = ""
    while stop > 0:
        ans = input("Save as (d)ictionary or (o)bject?\n"
                    "* Note:\n"
                    "Dictionaries are more basic, and are compatible with Python v2.7+.\n"
                    "Objects are more complex, and are only compatible with v3.4+ ")
        if ans not in ("d", "o"):
            print("Invalid response: Please choose 'd' or 'o'")
            # use up one of the limited attempts so the loop can terminate
            stop -= 1
        else:
            break
    # if a valid answer isn't captured, default to dictionary (safer, broader)
    if ans not in ("d", "o"):
        ans = "d"
    return ans
[ "def", "prompt_protocol", "(", ")", ":", "stop", "=", "3", "ans", "=", "\"\"", "while", "stop", ">", "0", ":", "ans", "=", "input", "(", "\"Save as (d)ictionary or (o)bject?\\n\"", "\"* Note:\\n\"", "\"Dictionaries are more basic, and are compatible with Python v2.7+.\\n\"", "\"Objects are more complex, and are only compatible with v3.4+ \"", ")", "if", "ans", "not", "in", "(", "\"d\"", ",", "\"o\"", ")", ":", "print", "(", "\"Invalid response: Please choose 'd' or 'o'\"", ")", "# use up one of the limited attempts so the loop can terminate", "stop", "-=", "1", "else", ":", "break", "# if a valid answer isn't captured, default to dictionary (safer, broader)", "if", "ans", "not", "in", "(", "\"d\"", ",", "\"o\"", ")", ":", "ans", "=", "\"d\"", "return", "ans" ]
Prompt user if they would like to save pickle file as a dictionary or an object. :return str: Answer
[ "Prompt", "user", "if", "they", "would", "like", "to", "save", "pickle", "file", "as", "a", "dictionary", "or", "an", "object", "." ]
python
train
gem/oq-engine
openquake/baselib/general.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/general.py#L973-L985
def random_filter(objects, reduction_factor, seed=42):
    """
    Given a list of objects, returns a sublist by randomly extracting
    some elements. The reduction factor (< 1) tells how small the
    extracted list is compared to the original list.
    """
    assert 0 < reduction_factor <= 1, reduction_factor
    rnd = random.Random(seed)
    out = []
    for obj in objects:
        if rnd.random() <= reduction_factor:
            out.append(obj)
    return out
[ "def", "random_filter", "(", "objects", ",", "reduction_factor", ",", "seed", "=", "42", ")", ":", "assert", "0", "<", "reduction_factor", "<=", "1", ",", "reduction_factor", "rnd", "=", "random", ".", "Random", "(", "seed", ")", "out", "=", "[", "]", "for", "obj", "in", "objects", ":", "if", "rnd", ".", "random", "(", ")", "<=", "reduction_factor", ":", "out", ".", "append", "(", "obj", ")", "return", "out" ]
Given a list of objects, returns a sublist by randomly extracting some elements. The reduction factor (< 1) tells how small the extracted list is compared to the original list.
[ "Given", "a", "list", "of", "objects", "returns", "a", "sublist", "by", "randomly", "extracting", "some", "elements", ".", "The", "reduction", "factor", "(", "<", "1", ")", "tells", "how", "small", "the", "extracted", "list", "is", "compared", "to", "the", "original", "list", "." ]
python
train
theonlypwner/crc32
crc32.py
https://github.com/theonlypwner/crc32/blob/b2c8ee40fecd07b2b0670be592c55fe7122429d8/crc32.py#L215-L314
def get_parser(): ''' Return the command-line parser ''' parser = argparse.ArgumentParser( description="Reverse, undo, and calculate CRC32 checksums") subparsers = parser.add_subparsers(metavar='action') poly_flip_parser = argparse.ArgumentParser(add_help=False) subparser_group = poly_flip_parser.add_mutually_exclusive_group() subparser_group.add_argument( '-m', '--msbit', dest="msb", action='store_true', help='treat the polynomial as normal (msbit-first)') subparser_group.add_argument('-l', '--lsbit', action='store_false', help='treat the polynomial as reversed (lsbit-first) [default]') desired_poly_parser = argparse.ArgumentParser(add_help=False) desired_poly_parser.add_argument( 'desired', type=str, help='[int] desired checksum') default_poly_parser = argparse.ArgumentParser(add_help=False) default_poly_parser.add_argument( 'poly', default='0xEDB88320', type=str, nargs='?', help='[int] polynomial [default: 0xEDB88320]') accum_parser = argparse.ArgumentParser(add_help=False) accum_parser.add_argument( 'accum', type=str, help='[int] accumulator (final checksum)') default_accum_parser = argparse.ArgumentParser(add_help=False) default_accum_parser.add_argument( 'accum', default='0', type=str, nargs='?', help='[int] starting accumulator [default: 0]') outfile_parser = argparse.ArgumentParser(add_help=False) outfile_parser.add_argument('-o', '--outfile', metavar="f", type=argparse.FileType('w'), default=sys.stdout, help="Output to a file instead of stdout") infile_parser = argparse.ArgumentParser(add_help=False) subparser_group = infile_parser.add_mutually_exclusive_group() subparser_group.add_argument('-i', '--infile', metavar="f", type=argparse.FileType('rb'), default=sys.stdin, help="Input from a file instead of stdin") subparser_group.add_argument('-s', '--str', metavar="s", type=str, default='', dest='instr', help="Use a string as input") subparser = subparsers.add_parser('flip', parents=[outfile_parser], help="flip the bits to convert normal(msbit-first) polynomials to reversed (lsbit-first) and vice versa") subparser.add_argument('poly', type=str, help='[int] polynomial') subparser.set_defaults( func=lambda: print_num(reverseBits(parse_dword(args.poly)))) subparser = subparsers.add_parser('reciprocal', parents=[outfile_parser], help="find the reciprocal (Koopman notation) of a reversed (lsbit-first) polynomial and vice versa") subparser.add_argument('poly', type=str, help='[int] polynomial') subparser.set_defaults(func=reciprocal_callback) subparser = subparsers.add_parser('table', parents=[outfile_parser, poly_flip_parser, default_poly_parser], help="generate a lookup table for a polynomial") subparser.set_defaults(func=table_callback) subparser = subparsers.add_parser('reverse', parents=[ outfile_parser, poly_flip_parser, desired_poly_parser, default_accum_parser, default_poly_parser], help="find a patch that causes the CRC32 checksum to become a desired value") subparser.set_defaults(func=reverse_callback) subparser = subparsers.add_parser('undo', parents=[ outfile_parser, poly_flip_parser, accum_parser, default_poly_parser, infile_parser], help="rewind a CRC32 checksum") subparser.add_argument('-n', '--len', metavar='l', type=str, default='0', help='[int] number of bytes to rewind [default: 0]') subparser.set_defaults(func=undo_callback) subparser = subparsers.add_parser('calc', parents=[ outfile_parser, poly_flip_parser, default_accum_parser, default_poly_parser, infile_parser], help="calculate the CRC32 checksum") subparser.set_defaults(func=calc_callback) return parser
[ "def", "get_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Reverse, undo, and calculate CRC32 checksums\"", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", "metavar", "=", "'action'", ")", "poly_flip_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "subparser_group", "=", "poly_flip_parser", ".", "add_mutually_exclusive_group", "(", ")", "subparser_group", ".", "add_argument", "(", "'-m'", ",", "'--msbit'", ",", "dest", "=", "\"msb\"", ",", "action", "=", "'store_true'", ",", "help", "=", "'treat the polynomial as normal (msbit-first)'", ")", "subparser_group", ".", "add_argument", "(", "'-l'", ",", "'--lsbit'", ",", "action", "=", "'store_false'", ",", "help", "=", "'treat the polynomial as reversed (lsbit-first) [default]'", ")", "desired_poly_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "desired_poly_parser", ".", "add_argument", "(", "'desired'", ",", "type", "=", "str", ",", "help", "=", "'[int] desired checksum'", ")", "default_poly_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "default_poly_parser", ".", "add_argument", "(", "'poly'", ",", "default", "=", "'0xEDB88320'", ",", "type", "=", "str", ",", "nargs", "=", "'?'", ",", "help", "=", "'[int] polynomial [default: 0xEDB88320]'", ")", "accum_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "accum_parser", ".", "add_argument", "(", "'accum'", ",", "type", "=", "str", ",", "help", "=", "'[int] accumulator (final checksum)'", ")", "default_accum_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "default_accum_parser", ".", "add_argument", "(", "'accum'", ",", "default", "=", "'0'", ",", "type", "=", "str", ",", "nargs", "=", "'?'", ",", "help", "=", "'[int] starting accumulator [default: 0]'", ")", "outfile_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "outfile_parser", ".", "add_argument", "(", "'-o'", ",", "'--outfile'", ",", "metavar", "=", "\"f\"", ",", "type", "=", "argparse", ".", "FileType", "(", "'w'", ")", ",", "default", "=", "sys", ".", "stdout", ",", "help", "=", "\"Output to a file instead of stdout\"", ")", "infile_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "subparser_group", "=", "infile_parser", ".", "add_mutually_exclusive_group", "(", ")", "subparser_group", ".", "add_argument", "(", "'-i'", ",", "'--infile'", ",", "metavar", "=", "\"f\"", ",", "type", "=", "argparse", ".", "FileType", "(", "'rb'", ")", ",", "default", "=", "sys", ".", "stdin", ",", "help", "=", "\"Input from a file instead of stdin\"", ")", "subparser_group", ".", "add_argument", "(", "'-s'", ",", "'--str'", ",", "metavar", "=", "\"s\"", ",", "type", "=", "str", ",", "default", "=", "''", ",", "dest", "=", "'instr'", ",", "help", "=", "\"Use a string as input\"", ")", "subparser", "=", "subparsers", ".", "add_parser", "(", "'flip'", ",", "parents", "=", "[", "outfile_parser", "]", ",", "help", "=", "\"flip the bits to convert normal(msbit-first) polynomials to reversed (lsbit-first) and vice versa\"", ")", "subparser", ".", "add_argument", "(", "'poly'", ",", "type", "=", "str", ",", "help", "=", "'[int] polynomial'", ")", "subparser", ".", "set_defaults", "(", "func", "=", "lambda", ":", "print_num", "(", "reverseBits", "(", "parse_dword", "(", "args", ".", "poly", ")", ")", ")", ")", "subparser", "=", "subparsers", 
".", "add_parser", "(", "'reciprocal'", ",", "parents", "=", "[", "outfile_parser", "]", ",", "help", "=", "\"find the reciprocal (Koopman notation) of a reversed (lsbit-first) polynomial and vice versa\"", ")", "subparser", ".", "add_argument", "(", "'poly'", ",", "type", "=", "str", ",", "help", "=", "'[int] polynomial'", ")", "subparser", ".", "set_defaults", "(", "func", "=", "reciprocal_callback", ")", "subparser", "=", "subparsers", ".", "add_parser", "(", "'table'", ",", "parents", "=", "[", "outfile_parser", ",", "poly_flip_parser", ",", "default_poly_parser", "]", ",", "help", "=", "\"generate a lookup table for a polynomial\"", ")", "subparser", ".", "set_defaults", "(", "func", "=", "table_callback", ")", "subparser", "=", "subparsers", ".", "add_parser", "(", "'reverse'", ",", "parents", "=", "[", "outfile_parser", ",", "poly_flip_parser", ",", "desired_poly_parser", ",", "default_accum_parser", ",", "default_poly_parser", "]", ",", "help", "=", "\"find a patch that causes the CRC32 checksum to become a desired value\"", ")", "subparser", ".", "set_defaults", "(", "func", "=", "reverse_callback", ")", "subparser", "=", "subparsers", ".", "add_parser", "(", "'undo'", ",", "parents", "=", "[", "outfile_parser", ",", "poly_flip_parser", ",", "accum_parser", ",", "default_poly_parser", ",", "infile_parser", "]", ",", "help", "=", "\"rewind a CRC32 checksum\"", ")", "subparser", ".", "add_argument", "(", "'-n'", ",", "'--len'", ",", "metavar", "=", "'l'", ",", "type", "=", "str", ",", "default", "=", "'0'", ",", "help", "=", "'[int] number of bytes to rewind [default: 0]'", ")", "subparser", ".", "set_defaults", "(", "func", "=", "undo_callback", ")", "subparser", "=", "subparsers", ".", "add_parser", "(", "'calc'", ",", "parents", "=", "[", "outfile_parser", ",", "poly_flip_parser", ",", "default_accum_parser", ",", "default_poly_parser", ",", "infile_parser", "]", ",", "help", "=", "\"calculate the CRC32 checksum\"", ")", "subparser", ".", "set_defaults", "(", "func", "=", "calc_callback", ")", "return", "parser" ]
Return the command-line parser
[ "Return", "the", "command", "-", "line", "parser" ]
python
train
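The parser above leans on argparse's `parents=` mechanism: shared option groups are declared once with `add_help=False` and mixed into each subcommand. A reduced sketch of that composition with invented options:

import argparse

common = argparse.ArgumentParser(add_help=False)
common.add_argument("-o", "--outfile", default="-")

parser = argparse.ArgumentParser(description="demo")
subparsers = parser.add_subparsers(dest="action")
calc = subparsers.add_parser("calc", parents=[common])
calc.add_argument("poly", nargs="?", default="0xEDB88320")

args = parser.parse_args(["calc", "-o", "out.txt"])
print(args.outfile, args.poly)  # out.txt 0xEDB88320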
Groundworkstech/pybfd
pybfd/bfd.py
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L537-L542
def my_archieve(self): """Return the my archieve attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.MY_ARCHIEVE)
[ "def", "my_archieve", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "MY_ARCHIEVE", ")" ]
Return the my archieve attribute of the BFD file being processed.
[ "Return", "the", "my", "archieve", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
python
train
fboender/ansible-cmdb
src/ansiblecmdb/ansible.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/src/ansiblecmdb/ansible.py#L348-L361
def hosts_in_group(self, groupname): """ Return a list of hostnames that are in a group. """ result = [] for hostname, hostinfo in self.hosts.items(): if groupname == 'all': result.append(hostname) elif 'groups' in hostinfo: if groupname in hostinfo['groups']: result.append(hostname) else: hostinfo['groups'] = [groupname] return result
[ "def", "hosts_in_group", "(", "self", ",", "groupname", ")", ":", "result", "=", "[", "]", "for", "hostname", ",", "hostinfo", "in", "self", ".", "hosts", ".", "items", "(", ")", ":", "if", "groupname", "==", "'all'", ":", "result", ".", "append", "(", "hostname", ")", "elif", "'groups'", "in", "hostinfo", ":", "if", "groupname", "in", "hostinfo", "[", "'groups'", "]", ":", "result", ".", "append", "(", "hostname", ")", "else", ":", "hostinfo", "[", "'groups'", "]", "=", "[", "groupname", "]", "return", "result" ]
Return a list of hostnames that are in a group.
[ "Return", "a", "list", "of", "hostnames", "that", "are", "in", "a", "group", "." ]
python
train
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L1792-L1821
def add_valid(self, data, name): """Add validation data. Parameters ---------- data : Dataset Validation data. name : string Name of validation data. Returns ------- self : Booster Booster with set validation data. """ if not isinstance(data, Dataset): raise TypeError('Validation data should be Dataset instance, met {}' .format(type(data).__name__)) if data._predictor is not self.__init_predictor: raise LightGBMError("Add validation data failed, " "you should use same predictor for these data") _safe_call(_LIB.LGBM_BoosterAddValidData( self.handle, data.construct().handle)) self.valid_sets.append(data) self.name_valid_sets.append(name) self.__num_dataset += 1 self.__inner_predict_buffer.append(None) self.__is_predicted_cur_iter.append(False) return self
[ "def", "add_valid", "(", "self", ",", "data", ",", "name", ")", ":", "if", "not", "isinstance", "(", "data", ",", "Dataset", ")", ":", "raise", "TypeError", "(", "'Validation data should be Dataset instance, met {}'", ".", "format", "(", "type", "(", "data", ")", ".", "__name__", ")", ")", "if", "data", ".", "_predictor", "is", "not", "self", ".", "__init_predictor", ":", "raise", "LightGBMError", "(", "\"Add validation data failed, \"", "\"you should use same predictor for these data\"", ")", "_safe_call", "(", "_LIB", ".", "LGBM_BoosterAddValidData", "(", "self", ".", "handle", ",", "data", ".", "construct", "(", ")", ".", "handle", ")", ")", "self", ".", "valid_sets", ".", "append", "(", "data", ")", "self", ".", "name_valid_sets", ".", "append", "(", "name", ")", "self", ".", "__num_dataset", "+=", "1", "self", ".", "__inner_predict_buffer", ".", "append", "(", "None", ")", "self", ".", "__is_predicted_cur_iter", ".", "append", "(", "False", ")", "return", "self" ]
Add validation data. Parameters ---------- data : Dataset Validation data. name : string Name of validation data. Returns ------- self : Booster Booster with set validation data.
[ "Add", "validation", "data", "." ]
python
train
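A hedged usage sketch for add_valid: the validation set is derived from the training set with Dataset.create_valid so both share the same predictor, which is exactly the precondition the method checks. Data and parameter values are illustrative, and the API shown is the LightGBM version of this record:

    import numpy as np
    import lightgbm as lgb

    X, y = np.random.rand(100, 5), np.random.rand(100)
    dtrain = lgb.Dataset(X[:80], label=y[:80])
    dvalid = dtrain.create_valid(X[80:], label=y[80:])

    booster = lgb.Booster(params={'objective': 'regression'}, train_set=dtrain)
    booster.add_valid(dvalid, 'holdout')  # returns the booster itself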
SCIP-Interfaces/PySCIPOpt
examples/unfinished/vrp.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/unfinished/vrp.py#L15-L78
def solve_vrp(V,c,m,q,Q): """solve_vrp -- solve the vehicle routing problem. - start with assignment model (depot has a special status) - add cuts until all components of the graph are connected Parameters: - V: set/list of nodes in the graph - c[i,j]: cost for traversing edge (i,j) - m: number of vehicles available - q[i]: demand for customer i - Q: vehicle capacity Returns the optimum objective value and the list of edges used. """ def addcut(cut_edges): """addcut: add constraint to eliminate infeasible solutions Parameters: - cut_edges: list of edges in the current solution, except connections to depot Returns True if a cut was added, False otherwise """ G = networkx.Graph() G.add_edges_from(cut_edges) Components = networkx.connected_components(G) cut = False for S in Components: S_card = len(S) q_sum = sum(q[i] for i in S) NS = int(math.ceil(float(q_sum)/Q)) S_edges = [(i,j) for i in S for j in S if i<j and (i,j) in cut_edges] if S_card >= 3 and (len(S_edges) >= S_card or NS > 1): add = model.addCons(quicksum(x[i,j] for i in S for j in S if j > i) <= S_card-NS) cut = True return cut model = Model("vrp") x = {} for i in V: for j in V: if j > i and i == V[0]: # depot x[i,j] = model.addVar(ub=2, vtype="I", name="x(%s,%s)"%(i,j)) elif j > i: x[i,j] = model.addVar(ub=1, vtype="I", name="x(%s,%s)"%(i,j)) model.addCons(quicksum(x[V[0],j] for j in V[1:]) == 2*m, "DegreeDepot") for i in V[1:]: model.addCons(quicksum(x[j,i] for j in V if j < i) + quicksum(x[i,j] for j in V if j > i) == 2, "Degree(%s)"%i) model.setObjective(quicksum(c[i,j]*x[i,j] for i in V for j in V if j>i), "minimize") model.hideOutput() EPS = 1.e-6 while True: model.optimize() edges = [] for (i,j) in x: if model.getVal(x[i,j]) > EPS: if i != V[0] and j != V[0]: edges.append((i,j)) if addcut(edges) == False: break return model.getObjVal(),edges
[ "def", "solve_vrp", "(", "V", ",", "c", ",", "m", ",", "q", ",", "Q", ")", ":", "def", "addcut", "(", "cut_edges", ")", ":", "\"\"\"addcut: add constraint to eliminate infeasible solutions\n Parameters:\n - cut_edges: list of edges in the current solution, except connections to depot\n Returns True if a cut was added, False otherwise\n \"\"\"", "G", "=", "networkx", ".", "Graph", "(", ")", "G", ".", "add_edges_from", "(", "cut_edges", ")", "Components", "=", "networkx", ".", "connected_components", "(", "G", ")", "cut", "=", "False", "for", "S", "in", "Components", ":", "S_card", "=", "len", "(", "S", ")", "q_sum", "=", "sum", "(", "q", "[", "i", "]", "for", "i", "in", "S", ")", "NS", "=", "int", "(", "math", ".", "ceil", "(", "float", "(", "q_sum", ")", "/", "Q", ")", ")", "S_edges", "=", "[", "(", "i", ",", "j", ")", "for", "i", "in", "S", "for", "j", "in", "S", "if", "i", "<", "j", "and", "(", "i", ",", "j", ")", "in", "cut_edges", "]", "if", "S_card", ">=", "3", "and", "(", "len", "(", "S_edges", ")", ">=", "S_card", "or", "NS", ">", "1", ")", ":", "add", "=", "model", ".", "addCons", "(", "quicksum", "(", "x", "[", "i", ",", "j", "]", "for", "i", "in", "S", "for", "j", "in", "S", "if", "j", ">", "i", ")", "<=", "S_card", "-", "NS", ")", "cut", "=", "True", "return", "cut", "model", "=", "Model", "(", "\"vrp\"", ")", "x", "=", "{", "}", "for", "i", "in", "V", ":", "for", "j", "in", "V", ":", "if", "j", ">", "i", "and", "i", "==", "V", "[", "0", "]", ":", "# depot", "x", "[", "i", ",", "j", "]", "=", "model", ".", "addVar", "(", "ub", "=", "2", ",", "vtype", "=", "\"I\"", ",", "name", "=", "\"x(%s,%s)\"", "%", "(", "i", ",", "j", ")", ")", "elif", "j", ">", "i", ":", "x", "[", "i", ",", "j", "]", "=", "model", ".", "addVar", "(", "ub", "=", "1", ",", "vtype", "=", "\"I\"", ",", "name", "=", "\"x(%s,%s)\"", "%", "(", "i", ",", "j", ")", ")", "model", ".", "addCons", "(", "quicksum", "(", "x", "[", "V", "[", "0", "]", ",", "j", "]", "for", "j", "in", "V", "[", "1", ":", "]", ")", "==", "2", "*", "m", ",", "\"DegreeDepot\"", ")", "for", "i", "in", "V", "[", "1", ":", "]", ":", "model", ".", "addCons", "(", "quicksum", "(", "x", "[", "j", ",", "i", "]", "for", "j", "in", "V", "if", "j", "<", "i", ")", "+", "quicksum", "(", "x", "[", "i", ",", "j", "]", "for", "j", "in", "V", "if", "j", ">", "i", ")", "==", "2", ",", "\"Degree(%s)\"", "%", "i", ")", "model", ".", "setObjective", "(", "quicksum", "(", "c", "[", "i", ",", "j", "]", "*", "x", "[", "i", ",", "j", "]", "for", "i", "in", "V", "for", "j", "in", "V", "if", "j", ">", "i", ")", ",", "\"minimize\"", ")", "model", ".", "hideOutput", "(", ")", "EPS", "=", "1.e-6", "while", "True", ":", "model", ".", "optimize", "(", ")", "edges", "=", "[", "]", "for", "(", "i", ",", "j", ")", "in", "x", ":", "if", "model", ".", "getVal", "(", "x", "[", "i", ",", "j", "]", ")", ">", "EPS", ":", "if", "i", "!=", "V", "[", "0", "]", "and", "j", "!=", "V", "[", "0", "]", ":", "edges", ".", "append", "(", "(", "i", ",", "j", ")", ")", "if", "addcut", "(", "edges", ")", "==", "False", ":", "break", "return", "model", ".", "getObjVal", "(", ")", ",", "edges" ]
solve_vrp -- solve the vehicle routing problem. - start with assignment model (depot has a special status) - add cuts until all components of the graph are connected Parameters: - V: set/list of nodes in the graph - c[i,j]: cost for traversing edge (i,j) - m: number of vehicles available - q[i]: demand for customer i - Q: vehicle capacity Returns the optimum objective value and the list of edges used.
[ "solve_vrp", "--", "solve", "the", "vehicle", "routing", "problem", ".", "-", "start", "with", "assignment", "model", "(", "depot", "has", "a", "special", "status", ")", "-", "add", "cuts", "until", "all", "components", "of", "the", "graph", "are", "connected", "Parameters", ":", "-", "V", ":", "set", "/", "list", "of", "nodes", "in", "the", "graph", "-", "c", "[", "i", "j", "]", ":", "cost", "for", "traversing", "edge", "(", "i", "j", ")", "-", "m", ":", "number", "of", "vehicles", "available", "-", "q", "[", "i", "]", ":", "demand", "for", "customer", "i", "-", "Q", ":", "vehicle", "capacity", "Returns", "the", "optimum", "objective", "value", "and", "the", "list", "of", "edges", "used", "." ]
python
train
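A small randomized instance for solve_vrp (needs PySCIPOpt and networkx installed, plus the function above in scope); node 1 acts as the depot and all data below are made up:

    import math
    import random

    random.seed(1)
    V = list(range(1, 9))                      # node 1 is the depot
    pos = {i: (random.random(), random.random()) for i in V}
    c = {(i, j): math.hypot(pos[i][0] - pos[j][0], pos[i][1] - pos[j][1])
         for i in V for j in V if j > i}
    q = {i: 1 for i in V}                      # unit demand per customer
    obj, edges = solve_vrp(V, c, m=2, q=q, Q=5)
    print(obj, edges)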
Qiskit/qiskit-terra
qiskit/circuit/quantumcircuit.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/circuit/quantumcircuit.py#L155-L182
def combine(self, rhs): """ Append rhs to self if self contains compatible registers. Two circuits are compatible if they contain the same registers or if they contain different registers with unique names. The returned circuit will contain all unique registers between both circuits. Return self + rhs as a new object. """ # Check registers in LHS are compatible with RHS self._check_compatible_regs(rhs) # Make new circuit with combined registers combined_qregs = deepcopy(self.qregs) combined_cregs = deepcopy(self.cregs) for element in rhs.qregs: if element not in self.qregs: combined_qregs.append(element) for element in rhs.cregs: if element not in self.cregs: combined_cregs.append(element) circuit = QuantumCircuit(*combined_qregs, *combined_cregs) for instruction_context in itertools.chain(self.data, rhs.data): circuit.append(*instruction_context) return circuit
[ "def", "combine", "(", "self", ",", "rhs", ")", ":", "# Check registers in LHS are compatible with RHS", "self", ".", "_check_compatible_regs", "(", "rhs", ")", "# Make new circuit with combined registers", "combined_qregs", "=", "deepcopy", "(", "self", ".", "qregs", ")", "combined_cregs", "=", "deepcopy", "(", "self", ".", "cregs", ")", "for", "element", "in", "rhs", ".", "qregs", ":", "if", "element", "not", "in", "self", ".", "qregs", ":", "combined_qregs", ".", "append", "(", "element", ")", "for", "element", "in", "rhs", ".", "cregs", ":", "if", "element", "not", "in", "self", ".", "cregs", ":", "combined_cregs", ".", "append", "(", "element", ")", "circuit", "=", "QuantumCircuit", "(", "*", "combined_qregs", ",", "*", "combined_cregs", ")", "for", "instruction_context", "in", "itertools", ".", "chain", "(", "self", ".", "data", ",", "rhs", ".", "data", ")", ":", "circuit", ".", "append", "(", "*", "instruction_context", ")", "return", "circuit" ]
Append rhs to self if self contains compatible registers. Two circuits are compatible if they contain the same registers or if they contain different registers with unique names. The returned circuit will contain all unique registers between both circuits. Return self + rhs as a new object.
[ "Append", "rhs", "to", "self", "if", "self", "contains", "compatible", "registers", "." ]
python
test
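combine backs the + operator on QuantumCircuit in this Terra version; a quick sketch (the interface shown is the pre-1.0 API of this record and may differ in newer releases):

    from qiskit import QuantumCircuit, QuantumRegister

    qr = QuantumRegister(2, 'q')
    bell_a = QuantumCircuit(qr)
    bell_a.h(qr[0])
    bell_b = QuantumCircuit(qr)
    bell_b.cx(qr[0], qr[1])

    bell = bell_a.combine(bell_b)  # same result as `bell_a + bell_b`
    print(bell)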
pandas-dev/pandas
pandas/core/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4843-L4953
def nlargest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(3, 'population') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT When using ``keep='last'``, ties are resolved in reverse order: >>> df.nlargest(3, 'population', keep='last') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN When using ``keep='all'``, all duplicate items are maintained: >>> df.nlargest(3, 'population', keep='all') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nlargest(3, ['population', 'GDP']) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
[ "def", "nlargest", "(", "self", ",", "n", ",", "columns", ",", "keep", "=", "'first'", ")", ":", "return", "algorithms", ".", "SelectNFrame", "(", "self", ",", "n", "=", "n", ",", "keep", "=", "keep", ",", "columns", "=", "columns", ")", ".", "nlargest", "(", ")" ]
Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(3, 'population') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT When using ``keep='last'``, ties are resolved in reverse order: >>> df.nlargest(3, 'population', keep='last') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN When using ``keep='all'``, all duplicate items are maintained: >>> df.nlargest(3, 'population', keep='all') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nlargest(3, ['population', 'GDP']) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN
[ "Return", "the", "first", "n", "rows", "ordered", "by", "columns", "in", "descending", "order", "." ]
python
train
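The docstring's equivalence claim is easy to check directly; a tiny verification sketch:

    import pandas as pd

    df = pd.DataFrame({'population': [59000000, 65000000, 434000],
                       'GDP': [1937894, 2583560, 12011]},
                      index=['Italy', 'France', 'Malta'])

    fast = df.nlargest(2, 'population')
    slow = df.sort_values('population', ascending=False).head(2)
    assert fast.equals(slow)  # identical rows; nlargest just avoids the full sort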
ArchiveTeam/wpull
wpull/application/tasks/download.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/application/tasks/download.py#L180-L209
def _build_cookie_jar(cls, session: AppSession): '''Build the cookie jar''' if not session.args.cookies: return if session.args.load_cookies or session.args.save_cookies: session.factory.set('CookieJar', BetterMozillaCookieJar) cookie_jar = session.factory.new('CookieJar') if session.args.load_cookies: cookie_jar.load(session.args.load_cookies, ignore_discard=True) else: cookie_jar = session.factory.new('CookieJar') policy = session.factory.new('CookiePolicy', cookie_jar=cookie_jar) cookie_jar.set_policy(policy) _logger.debug(__('Loaded cookies: {0}', list(cookie_jar))) cookie_jar_wrapper = session.factory.new( 'CookieJarWrapper', cookie_jar, save_filename=session.args.save_cookies, keep_session_cookies=session.args.keep_session_cookies, ) return cookie_jar_wrapper
[ "def", "_build_cookie_jar", "(", "cls", ",", "session", ":", "AppSession", ")", ":", "if", "not", "session", ".", "args", ".", "cookies", ":", "return", "if", "session", ".", "args", ".", "load_cookies", "or", "session", ".", "args", ".", "save_cookies", ":", "session", ".", "factory", ".", "set", "(", "'CookieJar'", ",", "BetterMozillaCookieJar", ")", "cookie_jar", "=", "session", ".", "factory", ".", "new", "(", "'CookieJar'", ")", "if", "session", ".", "args", ".", "load_cookies", ":", "cookie_jar", ".", "load", "(", "session", ".", "args", ".", "load_cookies", ",", "ignore_discard", "=", "True", ")", "else", ":", "cookie_jar", "=", "session", ".", "factory", ".", "new", "(", "'CookieJar'", ")", "policy", "=", "session", ".", "factory", ".", "new", "(", "'CookiePolicy'", ",", "cookie_jar", "=", "cookie_jar", ")", "cookie_jar", ".", "set_policy", "(", "policy", ")", "_logger", ".", "debug", "(", "__", "(", "'Loaded cookies: {0}'", ",", "list", "(", "cookie_jar", ")", ")", ")", "cookie_jar_wrapper", "=", "session", ".", "factory", ".", "new", "(", "'CookieJarWrapper'", ",", "cookie_jar", ",", "save_filename", "=", "session", ".", "args", ".", "save_cookies", ",", "keep_session_cookies", "=", "session", ".", "args", ".", "keep_session_cookies", ",", ")", "return", "cookie_jar_wrapper" ]
Build the cookie jar
[ "Build", "the", "cookie", "jar" ]
python
train
grundic/yagocd
yagocd/resources/__init__.py
https://github.com/grundic/yagocd/blob/4c75336ae6f107c8723d37b15e52169151822127/yagocd/resources/__init__.py#L114-L125
def get_predecessors(self, transitive=False): """ Property for getting predecessors (parents) of current pipeline. This property automatically populates from API call :return: list of :class:`yagocd.resources.pipeline.PipelineEntity`. :rtype: list of yagocd.resources.pipeline.PipelineEntity """ result = self._predecessors if transitive: return YagocdUtil.graph_depth_walk(result, lambda v: v.predecessors) return result
[ "def", "get_predecessors", "(", "self", ",", "transitive", "=", "False", ")", ":", "result", "=", "self", ".", "_predecessors", "if", "transitive", ":", "return", "YagocdUtil", ".", "graph_depth_walk", "(", "result", ",", "lambda", "v", ":", "v", ".", "predecessors", ")", "return", "result" ]
Property for getting predecessors (parents) of current pipeline. This property automatically populates from API call :return: list of :class:`yagocd.resources.pipeline.PipelineEntity`. :rtype: list of yagocd.resources.pipeline.PipelineEntity
[ "Property", "for", "getting", "predecessors", "(", "parents", ")", "of", "current", "pipeline", ".", "This", "property", "automatically", "populates", "from", "API", "call" ]
python
train
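The transitive branch delegates to YagocdUtil.graph_depth_walk, whose body is not part of this record; a generic depth-first expansion with the same shape would look roughly like this (an assumption about the helper's behaviour, not its actual code):

    def graph_depth_walk(roots, get_neighbours):
        # Expand direct predecessors into the full transitive set via DFS,
        # preserving first-seen order and skipping already-visited nodes.
        seen, stack = [], list(roots)
        while stack:
            node = stack.pop()
            if node in seen:
                continue
            seen.append(node)
            stack.extend(get_neighbours(node))
        return seen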
ellmetha/django-machina
machina/apps/forum_permission/shortcuts.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_permission/shortcuts.py#L25-L40
def assign_perm(perm, user_or_group, forum=None, has_perm=True): """ Assigns a permission to a user (anonymous or not) or a group. """ user, group = get_identity(user_or_group) perm = ForumPermission.objects.get(codename=perm) if user: return UserForumPermission.objects.create( forum=forum, permission=perm, user=user if not user.is_anonymous else None, anonymous_user=user.is_anonymous, has_perm=has_perm, ) if group: return GroupForumPermission.objects.create( forum=forum, permission=perm, group=group, has_perm=has_perm, )
[ "def", "assign_perm", "(", "perm", ",", "user_or_group", ",", "forum", "=", "None", ",", "has_perm", "=", "True", ")", ":", "user", ",", "group", "=", "get_identity", "(", "user_or_group", ")", "perm", "=", "ForumPermission", ".", "objects", ".", "get", "(", "codename", "=", "perm", ")", "if", "user", ":", "return", "UserForumPermission", ".", "objects", ".", "create", "(", "forum", "=", "forum", ",", "permission", "=", "perm", ",", "user", "=", "user", "if", "not", "user", ".", "is_anonymous", "else", "None", ",", "anonymous_user", "=", "user", ".", "is_anonymous", ",", "has_perm", "=", "has_perm", ",", ")", "if", "group", ":", "return", "GroupForumPermission", ".", "objects", ".", "create", "(", "forum", "=", "forum", ",", "permission", "=", "perm", ",", "group", "=", "group", ",", "has_perm", "=", "has_perm", ",", ")" ]
Assigns a permission to a user (anonymous or not) or a group.
[ "Assigns", "a", "permission", "to", "a", "user", "(", "anonymous", "or", "not", ")", "or", "a", "group", "." ]
python
train
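A hedged usage sketch for assign_perm; the permission codenames below are standard django-machina ones, and user, group, and forum are assumed to be existing ORM objects:

    from machina.apps.forum_permission.shortcuts import assign_perm

    assign_perm('can_see_forum', user, forum)         # per-forum grant to a user
    assign_perm('can_read_forum', group, forum=None)  # global grant to a group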
google/python-adb
adb/filesync_protocol.py
https://github.com/google/python-adb/blob/d9b94b2dda555c14674c19806debb8449c0e9652/adb/filesync_protocol.py#L120-L164
def Push(cls, connection, datafile, filename, st_mode=DEFAULT_PUSH_MODE, mtime=0, progress_callback=None): """Push a file-like object to the device. Args: connection: ADB connection datafile: File-like object for reading from filename: Filename to push to st_mode: stat mode for filename mtime: modification time progress_callback: callback method that accepts filename, bytes_written and total_bytes Raises: PushFailedError: Raised on push failure. """ fileinfo = ('{},{}'.format(filename, int(st_mode))).encode('utf-8') cnxn = FileSyncConnection(connection, b'<2I') cnxn.Send(b'SEND', fileinfo) if progress_callback: total_bytes = os.fstat(datafile.fileno()).st_size if isinstance(datafile, file) else -1 progress = cls._HandleProgress(lambda current: progress_callback(filename, current, total_bytes)) next(progress) while True: data = datafile.read(MAX_PUSH_DATA) if data: cnxn.Send(b'DATA', data) if progress_callback: progress.send(len(data)) else: break if mtime == 0: mtime = int(time.time()) # DONE doesn't send data, but it hides the last bit of data in the size # field. cnxn.Send(b'DONE', size=mtime) for cmd_id, _, data in cnxn.ReadUntil((), b'OKAY', b'FAIL'): if cmd_id == b'OKAY': return raise PushFailedError(data)
[ "def", "Push", "(", "cls", ",", "connection", ",", "datafile", ",", "filename", ",", "st_mode", "=", "DEFAULT_PUSH_MODE", ",", "mtime", "=", "0", ",", "progress_callback", "=", "None", ")", ":", "fileinfo", "=", "(", "'{},{}'", ".", "format", "(", "filename", ",", "int", "(", "st_mode", ")", ")", ")", ".", "encode", "(", "'utf-8'", ")", "cnxn", "=", "FileSyncConnection", "(", "connection", ",", "b'<2I'", ")", "cnxn", ".", "Send", "(", "b'SEND'", ",", "fileinfo", ")", "if", "progress_callback", ":", "total_bytes", "=", "os", ".", "fstat", "(", "datafile", ".", "fileno", "(", ")", ")", ".", "st_size", "if", "isinstance", "(", "datafile", ",", "file", ")", "else", "-", "1", "progress", "=", "cls", ".", "_HandleProgress", "(", "lambda", "current", ":", "progress_callback", "(", "filename", ",", "current", ",", "total_bytes", ")", ")", "next", "(", "progress", ")", "while", "True", ":", "data", "=", "datafile", ".", "read", "(", "MAX_PUSH_DATA", ")", "if", "data", ":", "cnxn", ".", "Send", "(", "b'DATA'", ",", "data", ")", "if", "progress_callback", ":", "progress", ".", "send", "(", "len", "(", "data", ")", ")", "else", ":", "break", "if", "mtime", "==", "0", ":", "mtime", "=", "int", "(", "time", ".", "time", "(", ")", ")", "# DONE doesn't send data, but it hides the last bit of data in the size", "# field.", "cnxn", ".", "Send", "(", "b'DONE'", ",", "size", "=", "mtime", ")", "for", "cmd_id", ",", "_", ",", "data", "in", "cnxn", ".", "ReadUntil", "(", "(", ")", ",", "b'OKAY'", ",", "b'FAIL'", ")", ":", "if", "cmd_id", "==", "b'OKAY'", ":", "return", "raise", "PushFailedError", "(", "data", ")" ]
Push a file-like object to the device. Args: connection: ADB connection datafile: File-like object for reading from filename: Filename to push to st_mode: stat mode for filename mtime: modification time progress_callback: callback method that accepts filename, bytes_written and total_bytes Raises: PushFailedError: Raised on push failure.
[ "Push", "a", "file", "-", "like", "object", "to", "the", "device", "." ]
python
train
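Push is normally reached through the higher-level AdbCommands wrapper rather than called directly; a hedged sketch (key path and filenames are illustrative, and the exact wrapper signature may vary between python-adb releases):

    import os.path as op
    from adb import adb_commands, sign_m2crypto

    signer = sign_m2crypto.M2CryptoSigner(op.expanduser('~/.android/adbkey'))
    device = adb_commands.AdbCommands()
    device.ConnectDevice(rsa_keys=[signer])
    device.Push('local.bin', '/data/local/tmp/local.bin',
                progress_callback=lambda name, done, total: print(name, done, total))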
vmlaker/coils
coils/String.py
https://github.com/vmlaker/coils/blob/a3a613b3d661dec010e5879c86e62cbff2519dd0/coils/String.py#L25-L29
def time2string(tstamp, micro=True): """Given a :class:`datetime.datetime` object, return a formatted time string.""" tformat = TIME_FORMAT if micro else TIME_FORMAT[:-len(MICRO)] return tstamp.strftime(tformat)
[ "def", "time2string", "(", "tstamp", ",", "micro", "=", "True", ")", ":", "tformat", "=", "TIME_FORMAT", "if", "micro", "else", "TIME_FORMAT", "[", ":", "-", "len", "(", "MICRO", ")", "]", "return", "tstamp", ".", "strftime", "(", "tformat", ")" ]
Given a :class:`datetime.datetime` object, return a formatted time string.
[ "Given", "a", ":", "class", ":", "datetime", ".", "datetime", "object", "return", "a", "formatted", "time", "string", "." ]
python
train
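time2string reads the module constants TIME_FORMAT and MICRO, which are not shown in this record; plausible values (shown as if defined alongside the function) make the trimming behaviour concrete:

    from datetime import datetime

    MICRO = '.%f'                              # assumed constant
    TIME_FORMAT = '%Y-%m-%d %H:%M:%S' + MICRO  # assumed constant

    print(time2string(datetime(2024, 1, 2, 3, 4, 5, 678900)))       # 2024-01-02 03:04:05.678900
    print(time2string(datetime(2024, 1, 2, 3, 4, 5), micro=False))  # 2024-01-02 03:04:05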
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L392-L401
def duration(self): """ If the start and end times of the job are defined, return a timedelta, else return None """ try: start, end = self.hmget('start', 'end') return parse(end) - parse(start) except: return None
[ "def", "duration", "(", "self", ")", ":", "try", ":", "start", ",", "end", "=", "self", ".", "hmget", "(", "'start'", ",", "'end'", ")", "return", "parse", "(", "end", ")", "-", "parse", "(", "start", ")", "except", ":", "return", "None" ]
If the start and end times of the job are defined, return a timedelta, else return None
[ "If", "the", "start", "and", "end", "times", "of", "the", "job", "are", "defined", "return", "a", "timedelta", "else", "return", "None" ]
python
train
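The hash fields hold string timestamps, so the subtraction reduces to dateutil arithmetic; a standalone illustration of the core expression:

    from dateutil.parser import parse

    start, end = '2024-01-01T10:00:00', '2024-01-01T10:02:30'
    print(parse(end) - parse(start))  # 0:02:30, the timedelta duration() would return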
Opentrons/opentrons
update-server/otupdate/buildroot/file_actions.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/update-server/otupdate/buildroot/file_actions.py#L161-L196
def hash_file(path: str, progress_callback: Callable[[float], None], chunk_size: int = 1024, file_size: int = None, algo: str = 'sha256') -> bytes: """ Hash a file and return the hash, providing progress callbacks :param path: The file to hash :param progress_callback: The callback to call with progress between 0 and 1. May not ever be precisely 1.0. :param chunk_size: If specified, the size of the chunks to hash in one call If not specified, defaults to 1024 :param file_size: If specified, the size of the file to hash (used for progress callback generation). If not specified, calculated internally. :param algo: The algorithm to use. Can be anything used by :py:mod:`hashlib` :returns: The output hash, as ascii hex """ hasher = hashlib.new(algo) have_read = 0 if not chunk_size: chunk_size = 1024 with open(path, 'rb') as to_hash: if not file_size: file_size = to_hash.seek(0, 2) to_hash.seek(0) while True: chunk = to_hash.read(chunk_size) hasher.update(chunk) have_read += len(chunk) progress_callback(have_read/file_size) if len(chunk) != chunk_size: break return binascii.hexlify(hasher.digest())
[ "def", "hash_file", "(", "path", ":", "str", ",", "progress_callback", ":", "Callable", "[", "[", "float", "]", ",", "None", "]", ",", "chunk_size", ":", "int", "=", "1024", ",", "file_size", ":", "int", "=", "None", ",", "algo", ":", "str", "=", "'sha256'", ")", "->", "bytes", ":", "hasher", "=", "hashlib", ".", "new", "(", "algo", ")", "have_read", "=", "0", "if", "not", "chunk_size", ":", "chunk_size", "=", "1024", "with", "open", "(", "path", ",", "'rb'", ")", "as", "to_hash", ":", "if", "not", "file_size", ":", "file_size", "=", "to_hash", ".", "seek", "(", "0", ",", "2", ")", "to_hash", ".", "seek", "(", "0", ")", "while", "True", ":", "chunk", "=", "to_hash", ".", "read", "(", "chunk_size", ")", "hasher", ".", "update", "(", "chunk", ")", "have_read", "+=", "len", "(", "chunk", ")", "progress_callback", "(", "have_read", "/", "file_size", ")", "if", "len", "(", "chunk", ")", "!=", "chunk_size", ":", "break", "return", "binascii", ".", "hexlify", "(", "hasher", ".", "digest", "(", ")", ")" ]
Hash a file and return the hash, providing progress callbacks :param path: The file to hash :param progress_callback: The callback to call with progress between 0 and 1. May not ever be precisely 1.0. :param chunk_size: If specified, the size of the chunks to hash in one call If not specified, defaults to 1024 :param file_size: If specified, the size of the file to hash (used for progress callback generation). If not specified, calculated internally. :param algo: The algorithm to use. Can be anything used by :py:mod:`hashlib` :returns: The output hash, as ascii hex
[ "Hash", "a", "file", "and", "return", "the", "hash", "providing", "progress", "callbacks" ]
python
train
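hash_file is self-contained apart from hashlib/binascii, so a usage sketch only needs a file path and a progress callback; the filename below is illustrative:

    import sys

    def show_progress(fraction):
        sys.stdout.write('\rhashing: %3.0f%%' % (fraction * 100))

    digest = hash_file('firmware.bin', show_progress, chunk_size=4096)
    print('\nsha256:', digest.decode())  # hexlify() returns bytes, hence decode()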
impact27/registrator
registrator/image.py
https://github.com/impact27/registrator/blob/04c099d83e0466207dc5b2e40d9b03db020d4dad/registrator/image.py#L107-L155
def find_rotation_scale(im0, im1, isccs=False): """Compares the images and return the best guess for the rotation angle, and scale difference. Parameters ---------- im0: 2d array First image im1: 2d array Second image isccs: boolean, default False Set to True if the images are already DFT and in CCS representation Returns ------- angle: number The angle difference scale: number The scale difference Notes ----- Uses find_shift_dft """ # sanitize input im0 = np.asarray(im0, dtype=np.float32) im1 = np.asarray(im1, dtype=np.float32) truesize = None # if ccs, convert to shifted dft before giving to polar_fft if isccs: truesize = im0.shape im0 = centered_mag_sq_ccs(im0) im1 = centered_mag_sq_ccs(im1) # Get log polar coordinates. choose the log base lp1, log_base = polar_fft(im1, logpolar=True, isshiftdft=isccs, logoutput=True, truesize=truesize) lp0, log_base = polar_fft(im0, logpolar=True, isshiftdft=isccs, logoutput=True, truesize=truesize, nangle=lp1.shape[0], radiimax=lp1.shape[1]) # Find the shift with log of the log-polar images, # to compensate for dft intensity repartition angle, scale = find_shift_dft(lp0, lp1) # get angle in correct units angle *= np.pi / lp1.shape[0] # get scale in linear units scale = log_base ** (scale) # return angle and scale return angle, scale
[ "def", "find_rotation_scale", "(", "im0", ",", "im1", ",", "isccs", "=", "False", ")", ":", "# sanitize input", "im0", "=", "np", ".", "asarray", "(", "im0", ",", "dtype", "=", "np", ".", "float32", ")", "im1", "=", "np", ".", "asarray", "(", "im1", ",", "dtype", "=", "np", ".", "float32", ")", "truesize", "=", "None", "# if ccs, convert to shifted dft before giving to polar_fft", "if", "isccs", ":", "truesize", "=", "im0", ".", "shape", "im0", "=", "centered_mag_sq_ccs", "(", "im0", ")", "im1", "=", "centered_mag_sq_ccs", "(", "im1", ")", "# Get log polar coordinates. choose the log base", "lp1", ",", "log_base", "=", "polar_fft", "(", "im1", ",", "logpolar", "=", "True", ",", "isshiftdft", "=", "isccs", ",", "logoutput", "=", "True", ",", "truesize", "=", "truesize", ")", "lp0", ",", "log_base", "=", "polar_fft", "(", "im0", ",", "logpolar", "=", "True", ",", "isshiftdft", "=", "isccs", ",", "logoutput", "=", "True", ",", "truesize", "=", "truesize", ",", "nangle", "=", "lp1", ".", "shape", "[", "0", "]", ",", "radiimax", "=", "lp1", ".", "shape", "[", "1", "]", ")", "# Find the shift with log of the log-polar images,", "# to compensate for dft intensity repartition", "angle", ",", "scale", "=", "find_shift_dft", "(", "lp0", ",", "lp1", ")", "# get angle in correct units", "angle", "*=", "np", ".", "pi", "/", "lp1", ".", "shape", "[", "0", "]", "# get scale in linear units", "scale", "=", "log_base", "**", "(", "scale", ")", "# return angle and scale", "return", "angle", ",", "scale" ]
Compares the images and return the best guess for the rotation angle, and scale difference. Parameters ---------- im0: 2d array First image im1: 2d array Second image isccs: boolean, default False Set to True if the images are already DFT and in CCS representation Returns ------- angle: number The angle difference scale: number The scale difference Notes ----- Uses find_shift_dft
[ "Compares", "the", "images", "and", "return", "the", "best", "guess", "for", "the", "rotation", "angle", "and", "scale", "difference", "." ]
python
train
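A synthetic sanity check for find_rotation_scale, assuming the module's polar_fft/find_shift_dft helpers are importable alongside it; the recovered angle's sign convention depends on those helpers:

    import numpy as np
    from scipy import ndimage

    im0 = np.zeros((128, 128), dtype=np.float32)
    im0[40:80, 50:70] = 1.0                       # a bright rectangle
    im1 = ndimage.rotate(im0, 15, reshape=False)  # rotate by 15 degrees

    angle, scale = find_rotation_scale(im0, im1)
    print(np.degrees(angle), scale)               # expect roughly +/-15 and scale near 1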
rosenbrockc/fortpy
fortpy/scripts/analyze.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L653-L682
def do_postfix(self, arg): """Sets the function to apply to the values of a specific variable before plotting or tabulating values. """ usable, filename, append = self._redirect_split(arg) sargs = usable.split() if len(sargs) == 1 and sargs[0] == "list": self._print_map_dict("functions", filename, append) elif len(sargs) >= 2: defvars = self._postfix_varlist("postfix " + arg) for var in defvars.values(): if not self._validate_var(var): msg.err("Variable '{}' is not a valid variable|property combination.".format(var)) return fxn = arg.split()[-1] if ":" not in fxn: msg.err("{} is not a valid postfix function expression.".format(fxn)) self.help_postfix() return modvar = fxn.split(":")[0] if modvar not in defvars: msg.err("Invalid postfix function: variable '{}' not defined.".format(modvar)) return defvars["lambda"] = fxn self.curargs["functions"][defvars[modvar]] = defvars #Give the user some feedback so that they know it was successful. self.do_postfix("list")
[ "def", "do_postfix", "(", "self", ",", "arg", ")", ":", "usable", ",", "filename", ",", "append", "=", "self", ".", "_redirect_split", "(", "arg", ")", "sargs", "=", "usable", ".", "split", "(", ")", "if", "len", "(", "sargs", ")", "==", "1", "and", "sargs", "[", "0", "]", "==", "\"list\"", ":", "self", ".", "_print_map_dict", "(", "\"functions\"", ",", "filename", ",", "append", ")", "elif", "len", "(", "sargs", ")", ">=", "2", ":", "defvars", "=", "self", ".", "_postfix_varlist", "(", "\"postfix \"", "+", "arg", ")", "for", "var", "in", "defvars", ".", "values", "(", ")", ":", "if", "not", "self", ".", "_validate_var", "(", "var", ")", ":", "msg", ".", "err", "(", "\"Variable '{}' is not a valid variable|property combination.\"", ")", "return", "fxn", "=", "arg", ".", "split", "(", ")", "[", "-", "1", "]", "if", "\":\"", "not", "in", "fxn", ":", "msg", ".", "err", "(", "\"{} is not a valid postfix function expression.\"", ")", "self", ".", "help_postfix", "(", ")", "return", "modvar", "=", "fxn", ".", "split", "(", "\":\"", ")", "[", "0", "]", "if", "modvar", "not", "in", "defvars", ":", "msg", ".", "err", "(", "\"Invalid postfix function: variable '{}' not defined.\"", ".", "format", "(", "modvar", ")", ")", "return", "defvars", "[", "\"lambda\"", "]", "=", "fxn", "self", ".", "curargs", "[", "\"functions\"", "]", "[", "defvars", "[", "modvar", "]", "]", "=", "defvars", "#Give the user some feedback so that they know it was successful.", "self", ".", "do_postfix", "(", "\"list\"", ")" ]
Sets the function to apply to the values of a specific variable before plotting or tabulating values.
[ "Sets", "the", "function", "to", "apply", "to", "the", "values", "of", "a", "specific", "variable", "before", "plotting", "or", "tabulating", "values", "." ]
python
train
Loudr/pale
pale/endpoint.py
https://github.com/Loudr/pale/blob/dc002ee6032c856551143af222ff8f71ed9853fe/pale/endpoint.py#L122-L286
def _execute(self, request, **kwargs): """The top-level execute function for the endpoint. This method is intended to remain as-is, and not be overridden. It gets called by your HTTP framework's route handler, and performs the following actions to process the request: ``authenticate_request`` Validate the Bearer token, populate the ``current_user``, and make sure that the token covers the scope needed to call the requested method. * * ``parse arguments`` The argument parser is responsible for: - First, coercing and patching any parameters that might require it due to versioning (i.e. the caller is using an old API version that supports `index` as a parameter for pagination, but the current version uses the name `offset`) - Second, iterating through the endpoint's supported arguments and validating that the params passed in comply with the endpoint's requirements - Third, populating the `context.args` array with the validated arguments If any of the arguments are invalid, then the Argument parser will raise an ArgumentError that bubbles up to the `try/catch` block of the execute method. * * ``before handler`` The before_handlers are specified by the Endpoint definition, and are intended to support DRY-ing up your codebase. Have a set of Endpoints that all need to grab an object from the ORM based on the same parameter? Make them inherit from an Endpoint subclass that performs that task in a before_handler! * * ``handle`` The core logic of your API endpoint, as implemented by you in your Endpoint subclass. The API Framework expects ``handle`` to return a dictionary specifying the response object and the JSON key that it should hang off of, or a tuple of a dictionary and an HTTP status code. * * ``after_handler`` Like the before_handlers, the ``after_handlers`` happen after the handle method, and allow the endpoint developer to re-use code for post-processing data from an endpoint. * * ``render response`` Like the argument parser, the response renderer is responsible for a few things: - First, it converts the ORM objects into JSON-serializable Python dictionaries using the Resource objects defined by the API implementation, - Second, it does any version parameter coercion, renaming and reformatting the edge version of the response to match the version requested by the API caller, - and Third, it serializes the Python dictionary into the response format requested by the API caller (right now, we only support JSON responses, but it'd be reasonable to support something like HTML or XML or whatever in the future). The rendered JSON text is then returned as the response that should be sent by your HTTP framework's routing handler. * * ``_after_response_handler`` The `_after_response_handlers` are specified by the Endpoint definition, and enable manipulation of the response object before it is returned to the client, but after the response is rendered. Because these are instancemethods, they may share instance data from `self` specified in the endpoint's `_handle` method. ``_finalize_content`` The `_finalize_content` method is overridden by the Endpoint and is called after the response is rendered into a serializable result. This method is called with two arguments, the context and the rendered content, and expected to return updated rendered content. For in-place modification of dicts, this method will still be expected to return the given argument. ``_allow_cors`` This value is set to enable CORS for a given endpoint. When set to a string it supplies an explicit value to 'Access-Control-Allow-Origin'. Set to True, this will allow access from *all* domains; Access-Control-Allow-Origin = "*" """ try: self._create_context(request) self._authenticate() context = get_current_context() self._parse_args() if hasattr(self, '_before_handlers') and \ isinstance(self._before_handlers, (list, tuple)): for handler in self._before_handlers: handler(context) context.handler_result = self._handle(context) if hasattr(self, '_after_handlers') and \ isinstance(self._after_handlers, (list, tuple)): for handler in self._after_handlers: handler(context) self._render() response = context.response # After calling ._render(), the response is ready to go, so we # shouldn't need to handle any other exceptions beyond this point. except AuthenticationError as e: if hasattr(e, 'message') and e.message is not None: message = e.message else: message = "You don't have permission to do that." err = APIError.Forbidden(message) response = self._response_class(*err.response) response.headers["Content-Type"] = 'application/json' except ArgumentError as e: err = APIError.UnprocessableEntity(e.message) response = self._response_class(*err.response) response.headers["Content-Type"] = 'application/json' except APIError as e: response = self._response_class(*e.response) response.headers["Content-Type"] = 'application/json' except PaleRaisedResponse as r: response = self._response_class(*r.response) response.headers["Content-Type"] = 'application/json' except Exception as e: logging.exception("Failed to handle Pale Endpoint %s: %r", self.__class__.__name__, e) err = APIError.Exception(repr(e)) response = self._response_class(*err.response) response.headers["Content-Type"] = 'application/json' allow_cors = getattr(self, "_allow_cors", None) if allow_cors is True: response.headers['Access-Control-Allow-Origin'] = '*' elif isinstance(allow_cors, basestring): response.headers['Access-Control-Allow-Origin'] = allow_cors context.response = response try: if hasattr(self, '_after_response_handlers') and \ isinstance(self._after_response_handlers, (list, tuple)): for handler in self._after_response_handlers: handler(context, response) except Exception as e: logging.exception( "Failed to process _after_response_handlers for Endpoint %s", self.__class__.__name__) raise return response
[ "def", "_execute", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "_create_context", "(", "request", ")", "self", ".", "_authenticate", "(", ")", "context", "=", "get_current_context", "(", ")", "self", ".", "_parse_args", "(", ")", "if", "hasattr", "(", "self", ",", "'_before_handlers'", ")", "and", "isinstance", "(", "self", ".", "_before_handlers", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "handler", "in", "self", ".", "_before_handlers", ":", "handler", "(", "context", ")", "context", ".", "handler_result", "=", "self", ".", "_handle", "(", "context", ")", "if", "hasattr", "(", "self", ",", "'_after_handlers'", ")", "and", "isinstance", "(", "self", ".", "_after_handlers", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "handler", "in", "self", ".", "_after_handlers", ":", "handler", "(", "context", ")", "self", ".", "_render", "(", ")", "response", "=", "context", ".", "response", "# After calling ._render(), the response is ready to go, so we", "# shouldn't need to handle any other exceptions beyond this point.", "except", "AuthenticationError", "as", "e", ":", "if", "hasattr", "(", "e", ",", "'message'", ")", "and", "e", ".", "message", "is", "not", "None", ":", "message", "=", "e", ".", "message", "else", ":", "message", "=", "\"You don't have permission to do that.\"", "err", "=", "APIError", ".", "Forbidden", "(", "message", ")", "response", "=", "self", ".", "_response_class", "(", "*", "err", ".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "except", "ArgumentError", "as", "e", ":", "err", "=", "APIError", ".", "UnprocessableEntity", "(", "e", ".", "message", ")", "response", "=", "self", ".", "_response_class", "(", "*", "err", ".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "except", "APIError", "as", "e", ":", "response", "=", "self", ".", "_response_class", "(", "*", "e", ".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "except", "PaleRaisedResponse", "as", "r", ":", "response", "=", "self", ".", "_response_class", "(", "*", "r", ".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "\"Failed to handle Pale Endpoint %s: %r\"", ",", "self", ".", "__class__", ".", "__name__", ",", "e", ")", "err", "=", "APIError", ".", "Exception", "(", "repr", "(", "e", ")", ")", "response", "=", "self", ".", "_response_class", "(", "*", "err", ".", "response", ")", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "'application/json'", "allow_cors", "=", "getattr", "(", "self", ",", "\"_allow_cors\"", ",", "None", ")", "if", "allow_cors", "is", "True", ":", "response", ".", "headers", "[", "'Access-Control-Allow-Origin'", "]", "=", "'*'", "elif", "isinstance", "(", "allow_cors", ",", "basestring", ")", ":", "response", ".", "headers", "[", "'Access-Control-Allow-Origin'", "]", "=", "allow_cors", "context", ".", "response", "=", "response", "try", ":", "if", "hasattr", "(", "self", ",", "'_after_response_handlers'", ")", "and", "isinstance", "(", "self", ".", "_after_response_handlers", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "handler", "in", "self", ".", "_after_response_handlers", ":", "handler", "(", "context", ",", "response", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", 
"\"Failed to process _after_response_handlers for Endpoint %s\"", ",", "self", ".", "__class__", ".", "__name__", ")", "raise", "return", "response" ]
The top-level execute function for the endpoint. This method is intended to remain as-is, and not be overridden. It gets called by your HTTP framework's route handler, and performs the following actions to process the request: ``authenticate_request`` Validate the Bearer token, populate the ``current_user``, and make sure that the token covers the scope needed to call the requested method. * * ``parse arguments`` The argument parser is responsible for: - First, coercing and patching any parameters that might require it due to versioning (i.e. the caller is using an old API version that supports `index` as a parameter for pagination, but the current version uses the name `offset`) - Second, iterating through the endpoint's supported arguments and validating that the params passed in comply with the endpoint's requirements - Third, populating the `context.args` array with the validated arguments If any of the arguments are invalid, then the Argument parser will raise an ArgumentError that bubbles up to the `try/catch` block of the execute method. * * ``before handler`` The before_handlers are specified by the Endpoint definition, and are intended to support DRY-ing up your codebase. Have a set of Endpoints that all need to grab an object from the ORM based on the same parameter? Make them inherit from an Endpoint subclass that performs that task in a before_handler! * * ``handle`` The core logic of your API endpoint, as implemented by you in your Endpoint subclass. The API Framework expects ``handle`` to return a dictionary specifying the response object and the JSON key that it should hang off of, or a tuple of a dictionary and an HTTP status code. * * ``after_handler`` Like the before_handlers, the ``after_handlers`` happen after the handle method, and allow the endpoint developer to re-use code for post-processing data from an endpoint. * * ``render response`` Like the argument parser, the response renderer is responsible for a few things: - First, it converts the ORM objects into JSON-serializable Python dictionaries using the Resource objects defined by the API implementation, - Second, it does any version parameter coercion, renaming and reformatting the edge version of the response to match the version requested by the API caller, - and Third, it serializes the Python dictionary into the response format requested by the API caller (right now, we only support JSON responses, but it'd be reasonable to support something like HTML or XML or whatever in the future). The rendered JSON text is then returned as the response that should be sent by your HTTP framework's routing handler. * * ``_after_response_handler`` The `_after_response_handlers` are specified by the Endpoint definition, and enable manipulation of the response object before it is returned to the client, but after the response is rendered. Because these are instancemethods, they may share instance data from `self` specified in the endpoint's `_handle` method. ``_finalize_content`` The `_finalize_content` method is overridden by the Endpoint and is called after the response is rendered into a serializable result. This method is called with two arguments, the context and the rendered content, and expected to return updated rendered content. For in-place modification of dicts, this method will still be expected to return the given argument. ``_allow_cors`` This value is set to enable CORS for a given endpoint. When set to a string it supplies an explicit value to 'Access-Control-Allow-Origin'. Set to True, this will allow access from *all* domains; Access-Control-Allow-Origin = "*"
[ "The", "top", "-", "level", "execute", "function", "for", "the", "endpoint", "." ]
python
train
emlazzarin/acrylic
acrylic/datatable.py
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L656-L675
def rename(self, old_fieldname, new_fieldname): """ Renames a specific field, and preserves the underlying order. """ if old_fieldname not in self: raise Exception("DataTable does not have field `%s`" % old_fieldname) if not isinstance(new_fieldname, basestring): raise ValueError("DataTable fields must be strings, not `%s`" % type(new_fieldname)) if old_fieldname == new_fieldname: return new_names = self.fields location = new_names.index(old_fieldname) del new_names[location] new_names.insert(location, new_fieldname) self.fields = new_names
[ "def", "rename", "(", "self", ",", "old_fieldname", ",", "new_fieldname", ")", ":", "if", "old_fieldname", "not", "in", "self", ":", "raise", "Exception", "(", "\"DataTable does not have field `%s`\"", "%", "old_fieldname", ")", "if", "not", "isinstance", "(", "new_fieldname", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"DataTable fields must be strings, not `%s`\"", "%", "type", "(", "new_fieldname", ")", ")", "if", "old_fieldname", "==", "new_fieldname", ":", "return", "new_names", "=", "self", ".", "fields", "location", "=", "new_names", ".", "index", "(", "old_fieldname", ")", "del", "new_names", "[", "location", "]", "new_names", ".", "insert", "(", "location", ",", "new_fieldname", ")", "self", ".", "fields", "=", "new_names" ]
Renames a specific field, and preserves the underlying order.
[ "Renames", "a", "specific", "field", "and", "preserves", "the", "underlying", "order", "." ]
python
train
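A hedged sketch of rename's documented behaviour; the DataTable import and constructor shape are assumed here, not taken from this record:

    from acrylic import DataTable

    dt = DataTable([{'name': 'ada', 'yr': 1815},
                    {'name': 'alan', 'yr': 1912}])   # constructor shape assumed
    dt.rename('yr', 'birth_year')
    print(dt.fields)  # ['name', 'birth_year'] -- ordering preserved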