repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Scoppio/RagnarokEngine3
RagnarokEngine3/RE3.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L2140-L2157
def query_collision(collision_object): """ Check to see if the specified object is colliding with any of the objects currently in the Collision Manager Returns the first object we are colliding with if there was a collision and None if no collisions was found """ global collidable_objects # Note that we use a Brute Force approach for the time being. # It performs horribly under heavy loads, but it meets # our needs for the time being. for obj in collidable_objects: # Make sure we don't check ourself against ourself. if obj.obj_id is not collision_object.obj_id: if collision_object.is_colliding(obj): # A collision has been detected. Return the object that we are colliding with. return obj # No collision was noticed. Return None. return None
[ "def", "query_collision", "(", "collision_object", ")", ":", "global", "collidable_objects", "# Note that we use a Brute Force approach for the time being.", "# It performs horribly under heavy loads, but it meets", "# our needs for the time being.", "for", "obj", "in", "collidable_objec...
Check to see if the specified object is colliding with any of the objects currently in the Collision Manager Returns the first object we are colliding with if there was a collision and None if no collisions was found
[ "Check", "to", "see", "if", "the", "specified", "object", "is", "colliding", "with", "any", "of", "the", "objects", "currently", "in", "the", "Collision", "Manager", "Returns", "the", "first", "object", "we", "are", "colliding", "with", "if", "there", "was",...
python
train
xapple/fasta
fasta/__init__.py
https://github.com/xapple/fasta/blob/a827c3138812d555203be45187ffae1277dd0d76/fasta/__init__.py#L304-L319
def remove_trailing_stars(self, new_path=None, in_place=True, check=False): """Remove the bad character that can be inserted by some programs at the end of sequences.""" # Optional check # if check and int(sh.grep('-c', '\*', self.path, _ok_code=[0,1])) == 0: return self # Faster with bash utilities # if in_place is True: sh.sed('-i', 's/\*$//g', self.path) return self # Standard way # if new_path is None: new_fasta = self.__class__(new_temp_path()) else: new_fasta = self.__class__(new_path) new_fasta.create() for seq in self: new_fasta.add_str(str(seq.seq).rstrip('*'), seq.id) new_fasta.close() return new_fasta
[ "def", "remove_trailing_stars", "(", "self", ",", "new_path", "=", "None", ",", "in_place", "=", "True", ",", "check", "=", "False", ")", ":", "# Optional check #", "if", "check", "and", "int", "(", "sh", ".", "grep", "(", "'-c'", ",", "'\\*'", ",", "s...
Remove the bad character that can be inserted by some programs at the end of sequences.
[ "Remove", "the", "bad", "character", "that", "can", "be", "inserted", "by", "some", "programs", "at", "the", "end", "of", "sequences", "." ]
python
train
apache/incubator-mxnet
example/ssd/evaluate/eval_metric.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L214-L225
def _recall_prec(self, record, count): """ get recall and precision from internal records """ record = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0) sorted_records = record[record[:,0].argsort()[::-1]] tp = np.cumsum(sorted_records[:, 1].astype(int) == 1) fp = np.cumsum(sorted_records[:, 1].astype(int) == 2) if count <= 0: recall = tp * 0.0 else: recall = tp / float(count) prec = tp.astype(float) / (tp + fp) return recall, prec
[ "def", "_recall_prec", "(", "self", ",", "record", ",", "count", ")", ":", "record", "=", "np", ".", "delete", "(", "record", ",", "np", ".", "where", "(", "record", "[", ":", ",", "1", "]", ".", "astype", "(", "int", ")", "==", "0", ")", "[", ...
get recall and precision from internal records
[ "get", "recall", "and", "precision", "from", "internal", "records" ]
python
train
hydpy-dev/hydpy
hydpy/models/hbranch/hbranch_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/hbranch/hbranch_model.py#L97-L103
def pick_input_v1(self): """Updates |Input| based on |Total|.""" flu = self.sequences.fluxes.fastaccess inl = self.sequences.inlets.fastaccess flu.input = 0. for idx in range(inl.len_total): flu.input += inl.total[idx][0]
[ "def", "pick_input_v1", "(", "self", ")", ":", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "inl", "=", "self", ".", "sequences", ".", "inlets", ".", "fastaccess", "flu", ".", "input", "=", "0.", "for", "idx", "in", "range", ...
Updates |Input| based on |Total|.
[ "Updates", "|Input|", "based", "on", "|Total|", "." ]
python
train
nok/sklearn-porter
sklearn_porter/estimator/classifier/DecisionTreeClassifier/__init__.py
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/estimator/classifier/DecisionTreeClassifier/__init__.py#L331-L352
def create_tree(self): """ Parse and build the tree branches. Returns ------- :return : string The tree branches as string. """ feature_indices = [] for i in self.estimator.tree_.feature: n_features = self.n_features if self.n_features > 1 or (self.n_features == 1 and i >= 0): feature_indices.append([str(j) for j in range(n_features)][i]) indentation = 1 if self.target_language in ['java', 'js', 'php', 'ruby'] else 0 return self.create_branches( self.estimator.tree_.children_left, self.estimator.tree_.children_right, self.estimator.tree_.threshold, self.estimator.tree_.value, feature_indices, 0, indentation)
[ "def", "create_tree", "(", "self", ")", ":", "feature_indices", "=", "[", "]", "for", "i", "in", "self", ".", "estimator", ".", "tree_", ".", "feature", ":", "n_features", "=", "self", ".", "n_features", "if", "self", ".", "n_features", ">", "1", "or",...
Parse and build the tree branches. Returns ------- :return : string The tree branches as string.
[ "Parse", "and", "build", "the", "tree", "branches", "." ]
python
train
rwl/godot
godot/node.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/node.py#L659-L676
def parse_xdot_label_directive(self, new): """ Parses the label drawing directive, updating the label components. """ components = XdotAttrParser().parse_xdot_data(new) pos_x = min( [c.x for c in components] ) pos_y = min( [c.y for c in components] ) move_to_origin(components) container = Container(auto_size=True, position=[pos_x-self.pos[0], pos_y-self.pos[1]], bgcolor="red") container.add( *components ) self.label_drawing = container
[ "def", "parse_xdot_label_directive", "(", "self", ",", "new", ")", ":", "components", "=", "XdotAttrParser", "(", ")", ".", "parse_xdot_data", "(", "new", ")", "pos_x", "=", "min", "(", "[", "c", ".", "x", "for", "c", "in", "components", "]", ")", "pos...
Parses the label drawing directive, updating the label components.
[ "Parses", "the", "label", "drawing", "directive", "updating", "the", "label", "components", "." ]
python
test
skorch-dev/skorch
skorch/net.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/net.py#L764-L809
def partial_fit(self, X, y=None, classes=None, **fit_params): """Fit the module. If the module is initialized, it is not re-initialized, which means that this method should be used if you want to continue training a model (warm start). Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. y : target data, compatible with skorch.dataset.Dataset The same data types as for ``X`` are supported. If your X is a Dataset that contains the target, ``y`` may be set to None. classes : array, sahpe (n_classes,) Solely for sklearn compatibility, currently unused. **fit_params : dict Additional parameters passed to the ``forward`` method of the module and to the ``self.train_split`` call. """ if not self.initialized_: self.initialize() self.notify('on_train_begin', X=X, y=y) try: self.fit_loop(X, y, **fit_params) except KeyboardInterrupt: pass self.notify('on_train_end', X=X, y=y) return self
[ "def", "partial_fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "classes", "=", "None", ",", "*", "*", "fit_params", ")", ":", "if", "not", "self", ".", "initialized_", ":", "self", ".", "initialize", "(", ")", "self", ".", "notify", "(", ...
Fit the module. If the module is initialized, it is not re-initialized, which means that this method should be used if you want to continue training a model (warm start). Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. y : target data, compatible with skorch.dataset.Dataset The same data types as for ``X`` are supported. If your X is a Dataset that contains the target, ``y`` may be set to None. classes : array, sahpe (n_classes,) Solely for sklearn compatibility, currently unused. **fit_params : dict Additional parameters passed to the ``forward`` method of the module and to the ``self.train_split`` call.
[ "Fit", "the", "module", "." ]
python
train
KelSolaar/Umbra
umbra/components/factory/script_editor/search_in_files.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/search_in_files.py#L481-L494
def default_filter_in(self, value): """ Setter for **self.__default_filter_in** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "default_filter_in", value) assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format( "default_filter_in", value) self.__default_filter_in = value
[ "def", "default_filter_in", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"default_filter_in\"", ...
Setter for **self.__default_filter_in** attribute. :param value: Attribute value. :type value: unicode
[ "Setter", "for", "**", "self", ".", "__default_filter_in", "**", "attribute", "." ]
python
train
google/grr
grr/core/grr_response_core/lib/rdfvalues/structs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/structs.py#L841-L852
def Definition(self): """Return a string with the definition of this field.""" result = self._FormatDescriptionComment() result += " enum %s {\n" % self.enum_name for k, v in sorted(iteritems(self.reverse_enum)): result += " %s = %s;\n" % (v, k) result += " }\n" result += self._FormatField() return result
[ "def", "Definition", "(", "self", ")", ":", "result", "=", "self", ".", "_FormatDescriptionComment", "(", ")", "result", "+=", "\" enum %s {\\n\"", "%", "self", ".", "enum_name", "for", "k", ",", "v", "in", "sorted", "(", "iteritems", "(", "self", ".", ...
Return a string with the definition of this field.
[ "Return", "a", "string", "with", "the", "definition", "of", "this", "field", "." ]
python
train
SmartTeleMax/iktomi
iktomi/forms/convs.py
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/forms/convs.py#L280-L294
def clean_value(self, value): ''' Additional clean action to preprocess value before :meth:`to_python` method. Subclasses may define own clean_value method to allow additional clean actions like html cleanup, etc. ''' # We have to clean before checking min/max length. It's done in # separate method to allow additional clean action in subclasses. if self.nontext_replacement is not None: value = replace_nontext(value, self.nontext_replacement) if self.strip: value = value.strip() return value
[ "def", "clean_value", "(", "self", ",", "value", ")", ":", "# We have to clean before checking min/max length. It's done in", "# separate method to allow additional clean action in subclasses.", "if", "self", ".", "nontext_replacement", "is", "not", "None", ":", "value", "=", ...
Additional clean action to preprocess value before :meth:`to_python` method. Subclasses may define own clean_value method to allow additional clean actions like html cleanup, etc.
[ "Additional", "clean", "action", "to", "preprocess", "value", "before", ":", "meth", ":", "to_python", "method", "." ]
python
train
TkTech/Jawa
jawa/classloader.py
https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/classloader.py#L180-L195
def search_constant_pool(self, *, path: str, **options): """Partially load the class at `path`, yield all matching constants from the ConstantPool. This is an optimization method that does not load a complete ClassFile, nor does it add the results to the ClassLoader cache. :param path: Fully-qualified path to a ClassFile. :param options: A list of options to pass into `ConstantPool.find()` """ with self.open(f'{path}.class') as source: # Skip over the magic, minor, and major version. source.read(8) pool = ConstantPool() pool.unpack(source) yield from pool.find(**options)
[ "def", "search_constant_pool", "(", "self", ",", "*", ",", "path", ":", "str", ",", "*", "*", "options", ")", ":", "with", "self", ".", "open", "(", "f'{path}.class'", ")", "as", "source", ":", "# Skip over the magic, minor, and major version.", "source", ".",...
Partially load the class at `path`, yield all matching constants from the ConstantPool. This is an optimization method that does not load a complete ClassFile, nor does it add the results to the ClassLoader cache. :param path: Fully-qualified path to a ClassFile. :param options: A list of options to pass into `ConstantPool.find()`
[ "Partially", "load", "the", "class", "at", "path", "yield", "all", "matching", "constants", "from", "the", "ConstantPool", "." ]
python
train
althonos/moclo
moclo/moclo/core/vectors.py
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/core/vectors.py#L108-L145
def assemble(self, module, *modules, **kwargs): # type: (AbstractModule, *AbstractModule, **Any) -> SeqRecord """Assemble the provided modules into the vector. Arguments: module (`~moclo.base.modules.AbstractModule`): a module to insert in the vector. modules (`~moclo.base.modules.AbstractModule`, optional): additional modules to insert in the vector. The order of the parameters is not important, since modules will be sorted by their start overhang in the function. Returns: `~Bio.SeqRecord.SeqRecord`: the assembled sequence with sequence annotations inherited from the vector and the modules. Raises: `~moclo.errors.DuplicateModules`: when two different modules share the same start overhang, leading in possibly non-deterministic constructs. `~moclo.errors.MissingModule`: when a module has an end overhang that is not shared by any other module, leading to a partial construct only `~moclo.errors.InvalidSequence`: when one of the modules does not match the required module structure (missing site, wrong overhang, etc.). `~moclo.errors.UnusedModules`: when some modules were not used during the assembly (mostly caused by duplicate parts). """ mgr = AssemblyManager( vector=self, modules=[module] + list(modules), name=kwargs.get("name", "assembly"), id_=kwargs.get("id", "assembly"), ) return mgr.assemble()
[ "def", "assemble", "(", "self", ",", "module", ",", "*", "modules", ",", "*", "*", "kwargs", ")", ":", "# type: (AbstractModule, *AbstractModule, **Any) -> SeqRecord", "mgr", "=", "AssemblyManager", "(", "vector", "=", "self", ",", "modules", "=", "[", "module",...
Assemble the provided modules into the vector. Arguments: module (`~moclo.base.modules.AbstractModule`): a module to insert in the vector. modules (`~moclo.base.modules.AbstractModule`, optional): additional modules to insert in the vector. The order of the parameters is not important, since modules will be sorted by their start overhang in the function. Returns: `~Bio.SeqRecord.SeqRecord`: the assembled sequence with sequence annotations inherited from the vector and the modules. Raises: `~moclo.errors.DuplicateModules`: when two different modules share the same start overhang, leading in possibly non-deterministic constructs. `~moclo.errors.MissingModule`: when a module has an end overhang that is not shared by any other module, leading to a partial construct only `~moclo.errors.InvalidSequence`: when one of the modules does not match the required module structure (missing site, wrong overhang, etc.). `~moclo.errors.UnusedModules`: when some modules were not used during the assembly (mostly caused by duplicate parts).
[ "Assemble", "the", "provided", "modules", "into", "the", "vector", "." ]
python
train
azavea/python-omgeo
omgeo/postprocessors.py
https://github.com/azavea/python-omgeo/blob/40f4e006f087dbc795a5d954ffa2c0eab433f8c9/omgeo/postprocessors.py#L580-L584
def _points_within_distance(self, pnt1, pnt2): """Returns true if lat/lon points are within given distance in metres.""" if self._get_distance(pnt1, pnt2) <= self.distance: return True return False
[ "def", "_points_within_distance", "(", "self", ",", "pnt1", ",", "pnt2", ")", ":", "if", "self", ".", "_get_distance", "(", "pnt1", ",", "pnt2", ")", "<=", "self", ".", "distance", ":", "return", "True", "return", "False" ]
Returns true if lat/lon points are within given distance in metres.
[ "Returns", "true", "if", "lat", "/", "lon", "points", "are", "within", "given", "distance", "in", "metres", "." ]
python
train
dourvaris/nano-python
src/nano/rpc.py
https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L282-L339
def account_info(self, account, representative=False, weight=False, pending=False): """ Returns frontier, open block, change representative block, balance, last modified timestamp from local database & block count for **account** :param account: Account to return info for :type account: str :param representative: if True, also returns the representative block :type representative: bool :param weight: if True, also returns the voting weight :type weight: bool :param pending: if True, also returns the pending balance :type pending: bool :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.account_info( ... account="xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3" ... ) { "frontier": "FF84533A571D953A596EA401FD41743AC85D04F406E76FDE4408EAED50B473C5", "open_block": "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948", "representative_block": "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948", "balance": "235580100176034320859259343606608761791", "modified_timestamp": "1501793775", "block_count": "33" } """ account = self._process_value(account, 'account') payload = {"account": account} if representative: payload['representative'] = self._process_value(representative, 'strbool') if weight: payload['weight'] = self._process_value(weight, 'strbool') if pending: payload['pending'] = self._process_value(pending, 'strbool') resp = self.call('account_info', payload) for key in ( 'modified_timestamp', 'block_count', 'balance', 'pending', 'weight', ): if key in resp: resp[key] = int(resp[key]) return resp
[ "def", "account_info", "(", "self", ",", "account", ",", "representative", "=", "False", ",", "weight", "=", "False", ",", "pending", "=", "False", ")", ":", "account", "=", "self", ".", "_process_value", "(", "account", ",", "'account'", ")", "payload", ...
Returns frontier, open block, change representative block, balance, last modified timestamp from local database & block count for **account** :param account: Account to return info for :type account: str :param representative: if True, also returns the representative block :type representative: bool :param weight: if True, also returns the voting weight :type weight: bool :param pending: if True, also returns the pending balance :type pending: bool :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.account_info( ... account="xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3" ... ) { "frontier": "FF84533A571D953A596EA401FD41743AC85D04F406E76FDE4408EAED50B473C5", "open_block": "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948", "representative_block": "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948", "balance": "235580100176034320859259343606608761791", "modified_timestamp": "1501793775", "block_count": "33" }
[ "Returns", "frontier", "open", "block", "change", "representative", "block", "balance", "last", "modified", "timestamp", "from", "local", "database", "&", "block", "count", "for", "**", "account", "**" ]
python
train
SheffieldML/GPy
GPy/kern/src/todo/hetero.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/todo/hetero.py#L89-L91
def dKdiag_dtheta(self, dL_dKdiag, X, target): """Gradient of diagonal of covariance with respect to parameters.""" target += 2.*self.mapping.df_dtheta(dL_dKdiag[:, None]*self.mapping.f(X), X)
[ "def", "dKdiag_dtheta", "(", "self", ",", "dL_dKdiag", ",", "X", ",", "target", ")", ":", "target", "+=", "2.", "*", "self", ".", "mapping", ".", "df_dtheta", "(", "dL_dKdiag", "[", ":", ",", "None", "]", "*", "self", ".", "mapping", ".", "f", "(",...
Gradient of diagonal of covariance with respect to parameters.
[ "Gradient", "of", "diagonal", "of", "covariance", "with", "respect", "to", "parameters", "." ]
python
train
molmod/molmod
molmod/minimizer.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L337-L376
def _bracket(self, qinit, f0, fun): """Find a bracket that does contain the minimum""" self.num_bracket = 0 qa = qinit fa = fun(qa) counter = 0 if fa >= f0: while True: self.num_bracket += 1 #print " bracket shrink" qb, fb = qa, fa qa /= 1+phi fa = fun(qa) if qa < self.qtol: return if fa < f0: return (0, f0), (qa, fa), (qb, fb) counter += 1 if self.max_iter is not None and counter > self.max_iter: return else: self.num_bracket += 1 #print " bracket grow1" qb, fb = qa, fa qa *= (1+phi) fa = fun(qa) if fa >= fb: return (0, f0), (qb, fb), (qa, fa) while True: self.num_bracket += 1 #print " bracket grow2" qc, fc = qb, fb qb, fb = qa, fa qa = qb*(1+phi) - qc fa = fun(qa) if fa >= fb: return (qc, fc), (qb, fb), (qa, fa) counter += 1 if self.max_iter is not None and counter > self.max_iter: return
[ "def", "_bracket", "(", "self", ",", "qinit", ",", "f0", ",", "fun", ")", ":", "self", ".", "num_bracket", "=", "0", "qa", "=", "qinit", "fa", "=", "fun", "(", "qa", ")", "counter", "=", "0", "if", "fa", ">=", "f0", ":", "while", "True", ":", ...
Find a bracket that does contain the minimum
[ "Find", "a", "bracket", "that", "does", "contain", "the", "minimum" ]
python
train
nugget/python-insteonplm
insteonplm/devices/__init__.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/devices/__init__.py#L187-L194
def id_request(self): """Request a device ID from a device.""" import inspect curframe = inspect.currentframe() calframe = inspect.getouterframes(curframe, 2) _LOGGER.debug('caller name: %s', calframe[1][3]) msg = StandardSend(self.address, COMMAND_ID_REQUEST_0X10_0X00) self._plm.send_msg(msg)
[ "def", "id_request", "(", "self", ")", ":", "import", "inspect", "curframe", "=", "inspect", ".", "currentframe", "(", ")", "calframe", "=", "inspect", ".", "getouterframes", "(", "curframe", ",", "2", ")", "_LOGGER", ".", "debug", "(", "'caller name: %s'", ...
Request a device ID from a device.
[ "Request", "a", "device", "ID", "from", "a", "device", "." ]
python
train
gwastro/pycbc-glue
pycbc_glue/ligolw/types.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/types.py#L144-L152
def mk_complex_format_func(fmt): """ Function used internally to generate functions to format complex valued data. """ fmt = fmt + u"+i" + fmt def complex_format_func(z): return fmt % (z.real, z.imag) return complex_format_func
[ "def", "mk_complex_format_func", "(", "fmt", ")", ":", "fmt", "=", "fmt", "+", "u\"+i\"", "+", "fmt", "def", "complex_format_func", "(", "z", ")", ":", "return", "fmt", "%", "(", "z", ".", "real", ",", "z", ".", "imag", ")", "return", "complex_format_f...
Function used internally to generate functions to format complex valued data.
[ "Function", "used", "internally", "to", "generate", "functions", "to", "format", "complex", "valued", "data", "." ]
python
train
fboender/ansible-cmdb
lib/jsonxs.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/jsonxs.py#L130-L174
def jsonxs(data, expr, action=ACTION_GET, value=None, default=None): """ Get, set, delete values in a JSON structure. `expr` is a JSONpath-like expression pointing to the desired value. `action` determines the action to perform. See the module-level `ACTION_*` constants. `value` should be given if action is `ACTION_SET`. If `default` is set and `expr` isn't found, return `default` instead. This will override all exceptions. """ tokens = tokenize(expr) # Walk through the list of tokens to reach the correct path in the data # structure. try: prev_path = None cur_path = data for token in tokens: prev_path = cur_path if not token in cur_path and action in [ACTION_SET, ACTION_MKDICT, ACTION_MKLIST]: # When setting values or creating dicts/lists, the key can be # missing from the data struture continue cur_path = cur_path[token] except Exception: if default is not None: return default else: raise # Perform action the user requested. if action == ACTION_GET: return cur_path elif action == ACTION_DEL: del prev_path[token] elif action == ACTION_SET: prev_path[token] = value elif action == ACTION_APPEND: prev_path[token].append(value) elif action == ACTION_INSERT: prev_path.insert(token, value) elif action == ACTION_MKDICT: prev_path[token] = {} elif action == ACTION_MKLIST: prev_path[token] = [] else: raise ValueError("Invalid action: {}".format(action))
[ "def", "jsonxs", "(", "data", ",", "expr", ",", "action", "=", "ACTION_GET", ",", "value", "=", "None", ",", "default", "=", "None", ")", ":", "tokens", "=", "tokenize", "(", "expr", ")", "# Walk through the list of tokens to reach the correct path in the data", ...
Get, set, delete values in a JSON structure. `expr` is a JSONpath-like expression pointing to the desired value. `action` determines the action to perform. See the module-level `ACTION_*` constants. `value` should be given if action is `ACTION_SET`. If `default` is set and `expr` isn't found, return `default` instead. This will override all exceptions.
[ "Get", "set", "delete", "values", "in", "a", "JSON", "structure", ".", "expr", "is", "a", "JSONpath", "-", "like", "expression", "pointing", "to", "the", "desired", "value", ".", "action", "determines", "the", "action", "to", "perform", ".", "See", "the", ...
python
train
noahbenson/neuropythy
neuropythy/vision/cmag.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/vision/cmag.py#L478-L527
def isoline_vmag(hemi, isolines=None, surface='midgray', min_length=2, **kw): ''' isoline_vmag(hemi) calculates the visual magnification function f using the default set of iso-lines (as returned by neuropythy.vision.visual_isolines()). The hemi argument may alternately be a mesh object. isoline_vmag(hemi, isolns) uses the given iso-lines rather than the default ones. The return value of this funciton is a dictionary whose keys are 'tangential', 'radial', and 'areal', and whose values are the estimated visual magnification functions. These functions are of the form f(x,y) where x and y can be numbers or arrays in the visual field. ''' from neuropythy.util import (curry, zinv) from neuropythy.mri import is_cortex from neuropythy.vision import visual_isolines from neuropythy.geometry import to_mesh # if there's no isolines, get them if isolines is None: isolines = visual_isolines(hemi, **kw) # see if the isolines is a lazy map of visual areas; if so return a lazy map recursing... if pimms.is_vector(isolines.keys(), 'int'): f = lambda k: isoline_vmag(isolines[k], surface=surface, min_length=min_length) return pimms.lazy_map({k:curry(f, k) for k in six.iterkeys(isolines)}) mesh = to_mesh((hemi, surface)) # filter by min length if min_length is not None: isolines = {k: {kk: {kkk: [vvv[ii] for ii in iis] for (kkk,vvv) in six.iteritems(vv)} for (kk,vv) in six.iteritems(v) for iis in [[ii for (ii,u) in enumerate(vv['polar_angles']) if len(u) >= min_length]] if len(iis) > 0} for (k,v) in six.iteritems(isolines)} (rlns,tlns) = [isolines[k] for k in ['eccentricity', 'polar_angle']] if len(rlns) < 2: raise ValueError('fewer than 2 iso-eccentricity lines found') if len(tlns) < 2: raise ValueError('fewer than 2 iso-angle lines found') # grab the visual/surface lines ((rvlns,tvlns),(rslns,tslns)) = [[[u for lns in six.itervalues(xlns) for u in lns[k]] for xlns in (rlns,tlns)] for k in ('visual_coordinates','surface_coordinates')] # calculate some distances (rslen,tslen) = 
[[np.sqrt(np.sum((sx[:,:-1] - sx[:,1:])**2, 0)) for sx in slns] for slns in (rslns,tslns)] (rvlen,tvlen) = [[np.sqrt(np.sum((vx[:,:-1] - vx[:,1:])**2, 0)) for vx in vlns] for vlns in (rvlns,tvlns)] (rvxy, tvxy) = [[0.5*(vx[:,:-1] + vx[:,1:]) for vx in vlns] for vlns in (rvlns,tvlns)] (rvlen,tvlen,rslen,tslen) = [np.concatenate(u) for u in (rvlen,tvlen,rslen,tslen)] (rvxy,tvxy) = [np.hstack(vxy) for vxy in (rvxy,tvxy)] (rvmag,tvmag) = [vlen * zinv(slen) for (vlen,slen) in zip([rvlen,tvlen],[rslen,tslen])] return {k: {'visual_coordinates':vxy, 'visual_magnification': vmag, 'visual_lengths': vlen, 'surface_lengths': slen} for (k,vxy,vmag,vlen,slen) in zip(['radial','tangential'], [rvxy,tvxy], [rvmag,tvmag], [rvlen,tvlen], [rslen,tslen])}
[ "def", "isoline_vmag", "(", "hemi", ",", "isolines", "=", "None", ",", "surface", "=", "'midgray'", ",", "min_length", "=", "2", ",", "*", "*", "kw", ")", ":", "from", "neuropythy", ".", "util", "import", "(", "curry", ",", "zinv", ")", "from", "neur...
isoline_vmag(hemi) calculates the visual magnification function f using the default set of iso-lines (as returned by neuropythy.vision.visual_isolines()). The hemi argument may alternately be a mesh object. isoline_vmag(hemi, isolns) uses the given iso-lines rather than the default ones. The return value of this funciton is a dictionary whose keys are 'tangential', 'radial', and 'areal', and whose values are the estimated visual magnification functions. These functions are of the form f(x,y) where x and y can be numbers or arrays in the visual field.
[ "isoline_vmag", "(", "hemi", ")", "calculates", "the", "visual", "magnification", "function", "f", "using", "the", "default", "set", "of", "iso", "-", "lines", "(", "as", "returned", "by", "neuropythy", ".", "vision", ".", "visual_isolines", "()", ")", ".", ...
python
train
seibert-media/Highton
highton/call_mixins/list_call_mixin.py
https://github.com/seibert-media/Highton/blob/1519e4fb105f62882c2e7bc81065d994649558d8/highton/call_mixins/list_call_mixin.py#L12-L23
def list(cls, params=None): """ Retrieves a list of the model :param params: params as dictionary :type params: dict :return: the list of the parsed xml objects :rtype: list """ return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode( cls.element_from_string(cls._get_request(params=params).text) )
[ "def", "list", "(", "cls", ",", "params", "=", "None", ")", ":", "return", "fields", ".", "ListField", "(", "name", "=", "cls", ".", "ENDPOINT", ",", "init_class", "=", "cls", ")", ".", "decode", "(", "cls", ".", "element_from_string", "(", "cls", "....
Retrieves a list of the model :param params: params as dictionary :type params: dict :return: the list of the parsed xml objects :rtype: list
[ "Retrieves", "a", "list", "of", "the", "model" ]
python
train
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L4557-L4569
def show_buff(self, pos): """ Return the display of the instruction :rtype: string """ buff = self.get_name() + " " buff += "%x:" % self.first_key for i in self.targets: buff += " %x" % i return buff
[ "def", "show_buff", "(", "self", ",", "pos", ")", ":", "buff", "=", "self", ".", "get_name", "(", ")", "+", "\" \"", "buff", "+=", "\"%x:\"", "%", "self", ".", "first_key", "for", "i", "in", "self", ".", "targets", ":", "buff", "+=", "\" %x\"", "%"...
Return the display of the instruction :rtype: string
[ "Return", "the", "display", "of", "the", "instruction" ]
python
train
openstack/horizon
openstack_dashboard/dashboards/admin/group_types/tables.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/dashboards/admin/group_types/tables.py#L55-L59
def filter(self, table, group_types, filter_string): """Naive case-insensitive search.""" query = filter_string.lower() return [group_type for group_type in group_types if query in group_type.name.lower()]
[ "def", "filter", "(", "self", ",", "table", ",", "group_types", ",", "filter_string", ")", ":", "query", "=", "filter_string", ".", "lower", "(", ")", "return", "[", "group_type", "for", "group_type", "in", "group_types", "if", "query", "in", "group_type", ...
Naive case-insensitive search.
[ "Naive", "case", "-", "insensitive", "search", "." ]
python
train
SmokinCaterpillar/pypet
pypet/parameter.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/parameter.py#L2448-L2463
def f_set_single(self, name, item): """Adds a single data item to the pickle result. Note that it is NOT checked if the item can be pickled! """ if self.v_stored: self._logger.debug('You are changing an already stored result. If ' 'you not explicitly overwrite the data on disk, this change ' 'might be lost and not propagated to disk.') if name == PickleResult.PROTOCOL: raise AttributeError('You cannot name an entry `%s`' % PickleResult.PROTOCOL) self._data[name] = item
[ "def", "f_set_single", "(", "self", ",", "name", ",", "item", ")", ":", "if", "self", ".", "v_stored", ":", "self", ".", "_logger", ".", "debug", "(", "'You are changing an already stored result. If '", "'you not explicitly overwrite the data on disk, this change '", "'...
Adds a single data item to the pickle result. Note that it is NOT checked if the item can be pickled!
[ "Adds", "a", "single", "data", "item", "to", "the", "pickle", "result", "." ]
python
test
eventbrite/eventbrite-sdk-python
eventbrite/access_methods.py
https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L902-L913
def post_user_bookmarks_unsave(self, id, **data): """ POST /users/:id/bookmarks/unsave/ Removes the specified bookmark from the event for the user. Returns ``{"deleted": true}``. A user is only authorized to unsave his/her own events. error NOT_AUTHORIZED You are not authorized to unsave an event for this user. error ARGUMENTS_ERROR There are errors with your arguments. """ return self.post("/users/{0}/bookmarks/unsave/".format(id), data=data)
[ "def", "post_user_bookmarks_unsave", "(", "self", ",", "id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "post", "(", "\"/users/{0}/bookmarks/unsave/\"", ".", "format", "(", "id", ")", ",", "data", "=", "data", ")" ]
POST /users/:id/bookmarks/unsave/ Removes the specified bookmark from the event for the user. Returns ``{"deleted": true}``. A user is only authorized to unsave his/her own events. error NOT_AUTHORIZED You are not authorized to unsave an event for this user. error ARGUMENTS_ERROR There are errors with your arguments.
[ "POST", "/", "users", "/", ":", "id", "/", "bookmarks", "/", "unsave", "/", "Removes", "the", "specified", "bookmark", "from", "the", "event", "for", "the", "user", ".", "Returns", "{", "deleted", ":", "true", "}", ".", "A", "user", "is", "only", "au...
python
train
tensorflow/probability
tensorflow_probability/python/mcmc/replica_exchange_mc.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/replica_exchange_mc.py#L519-L556
def bootstrap_results(self, init_state): """Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. This inculdes replica states. """ with tf.compat.v1.name_scope( name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'), values=[init_state]): replica_results = [ self.replica_kernels[i].bootstrap_results(init_state) for i in range(self.num_replica) ] init_state_parts = ( list(init_state) if mcmc_util.is_list_like(init_state) else [init_state]) # Convert all states parts to tensor... replica_states = [[ tf.convert_to_tensor(value=s) for s in init_state_parts ] for i in range(self.num_replica)] if not mcmc_util.is_list_like(init_state): replica_states = [s[0] for s in replica_states] return ReplicaExchangeMCKernelResults( replica_states=replica_states, replica_results=replica_results, sampled_replica_states=replica_states, sampled_replica_results=replica_results, )
[ "def", "bootstrap_results", "(", "self", ",", "init_state", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", "=", "mcmc_util", ".", "make_name", "(", "self", ".", "name", ",", "'remc'", ",", "'bootstrap_results'", ")", ",...
Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. This inculdes replica states.
[ "Returns", "an", "object", "with", "the", "same", "type", "as", "returned", "by", "one_step", "." ]
python
test
yamcs/yamcs-python
yamcs-client/yamcs/mdb/client.py
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L37-L48
def get_space_system(self, name): """ Gets a single space system by its unique name. :param str name: A fully-qualified XTCE name :rtype: .SpaceSystem """ url = '/mdb/{}/space-systems{}'.format(self._instance, name) response = self._client.get_proto(url) message = mdb_pb2.SpaceSystemInfo() message.ParseFromString(response.content) return SpaceSystem(message)
[ "def", "get_space_system", "(", "self", ",", "name", ")", ":", "url", "=", "'/mdb/{}/space-systems{}'", ".", "format", "(", "self", ".", "_instance", ",", "name", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "url", ")", "message", ...
Gets a single space system by its unique name. :param str name: A fully-qualified XTCE name :rtype: .SpaceSystem
[ "Gets", "a", "single", "space", "system", "by", "its", "unique", "name", "." ]
python
train
RedHatInsights/insights-core
insights/formats/text.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/text.py#L94-L108
def progress_bar(self, c, broker): """ Print the formated progress information for the processed return types """ v = broker.get(c) if v and isinstance(v, dict) and len(v) > 0 and 'type' in v: if v["type"] in self.responses: print(self.responses[v["type"]].color + self.responses[v["type"]].intl + Style.RESET_ALL, end="", file=self.stream) else: print(".", end="", file=self.stream) elif c in broker.exceptions: self.counts['exception'] += len(broker.exceptions[c]) print(Fore.RED + "E" + Style.RESET_ALL, end="", file=self.stream) return self
[ "def", "progress_bar", "(", "self", ",", "c", ",", "broker", ")", ":", "v", "=", "broker", ".", "get", "(", "c", ")", "if", "v", "and", "isinstance", "(", "v", ",", "dict", ")", "and", "len", "(", "v", ")", ">", "0", "and", "'type'", "in", "v...
Print the formated progress information for the processed return types
[ "Print", "the", "formated", "progress", "information", "for", "the", "processed", "return", "types" ]
python
train
django-blog-zinnia/wordpress2zinnia
zinnia_wordpress/management/commands/wp2zinnia.py
https://github.com/django-blog-zinnia/wordpress2zinnia/blob/656df6d431418a660f0e590d2226af5e6dd7a3e6/zinnia_wordpress/management/commands/wp2zinnia.py#L134-L158
def import_authors(self, tree): """ Retrieve all the authors used in posts and convert it to new or existing author and return the conversion. """ self.write_out(self.style.STEP('- Importing authors\n')) post_authors = set() for item in tree.findall('channel/item'): post_type = item.find('{%s}post_type' % WP_NS).text if post_type == 'post': post_authors.add(item.find( '{http://purl.org/dc/elements/1.1/}creator').text) self.write_out('> %i authors found.\n' % len(post_authors)) authors = {} for post_author in post_authors: if self.default_author: authors[post_author] = self.default_author else: authors[post_author] = self.migrate_author( post_author.replace(' ', '-')) return authors
[ "def", "import_authors", "(", "self", ",", "tree", ")", ":", "self", ".", "write_out", "(", "self", ".", "style", ".", "STEP", "(", "'- Importing authors\\n'", ")", ")", "post_authors", "=", "set", "(", ")", "for", "item", "in", "tree", ".", "findall", ...
Retrieve all the authors used in posts and convert it to new or existing author and return the conversion.
[ "Retrieve", "all", "the", "authors", "used", "in", "posts", "and", "convert", "it", "to", "new", "or", "existing", "author", "and", "return", "the", "conversion", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/amulet/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L853-L861
def delete_instance(self, nova, instance): """Delete the specified instance.""" # /!\ DEPRECATION WARNING self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.') self.log.debug('Deleting instance ({})...'.format(instance)) return self.delete_resource(nova.servers, instance, msg='nova instance')
[ "def", "delete_instance", "(", "self", ",", "nova", ",", "instance", ")", ":", "# /!\\ DEPRECATION WARNING", "self", ".", "log", ".", "warn", "(", "'/!\\\\ DEPRECATION WARNING: use '", "'delete_resource instead of delete_instance.'", ")", "self", ".", "log", ".", "de...
Delete the specified instance.
[ "Delete", "the", "specified", "instance", "." ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/hong_goda_2007.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/hong_goda_2007.py#L117-L121
def _compute_simple_distance_term(self, C, rjb): """ The distance term for the PGA case ignores magnitude (equation 15) """ return C["b4"] * np.log(np.sqrt(rjb ** 2. + C["h"] ** 2.))
[ "def", "_compute_simple_distance_term", "(", "self", ",", "C", ",", "rjb", ")", ":", "return", "C", "[", "\"b4\"", "]", "*", "np", ".", "log", "(", "np", ".", "sqrt", "(", "rjb", "**", "2.", "+", "C", "[", "\"h\"", "]", "**", "2.", ")", ")" ]
The distance term for the PGA case ignores magnitude (equation 15)
[ "The", "distance", "term", "for", "the", "PGA", "case", "ignores", "magnitude", "(", "equation", "15", ")" ]
python
train
daler/gffutils
gffutils/interface.py
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L355-L409
def _relation(self, id, join_on, join_to, level=None, featuretype=None, order_by=None, reverse=False, completely_within=False, limit=None): # The following docstring will be included in the parents() and # children() docstrings to maintain consistency, since they both # delegate to this method. """ Parameters ---------- id : string or a Feature object level : None or int If `level=None` (default), then return all children regardless of level. If `level` is an integer, then constrain to just that level. {_method_doc} Returns ------- A generator object that yields :class:`Feature` objects. """ if isinstance(id, Feature): id = id.id other = ''' JOIN relations ON relations.{join_on} = features.id WHERE relations.{join_to} = ? '''.format(**locals()) args = [id] level_clause = '' if level is not None: level_clause = 'relations.level = ?' args.append(level) query, args = helpers.make_query( args=args, other=other, extra=level_clause, featuretype=featuretype, order_by=order_by, reverse=reverse, limit=limit, completely_within=completely_within, ) # modify _SELECT so that only unique results are returned query = query.replace("SELECT", "SELECT DISTINCT") for i in self._execute(query, args): yield self._feature_returner(**i)
[ "def", "_relation", "(", "self", ",", "id", ",", "join_on", ",", "join_to", ",", "level", "=", "None", ",", "featuretype", "=", "None", ",", "order_by", "=", "None", ",", "reverse", "=", "False", ",", "completely_within", "=", "False", ",", "limit", "=...
Parameters ---------- id : string or a Feature object level : None or int If `level=None` (default), then return all children regardless of level. If `level` is an integer, then constrain to just that level. {_method_doc} Returns ------- A generator object that yields :class:`Feature` objects.
[ "Parameters", "----------" ]
python
train
geophysics-ubonn/reda
lib/reda/plotters/plots2d.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/plots2d.py#L230-L414
def plot_pseudosection(df, plot_key, spacing=1, ctypes=None, dd_merge=False, cb=False, **kwargs): """Create a pseudosection plot for a given measurement Parameters ---------- df: dataframe measurement dataframe, one measurement frame (i.e., only one frequency etc) key: which key to colorcode spacing: float, optional assumed electrode spacing ctypes: list of strings which configurations to plot, default: dd dd_merge: bool, optional ? cb: bool, optional ? """ grid = None pseudo_d_functions = { 'dd': _pseudodepths_dd_simple, 'schlumberger': _pseudodepths_schlumberger, 'wenner': _pseudodepths_wenner, } titles = { 'dd': 'dipole-dipole configurations', 'schlumberger': 'Schlumberger configurations', 'wenner': 'Wenner configurations', } # for now sort data and only plot dipole-dipole only_types = ctypes or ['dd', ] if 'schlumberger' in only_types: raise Exception('plotting of pseudosections not implemented for ' + 'Schlumberger configurations!') configs = df[['a', 'b', 'm', 'n']].values results = fT.filter( configs, settings={'only_types': only_types, }, ) values = df[plot_key].values plot_objects = [] for key in sorted(results.keys()): print('plotting: ', key) if key == 'not_sorted': continue index_dict = results[key] # it is possible that we want to generate multiple plots for one # type of measurement, i.e., to separate skips of dipole-dipole # measurements. 
Therefore we generate two lists: # 1) list of list of indices to plot # 2) corresponding labels if key == 'dd' and not dd_merge: plot_list = [] labels_add = [] for skip in sorted(index_dict.keys()): plot_list.append(index_dict[skip]) labels_add.append(' - skip {0}'.format(skip)) else: # merge all indices plot_list = [np.hstack(index_dict.values()), ] print('schlumberger', plot_list) labels_add = ['', ] # generate plots for indices, label_add in zip(plot_list, labels_add): if len(indices) == 0: continue ddc = configs[indices] plot_data = values[indices] px, pz = pseudo_d_functions[key](ddc, spacing, grid) # we need at least four points for a spatial interpolation, I # think... if px.size <= 4: continue # take 200 points for the new grid in every direction. Could be # adapted to the actual ratio xg = np.linspace(px.min(), px.max(), 200) zg = np.linspace(pz.min(), pz.max(), 200) x, z = np.meshgrid(xg, zg) cmap_name = kwargs.get('cmap_name', 'jet') cmap = mpl.cm.get_cmap(cmap_name) # normalize data data_min = kwargs.get('cbmin', plot_data.min()) data_max = kwargs.get('cbmax', plot_data.max()) cnorm = mpl.colors.Normalize(vmin=data_min, vmax=data_max) scalarMap = mpl.cm.ScalarMappable(norm=cnorm, cmap=cmap) fcolors = scalarMap.to_rgba(plot_data) try: image = si.griddata( (px, pz), fcolors, (x, z), method='linear', ) except siq.QhullError as e: print('Ex', e) continue cmap = mpl.cm.get_cmap('jet_r') data_ratio = np.abs(px.max() - px.min()) / np.abs(pz.min()) fig_size_y = 15 / data_ratio + 6 / 2.54 fig = plt.figure(figsize=(15, fig_size_y)) fig_top = 1 / 2.54 / fig_size_y fig_left = 2 / 2.54 / 15 fig_right = 1 / 2.54 / 15 if cb: fig_bottom = 3 / 2.54 / fig_size_y else: fig_bottom = 0.05 ax = fig.add_axes([ fig_left, fig_bottom + fig_top * 2, 1 - fig_left - fig_right, 1 - fig_top - fig_bottom - fig_top * 2 ]) im = ax.imshow( image[::-1], extent=(xg.min(), xg.max(), zg.min(), zg.max()), interpolation='none', aspect='auto', # vmin=10, # vmax=300, cmap=cmap, ) 
ax.set_ylim(pz.min(), 0) # colorbar if cb: print('plotting colorbar') # the colorbar has 3 cm on the bottom ax_cb = fig.add_axes([ fig_left * 4, fig_top * 2, 1 - fig_left * 4 - fig_right * 4, fig_bottom - fig_top * 2 ]) # from mpl_toolkits.axes_grid1 import make_axes_locatable # divider = make_axes_locatable(ax) # ax_cb = divider.append_axes("bottom", "5%", pad="3%") # (ax_cb, kw) = mpl.colorbar.make_axes_gridspec( # ax, # orientation='horizontal', # fraction=fig_bottom, # pad=0.3, # shrink=0.9, # # location='bottom', # ) cb = mpl.colorbar.ColorbarBase( ax=ax_cb, cmap=cmap, norm=cnorm, orientation='horizontal', # **kw ) cb.set_label('cb label') else: fig_bottom = 0.05 # 1cm on top # # 3 cm on bottom for colorbar # fig.subplots_adjust( # top=1 - fig_top, # bottom=fig_bottom, # ) ax.set_title(titles[key] + label_add) ax.set_aspect('equal') ax.set_xlabel('x [m]') ax.set_ylabel('x [z]') plot_objects.append((fig, ax, im)) return plot_objects
[ "def", "plot_pseudosection", "(", "df", ",", "plot_key", ",", "spacing", "=", "1", ",", "ctypes", "=", "None", ",", "dd_merge", "=", "False", ",", "cb", "=", "False", ",", "*", "*", "kwargs", ")", ":", "grid", "=", "None", "pseudo_d_functions", "=", ...
Create a pseudosection plot for a given measurement Parameters ---------- df: dataframe measurement dataframe, one measurement frame (i.e., only one frequency etc) key: which key to colorcode spacing: float, optional assumed electrode spacing ctypes: list of strings which configurations to plot, default: dd dd_merge: bool, optional ? cb: bool, optional ?
[ "Create", "a", "pseudosection", "plot", "for", "a", "given", "measurement" ]
python
train
uw-it-aca/uw-restclients-canvas
uw_canvas/assignments.py
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/assignments.py#L30-L40
def update_assignment(self, assignment): """ Modify an existing assignment. https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.update """ url = ASSIGNMENTS_API.format(assignment.course_id) + "/{}".format( assignment.assignment_id) data = self._put_resource(url, assignment.json_data()) return Assignment(data=data)
[ "def", "update_assignment", "(", "self", ",", "assignment", ")", ":", "url", "=", "ASSIGNMENTS_API", ".", "format", "(", "assignment", ".", "course_id", ")", "+", "\"/{}\"", ".", "format", "(", "assignment", ".", "assignment_id", ")", "data", "=", "self", ...
Modify an existing assignment. https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.update
[ "Modify", "an", "existing", "assignment", "." ]
python
test
mitsei/dlkit
dlkit/services/assessment.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/assessment.py#L3038-L3046
def use_comparative_assessment_offered_view(self): """Pass through to provider AssessmentOfferedLookupSession.use_comparative_assessment_offered_view""" self._object_views['assessment_offered'] = COMPARATIVE # self._get_provider_session('assessment_offered_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_comparative_assessment_offered_view() except AttributeError: pass
[ "def", "use_comparative_assessment_offered_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'assessment_offered'", "]", "=", "COMPARATIVE", "# self._get_provider_session('assessment_offered_lookup_session') # To make sure the session is tracked", "for", "session", ...
Pass through to provider AssessmentOfferedLookupSession.use_comparative_assessment_offered_view
[ "Pass", "through", "to", "provider", "AssessmentOfferedLookupSession", ".", "use_comparative_assessment_offered_view" ]
python
train
gbowerman/azurerm
azurerm/amsrp.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/amsrp.py#L732-L756
def update_media_assetfile(access_token, parent_asset_id, asset_id, content_length, name): '''Update Media Service Asset File. Args: access_token (str): A valid Azure authentication token. parent_asset_id (str): A Media Service Asset Parent Asset ID. asset_id (str): A Media Service Asset Asset ID. content_length (str): A Media Service Asset Content Length. name (str): A Media Service Asset name. Returns: HTTP response. JSON body. ''' path = '/Files' full_path = ''.join([path, "('", asset_id, "')"]) full_path_encoded = urllib.parse.quote(full_path, safe='') endpoint = ''.join([ams_rest_endpoint, full_path_encoded]) body = '{ \ "ContentFileSize": "' + str(content_length) + '", \ "Id": "' + asset_id + '", \ "MimeType": "video/mp4", \ "Name": "' + name + '", \ "ParentAssetId": "' + parent_asset_id + '" \ }' return do_ams_patch(endpoint, full_path_encoded, body, access_token)
[ "def", "update_media_assetfile", "(", "access_token", ",", "parent_asset_id", ",", "asset_id", ",", "content_length", ",", "name", ")", ":", "path", "=", "'/Files'", "full_path", "=", "''", ".", "join", "(", "[", "path", ",", "\"('\"", ",", "asset_id", ",", ...
Update Media Service Asset File. Args: access_token (str): A valid Azure authentication token. parent_asset_id (str): A Media Service Asset Parent Asset ID. asset_id (str): A Media Service Asset Asset ID. content_length (str): A Media Service Asset Content Length. name (str): A Media Service Asset name. Returns: HTTP response. JSON body.
[ "Update", "Media", "Service", "Asset", "File", "." ]
python
train
TeamHG-Memex/eli5
eli5/formatters/html.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/formatters/html.py#L142-L165
def render_targets_weighted_spans( targets, # type: List[TargetExplanation] preserve_density, # type: Optional[bool] ): # type: (...) -> List[Optional[str]] """ Return a list of rendered weighted spans for targets. Function must accept a list in order to select consistent weight ranges across all targets. """ prepared_weighted_spans = prepare_weighted_spans( targets, preserve_density) def _fmt_pws(pws): # type: (PreparedWeightedSpans) -> str name = ('<b>{}:</b> '.format(pws.doc_weighted_spans.vec_name) if pws.doc_weighted_spans.vec_name else '') return '{}{}'.format(name, render_weighted_spans(pws)) def _fmt_pws_list(pws_lst): # type: (List[PreparedWeightedSpans]) -> str return '<br/>'.join(_fmt_pws(pws) for pws in pws_lst) return [_fmt_pws_list(pws_lst) if pws_lst else None for pws_lst in prepared_weighted_spans]
[ "def", "render_targets_weighted_spans", "(", "targets", ",", "# type: List[TargetExplanation]", "preserve_density", ",", "# type: Optional[bool]", ")", ":", "# type: (...) -> List[Optional[str]]", "prepared_weighted_spans", "=", "prepare_weighted_spans", "(", "targets", ",", "pre...
Return a list of rendered weighted spans for targets. Function must accept a list in order to select consistent weight ranges across all targets.
[ "Return", "a", "list", "of", "rendered", "weighted", "spans", "for", "targets", ".", "Function", "must", "accept", "a", "list", "in", "order", "to", "select", "consistent", "weight", "ranges", "across", "all", "targets", "." ]
python
train
graphql-python/graphql-core-next
graphql/utilities/find_breaking_changes.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/utilities/find_breaking_changes.py#L147-L166
def find_removed_types( old_schema: GraphQLSchema, new_schema: GraphQLSchema ) -> List[BreakingChange]: """Find removed types. Given two schemas, returns a list containing descriptions of any breaking changes in the newSchema related to removing an entire type. """ old_type_map = old_schema.type_map new_type_map = new_schema.type_map breaking_changes = [] for type_name in old_type_map: if type_name not in new_type_map: breaking_changes.append( BreakingChange( BreakingChangeType.TYPE_REMOVED, f"{type_name} was removed." ) ) return breaking_changes
[ "def", "find_removed_types", "(", "old_schema", ":", "GraphQLSchema", ",", "new_schema", ":", "GraphQLSchema", ")", "->", "List", "[", "BreakingChange", "]", ":", "old_type_map", "=", "old_schema", ".", "type_map", "new_type_map", "=", "new_schema", ".", "type_map...
Find removed types. Given two schemas, returns a list containing descriptions of any breaking changes in the newSchema related to removing an entire type.
[ "Find", "removed", "types", "." ]
python
train
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9187-L9217
def metaseries_description_metadata(description): """Return metatata from MetaSeries image description as dict.""" if not description.startswith('<MetaData>'): raise ValueError('invalid MetaSeries image description') from xml.etree import cElementTree as etree # delayed import root = etree.fromstring(description) types = {'float': float, 'int': int, 'bool': lambda x: asbool(x, 'on', 'off')} def parse(root, result): # recursive for child in root: attrib = child.attrib if not attrib: result[child.tag] = parse(child, {}) continue if 'id' in attrib: i = attrib['id'] t = attrib['type'] v = attrib['value'] if t in types: result[i] = types[t](v) else: result[i] = v return result adict = parse(root, {}) if 'Description' in adict: adict['Description'] = adict['Description'].replace('&#13;&#10;', '\n') return adict
[ "def", "metaseries_description_metadata", "(", "description", ")", ":", "if", "not", "description", ".", "startswith", "(", "'<MetaData>'", ")", ":", "raise", "ValueError", "(", "'invalid MetaSeries image description'", ")", "from", "xml", ".", "etree", "import", "c...
Return metatata from MetaSeries image description as dict.
[ "Return", "metatata", "from", "MetaSeries", "image", "description", "as", "dict", "." ]
python
train
PredixDev/predixpy
predix/admin/acs.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/acs.py#L23-L30
def _get_uri(self): """ Will return the uri for an existing instance. """ if not self.service.exists(): logging.warning("Service does not yet exist.") return self.service.settings.data['uri']
[ "def", "_get_uri", "(", "self", ")", ":", "if", "not", "self", ".", "service", ".", "exists", "(", ")", ":", "logging", ".", "warning", "(", "\"Service does not yet exist.\"", ")", "return", "self", ".", "service", ".", "settings", ".", "data", "[", "'ur...
Will return the uri for an existing instance.
[ "Will", "return", "the", "uri", "for", "an", "existing", "instance", "." ]
python
train
awslabs/serverless-application-model
samtranslator/intrinsics/actions.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/intrinsics/actions.py#L55-L76
def _parse_resource_reference(cls, ref_value): """ Splits a resource reference of structure "LogicalId.Property" and returns the "LogicalId" and "Property" separately. :param string ref_value: Input reference value which *may* contain the structure "LogicalId.Property" :return string, string: Returns two values - logical_id, property. If the input does not contain the structure, then both `logical_id` and property will be None """ no_result = (None, None) if not isinstance(ref_value, string_types): return no_result splits = ref_value.split(cls._resource_ref_separator, 1) # Either there is no 'dot' (or) one of the values is empty string (Ex: when you split "LogicalId.") if len(splits) != 2 or not all(splits): return no_result return splits[0], splits[1]
[ "def", "_parse_resource_reference", "(", "cls", ",", "ref_value", ")", ":", "no_result", "=", "(", "None", ",", "None", ")", "if", "not", "isinstance", "(", "ref_value", ",", "string_types", ")", ":", "return", "no_result", "splits", "=", "ref_value", ".", ...
Splits a resource reference of structure "LogicalId.Property" and returns the "LogicalId" and "Property" separately. :param string ref_value: Input reference value which *may* contain the structure "LogicalId.Property" :return string, string: Returns two values - logical_id, property. If the input does not contain the structure, then both `logical_id` and property will be None
[ "Splits", "a", "resource", "reference", "of", "structure", "LogicalId", ".", "Property", "and", "returns", "the", "LogicalId", "and", "Property", "separately", "." ]
python
train
wilson-eft/wilson
wilson/run/smeft/rge.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/smeft/rge.py#L43-L59
def smeft_evolve_continuous(C_in, scale_in, scale_out, newphys=True, **kwargs): """Solve the SMEFT RGEs by numeric integration, returning a function that allows to compute an interpolated solution at arbitrary intermediate scales.""" sol = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys, dense_output=True, **kwargs) @np.vectorize def _rge_solution(scale): t = log(scale) y = sol.sol(t).view(complex) yd = C_array2dict(y) yw = arrays2wcxf_nonred(yd) return yw def rge_solution(scale): # this is to return a scalar if the input is scalar return _rge_solution(scale)[()] return rge_solution
[ "def", "smeft_evolve_continuous", "(", "C_in", ",", "scale_in", ",", "scale_out", ",", "newphys", "=", "True", ",", "*", "*", "kwargs", ")", ":", "sol", "=", "_smeft_evolve", "(", "C_in", ",", "scale_in", ",", "scale_out", ",", "newphys", "=", "newphys", ...
Solve the SMEFT RGEs by numeric integration, returning a function that allows to compute an interpolated solution at arbitrary intermediate scales.
[ "Solve", "the", "SMEFT", "RGEs", "by", "numeric", "integration", "returning", "a", "function", "that", "allows", "to", "compute", "an", "interpolated", "solution", "at", "arbitrary", "intermediate", "scales", "." ]
python
train
ganguli-lab/proxalgs
proxalgs/core.py
https://github.com/ganguli-lab/proxalgs/blob/74f54467ad072d3229edea93fa84ddd98dd77c67/proxalgs/core.py#L121-L232
def minimize(self, theta_init, max_iter=50, callback=None, disp=0, tau=(10., 2., 2.), tol=1e-3): """ Minimize a list of objectives using a proximal consensus algorithm Parameters ---------- theta_init : ndarray Initial parameter vector (numpy array) max_iter : int, optional Maximum number of iterations to run (default: 50) callback : function, optional a function that gets called on each iteration with the following arguments: the current parameter value (ndarray), and a dictionary that contains a information about the status of the algorithm disp : int, optional determines how much information to display when running. Ranges from 0 (nothing) to 3 (lots of information) Returns ------- theta : ndarray The parameters found after running the optimization procedure Other Parameters ---------------- tau : (float, float, float), optional initial, increment and decrement parameters for the momentum scheduler (default: (10, 2, 2)) tol : float, optional residual tolerance for assessing convergence. if both the primal and dual residuals are less than this value, then the algorithm has converged (default: 1e-3) """ # get list of objectives for this parameter num_obj = len(self.objectives) assert num_obj >= 1, "There must be at least one objective!" 
# initialize lists of primal and dual variable copies, one for each objective orig_shape = theta_init.shape primals = [theta_init.flatten() for _ in range(num_obj)] duals = [np.zeros(theta_init.size) for _ in range(num_obj)] theta_avg = np.mean(primals, axis=0).ravel() # initialize penalty parameter tau = namedtuple('tau', ('init', 'inc', 'dec'))(*tau) rho = tau.init # store cumulative runtimes of each iteration, starting now tstart = time.time() # clear metadata self.metadata = defaultdict(list) # run ADMM iterations self.converged = False for cur_iter in range(max_iter): # store the parameters from the previous iteration theta_prev = theta_avg # update each primal variable copy by taking a proximal step via each objective for varidx, dual in enumerate(duals): primals[varidx] = self.objectives[varidx]((theta_prev - dual).reshape(orig_shape), rho).ravel() # average primal copies theta_avg = np.mean(primals, axis=0) # update the dual variables (after primal update has finished) for varidx, primal in enumerate(primals): duals[varidx] += primal - theta_avg # compute primal and dual residuals primal_resid = float(np.sum([np.linalg.norm(primal - theta_avg) for primal in primals])) dual_resid = num_obj * rho ** 2 * np.linalg.norm(theta_avg - theta_prev) # update penalty parameter according to primal and dual residuals # (see sect. 
3.4.1 of the Boyd and Parikh ADMM paper) if primal_resid > tau.init * dual_resid: rho *= float(tau.inc) elif dual_resid > tau.init * primal_resid: rho /= float(tau.dec) # update metadata for this iteration self.metadata['Primal resid'].append(primal_resid) self.metadata['Dual resid'].append(dual_resid) self.metadata['Time (s)'].append(time.time() - tstart) self.metadata['rho'].append(rho) # invoke the callback function with the current parameters and # history if callback is not None: # get the metadata from this iteration data = valmap(last, self.metadata) callback(theta_avg.reshape(orig_shape), data) # update the display self.update_display(cur_iter + 1, disp) # check for convergence if (primal_resid <= tol) & (dual_resid <= tol): self.converged = True break # clean up display self.update_display(-1, disp) # store and return final parameters self.theta = theta_avg.reshape(orig_shape) return self.theta
[ "def", "minimize", "(", "self", ",", "theta_init", ",", "max_iter", "=", "50", ",", "callback", "=", "None", ",", "disp", "=", "0", ",", "tau", "=", "(", "10.", ",", "2.", ",", "2.", ")", ",", "tol", "=", "1e-3", ")", ":", "# get list of objectives...
Minimize a list of objectives using a proximal consensus algorithm Parameters ---------- theta_init : ndarray Initial parameter vector (numpy array) max_iter : int, optional Maximum number of iterations to run (default: 50) callback : function, optional a function that gets called on each iteration with the following arguments: the current parameter value (ndarray), and a dictionary that contains a information about the status of the algorithm disp : int, optional determines how much information to display when running. Ranges from 0 (nothing) to 3 (lots of information) Returns ------- theta : ndarray The parameters found after running the optimization procedure Other Parameters ---------------- tau : (float, float, float), optional initial, increment and decrement parameters for the momentum scheduler (default: (10, 2, 2)) tol : float, optional residual tolerance for assessing convergence. if both the primal and dual residuals are less than this value, then the algorithm has converged (default: 1e-3)
[ "Minimize", "a", "list", "of", "objectives", "using", "a", "proximal", "consensus", "algorithm" ]
python
train
pnuckowski/aioresponses
aioresponses/compat.py
https://github.com/pnuckowski/aioresponses/blob/566461a21a25757e313e0d4afaf338d53d66db03/aioresponses/compat.py#L44-L47
def normalize_url(url: 'Union[URL, str]') -> 'URL': """Normalize url to make comparisons.""" url = URL(url) return url.with_query(urlencode(sorted(parse_qsl(url.query_string))))
[ "def", "normalize_url", "(", "url", ":", "'Union[URL, str]'", ")", "->", "'URL'", ":", "url", "=", "URL", "(", "url", ")", "return", "url", ".", "with_query", "(", "urlencode", "(", "sorted", "(", "parse_qsl", "(", "url", ".", "query_string", ")", ")", ...
Normalize url to make comparisons.
[ "Normalize", "url", "to", "make", "comparisons", "." ]
python
train
vladsaveliev/TargQC
targqc/utilz/jsontemplate/_jsontemplate.py
https://github.com/vladsaveliev/TargQC/blob/e887c36b2194dbd73c6ea32989b6cb84c6c0e58d/targqc/utilz/jsontemplate/_jsontemplate.py#L1725-L1752
def _Execute(statements, context, callback, trace): """Execute a bunch of template statements in a ScopedContext. Args: callback: Strings are "written" to this callback function. trace: Trace object, or None This is called in a mutually recursive fashion. """ # Every time we call _Execute, increase this depth if trace: trace.exec_depth += 1 for i, statement in enumerate(statements): if isinstance(statement, six.string_types): callback(statement) else: # In the case of a substitution, args is a pair (name, formatters). # In the case of a section, it's a _Section instance. try: func, args = statement func(args, context, callback, trace) except UndefinedVariable as e: # Show context for statements start = max(0, i - 3) end = i + 3 e.near = statements[start:end] e.trace = trace # Attach caller's trace (could be None) raise
[ "def", "_Execute", "(", "statements", ",", "context", ",", "callback", ",", "trace", ")", ":", "# Every time we call _Execute, increase this depth", "if", "trace", ":", "trace", ".", "exec_depth", "+=", "1", "for", "i", ",", "statement", "in", "enumerate", "(", ...
Execute a bunch of template statements in a ScopedContext. Args: callback: Strings are "written" to this callback function. trace: Trace object, or None This is called in a mutually recursive fashion.
[ "Execute", "a", "bunch", "of", "template", "statements", "in", "a", "ScopedContext", "." ]
python
train
ttsteiger/cryptocompy
cryptocompy/mining.py
https://github.com/ttsteiger/cryptocompy/blob/b0514079202587a5bfb3a4f2c871196315b9302e/cryptocompy/mining.py#L68-L120
def get_mining_equipment(): """Get all the mining equipment information available. Returns: This function returns two major dictionaries. The first one contains information about the coins for which mining equipment data is available. coin_data: {symbol1: {'BlockNumber': ..., 'BlockReward': ..., 'BlockRewardReduction': ..., 'BlockTime': ..., 'DifficultyAdjustment': ..., 'NetHashesPerSecond': ..., 'PreviousTotalCoinsMined': ..., 'PriceUSD': ..., 'Symbol': ..., 'TotalCoinsMined': ...}, symbol2: {...}, ...} The other one contains all the available mining equipment. mining_data: {id1: {'AffiliateURL': ..., 'Algorithm': ..., 'Company': ..., 'Cost': ..., 'CurrenciesAvailable': ..., 'CurrenciesAvailableLogo': ..., 'CurrenciesAvailableName': ..., 'Currency': ..., 'EquipmentType': ..., 'HashesPerSecond': ..., 'Id': ..., 'LogoUrl': ..., 'Name': ..., 'ParentId': ..., 'PowerConsumption': ..., 'Recommended': ..., 'Sponsored': ..., 'Url': ...}, id2: {...}, """ # load data url = build_url('miningequipment') data = load_data(url) coin_data = data['CoinData'] mining_data = data['MiningData'] return coin_data, mining_data
[ "def", "get_mining_equipment", "(", ")", ":", "# load data", "url", "=", "build_url", "(", "'miningequipment'", ")", "data", "=", "load_data", "(", "url", ")", "coin_data", "=", "data", "[", "'CoinData'", "]", "mining_data", "=", "data", "[", "'MiningData'", ...
Get all the mining equipment information available. Returns: This function returns two major dictionaries. The first one contains information about the coins for which mining equipment data is available. coin_data: {symbol1: {'BlockNumber': ..., 'BlockReward': ..., 'BlockRewardReduction': ..., 'BlockTime': ..., 'DifficultyAdjustment': ..., 'NetHashesPerSecond': ..., 'PreviousTotalCoinsMined': ..., 'PriceUSD': ..., 'Symbol': ..., 'TotalCoinsMined': ...}, symbol2: {...}, ...} The other one contains all the available mining equipment. mining_data: {id1: {'AffiliateURL': ..., 'Algorithm': ..., 'Company': ..., 'Cost': ..., 'CurrenciesAvailable': ..., 'CurrenciesAvailableLogo': ..., 'CurrenciesAvailableName': ..., 'Currency': ..., 'EquipmentType': ..., 'HashesPerSecond': ..., 'Id': ..., 'LogoUrl': ..., 'Name': ..., 'ParentId': ..., 'PowerConsumption': ..., 'Recommended': ..., 'Sponsored': ..., 'Url': ...}, id2: {...},
[ "Get", "all", "the", "mining", "equipment", "information", "available", "." ]
python
train
dmlc/gluon-nlp
scripts/bert/utils.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/utils.py#L108-L123
def profile(curr_step, start_step, end_step, profile_name='profile.json', early_exit=True): """profile the program between [start_step, end_step).""" if curr_step == start_step: mx.nd.waitall() mx.profiler.set_config(profile_memory=False, profile_symbolic=True, profile_imperative=True, filename=profile_name, aggregate_stats=True) mx.profiler.set_state('run') elif curr_step == end_step: mx.nd.waitall() mx.profiler.set_state('stop') logging.info(mx.profiler.dumps()) mx.profiler.dump() if early_exit: exit()
[ "def", "profile", "(", "curr_step", ",", "start_step", ",", "end_step", ",", "profile_name", "=", "'profile.json'", ",", "early_exit", "=", "True", ")", ":", "if", "curr_step", "==", "start_step", ":", "mx", ".", "nd", ".", "waitall", "(", ")", "mx", "."...
profile the program between [start_step, end_step).
[ "profile", "the", "program", "between", "[", "start_step", "end_step", ")", "." ]
python
train
apache/airflow
airflow/utils/helpers.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/helpers.py#L131-L140
def reduce_in_chunks(fn, iterable, initializer, chunk_size=0): """ Reduce the given list of items by splitting it into chunks of the given size and passing each chunk through the reducer """ if len(iterable) == 0: return initializer if chunk_size == 0: chunk_size = len(iterable) return reduce(fn, chunks(iterable, chunk_size), initializer)
[ "def", "reduce_in_chunks", "(", "fn", ",", "iterable", ",", "initializer", ",", "chunk_size", "=", "0", ")", ":", "if", "len", "(", "iterable", ")", "==", "0", ":", "return", "initializer", "if", "chunk_size", "==", "0", ":", "chunk_size", "=", "len", ...
Reduce the given list of items by splitting it into chunks of the given size and passing each chunk through the reducer
[ "Reduce", "the", "given", "list", "of", "items", "by", "splitting", "it", "into", "chunks", "of", "the", "given", "size", "and", "passing", "each", "chunk", "through", "the", "reducer" ]
python
test
balloob/pychromecast
pychromecast/socket_client.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/socket_client.py#L1099-L1122
def new_socket(): """ Create a new socket with OS-specific parameters Try to set SO_REUSEPORT for BSD-flavored systems if it's an option. Catches errors if not. """ new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) new_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: # noinspection PyUnresolvedReferences reuseport = socket.SO_REUSEPORT except AttributeError: pass else: try: new_sock.setsockopt(socket.SOL_SOCKET, reuseport, 1) except (OSError, socket.error) as err: # OSError on python 3, socket.error on python 2 if err.errno != errno.ENOPROTOOPT: raise return new_sock
[ "def", "new_socket", "(", ")", ":", "new_sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "new_sock", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", ...
Create a new socket with OS-specific parameters Try to set SO_REUSEPORT for BSD-flavored systems if it's an option. Catches errors if not.
[ "Create", "a", "new", "socket", "with", "OS", "-", "specific", "parameters" ]
python
train
gem/oq-engine
openquake/hmtk/parsers/catalogue/gcmt_ndk_parser.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/parsers/catalogue/gcmt_ndk_parser.py#L216-L301
def to_hmtk(self, use_centroid=True): ''' Convert the content of the GCMT catalogue to a HMTK catalogue. ''' self._preallocate_data_dict() for iloc, gcmt in enumerate(self.catalogue.gcmts): self.catalogue.data['eventID'][iloc] = iloc if use_centroid: self.catalogue.data['year'][iloc] = \ gcmt.centroid.date.year self.catalogue.data['month'][iloc] = \ gcmt.centroid.date.month self.catalogue.data['day'][iloc] = \ gcmt.centroid.date.day self.catalogue.data['hour'][iloc] = \ gcmt.centroid.time.hour self.catalogue.data['minute'][iloc] = \ gcmt.centroid.time.minute self.catalogue.data['second'][iloc] = \ gcmt.centroid.time.second self.catalogue.data['longitude'][iloc] = \ gcmt.centroid.longitude self.catalogue.data['latitude'][iloc] = \ gcmt.centroid.latitude self.catalogue.data['depth'][iloc] = \ gcmt.centroid.depth else: self.catalogue.data['year'][iloc] = \ gcmt.hypocentre.date.year self.catalogue.data['month'][iloc] = \ gcmt.hypocentre.date.month self.catalogue.data['day'][iloc] = \ gcmt.hypocentre.date.day self.catalogue.data['hour'][iloc] = \ gcmt.hypocentre.time.hour self.catalogue.data['minute'][iloc] = \ gcmt.hypocentre.time.minute self.catalogue.data['second'][iloc] = \ gcmt.hypocentre.time.second self.catalogue.data['longitude'][iloc] = \ gcmt.hypocentre.longitude self.catalogue.data['latitude'][iloc] = \ gcmt.hypocentre.latitude self.catalogue.data['depth'][iloc] = \ gcmt.hypocentre.depth # Moment, magnitude and relative errors self.catalogue.data['moment'][iloc] = gcmt.moment self.catalogue.data['magnitude'][iloc] = gcmt.magnitude self.catalogue.data['f_clvd'][iloc] = gcmt.f_clvd self.catalogue.data['e_rel'][iloc] = gcmt.e_rel self.catalogue.data['centroidID'][iloc] = gcmt.identifier # Nodal planes self.catalogue.data['strike1'][iloc] = \ gcmt.nodal_planes.nodal_plane_1['strike'] self.catalogue.data['dip1'][iloc] = \ gcmt.nodal_planes.nodal_plane_1['dip'] self.catalogue.data['rake1'][iloc] = \ gcmt.nodal_planes.nodal_plane_1['rake'] 
self.catalogue.data['strike2'][iloc] = \ gcmt.nodal_planes.nodal_plane_2['strike'] self.catalogue.data['dip2'][iloc] = \ gcmt.nodal_planes.nodal_plane_2['dip'] self.catalogue.data['rake2'][iloc] = \ gcmt.nodal_planes.nodal_plane_2['rake'] # Principal axes self.catalogue.data['eigenvalue_b'][iloc] = \ gcmt.principal_axes.b_axis['eigenvalue'] self.catalogue.data['azimuth_b'][iloc] = \ gcmt.principal_axes.b_axis['azimuth'] self.catalogue.data['plunge_b'][iloc] = \ gcmt.principal_axes.b_axis['plunge'] self.catalogue.data['eigenvalue_p'][iloc] = \ gcmt.principal_axes.p_axis['eigenvalue'] self.catalogue.data['azimuth_p'][iloc] = \ gcmt.principal_axes.p_axis['azimuth'] self.catalogue.data['plunge_p'][iloc] = \ gcmt.principal_axes.p_axis['plunge'] self.catalogue.data['eigenvalue_t'][iloc] = \ gcmt.principal_axes.t_axis['eigenvalue'] self.catalogue.data['azimuth_t'][iloc] = \ gcmt.principal_axes.t_axis['azimuth'] self.catalogue.data['plunge_t'][iloc] = \ gcmt.principal_axes.t_axis['plunge'] return self.catalogue
[ "def", "to_hmtk", "(", "self", ",", "use_centroid", "=", "True", ")", ":", "self", ".", "_preallocate_data_dict", "(", ")", "for", "iloc", ",", "gcmt", "in", "enumerate", "(", "self", ".", "catalogue", ".", "gcmts", ")", ":", "self", ".", "catalogue", ...
Convert the content of the GCMT catalogue to a HMTK catalogue.
[ "Convert", "the", "content", "of", "the", "GCMT", "catalogue", "to", "a", "HMTK", "catalogue", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L893-L912
def hide_routemap_holder_route_map_content_set_weight_weight_value(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop('action_rm') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') content = ET.SubElement(route_map, "content") set = ET.SubElement(content, "set") weight = ET.SubElement(set, "weight") weight_value = ET.SubElement(weight, "weight-value") weight_value.text = kwargs.pop('weight_value') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "hide_routemap_holder_route_map_content_set_weight_weight_value", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hide_routemap_holder", "=", "ET", ".", "SubElement", "(", "config", ",", "\"hide-...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
zeth/inputs
inputs.py
https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L2280-L2300
def handle_input(self, event): """Process they keyboard input.""" self.update_timeval() self.events = [] code = self._get_event_key_code(event) if code in self.codes: new_code = self.codes[code] else: new_code = 0 event_type = self._get_event_type(event) value = self._get_key_value(event, event_type) scan_event, key_event = self.emulate_press( new_code, code, value, self.timeval) self.events.append(scan_event) self.events.append(key_event) # End with a sync marker self.events.append(self.sync_marker(self.timeval)) # We are done self.write_to_pipe(self.events)
[ "def", "handle_input", "(", "self", ",", "event", ")", ":", "self", ".", "update_timeval", "(", ")", "self", ".", "events", "=", "[", "]", "code", "=", "self", ".", "_get_event_key_code", "(", "event", ")", "if", "code", "in", "self", ".", "codes", "...
Process they keyboard input.
[ "Process", "they", "keyboard", "input", "." ]
python
train
haizi-zh/scrapy-qiniu
scrapy_qiniu/impl.py
https://github.com/haizi-zh/scrapy-qiniu/blob/9a3dddacd2e665cb3c86308772040946c3b82415/scrapy_qiniu/impl.py#L168-L172
def file_path(self, request, response=None, info=None): """ 抓取到的资源存放到七牛的时候, 应该采用什么样的key? 返回的path是一个JSON字符串, 其中有bucket和key的信息 """ return json.dumps(self._extract_key_info(request))
[ "def", "file_path", "(", "self", ",", "request", ",", "response", "=", "None", ",", "info", "=", "None", ")", ":", "return", "json", ".", "dumps", "(", "self", ".", "_extract_key_info", "(", "request", ")", ")" ]
抓取到的资源存放到七牛的时候, 应该采用什么样的key? 返回的path是一个JSON字符串, 其中有bucket和key的信息
[ "抓取到的资源存放到七牛的时候", "应该采用什么样的key?", "返回的path是一个JSON字符串", "其中有bucket和key的信息" ]
python
train
nuagenetworks/bambou
bambou/nurest_object.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_object.py#L912-L941
def create_child(self, nurest_object, response_choice=None, async=False, callback=None, commit=True): """ Add given nurest_object to the current object For example, to add a child into a parent, you can call parent.create_child(nurest_object=child) Args: nurest_object (bambou.NURESTObject): the NURESTObject object to add response_choice (int): Automatically send a response choice when confirmation is needed async (bool): should the request be done asynchronously or not callback (function): callback containing the object and the connection Returns: Returns the object and connection (object, connection) Example: >>> entity = NUEntity(name="Super Entity") >>> parent_entity.create_child(entity) # the new entity as been created in the parent_entity """ # if nurest_object.id: # raise InternalConsitencyError("Cannot create a child that already has an ID: %s." % nurest_object) return self._manage_child_object(nurest_object=nurest_object, async=async, method=HTTP_METHOD_POST, callback=callback, handler=self._did_create_child, response_choice=response_choice, commit=commit)
[ "def", "create_child", "(", "self", ",", "nurest_object", ",", "response_choice", "=", "None", ",", "async", "=", "False", ",", "callback", "=", "None", ",", "commit", "=", "True", ")", ":", "# if nurest_object.id:", "# raise InternalConsitencyError(\"Cannot cre...
Add given nurest_object to the current object For example, to add a child into a parent, you can call parent.create_child(nurest_object=child) Args: nurest_object (bambou.NURESTObject): the NURESTObject object to add response_choice (int): Automatically send a response choice when confirmation is needed async (bool): should the request be done asynchronously or not callback (function): callback containing the object and the connection Returns: Returns the object and connection (object, connection) Example: >>> entity = NUEntity(name="Super Entity") >>> parent_entity.create_child(entity) # the new entity as been created in the parent_entity
[ "Add", "given", "nurest_object", "to", "the", "current", "object" ]
python
train
jaraco/keyrings.alt
keyrings/alt/Gnome.py
https://github.com/jaraco/keyrings.alt/blob/5b71223d12bf9ac6abd05b1b395f1efccb5ea660/keyrings/alt/Gnome.py#L39-L61
def _find_passwords(self, service, username, deleting=False): """Get password of the username for the service """ passwords = [] service = self._safe_string(service) username = self._safe_string(username) for attrs_tuple in (('username', 'service'), ('user', 'domain')): attrs = GnomeKeyring.Attribute.list_new() GnomeKeyring.Attribute.list_append_string( attrs, attrs_tuple[0], username) GnomeKeyring.Attribute.list_append_string( attrs, attrs_tuple[1], service) result, items = GnomeKeyring.find_items_sync( GnomeKeyring.ItemType.NETWORK_PASSWORD, attrs) if result == GnomeKeyring.Result.OK: passwords += items elif deleting: if result == GnomeKeyring.Result.CANCELLED: raise PasswordDeleteError("Cancelled by user") elif result != GnomeKeyring.Result.NO_MATCH: raise PasswordDeleteError(result.value_name) return passwords
[ "def", "_find_passwords", "(", "self", ",", "service", ",", "username", ",", "deleting", "=", "False", ")", ":", "passwords", "=", "[", "]", "service", "=", "self", ".", "_safe_string", "(", "service", ")", "username", "=", "self", ".", "_safe_string", "...
Get password of the username for the service
[ "Get", "password", "of", "the", "username", "for", "the", "service" ]
python
train
mozilla/build-mar
src/mardor/utils.py
https://github.com/mozilla/build-mar/blob/d8c3b3469e55654d31f430cb343fd89392196c4e/src/mardor/utils.py#L145-L166
def xz_compress_stream(src): """Compress data from `src`. Args: src (iterable): iterable that yields blocks of data to compress Yields: blocks of compressed data """ compressor = lzma.LZMACompressor( check=lzma.CHECK_CRC64, filters=[ {"id": lzma.FILTER_X86}, {"id": lzma.FILTER_LZMA2, "preset": lzma.PRESET_DEFAULT}, ]) for block in src: encoded = compressor.compress(block) if encoded: yield encoded yield compressor.flush()
[ "def", "xz_compress_stream", "(", "src", ")", ":", "compressor", "=", "lzma", ".", "LZMACompressor", "(", "check", "=", "lzma", ".", "CHECK_CRC64", ",", "filters", "=", "[", "{", "\"id\"", ":", "lzma", ".", "FILTER_X86", "}", ",", "{", "\"id\"", ":", "...
Compress data from `src`. Args: src (iterable): iterable that yields blocks of data to compress Yields: blocks of compressed data
[ "Compress", "data", "from", "src", "." ]
python
train
DataDog/integrations-core
haproxy/datadog_checks/haproxy/haproxy.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/haproxy/datadog_checks/haproxy/haproxy.py#L639-L673
def _process_event(self, data, url, services_incl_filter=None, services_excl_filter=None, custom_tags=None): ''' Main event processing loop. An event will be created for a service status change. Service checks on the server side can be used to provide the same functionality ''' hostname = data['svname'] service_name = data['pxname'] key = "%s:%s" % (hostname, service_name) status = self.host_status[url][key] custom_tags = [] if custom_tags is None else custom_tags if self._is_service_excl_filtered(service_name, services_incl_filter, services_excl_filter): return data_status = data['status'] if status is None: self.host_status[url][key] = data_status return if status != data_status and data_status in ('up', 'down'): # If the status of a host has changed, we trigger an event try: lastchg = int(data['lastchg']) except Exception: lastchg = 0 # Create the event object ev = self._create_event( data_status, hostname, lastchg, service_name, data['back_or_front'], custom_tags=custom_tags ) self.event(ev) # Store this host status so we can check against it later self.host_status[url][key] = data_status
[ "def", "_process_event", "(", "self", ",", "data", ",", "url", ",", "services_incl_filter", "=", "None", ",", "services_excl_filter", "=", "None", ",", "custom_tags", "=", "None", ")", ":", "hostname", "=", "data", "[", "'svname'", "]", "service_name", "=", ...
Main event processing loop. An event will be created for a service status change. Service checks on the server side can be used to provide the same functionality
[ "Main", "event", "processing", "loop", ".", "An", "event", "will", "be", "created", "for", "a", "service", "status", "change", ".", "Service", "checks", "on", "the", "server", "side", "can", "be", "used", "to", "provide", "the", "same", "functionality" ]
python
train
gwastro/pycbc-glue
pycbc_glue/offsetvector.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/offsetvector.py#L240-L256
def fromdeltas(cls, deltas): """ Construct an offsetvector from a dictionary of offset deltas as returned by the .deltas attribute. Example: >>> x = offsetvector({"H1": 0, "L1": 10, "V1": 20}) >>> y = offsetvector.fromdeltas(x.deltas) >>> y offsetvector({'V1': 20, 'H1': 0, 'L1': 10}) >>> y == x True See also .deltas, .fromkeys() """ return cls((key, value) for (refkey, key), value in deltas.items())
[ "def", "fromdeltas", "(", "cls", ",", "deltas", ")", ":", "return", "cls", "(", "(", "key", ",", "value", ")", "for", "(", "refkey", ",", "key", ")", ",", "value", "in", "deltas", ".", "items", "(", ")", ")" ]
Construct an offsetvector from a dictionary of offset deltas as returned by the .deltas attribute. Example: >>> x = offsetvector({"H1": 0, "L1": 10, "V1": 20}) >>> y = offsetvector.fromdeltas(x.deltas) >>> y offsetvector({'V1': 20, 'H1': 0, 'L1': 10}) >>> y == x True See also .deltas, .fromkeys()
[ "Construct", "an", "offsetvector", "from", "a", "dictionary", "of", "offset", "deltas", "as", "returned", "by", "the", ".", "deltas", "attribute", "." ]
python
train
google/sentencepiece
tensorflow/tf_sentencepiece/sentencepiece_processor_ops.py
https://github.com/google/sentencepiece/blob/ffa2c8218f7afbb06d0c1bb87c82efb6867db41a/tensorflow/tf_sentencepiece/sentencepiece_processor_ops.py#L79-L93
def id_to_piece(input, model_file=None, model_proto=None, name=None): """Converts vocabulary id into piece. Args: input: An arbitrary tensor of int32. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of string with the same shape as input. """ return _gen_sentencepiece_processor_op.sentencepiece_id_to_piece( input, model_file=model_file, model_proto=model_proto, name=name)
[ "def", "id_to_piece", "(", "input", ",", "model_file", "=", "None", ",", "model_proto", "=", "None", ",", "name", "=", "None", ")", ":", "return", "_gen_sentencepiece_processor_op", ".", "sentencepiece_id_to_piece", "(", "input", ",", "model_file", "=", "model_f...
Converts vocabulary id into piece. Args: input: An arbitrary tensor of int32. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of string with the same shape as input.
[ "Converts", "vocabulary", "id", "into", "piece", "." ]
python
train
mitsei/dlkit
dlkit/json_/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L1648-L1666
def get_objective_ids_by_objective_bank(self, objective_bank_id): """Gets the list of ``Objective`` ``Ids`` associated with an ``ObjectiveBank``. arg: objective_bank_id (osid.id.Id): ``Id`` of the ``ObjectiveBank`` return: (osid.id.IdList) - list of related objectives raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_resource_ids_by_bin id_list = [] for objective in self.get_objectives_by_objective_bank(objective_bank_id): id_list.append(objective.get_id()) return IdList(id_list)
[ "def", "get_objective_ids_by_objective_bank", "(", "self", ",", "objective_bank_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_resource_ids_by_bin", "id_list", "=", "[", "]", "for", "objective", "in", "self", ".", "get_objectives_by_ob...
Gets the list of ``Objective`` ``Ids`` associated with an ``ObjectiveBank``. arg: objective_bank_id (osid.id.Id): ``Id`` of the ``ObjectiveBank`` return: (osid.id.IdList) - list of related objectives raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "list", "of", "Objective", "Ids", "associated", "with", "an", "ObjectiveBank", "." ]
python
train
eqcorrscan/EQcorrscan
eqcorrscan/utils/seismo_logs.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/seismo_logs.py#L84-L136
def rt_location_log(logfile): """ Extract location information from a RefTek raw log-file. Function to read a specific RefTek RT130 log-file and find all location information. :type logfile: str :param logfile: The logfile to look in :returns: list of tuples of lat, lon, elevation in decimal degrees and km. :rtype: list """ if os.name == 'nt': f = open(logfile, 'rb') else: f = open(logfile, 'rb') locations = [] for line_binary in f: try: line = line_binary.decode("utf8", "ignore") except UnicodeDecodeError: warnings.warn('Cannot decode line, skipping') print(line_binary) continue match = re.search("GPS: POSITION:", line) if match: # Line is of form: # jjj:hh:mm:ss GPS: POSITION: xDD:MM:SS.SS xDDD:MM:SS.SS xMMMMMMM loc = line[match.end() + 1:].rstrip().split(' ') lat_sign = loc[0][0] lat = loc[0][1:].split(':') lat = int(lat[0]) + (int(lat[1]) / 60.0) + (float(lat[2]) / 3600.0) if lat_sign == 'S': lat *= -1 lon_sign = loc[1][0] lon = loc[1][1:].split(':') lon = int(lon[0]) + (int(lon[1]) / 60.0) + (float(lon[2]) / 3600.0) if lon_sign == 'W': lon *= -1 elev_sign = loc[2][0] elev_unit = loc[2][-1] if not elev_unit == 'M': raise NotImplementedError('Elevation is not in M: unit=' + elev_unit) elev = int(loc[2][1:-1]) if elev_sign == '-': elev *= -1 # Convert to km elev /= 1000 locations.append((lat, lon, elev)) f.close() return locations
[ "def", "rt_location_log", "(", "logfile", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "f", "=", "open", "(", "logfile", ",", "'rb'", ")", "else", ":", "f", "=", "open", "(", "logfile", ",", "'rb'", ")", "locations", "=", "[", "]", "for"...
Extract location information from a RefTek raw log-file. Function to read a specific RefTek RT130 log-file and find all location information. :type logfile: str :param logfile: The logfile to look in :returns: list of tuples of lat, lon, elevation in decimal degrees and km. :rtype: list
[ "Extract", "location", "information", "from", "a", "RefTek", "raw", "log", "-", "file", "." ]
python
train
UCL-INGI/INGInious
inginious/common/course_factory.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/common/course_factory.py#L97-L110
def _get_course_descriptor_path(self, courseid): """ :param courseid: the course id of the course :raise InvalidNameException, CourseNotFoundException :return: the path to the descriptor of the course """ if not id_checker(courseid): raise InvalidNameException("Course with invalid name: " + courseid) course_fs = self.get_course_fs(courseid) if course_fs.exists("course.yaml"): return courseid+"/course.yaml" if course_fs.exists("course.json"): return courseid+"/course.json" raise CourseNotFoundException()
[ "def", "_get_course_descriptor_path", "(", "self", ",", "courseid", ")", ":", "if", "not", "id_checker", "(", "courseid", ")", ":", "raise", "InvalidNameException", "(", "\"Course with invalid name: \"", "+", "courseid", ")", "course_fs", "=", "self", ".", "get_co...
:param courseid: the course id of the course :raise InvalidNameException, CourseNotFoundException :return: the path to the descriptor of the course
[ ":", "param", "courseid", ":", "the", "course", "id", "of", "the", "course", ":", "raise", "InvalidNameException", "CourseNotFoundException", ":", "return", ":", "the", "path", "to", "the", "descriptor", "of", "the", "course" ]
python
train
pyviz/holoviews
holoviews/element/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/element/util.py#L219-L229
def circular_layout(nodes): """ Lay out nodes on a circle and add node index. """ N = len(nodes) if not N: return ([], [], []) circ = np.pi/N*np.arange(N)*2 x = np.cos(circ) y = np.sin(circ) return (x, y, nodes)
[ "def", "circular_layout", "(", "nodes", ")", ":", "N", "=", "len", "(", "nodes", ")", "if", "not", "N", ":", "return", "(", "[", "]", ",", "[", "]", ",", "[", "]", ")", "circ", "=", "np", ".", "pi", "/", "N", "*", "np", ".", "arange", "(", ...
Lay out nodes on a circle and add node index.
[ "Lay", "out", "nodes", "on", "a", "circle", "and", "add", "node", "index", "." ]
python
train
QInfer/python-qinfer
src/qinfer/smc.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/smc.py#L672-L716
def posterior_marginal(self, idx_param=0, res=100, smoothing=0, range_min=None, range_max=None): """ Returns an estimate of the marginal distribution of a given model parameter, based on taking the derivative of the interpolated cdf. :param int idx_param: Index of parameter to be marginalized. :param int res1: Resolution of of the axis. :param float smoothing: Standard deviation of the Gaussian kernel used to smooth; same units as parameter. :param float range_min: Minimum range of the output axis. :param float range_max: Maximum range of the output axis. .. seealso:: :meth:`SMCUpdater.plot_posterior_marginal` """ # We need to sort the particles to get cumsum to make sense. # interp1d would do it anyways (using argsort, too), so it's not a waste s = np.argsort(self.particle_locations[:,idx_param]) locs = self.particle_locations[s,idx_param] # relevant axis discretization r_min = np.min(locs) if range_min is None else range_min r_max = np.max(locs) if range_max is None else range_max ps = np.linspace(r_min, r_max, res) # interpolate the cdf of the marginal distribution using cumsum interp = scipy.interpolate.interp1d( np.append(locs, r_max + np.abs(r_max-r_min)), np.append(np.cumsum(self.particle_weights[s]), 1), #kind='cubic', bounds_error=False, fill_value=0, assume_sorted=True ) # get distribution from derivative of cdf, and smooth it pr = np.gradient(interp(ps), ps[1]-ps[0]) if smoothing > 0: gaussian_filter1d(pr, res*smoothing/(np.abs(r_max-r_min)), output=pr) del interp return ps, pr
[ "def", "posterior_marginal", "(", "self", ",", "idx_param", "=", "0", ",", "res", "=", "100", ",", "smoothing", "=", "0", ",", "range_min", "=", "None", ",", "range_max", "=", "None", ")", ":", "# We need to sort the particles to get cumsum to make sense.", "# i...
Returns an estimate of the marginal distribution of a given model parameter, based on taking the derivative of the interpolated cdf. :param int idx_param: Index of parameter to be marginalized. :param int res1: Resolution of of the axis. :param float smoothing: Standard deviation of the Gaussian kernel used to smooth; same units as parameter. :param float range_min: Minimum range of the output axis. :param float range_max: Maximum range of the output axis. .. seealso:: :meth:`SMCUpdater.plot_posterior_marginal`
[ "Returns", "an", "estimate", "of", "the", "marginal", "distribution", "of", "a", "given", "model", "parameter", "based", "on", "taking", "the", "derivative", "of", "the", "interpolated", "cdf", "." ]
python
train
juicer/juicer
juicer/admin/JuicerAdmin.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/admin/JuicerAdmin.py#L412-L437
def delete_user(self, login=None, envs=[], query='/users/'): """ `login` - Login or username of user to delete Delete user in specified environments """ juicer.utils.Log.log_debug("Delete User: %s", login) for env in envs: if envs.index(env) != 0 and juicer.utils.env_same_host(env, envs[envs.index(env) - 1]): juicer.utils.Log.log_info("environment `%s` shares a host with environment `%s`... skipping!", (env, envs[envs.index(env) - 1])) continue elif not juicer.utils.user_exists_p(login, self.connectors[env]): juicer.utils.Log.log_info("user `%s` doesn't exist in %s... skipping!", (login, env)) continue else: url = "%s%s/" % (query, login) _r = self.connectors[env].delete(url) if _r.status_code == Constants.PULP_DELETE_OK: juicer.utils.Log.log_info("deleted user `%s` in %s", (login, env)) else: _r.raise_for_status() return True
[ "def", "delete_user", "(", "self", ",", "login", "=", "None", ",", "envs", "=", "[", "]", ",", "query", "=", "'/users/'", ")", ":", "juicer", ".", "utils", ".", "Log", ".", "log_debug", "(", "\"Delete User: %s\"", ",", "login", ")", "for", "env", "in...
`login` - Login or username of user to delete Delete user in specified environments
[ "login", "-", "Login", "or", "username", "of", "user", "to", "delete" ]
python
train
wright-group/WrightTools
WrightTools/_dataset.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/_dataset.py#L179-L188
def units(self, value): """Set units.""" if value is None: if "units" in self.attrs.keys(): self.attrs.pop("units") else: try: self.attrs["units"] = value except AttributeError: self.attrs["units"] = value
[ "def", "units", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "if", "\"units\"", "in", "self", ".", "attrs", ".", "keys", "(", ")", ":", "self", ".", "attrs", ".", "pop", "(", "\"units\"", ")", "else", ":", "try", ":", ...
Set units.
[ "Set", "units", "." ]
python
train
google/mobly
mobly/controllers/monsoon.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/monsoon.py#L491-L495
def total_charge(self): """Total charged used in the unit of mAh. """ charge = (sum(self.data_points) / self.hz) * 1000 / 3600 return round(charge, self.sr)
[ "def", "total_charge", "(", "self", ")", ":", "charge", "=", "(", "sum", "(", "self", ".", "data_points", ")", "/", "self", ".", "hz", ")", "*", "1000", "/", "3600", "return", "round", "(", "charge", ",", "self", ".", "sr", ")" ]
Total charged used in the unit of mAh.
[ "Total", "charged", "used", "in", "the", "unit", "of", "mAh", "." ]
python
train
toastdriven/alligator
alligator/workers.py
https://github.com/toastdriven/alligator/blob/f18bcb35b350fc6b0886393f5246d69c892b36c7/alligator/workers.py#L67-L75
def interrupt(self): """ Prints an interrupt message to stdout. """ ident = self.ident() print('{} for "{}" saw interrupt. Finishing in-progress task.'.format( ident, self.to_consume ))
[ "def", "interrupt", "(", "self", ")", ":", "ident", "=", "self", ".", "ident", "(", ")", "print", "(", "'{} for \"{}\" saw interrupt. Finishing in-progress task.'", ".", "format", "(", "ident", ",", "self", ".", "to_consume", ")", ")" ]
Prints an interrupt message to stdout.
[ "Prints", "an", "interrupt", "message", "to", "stdout", "." ]
python
train
rocky/python-filecache
pyficache/main.py
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L473-L487
def maxline(filename, use_cache_only=False): """Return the maximum line number filename after taking into account line remapping. If no remapping then this is the same as size""" if filename not in file2file_remap_lines: return size(filename, use_cache_only) max_lineno = -1 remap_line_entry = file2file_remap_lines.get(filename) if not remap_line_entry: return size(filename, use_cache_only) for t in remap_line_entry.from_to_pairs: max_lineno = max(max_lineno, t[1]) if max_lineno == -1: return size(filename, use_cache_only) else: return max_lineno
[ "def", "maxline", "(", "filename", ",", "use_cache_only", "=", "False", ")", ":", "if", "filename", "not", "in", "file2file_remap_lines", ":", "return", "size", "(", "filename", ",", "use_cache_only", ")", "max_lineno", "=", "-", "1", "remap_line_entry", "=", ...
Return the maximum line number filename after taking into account line remapping. If no remapping then this is the same as size
[ "Return", "the", "maximum", "line", "number", "filename", "after", "taking", "into", "account", "line", "remapping", ".", "If", "no", "remapping", "then", "this", "is", "the", "same", "as", "size" ]
python
train
iotile/coretools
iotilesensorgraph/iotile/sg/sim/trace.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/sim/trace.py#L39-L52
def save(self, out_path): """Save an ascii representation of this simulation trace. Args: out_path (str): The output path to save this simulation trace. """ out = { 'selectors': [str(x) for x in self.selectors], 'trace': [{'stream': str(DataStream.FromEncoded(x.stream)), 'time': x.raw_time, 'value': x.value, 'reading_id': x.reading_id} for x in self] } with open(out_path, "wb") as outfile: json.dump(out, outfile, indent=4)
[ "def", "save", "(", "self", ",", "out_path", ")", ":", "out", "=", "{", "'selectors'", ":", "[", "str", "(", "x", ")", "for", "x", "in", "self", ".", "selectors", "]", ",", "'trace'", ":", "[", "{", "'stream'", ":", "str", "(", "DataStream", ".",...
Save an ascii representation of this simulation trace. Args: out_path (str): The output path to save this simulation trace.
[ "Save", "an", "ascii", "representation", "of", "this", "simulation", "trace", "." ]
python
train
google/apitools
apitools/gen/gen_client_lib.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/gen_client_lib.py#L186-L193
def WriteIntermediateInit(self, out): """Write a simple __init__.py for an intermediate directory.""" printer = self._GetPrinter(out) printer('#!/usr/bin/env python') printer('"""Shared __init__.py for apitools."""') printer() printer('from pkgutil import extend_path') printer('__path__ = extend_path(__path__, __name__)')
[ "def", "WriteIntermediateInit", "(", "self", ",", "out", ")", ":", "printer", "=", "self", ".", "_GetPrinter", "(", "out", ")", "printer", "(", "'#!/usr/bin/env python'", ")", "printer", "(", "'\"\"\"Shared __init__.py for apitools.\"\"\"'", ")", "printer", "(", "...
Write a simple __init__.py for an intermediate directory.
[ "Write", "a", "simple", "__init__", ".", "py", "for", "an", "intermediate", "directory", "." ]
python
train
inasafe/inasafe
safe/report/extractors/analysis_question.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/extractors/analysis_question.py#L47-L81
def multi_exposure_analysis_question_extractor( impact_report, component_metadata): """Extracting analysis question from the impact layer. :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.3 """ context = {} extra_args = component_metadata.extra_args multi_exposure = impact_report.multi_exposure_impact_function provenance = multi_exposure.provenance header = resolve_from_dictionary(extra_args, 'header') analysis_questions = [] analysis_question = provenance['analysis_question'] analysis_questions.append(analysis_question) context['component_key'] = component_metadata.key context['header'] = header context['analysis_questions'] = analysis_questions return context
[ "def", "multi_exposure_analysis_question_extractor", "(", "impact_report", ",", "component_metadata", ")", ":", "context", "=", "{", "}", "extra_args", "=", "component_metadata", ".", "extra_args", "multi_exposure", "=", "impact_report", ".", "multi_exposure_impact_function...
Extracting analysis question from the impact layer. :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.3
[ "Extracting", "analysis", "question", "from", "the", "impact", "layer", "." ]
python
train
chrislit/abydos
abydos/fingerprint/_count.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/fingerprint/_count.py#L44-L93
def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG): """Return the count fingerprint. Parameters ---------- word : str The word to fingerprint n_bits : int Number of bits in the fingerprint returned most_common : list The most common tokens in the target language, ordered by frequency Returns ------- int The count fingerprint Examples -------- >>> cf = Count() >>> bin(cf.fingerprint('hat')) '0b1010000000001' >>> bin(cf.fingerprint('niall')) '0b10001010000' >>> bin(cf.fingerprint('colin')) '0b101010000' >>> bin(cf.fingerprint('atcg')) '0b1010000000000' >>> bin(cf.fingerprint('entreatment')) '0b1111010000100000' """ if n_bits % 2: n_bits += 1 word = Counter(word) fingerprint = 0 for letter in most_common: if n_bits: fingerprint <<= 2 fingerprint += word[letter] & 3 n_bits -= 2 else: break if n_bits: fingerprint <<= n_bits return fingerprint
[ "def", "fingerprint", "(", "self", ",", "word", ",", "n_bits", "=", "16", ",", "most_common", "=", "MOST_COMMON_LETTERS_CG", ")", ":", "if", "n_bits", "%", "2", ":", "n_bits", "+=", "1", "word", "=", "Counter", "(", "word", ")", "fingerprint", "=", "0"...
Return the count fingerprint. Parameters ---------- word : str The word to fingerprint n_bits : int Number of bits in the fingerprint returned most_common : list The most common tokens in the target language, ordered by frequency Returns ------- int The count fingerprint Examples -------- >>> cf = Count() >>> bin(cf.fingerprint('hat')) '0b1010000000001' >>> bin(cf.fingerprint('niall')) '0b10001010000' >>> bin(cf.fingerprint('colin')) '0b101010000' >>> bin(cf.fingerprint('atcg')) '0b1010000000000' >>> bin(cf.fingerprint('entreatment')) '0b1111010000100000'
[ "Return", "the", "count", "fingerprint", "." ]
python
valid
phoebe-project/phoebe2
phoebe/backend/universe.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/universe.py#L933-L1081
def update_position(self, time, xs, ys, zs, vxs, vys, vzs, ethetas, elongans, eincls, ds=None, Fs=None, ignore_effects=False, component_com_x=None, **kwargs): """ Update the position of the star into its orbit :parameter float time: the current time :parameter list xs: a list/array of x-positions of ALL COMPONENTS in the :class:`System` :parameter list ys: a list/array of y-positions of ALL COMPONENTS in the :class:`System` :parameter list zs: a list/array of z-positions of ALL COMPONENTS in the :class:`System` :parameter list vxs: a list/array of x-velocities of ALL COMPONENTS in the :class:`System` :parameter list vys: a list/array of y-velocities of ALL COMPONENTS in the :class:`System` :parameter list vzs: a list/array of z-velocities of ALL COMPONENTS in the :class:`System` :parameter list ethetas: a list/array of euler-thetas of ALL COMPONENTS in the :class:`System` :parameter list elongans: a list/array of euler-longans of ALL COMPONENTS in the :class:`System` :parameter list eincls: a list/array of euler-incls of ALL COMPONENTS in the :class:`System` :parameter list ds: (optional) a list/array of instantaneous distances of ALL COMPONENTS in the :class:`System` :parameter list Fs: (optional) a list/array of instantaneous syncpars of ALL COMPONENTS in the :class:`System` """ self.reset_time(time, ethetas[self.ind_self], elongans[self.ind_self], eincls[self.ind_self]) #-- Get current position/euler information # TODO: get rid of this ugly _value stuff pos = (_value(xs[self.ind_self]), _value(ys[self.ind_self]), _value(zs[self.ind_self])) vel = (_value(vxs[self.ind_self_vel]), _value(vys[self.ind_self_vel]), _value(vzs[self.ind_self_vel])) euler = (_value(ethetas[self.ind_self]), _value(elongans[self.ind_self]), _value(eincls[self.ind_self])) euler_vel = (_value(ethetas[self.ind_self_vel]), _value(elongans[self.ind_self_vel]), _value(eincls[self.ind_self_vel])) # TODO: eventually pass etheta to has_standard_mesh # TODO: implement reprojection as an option based 
on a nearby standard? if self.needs_remesh or not self.has_standard_mesh(): logger.debug("{}.update_position: remeshing at t={}".format(self.component, time)) # track whether we did the remesh or not, so we know if we should # compute local quantities if not otherwise necessary did_remesh = True # TODO: allow time dependence on d and F from dynamics # d = _value(ds[self.ind_self]) # F = _value(Fs[self.ind_self]) new_mesh_dict, scale = self._build_mesh(mesh_method=self.mesh_method) if self.mesh_method != 'wd': new_mesh_dict = self._offset_mesh(new_mesh_dict) # We only need the gradients where we'll compute local # quantities which, for a marching mesh, is at the vertices. new_mesh_dict['normgrads'] = new_mesh_dict.pop('vnormgrads', np.array([])) # And lastly, let's fill the velocities column - with zeros # at each of the vertices new_mesh_dict['velocities'] = np.zeros(new_mesh_dict['vertices'].shape if self.mesh_method != 'wd' else new_mesh_dict['centers'].shape) new_mesh_dict['tareas'] = np.array([]) # TODO: need to be very careful about self.sma vs self._scale - maybe need to make a self._instantaneous_scale??? # self._scale = scale if not self.has_standard_mesh(): # then we only computed this because we didn't already have a # standard_mesh... 
so let's save this for future use # TODO: eventually pass etheta to save_as_standard_mesh protomesh = mesh.ProtoMesh(**new_mesh_dict) self.save_as_standard_mesh(protomesh) # Here we'll build a scaledprotomesh directly from the newly # marched mesh # NOTE that we're using scale from the new # mesh rather than self._scale since the instantaneous separation # has likely changed since periastron scaledprotomesh = mesh.ScaledProtoMesh(scale=scale, **new_mesh_dict) else: logger.debug("{}.update_position: accessing standard mesh at t={}".format(self.component, self.time)) # track whether we did the remesh or not, so we know if we should # compute local quantities if not otherwise necessary did_remesh = False # We still need to go through scaledprotomesh instead of directly # to mesh since features may want to process the body-centric # coordinates before placing in orbit # TODO: eventually pass etheta to get_standard_mesh scaledprotomesh = self.get_standard_mesh(scaled=True) # TODO: can we avoid an extra copy here? if not ignore_effects and len(self.features): logger.debug("{}.update_position: processing features at t={}".format(self.component, self.time)) # First allow features to edit the coords_for_computations (pvertices). # Changes here WILL affect future computations for logg, teff, # intensities, etc. Note that these WILL NOT affect the # coords_for_observations automatically - those should probably be # perturbed as well, unless there is a good reason not to. 
for feature in self.features: # NOTE: these are ALWAYS done on the protomesh coords_for_observations = feature.process_coords_for_computations(scaledprotomesh.coords_for_computations, s=self.polar_direction_xyz, t=self.time) if scaledprotomesh._compute_at_vertices: scaledprotomesh.update_columns(pvertices=coords_for_observations) else: scaledprotomesh.update_columns(centers=coords_for_observations) raise NotImplementedError("areas are not updated for changed mesh") for feature in self.features: coords_for_observations = feature.process_coords_for_observations(scaledprotomesh.coords_for_computations, scaledprotomesh.coords_for_observations, s=self.polar_direction_xyz, t=self.time) if scaledprotomesh._compute_at_vertices: scaledprotomesh.update_columns(vertices=coords_for_observations) # TODO [DONE?]: centers either need to be supported or we need to report # vertices in the frontend as x, y, z instead of centers updated_props = libphoebe.mesh_properties(scaledprotomesh.vertices, scaledprotomesh.triangles, tnormals=True, areas=True) scaledprotomesh.update_columns(**updated_props) else: scaledprotomesh.update_columns(centers=coords_for_observations) raise NotImplementedError("areas are not updated for changed mesh") # TODO NOW [OPTIMIZE]: get rid of the deepcopy here - but without it the # mesh velocities build-up and do terrible things. It may be possible # to just clear the velocities in get_standard_mesh()? logger.debug("{}.update_position: placing in orbit, Mesh.from_scaledproto at t={}".format(self.component, self.time)) self._mesh = mesh.Mesh.from_scaledproto(scaledprotomesh.copy(), pos, vel, euler, euler_vel, self.polar_direction_xyz*self.freq_rot*self._scale, component_com_x) # Lastly, we'll recompute physical quantities (not observables) if # needed for this time-step. # TODO [DONE?]: make sure features smartly trigger needs_recompute_instantaneous # TODO: get rid of the or True here... 
the problem is that we're saving the standard mesh before filling local quantities if self.needs_recompute_instantaneous or did_remesh: logger.debug("{}.update_position: calling compute_local_quantities at t={}".format(self.component, self.time)) self.compute_local_quantities(xs, ys, zs, ignore_effects) return
[ "def", "update_position", "(", "self", ",", "time", ",", "xs", ",", "ys", ",", "zs", ",", "vxs", ",", "vys", ",", "vzs", ",", "ethetas", ",", "elongans", ",", "eincls", ",", "ds", "=", "None", ",", "Fs", "=", "None", ",", "ignore_effects", "=", "...
Update the position of the star into its orbit :parameter float time: the current time :parameter list xs: a list/array of x-positions of ALL COMPONENTS in the :class:`System` :parameter list ys: a list/array of y-positions of ALL COMPONENTS in the :class:`System` :parameter list zs: a list/array of z-positions of ALL COMPONENTS in the :class:`System` :parameter list vxs: a list/array of x-velocities of ALL COMPONENTS in the :class:`System` :parameter list vys: a list/array of y-velocities of ALL COMPONENTS in the :class:`System` :parameter list vzs: a list/array of z-velocities of ALL COMPONENTS in the :class:`System` :parameter list ethetas: a list/array of euler-thetas of ALL COMPONENTS in the :class:`System` :parameter list elongans: a list/array of euler-longans of ALL COMPONENTS in the :class:`System` :parameter list eincls: a list/array of euler-incls of ALL COMPONENTS in the :class:`System` :parameter list ds: (optional) a list/array of instantaneous distances of ALL COMPONENTS in the :class:`System` :parameter list Fs: (optional) a list/array of instantaneous syncpars of ALL COMPONENTS in the :class:`System`
[ "Update", "the", "position", "of", "the", "star", "into", "its", "orbit" ]
python
train
hatemile/hatemile-for-python
hatemile/implementation/navig.py
https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/navig.py#L165-L196
def _generate_list_skippers(self): """ Generate the list of skippers of page. :return: The list of skippers of page. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement """ container = self.parser.find( '#' + AccessibleNavigationImplementation.ID_CONTAINER_SKIPPERS ).first_result() html_list = None if container is None: local = self.parser.find('body').first_result() if local is not None: container = self.parser.create_element('div') container.set_attribute( 'id', AccessibleNavigationImplementation.ID_CONTAINER_SKIPPERS ) local.prepend_element(container) if container is not None: html_list = self.parser.find(container).find_children( 'ul' ).first_result() if html_list is None: html_list = self.parser.create_element('ul') container.append_element(html_list) self.list_skippers_added = True return html_list
[ "def", "_generate_list_skippers", "(", "self", ")", ":", "container", "=", "self", ".", "parser", ".", "find", "(", "'#'", "+", "AccessibleNavigationImplementation", ".", "ID_CONTAINER_SKIPPERS", ")", ".", "first_result", "(", ")", "html_list", "=", "None", "if"...
Generate the list of skippers of page. :return: The list of skippers of page. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
[ "Generate", "the", "list", "of", "skippers", "of", "page", "." ]
python
train
iotile/coretools
iotilesensorgraph/iotile/sg/walker.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/walker.py#L120-L179
def seek(self, value, target="offset"): """Seek this stream to a specific offset or reading id. There are two modes of use. You can seek to a specific reading id, which means the walker will be positioned exactly at the reading pointed to by the reading ID. If the reading id cannot be found an exception will be raised. The reading id can be found but corresponds to a reading that is not selected by this walker, the walker will be moved to point at the first reading after that reading and False will be returned. If target=="offset", the walker will be positioned at the specified offset in the sensor log. It will also update the count of available readings based on that new location so that the count remains correct. The offset does not need to correspond to a reading selected by this walker. If offset does not point to a selected reading, the effective behavior will be as if the walker pointed to the next selected reading after `offset`. Args: value (int): The identifier to seek, either an offset or a reading id. target (str): The type of thing to seek. Can be offset or id. If id is given, then a reading with the given ID will be searched for. If offset is given then the walker will be positioned at the given offset. Returns: bool: True if an exact match was found, False otherwise. An exact match means that the offset or reading ID existed and corresponded to a reading selected by this walker. An inexact match means that the offset or reading ID existed but corresponded to reading that was not selected by this walker. If the offset or reading ID could not be found an Exception is thrown instead. Raises: ArgumentError: target is an invalid string, must be offset or id. UnresolvedIdentifierError: the desired offset or reading id could not be found. 
""" if target not in (u'offset', u'id'): raise ArgumentError("You must specify target as either offset or id", target=target) if target == u'offset': self._verify_offset(value) self.offset = value else: self.offset = self._find_id(value) self._count = self.engine.count_matching(self.selector, offset=self.offset) curr = self.engine.get(self.storage_type, self.offset) return self.matches(DataStream.FromEncoded(curr.stream))
[ "def", "seek", "(", "self", ",", "value", ",", "target", "=", "\"offset\"", ")", ":", "if", "target", "not", "in", "(", "u'offset'", ",", "u'id'", ")", ":", "raise", "ArgumentError", "(", "\"You must specify target as either offset or id\"", ",", "target", "="...
Seek this stream to a specific offset or reading id. There are two modes of use. You can seek to a specific reading id, which means the walker will be positioned exactly at the reading pointed to by the reading ID. If the reading id cannot be found an exception will be raised. The reading id can be found but corresponds to a reading that is not selected by this walker, the walker will be moved to point at the first reading after that reading and False will be returned. If target=="offset", the walker will be positioned at the specified offset in the sensor log. It will also update the count of available readings based on that new location so that the count remains correct. The offset does not need to correspond to a reading selected by this walker. If offset does not point to a selected reading, the effective behavior will be as if the walker pointed to the next selected reading after `offset`. Args: value (int): The identifier to seek, either an offset or a reading id. target (str): The type of thing to seek. Can be offset or id. If id is given, then a reading with the given ID will be searched for. If offset is given then the walker will be positioned at the given offset. Returns: bool: True if an exact match was found, False otherwise. An exact match means that the offset or reading ID existed and corresponded to a reading selected by this walker. An inexact match means that the offset or reading ID existed but corresponded to reading that was not selected by this walker. If the offset or reading ID could not be found an Exception is thrown instead. Raises: ArgumentError: target is an invalid string, must be offset or id. UnresolvedIdentifierError: the desired offset or reading id could not be found.
[ "Seek", "this", "stream", "to", "a", "specific", "offset", "or", "reading", "id", "." ]
python
train
Stewori/pytypes
pytypes/type_util.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L1148-L1179
def _find_Generic_super_origin(subclass, superclass_origin): """Helper for _issubclass_Generic. """ stack = [subclass] param_map = {} while len(stack) > 0: bs = stack.pop() if is_Generic(bs): if not bs.__origin__ is None and len(bs.__origin__.__parameters__) > 0: for i in range(len(bs.__args__)): ors = bs.__origin__.__parameters__[i] if bs.__args__[i] != ors and isinstance(bs.__args__[i], TypeVar): param_map[ors] = bs.__args__[i] if (bs.__origin__ is superclass_origin or \ (bs.__origin__ is None and bs is superclass_origin)): prms = [] try: if len(bs.__origin__.__parameters__) > len(bs.__parameters__): prms.extend(bs.__origin__.__parameters__) else: prms.extend(bs.__parameters__) except: prms.extend(bs.__parameters__) for i in range(len(prms)): while prms[i] in param_map: prms[i] = param_map[prms[i]] return prms try: stack.extend(bs.__orig_bases__) except AttributeError: stack.extend(bs.__bases__) return None
[ "def", "_find_Generic_super_origin", "(", "subclass", ",", "superclass_origin", ")", ":", "stack", "=", "[", "subclass", "]", "param_map", "=", "{", "}", "while", "len", "(", "stack", ")", ">", "0", ":", "bs", "=", "stack", ".", "pop", "(", ")", "if", ...
Helper for _issubclass_Generic.
[ "Helper", "for", "_issubclass_Generic", "." ]
python
train
Qiskit/qiskit-terra
qiskit/visualization/interactive/iplot_histogram.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/visualization/interactive/iplot_histogram.py#L53-L141
def iplot_histogram(data, figsize=None, number_to_keep=None, sort='asc', legend=None): """ Create a histogram representation. Graphical representation of the input array using a vertical bars style graph. Args: data (list or dict): This is either a list of dicts or a single dict containing the values to represent (ex. {'001' : 130}) figsize (tuple): Figure size in pixels. number_to_keep (int): The number of terms to plot and rest is made into a single bar called other values sort (string): Could be 'asc' or 'desc' legend (list): A list of strings to use for labels of the data. The number of entries must match the length of data. Raises: VisualizationError: When legend is provided and the length doesn't match the input data. """ # HTML html_template = Template(""" <p> <div id="histogram_$divNumber"></div> </p> """) # JavaScript javascript_template = Template(""" <script> requirejs.config({ paths: { qVisualization: "https://qvisualization.mybluemix.net/q-visualizations" } }); require(["qVisualization"], function(qVisualizations) { qVisualizations.plotState("histogram_$divNumber", "histogram", $executions, $options); }); </script> """) # Process data and execute div_number = str(time.time()) div_number = re.sub('[.]', '', div_number) # set default figure size if none provided if figsize is None: figsize = (7, 5) options = {'number_to_keep': 0 if number_to_keep is None else number_to_keep, 'sort': sort, 'show_legend': 0, 'width': int(figsize[0]), 'height': int(figsize[1])} if legend: options['show_legend'] = 1 data_to_plot = [] if isinstance(data, dict): data = [data] if legend and len(legend) != len(data): raise VisualizationError("Length of legendL (%s) doesn't match number " "of input executions: %s" % (len(legend), len(data))) for item, execution in enumerate(data): exec_data = process_data(execution, options['number_to_keep']) out_dict = {'data': exec_data} if legend: out_dict['name'] = legend[item] data_to_plot.append(out_dict) html = html_template.substitute({ 
'divNumber': div_number }) javascript = javascript_template.substitute({ 'divNumber': div_number, 'executions': data_to_plot, 'options': options }) display(HTML(html + javascript))
[ "def", "iplot_histogram", "(", "data", ",", "figsize", "=", "None", ",", "number_to_keep", "=", "None", ",", "sort", "=", "'asc'", ",", "legend", "=", "None", ")", ":", "# HTML", "html_template", "=", "Template", "(", "\"\"\"\n <p>\n <div id=\"histogra...
Create a histogram representation. Graphical representation of the input array using a vertical bars style graph. Args: data (list or dict): This is either a list of dicts or a single dict containing the values to represent (ex. {'001' : 130}) figsize (tuple): Figure size in pixels. number_to_keep (int): The number of terms to plot and rest is made into a single bar called other values sort (string): Could be 'asc' or 'desc' legend (list): A list of strings to use for labels of the data. The number of entries must match the length of data. Raises: VisualizationError: When legend is provided and the length doesn't match the input data.
[ "Create", "a", "histogram", "representation", "." ]
python
test
sdispater/cleo
cleo/commands/command.py
https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L124-L131
def argument(self, key=None): """ Get the value of a command argument. """ if key is None: return self._args.arguments() return self._args.argument(key)
[ "def", "argument", "(", "self", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "return", "self", ".", "_args", ".", "arguments", "(", ")", "return", "self", ".", "_args", ".", "argument", "(", "key", ")" ]
Get the value of a command argument.
[ "Get", "the", "value", "of", "a", "command", "argument", "." ]
python
train
nephila/python-taiga
taiga/models/models.py
https://github.com/nephila/python-taiga/blob/5b471d6b8b59e5d410162a6f1c2f0d4188445a56/taiga/models/models.py#L1465-L1474
def add_task_attribute(self, name, **attrs): """ Add a new Task attribute and return a :class:`TaskAttribute` object. :param name: name of the :class:`TaskAttribute` :param attrs: optional attributes for :class:`TaskAttribute` """ return TaskAttributes(self.requester).create( self.id, name, **attrs )
[ "def", "add_task_attribute", "(", "self", ",", "name", ",", "*", "*", "attrs", ")", ":", "return", "TaskAttributes", "(", "self", ".", "requester", ")", ".", "create", "(", "self", ".", "id", ",", "name", ",", "*", "*", "attrs", ")" ]
Add a new Task attribute and return a :class:`TaskAttribute` object. :param name: name of the :class:`TaskAttribute` :param attrs: optional attributes for :class:`TaskAttribute`
[ "Add", "a", "new", "Task", "attribute", "and", "return", "a", ":", "class", ":", "TaskAttribute", "object", "." ]
python
train
edeposit/edeposit.amqp.antivirus
bin/edeposit_clamd_init.py
https://github.com/edeposit/edeposit.amqp.antivirus/blob/011b38bbe920819fab99a5891b1e70732321a598/bin/edeposit_clamd_init.py#L292-L322
def main(conf_file, overwrite, logger): """ Create configuration and log file. Restart the daemon when configuration is done. Args: conf_file (str): Path to the configuration file. overwrite (bool): Overwrite the configuration file with `clean` config? """ uid = pwd.getpwnam(get_username()).pw_uid # stop the daemon logger.info("Stopping the daemon.") sh.service(get_service_name(), "stop") # create files logger.info("Creating config file.") create_config( cnf_file=conf_file, uid=uid, overwrite=overwrite ) logger.info("Creating log file.") create_log( log_file=REQUIRED_SETTINGS["LogFile"], uid=uid ) # start the daemon logger.info("Starting the daemon..") sh.service(get_service_name(), "start")
[ "def", "main", "(", "conf_file", ",", "overwrite", ",", "logger", ")", ":", "uid", "=", "pwd", ".", "getpwnam", "(", "get_username", "(", ")", ")", ".", "pw_uid", "# stop the daemon", "logger", ".", "info", "(", "\"Stopping the daemon.\"", ")", "sh", ".", ...
Create configuration and log file. Restart the daemon when configuration is done. Args: conf_file (str): Path to the configuration file. overwrite (bool): Overwrite the configuration file with `clean` config?
[ "Create", "configuration", "and", "log", "file", ".", "Restart", "the", "daemon", "when", "configuration", "is", "done", "." ]
python
train
agile-geoscience/striplog
striplog/striplog.py
https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/striplog.py#L304-L316
def unique(self): """ Property. Summarize a Striplog with some statistics. Returns: List. A list of (Component, total thickness thickness) tuples. """ all_rx = set([iv.primary for iv in self]) table = {r: 0 for r in all_rx} for iv in self: table[iv.primary] += iv.thickness return sorted(table.items(), key=operator.itemgetter(1), reverse=True)
[ "def", "unique", "(", "self", ")", ":", "all_rx", "=", "set", "(", "[", "iv", ".", "primary", "for", "iv", "in", "self", "]", ")", "table", "=", "{", "r", ":", "0", "for", "r", "in", "all_rx", "}", "for", "iv", "in", "self", ":", "table", "["...
Property. Summarize a Striplog with some statistics. Returns: List. A list of (Component, total thickness thickness) tuples.
[ "Property", ".", "Summarize", "a", "Striplog", "with", "some", "statistics", "." ]
python
test
instacart/lore
lore/env.py
https://github.com/instacart/lore/blob/0367bde9a52e69162832906acc61e8d65c5ec5d4/lore/env.py#L227-L262
def check_requirements(): """Make sure all listed packages from requirements.txt have been installed into the virtualenv at boot. """ if not os.path.exists(REQUIREMENTS): sys.exit( ansi.error() + ' %s is missing. Please check it in.' % ansi.underline(REQUIREMENTS) ) with open(REQUIREMENTS, 'r', encoding='utf-8') as f: dependencies = f.readlines() vcs = [d for d in dependencies if re.match(r'^(-e )?(git|svn|hg|bzr).*', d)] dependencies = list(set(dependencies) - set(vcs)) missing = [] try: pkg_resources.require(dependencies) except ( pkg_resources.ContextualVersionConflict, pkg_resources.DistributionNotFound, pkg_resources.VersionConflict ) as error: missing.append(str(error)) except pkg_resources.RequirementParseError: pass if missing: missing = ' missing requirement:\n ' + os.linesep.join(missing) if '--env-checked' in sys.argv: sys.exit(ansi.error() + missing + '\nRequirement installation failure, please check for errors in:\n $ lore install\n') else: print(ansi.warning() + missing) import lore.__main__ lore.__main__.install_requirements(None) reboot('--env-checked')
[ "def", "check_requirements", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "REQUIREMENTS", ")", ":", "sys", ".", "exit", "(", "ansi", ".", "error", "(", ")", "+", "' %s is missing. Please check it in.'", "%", "ansi", ".", "underline", ...
Make sure all listed packages from requirements.txt have been installed into the virtualenv at boot.
[ "Make", "sure", "all", "listed", "packages", "from", "requirements", ".", "txt", "have", "been", "installed", "into", "the", "virtualenv", "at", "boot", "." ]
python
train
niklasf/python-chess
chess/pgn.py
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/pgn.py#L492-L504
def from_board(cls: Type[GameT], board: chess.Board) -> GameT: """Creates a game from the move stack of a :class:`~chess.Board()`.""" # Setup the initial position. game = cls() game.setup(board.root()) node = game # type: GameNode # Replay all moves. for move in board.move_stack: node = node.add_variation(move) game.headers["Result"] = board.result() return game
[ "def", "from_board", "(", "cls", ":", "Type", "[", "GameT", "]", ",", "board", ":", "chess", ".", "Board", ")", "->", "GameT", ":", "# Setup the initial position.", "game", "=", "cls", "(", ")", "game", ".", "setup", "(", "board", ".", "root", "(", "...
Creates a game from the move stack of a :class:`~chess.Board()`.
[ "Creates", "a", "game", "from", "the", "move", "stack", "of", "a", ":", "class", ":", "~chess", ".", "Board", "()", "." ]
python
train
fitnr/convertdate
convertdate/mayan.py
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/mayan.py#L111-L121
def _haab_count(day, month): '''Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365''' if day < 0 or day > 19: raise IndexError("Invalid day number") try: i = HAAB_MONTHS.index(month) except ValueError: raise ValueError("'{0}' is not a valid Haab' month".format(month)) return min(i * 20, 360) + day
[ "def", "_haab_count", "(", "day", ",", "month", ")", ":", "if", "day", "<", "0", "or", "day", ">", "19", ":", "raise", "IndexError", "(", "\"Invalid day number\"", ")", "try", ":", "i", "=", "HAAB_MONTHS", ".", "index", "(", "month", ")", "except", "...
Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365
[ "Return", "the", "count", "of", "the", "given", "haab", "in", "the", "cycle", ".", "e", ".", "g", ".", "0", "Pop", "==", "1", "5", "Wayeb", "==", "365" ]
python
train
MacHu-GWU/rolex-project
rolex/generator.py
https://github.com/MacHu-GWU/rolex-project/blob/a1111b410ed04b4b6eddd81df110fa2dacfa6537/rolex/generator.py#L387-L414
def day_interval(year, month, day, milliseconds=False, return_string=False): """ Return a start datetime and end datetime of a day. :param milliseconds: Minimum time resolution. :param return_string: If you want string instead of datetime, set True Usage Example:: >>> start, end = rolex.day_interval(2014, 6, 17) >>> start datetime(2014, 6, 17, 0, 0, 0) >>> end datetime(2014, 6, 17, 23, 59, 59) """ if milliseconds: # pragma: no cover delta = timedelta(milliseconds=1) else: delta = timedelta(seconds=1) start = datetime(year, month, day) end = datetime(year, month, day) + timedelta(days=1) - delta if not return_string: return start, end else: return str(start), str(end)
[ "def", "day_interval", "(", "year", ",", "month", ",", "day", ",", "milliseconds", "=", "False", ",", "return_string", "=", "False", ")", ":", "if", "milliseconds", ":", "# pragma: no cover", "delta", "=", "timedelta", "(", "milliseconds", "=", "1", ")", "...
Return a start datetime and end datetime of a day. :param milliseconds: Minimum time resolution. :param return_string: If you want string instead of datetime, set True Usage Example:: >>> start, end = rolex.day_interval(2014, 6, 17) >>> start datetime(2014, 6, 17, 0, 0, 0) >>> end datetime(2014, 6, 17, 23, 59, 59)
[ "Return", "a", "start", "datetime", "and", "end", "datetime", "of", "a", "day", "." ]
python
train
gisle/isoweek
isoweek.py
https://github.com/gisle/isoweek/blob/c6f2cc01f1dbc7cfdf75294421ad14ab4007d93b/isoweek.py#L93-L96
def day(self, num): """Return the given day of week as a date object. Day 0 is the Monday.""" d = date(self.year, 1, 4) # The Jan 4th must be in week 1 according to ISO return d + timedelta(weeks=self.week-1, days=-d.weekday() + num)
[ "def", "day", "(", "self", ",", "num", ")", ":", "d", "=", "date", "(", "self", ".", "year", ",", "1", ",", "4", ")", "# The Jan 4th must be in week 1 according to ISO", "return", "d", "+", "timedelta", "(", "weeks", "=", "self", ".", "week", "-", "1",...
Return the given day of week as a date object. Day 0 is the Monday.
[ "Return", "the", "given", "day", "of", "week", "as", "a", "date", "object", ".", "Day", "0", "is", "the", "Monday", "." ]
python
train
KnowledgeLinks/rdfframework
rdfframework/rml/rmlmanager.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rml/rmlmanager.py#L133-L152
def make_processor(self, name, mappings, processor_type, **kwargs): """ Instantiates a RmlProcessor and registers it in the manager Args: ----- name: the name to register the processor mappings: the list RML mapping definitions to use processor_type: the name of the RML processor to use """ from .processor import Processor if self.processors.get(name): raise LookupError("processor has already been created") if isinstance(mappings, list): mappings = [self.get_rml(item) for item in mappings] else: mappings = [self.get_rml(mappings)] self.processors[name] = Processor[processor_type](mappings, **kwargs) self.processors[name].name = name return self.processors[name]
[ "def", "make_processor", "(", "self", ",", "name", ",", "mappings", ",", "processor_type", ",", "*", "*", "kwargs", ")", ":", "from", ".", "processor", "import", "Processor", "if", "self", ".", "processors", ".", "get", "(", "name", ")", ":", "raise", ...
Instantiates a RmlProcessor and registers it in the manager Args: ----- name: the name to register the processor mappings: the list RML mapping definitions to use processor_type: the name of the RML processor to use
[ "Instantiates", "a", "RmlProcessor", "and", "registers", "it", "in", "the", "manager" ]
python
train
ic-labs/django-icekit
icekit/workflow/admin.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/workflow/admin.py#L116-L124
def _get_obj_ct(self, obj): """ Look up and return object's content type and cache for reuse """ if not hasattr(obj, '_wfct'): # Use polymorpic content type if available if hasattr(obj, 'polymorphic_ctype'): obj._wfct = obj.polymorphic_ctype else: obj._wfct = ContentType.objects.get_for_model(obj) return obj._wfct
[ "def", "_get_obj_ct", "(", "self", ",", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'_wfct'", ")", ":", "# Use polymorpic content type if available", "if", "hasattr", "(", "obj", ",", "'polymorphic_ctype'", ")", ":", "obj", ".", "_wfct", "=", ...
Look up and return object's content type and cache for reuse
[ "Look", "up", "and", "return", "object", "s", "content", "type", "and", "cache", "for", "reuse" ]
python
train
BlueBrain/hpcbench
hpcbench/benchmark/ior.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/ior.py#L302-L310
def options(self): """Additional options appended to the ior command type: either string or a list of string """ options = self.attributes['options'] if isinstance(options, six.string_types): options = shlex.split(options) options = [str(e) for e in options] return options
[ "def", "options", "(", "self", ")", ":", "options", "=", "self", ".", "attributes", "[", "'options'", "]", "if", "isinstance", "(", "options", ",", "six", ".", "string_types", ")", ":", "options", "=", "shlex", ".", "split", "(", "options", ")", "optio...
Additional options appended to the ior command type: either string or a list of string
[ "Additional", "options", "appended", "to", "the", "ior", "command", "type", ":", "either", "string", "or", "a", "list", "of", "string" ]
python
train
ToFuProject/tofu
tofu/data/_core.py
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L923-L935
def set_dtreat_indch(self, indch=None): """ Store the desired index array for the channels If None => all channels Must be a 1d array """ if indch is not None: indch = np.asarray(indch) assert indch.ndim==1 indch = _format_ind(indch, n=self._ddataRef['nch']) self._dtreat['indch'] = indch self._ddata['uptodate'] = False
[ "def", "set_dtreat_indch", "(", "self", ",", "indch", "=", "None", ")", ":", "if", "indch", "is", "not", "None", ":", "indch", "=", "np", ".", "asarray", "(", "indch", ")", "assert", "indch", ".", "ndim", "==", "1", "indch", "=", "_format_ind", "(", ...
Store the desired index array for the channels If None => all channels Must be a 1d array
[ "Store", "the", "desired", "index", "array", "for", "the", "channels" ]
python
train
pypa/pipenv
pipenv/vendor/orderedmultidict/orderedmultidict.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/orderedmultidict/orderedmultidict.py#L416-L442
def poplist(self, key, default=_absent): """ If <key> is in the dictionary, pop it and return its list of values. If <key> is not in the dictionary, return <default>. KeyError is raised if <default> is not provided and <key> is not in the dictionary. Example: omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)]) omd.poplist(1) == [1, 11, 111] omd.allitems() == [(2,2), (3,3)] omd.poplist(2) == [2] omd.allitems() == [(3,3)] Raises: KeyError if <key> isn't in the dictionary and <default> isn't provided. Returns: List of <key>'s values. """ if key in self: values = self.getlist(key) del self._map[key] for node, nodekey, nodevalue in self._items: if nodekey == key: self._items.removenode(node) return values elif key not in self._map and default is not _absent: return default raise KeyError(key)
[ "def", "poplist", "(", "self", ",", "key", ",", "default", "=", "_absent", ")", ":", "if", "key", "in", "self", ":", "values", "=", "self", ".", "getlist", "(", "key", ")", "del", "self", ".", "_map", "[", "key", "]", "for", "node", ",", "nodekey...
If <key> is in the dictionary, pop it and return its list of values. If <key> is not in the dictionary, return <default>. KeyError is raised if <default> is not provided and <key> is not in the dictionary. Example: omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)]) omd.poplist(1) == [1, 11, 111] omd.allitems() == [(2,2), (3,3)] omd.poplist(2) == [2] omd.allitems() == [(3,3)] Raises: KeyError if <key> isn't in the dictionary and <default> isn't provided. Returns: List of <key>'s values.
[ "If", "<key", ">", "is", "in", "the", "dictionary", "pop", "it", "and", "return", "its", "list", "of", "values", ".", "If", "<key", ">", "is", "not", "in", "the", "dictionary", "return", "<default", ">", ".", "KeyError", "is", "raised", "if", "<default...
python
train
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L84-L93
def add(self, dpos, dlen, ulen, flag, typcd, nm): """Add an entry to the table of contents. DPOS is data position. DLEN is data length. ULEN is the uncompressed data len. FLAG says if the data is compressed. TYPCD is the "type" of the entry (used by the C code) NM is the entry's name.""" self.data.append((dpos, dlen, ulen, flag, typcd, nm))
[ "def", "add", "(", "self", ",", "dpos", ",", "dlen", ",", "ulen", ",", "flag", ",", "typcd", ",", "nm", ")", ":", "self", ".", "data", ".", "append", "(", "(", "dpos", ",", "dlen", ",", "ulen", ",", "flag", ",", "typcd", ",", "nm", ")", ")" ]
Add an entry to the table of contents. DPOS is data position. DLEN is data length. ULEN is the uncompressed data len. FLAG says if the data is compressed. TYPCD is the "type" of the entry (used by the C code) NM is the entry's name.
[ "Add", "an", "entry", "to", "the", "table", "of", "contents", "." ]
python
train
nitely/django-djconfig
djconfig/conf.py
https://github.com/nitely/django-djconfig/blob/5e79a048ef5c9529075cad947b0c309115035d7e/djconfig/conf.py#L144-L160
def _reload_maybe(self): """ Reload the config if the config\ model has been updated. This is called\ once on every request by the middleware.\ Should not be called directly. """ ConfigModel = apps.get_model('djconfig.Config') data = dict( ConfigModel.objects .filter(key='_updated_at') .values_list('key', 'value')) if (not hasattr(self, '_updated_at') or self._updated_at != data.get('_updated_at')): self._reload()
[ "def", "_reload_maybe", "(", "self", ")", ":", "ConfigModel", "=", "apps", ".", "get_model", "(", "'djconfig.Config'", ")", "data", "=", "dict", "(", "ConfigModel", ".", "objects", ".", "filter", "(", "key", "=", "'_updated_at'", ")", ".", "values_list", "...
Reload the config if the config\ model has been updated. This is called\ once on every request by the middleware.\ Should not be called directly.
[ "Reload", "the", "config", "if", "the", "config", "\\", "model", "has", "been", "updated", ".", "This", "is", "called", "\\", "once", "on", "every", "request", "by", "the", "middleware", ".", "\\", "Should", "not", "be", "called", "directly", "." ]
python
train
juju/charm-helpers
charmhelpers/core/hookenv.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L604-L612
def relations_of_type(reltype=None): """Get relations of a specific type""" relation_data = [] reltype = reltype or relation_type() for relid in relation_ids(reltype): for relation in relations_for_id(relid): relation['__relid__'] = relid relation_data.append(relation) return relation_data
[ "def", "relations_of_type", "(", "reltype", "=", "None", ")", ":", "relation_data", "=", "[", "]", "reltype", "=", "reltype", "or", "relation_type", "(", ")", "for", "relid", "in", "relation_ids", "(", "reltype", ")", ":", "for", "relation", "in", "relatio...
Get relations of a specific type
[ "Get", "relations", "of", "a", "specific", "type" ]
python
train
eyurtsev/FlowCytometryTools
FlowCytometryTools/gui/dialogs.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/dialogs.py#L14-L33
def select_multi_directory_dialog(): """ Opens a directory selection dialog Style - specifies style of dialog (read wx documentation for information) """ import wx.lib.agw.multidirdialog as MDD app = wx.App(0) dlg = MDD.MultiDirDialog(None, title="Select directories", defaultPath=os.getcwd(), agwStyle=MDD.DD_MULTIPLE | MDD.DD_DIR_MUST_EXIST) if dlg.ShowModal() != wx.ID_OK: dlg.Destroy() return paths = dlg.GetPaths() dlg.Destroy() app.MainLoop() return paths
[ "def", "select_multi_directory_dialog", "(", ")", ":", "import", "wx", ".", "lib", ".", "agw", ".", "multidirdialog", "as", "MDD", "app", "=", "wx", ".", "App", "(", "0", ")", "dlg", "=", "MDD", ".", "MultiDirDialog", "(", "None", ",", "title", "=", ...
Opens a directory selection dialog Style - specifies style of dialog (read wx documentation for information)
[ "Opens", "a", "directory", "selection", "dialog", "Style", "-", "specifies", "style", "of", "dialog", "(", "read", "wx", "documentation", "for", "information", ")" ]
python
train
wummel/linkchecker
linkcheck/checker/fileurl.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/fileurl.py#L243-L256
def is_parseable (self): """Check if content is parseable for recursion. @return: True if content is parseable @rtype: bool """ if self.is_directory(): return True if firefox.has_sqlite and firefox.extension.search(self.url): return True if self.content_type in self.ContentMimetypes: return True log.debug(LOG_CHECK, "File with content type %r is not parseable.", self.content_type) return False
[ "def", "is_parseable", "(", "self", ")", ":", "if", "self", ".", "is_directory", "(", ")", ":", "return", "True", "if", "firefox", ".", "has_sqlite", "and", "firefox", ".", "extension", ".", "search", "(", "self", ".", "url", ")", ":", "return", "True"...
Check if content is parseable for recursion. @return: True if content is parseable @rtype: bool
[ "Check", "if", "content", "is", "parseable", "for", "recursion", "." ]
python
train
dereneaton/ipyrad
ipyrad/analysis/tetrad.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1668-L1682
def get_spans(maparr, spans): """ get span distance for each locus in original seqarray """ ## start at 0, finds change at 1-index of map file bidx = 1 spans = np.zeros((maparr[-1, 0], 2), np.uint64) ## read through marr and record when locus id changes for idx in xrange(1, maparr.shape[0]): cur = maparr[idx, 0] if cur != bidx: idy = idx + 1 spans[cur-2, 1] = idx spans[cur-1, 0] = idx bidx = cur spans[-1, 1] = maparr[-1, -1] return spans
[ "def", "get_spans", "(", "maparr", ",", "spans", ")", ":", "## start at 0, finds change at 1-index of map file", "bidx", "=", "1", "spans", "=", "np", ".", "zeros", "(", "(", "maparr", "[", "-", "1", ",", "0", "]", ",", "2", ")", ",", "np", ".", "uint6...
get span distance for each locus in original seqarray
[ "get", "span", "distance", "for", "each", "locus", "in", "original", "seqarray" ]
python
valid
saltstack/salt
salt/runners/vault.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/vault.py#L192-L239
def _expand_pattern_lists(pattern, **mappings): ''' Expands the pattern for any list-valued mappings, such that for any list of length N in the mappings present in the pattern, N copies of the pattern are returned, each with an element of the list substituted. pattern: A pattern to expand, for example ``by-role/{grains[roles]}`` mappings: A dictionary of variables that can be expanded into the pattern. Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains .. code-block:: yaml grains: roles: - web - database This function will expand into two patterns, ``[by-role/web, by-role/database]``. Note that this method does not expand any non-list patterns. ''' expanded_patterns = [] f = string.Formatter() ''' This function uses a string.Formatter to get all the formatting tokens from the pattern, then recursively replaces tokens whose expanded value is a list. For a list with N items, it will create N new pattern strings and then continue with the next token. In practice this is expected to not be very expensive, since patterns will typically involve a handful of lists at most. ''' # pylint: disable=W0105 for (_, field_name, _, _) in f.parse(pattern): if field_name is None: continue (value, _) = f.get_field(field_name, None, mappings) if isinstance(value, list): token = '{{{0}}}'.format(field_name) expanded = [pattern.replace(token, six.text_type(elem)) for elem in value] for expanded_item in expanded: result = _expand_pattern_lists(expanded_item, **mappings) expanded_patterns += result return expanded_patterns return [pattern]
[ "def", "_expand_pattern_lists", "(", "pattern", ",", "*", "*", "mappings", ")", ":", "expanded_patterns", "=", "[", "]", "f", "=", "string", ".", "Formatter", "(", ")", "'''\n This function uses a string.Formatter to get all the formatting tokens from\n the pattern, t...
Expands the pattern for any list-valued mappings, such that for any list of length N in the mappings present in the pattern, N copies of the pattern are returned, each with an element of the list substituted. pattern: A pattern to expand, for example ``by-role/{grains[roles]}`` mappings: A dictionary of variables that can be expanded into the pattern. Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains .. code-block:: yaml grains: roles: - web - database This function will expand into two patterns, ``[by-role/web, by-role/database]``. Note that this method does not expand any non-list patterns.
[ "Expands", "the", "pattern", "for", "any", "list", "-", "valued", "mappings", "such", "that", "for", "any", "list", "of", "length", "N", "in", "the", "mappings", "present", "in", "the", "pattern", "N", "copies", "of", "the", "pattern", "are", "returned", ...
python
train
delph-in/pydelphin
delphin/itsdb.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/itsdb.py#L2063-L2072
def select(self, table, cols, mode='list', key_filter=True):
    """
    Yield selected rows from *table*.

    This method just calls select_rows() on the rows read from *table*.
    """
    # When no columns were requested, select every column defined by the
    # table's relation schema.
    column_names = cols
    if column_names is None:
        column_names = [field.name for field in self.relations[table]]
    table_rows = self.read_table(table, key_filter=key_filter)
    for selection in select_rows(column_names, table_rows, mode=mode):
        yield selection
[ "def", "select", "(", "self", ",", "table", ",", "cols", ",", "mode", "=", "'list'", ",", "key_filter", "=", "True", ")", ":", "if", "cols", "is", "None", ":", "cols", "=", "[", "c", ".", "name", "for", "c", "in", "self", ".", "relations", "[", ...
Yield selected rows from *table*. This method just calls select_rows() on the rows read from *table*.
[ "Yield", "selected", "rows", "from", "*", "table", "*", ".", "This", "method", "just", "calls", "select_rows", "()", "on", "the", "rows", "read", "from", "*", "table", "*", "." ]
python
train