repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
nerdvegas/rez
src/rez/vendor/sortedcontainers/sortedset.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/sortedcontainers/sortedset.py#L218-L234
def difference_update(self, *iterables):
    """Update the set, removing all elements found in any of the *iterables*.

    When the number of values to remove is large relative to the set size
    (more than a quarter of it), it is cheaper to prune the backing set in
    bulk and rebuild the sorted list from it; otherwise values are
    discarded one at a time, which keeps the list in sync incrementally.

    :param iterables: iterables of values to remove from the set
    :return: self, to allow call chaining
    """
    _set = self._set
    values = set(chain(*iterables))
    if (4 * len(values)) > len(_set):
        # Bulk path: prune the set, then rebuild the sorted list from it.
        _list = self._list
        _set.difference_update(values)
        _list.clear()
        _list.update(_set)
    else:
        # Sparse path: discard values one by one.
        _discard = self.discard
        for value in values:
            _discard(value)
    return self
[ "def", "difference_update", "(", "self", ",", "*", "iterables", ")", ":", "_set", "=", "self", ".", "_set", "values", "=", "set", "(", "chain", "(", "*", "iterables", ")", ")", "if", "(", "4", "*", "len", "(", "values", ")", ")", ">", "len", "(",...
Update the set, removing elements found in keeping only elements found in any of the *iterables*.
[ "Update", "the", "set", "removing", "elements", "found", "in", "keeping", "only", "elements", "found", "in", "any", "of", "the", "*", "iterables", "*", "." ]
python
train
unistra/django-rest-framework-fine-permissions
rest_framework_fine_permissions/fields.py
https://github.com/unistra/django-rest-framework-fine-permissions/blob/71af5953648ef9f9bdfb64a4c0ed0ea62661fa61/rest_framework_fine_permissions/fields.py#L53-L71
def to_representation(self, obj):
    """Represent data for the field.

    Serializes ``obj`` with the configured serializer, detecting whether it
    is a collection (iterable or manager) so the serializer can be
    instantiated with ``many=True``.
    """
    # Bug fix: the original expression parsed as
    #   Iterable or (Manager and not dict)
    # because ``and`` binds tighter than ``or``, so a plain dict (which is
    # iterable) was incorrectly treated as a collection.  The intent is:
    # a collection is an iterable or a manager, but never a dict.
    many = (isinstance(obj, collections.Iterable)
            or isinstance(obj, models.Manager)) \
        and not isinstance(obj, dict)
    assert self.serializer is not None \
        and issubclass(self.serializer, serializers.ModelSerializer), (
            "Bad serializer defined %s" % self.serializer
        )
    extra_params = {}
    if issubclass(self.serializer, ModelPermissionsSerializer):
        # Propagate the parent's cache so nested serializers do not
        # recompute the allowed fields for every object.
        extra_params['cached_allowed_fields'] = \
            self.parent.cached_allowed_fields
    ser = self.serializer(obj, context=self.context, many=many,
                          **extra_params)
    return ser.data
[ "def", "to_representation", "(", "self", ",", "obj", ")", ":", "many", "=", "isinstance", "(", "obj", ",", "collections", ".", "Iterable", ")", "or", "isinstance", "(", "obj", ",", "models", ".", "Manager", ")", "and", "not", "isinstance", "(", "obj", ...
Represent data for the field.
[ "Represent", "data", "for", "the", "field", "." ]
python
train
pybel/pybel
src/pybel/manager/cache_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L741-L749
def _get_annotation_entries_from_data(self, graph: BELGraph, data: EdgeData) -> Optional[List[NamespaceEntry]]:
    """Get the annotation entries from an edge data dictionary."""
    annotations_dict = data.get(ANNOTATIONS)
    if annotations_dict is None:
        # No annotations on this edge.
        return None
    entries = []
    for url, names in self._iter_from_annotations_dict(graph, annotations_dict=annotations_dict):
        entries.extend(self.get_annotation_entries_by_names(url, names))
    return entries
[ "def", "_get_annotation_entries_from_data", "(", "self", ",", "graph", ":", "BELGraph", ",", "data", ":", "EdgeData", ")", "->", "Optional", "[", "List", "[", "NamespaceEntry", "]", "]", ":", "annotations_dict", "=", "data", ".", "get", "(", "ANNOTATIONS", "...
Get the annotation entries from an edge data dictionary.
[ "Get", "the", "annotation", "entries", "from", "an", "edge", "data", "dictionary", "." ]
python
train
cherrypy/cheroot
cheroot/server.py
https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/server.py#L606-L634
def read_trailer_lines(self):
    """Read HTTP trailer headers and yield them.

    Returns:
        Generator: yields CRLF separated lines.

    Raises:
        ValueError: if the request body has not been fully read, on an
            illegal end of headers, or on a missing CRLF terminator.
        IOError: if the accumulated size exceeds ``self.maxlen``.
    """
    if not self.closed:
        raise ValueError(
            'Cannot read trailers until the request body has been read.',
        )

    readline = self.rfile.readline
    while True:
        trailer_line = readline()
        if not trailer_line:
            # Stream ended before the blank-line terminator.
            raise ValueError('Illegal end of headers.')

        self.bytes_read += len(trailer_line)
        if self.maxlen and self.bytes_read > self.maxlen:
            raise IOError('Request Entity Too Large')

        if trailer_line == CRLF:
            # A bare CRLF marks the normal end of the headers.
            break
        if not trailer_line.endswith(CRLF):
            raise ValueError('HTTP requires CRLF terminators')

        yield trailer_line
[ "def", "read_trailer_lines", "(", "self", ")", ":", "if", "not", "self", ".", "closed", ":", "raise", "ValueError", "(", "'Cannot read trailers until the request body has been read.'", ",", ")", "while", "True", ":", "line", "=", "self", ".", "rfile", ".", "read...
Read HTTP headers and yield them. Returns: Generator: yields CRLF separated lines.
[ "Read", "HTTP", "headers", "and", "yield", "them", "." ]
python
train
piface/pifacecommon
pifacecommon/interrupts.py
https://github.com/piface/pifacecommon/blob/006bca14c18d43ba2d9eafaa84ef83b512c51cf6/pifacecommon/interrupts.py#L309-L341
def handle_events(
        function_maps, event_queue, event_matches_function_map,
        terminate_signal):
    """Waits for events on the event queue and calls the registered functions.

    :param function_maps: A list of classes that have inheritted from
        :class:`FunctionMap`\\ s describing what to do with events.
    :type function_maps: list
    :param event_queue: A queue to put events on.
    :type event_queue: :py:class:`multiprocessing.Queue`
    :param event_matches_function_map: A function that determines if the
        given event and :class:`FunctionMap` match.
    :type event_matches_function_map: function
    :param terminate_signal: The signal that, when placed on the event
        queue, causes this function to exit.
    """
    while True:
        event = event_queue.get()
        if event == terminate_signal:
            return
        # Call the callback of every function map matching this event,
        # in registration order.
        for function_map in function_maps:
            if event_matches_function_map(event, function_map):
                function_map.callback(event)
[ "def", "handle_events", "(", "function_maps", ",", "event_queue", ",", "event_matches_function_map", ",", "terminate_signal", ")", ":", "while", "True", ":", "# print(\"HANDLE: Waiting for events!\")", "event", "=", "event_queue", ".", "get", "(", ")", "# print(\"HANDLE...
Waits for events on the event queue and calls the registered functions. :param function_maps: A list of classes that have inheritted from :class:`FunctionMap`\ s describing what to do with events. :type function_maps: list :param event_queue: A queue to put events on. :type event_queue: :py:class:`multiprocessing.Queue` :param event_matches_function_map: A function that determines if the given event and :class:`FunctionMap` match. :type event_matches_function_map: function :param terminate_signal: The signal that, when placed on the event queue, causes this function to exit.
[ "Waits", "for", "events", "on", "the", "event", "queue", "and", "calls", "the", "registered", "functions", "." ]
python
test
allenai/allennlp
allennlp/semparse/worlds/world.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/worlds/world.py#L245-L285
def get_logical_form(self, action_sequence: List[str], add_var_function: bool = True) -> str: """ Takes an action sequence and constructs a logical form from it. This is useful if you want to get a logical form from a decoded sequence of actions generated by a transition based semantic parser. Parameters ---------- action_sequence : ``List[str]`` The sequence of actions as strings (eg.: ``['{START_SYMBOL} -> t', 't -> <e,t>', ...]``). add_var_function : ``bool`` (optional) ``var`` is a special function that some languages use within lambda functions to indicate the use of a variable (eg.: ``(lambda x (fb:row.row.year (var x)))``). Due to the way constrained decoding is currently implemented, it is easier for the decoder to not produce these functions. In that case, setting this flag adds the function in the logical form even though it is not present in the action sequence. """ # Basic outline: we assume that the bracketing that we get in the RHS of each action is the # correct bracketing for reconstructing the logical form. This is true when there is no # currying in the action sequence. Given this assumption, we just need to construct a tree # from the action sequence, then output all of the leaves in the tree, with brackets around # the children of all non-terminal nodes. remaining_actions = [action.split(" -> ") for action in action_sequence] tree = Tree(remaining_actions[0][1], []) try: remaining_actions = self._construct_node_from_actions(tree, remaining_actions[1:], add_var_function) except ParsingError: logger.error("Error parsing action sequence: %s", action_sequence) raise if remaining_actions: logger.error("Error parsing action sequence: %s", action_sequence) logger.error("Remaining actions were: %s", remaining_actions) raise ParsingError("Extra actions in action sequence") return nltk_tree_to_logical_form(tree)
[ "def", "get_logical_form", "(", "self", ",", "action_sequence", ":", "List", "[", "str", "]", ",", "add_var_function", ":", "bool", "=", "True", ")", "->", "str", ":", "# Basic outline: we assume that the bracketing that we get in the RHS of each action is the", "# correc...
Takes an action sequence and constructs a logical form from it. This is useful if you want to get a logical form from a decoded sequence of actions generated by a transition based semantic parser. Parameters ---------- action_sequence : ``List[str]`` The sequence of actions as strings (eg.: ``['{START_SYMBOL} -> t', 't -> <e,t>', ...]``). add_var_function : ``bool`` (optional) ``var`` is a special function that some languages use within lambda functions to indicate the use of a variable (eg.: ``(lambda x (fb:row.row.year (var x)))``). Due to the way constrained decoding is currently implemented, it is easier for the decoder to not produce these functions. In that case, setting this flag adds the function in the logical form even though it is not present in the action sequence.
[ "Takes", "an", "action", "sequence", "and", "constructs", "a", "logical", "form", "from", "it", ".", "This", "is", "useful", "if", "you", "want", "to", "get", "a", "logical", "form", "from", "a", "decoded", "sequence", "of", "actions", "generated", "by", ...
python
train
galactics/beyond
beyond/dates/date.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/dates/date.py#L367-L377
def change_scale(self, new_scale):
    """Create a copy of this date expressed in another time scale.

    Args:
        new_scale (str)
    Return:
        Date
    """
    # Offset, in seconds, between the current scale and the target one.
    seconds = self.scale.offset(self._mjd, new_scale, self.eop)
    shifted = self.datetime + timedelta(seconds=seconds)
    return self.__class__(shifted, scale=new_scale)
[ "def", "change_scale", "(", "self", ",", "new_scale", ")", ":", "offset", "=", "self", ".", "scale", ".", "offset", "(", "self", ".", "_mjd", ",", "new_scale", ",", "self", ".", "eop", ")", "result", "=", "self", ".", "datetime", "+", "timedelta", "(...
Args: new_scale (str) Return: Date
[ "Args", ":", "new_scale", "(", "str", ")", "Return", ":", "Date" ]
python
train
tensorforce/tensorforce
docs/m2r.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/docs/m2r.py#L226-L238
def list(self, body, ordered=True):
    """Rendering list tags like ``<ul>`` and ``<ol>``.

    :param body: body contents of the list.
    :param ordered: whether this list is ordered or not.
    """
    mark = '#. ' if ordered else '* '
    indent = ' ' * len(mark)
    out_lines = []
    for line in body.splitlines():
        if line and not line.startswith(self.list_marker):
            # Continuation lines are indented to align under the marker.
            out_lines.append(indent + line)
        else:
            out_lines.append(line)
    return '\n{}\n'.format('\n'.join(out_lines)).replace(self.list_marker, mark)
[ "def", "list", "(", "self", ",", "body", ",", "ordered", "=", "True", ")", ":", "mark", "=", "'#. '", "if", "ordered", "else", "'* '", "lines", "=", "body", ".", "splitlines", "(", ")", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")",...
Rendering list tags like ``<ul>`` and ``<ol>``. :param body: body contents of the list. :param ordered: whether this list is ordered or not.
[ "Rendering", "list", "tags", "like", "<ul", ">", "and", "<ol", ">", "." ]
python
valid
ambitioninc/rabbitmq-admin
rabbitmq_admin/base.py
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L125-L133
def _delete(self, *args, **kwargs):
    """
    A wrapper for deleting things

    :returns: The response of your delete
    :rtype: requests.Response
    :raises requests.HTTPError: if the response status indicates an error
    """
    response = requests.delete(*args, **kwargs)
    response.raise_for_status()
    # Bug fix: the docstring promised the response, but nothing was
    # returned.  Returning it is backward compatible with callers that
    # ignore the result.
    return response
[ "def", "_delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "requests", ".", "delete", "(", "*", "args", ",", "*", "*", "kwargs", ")", "response", ".", "raise_for_status", "(", ")" ]
A wrapper for deleting things :returns: The response of your delete :rtype: dict
[ "A", "wrapper", "for", "deleting", "things" ]
python
train
edx/auth-backends
auth_backends/backends.py
https://github.com/edx/auth-backends/blob/493f93e9d87d0237f0fea6d75c7b70646ad6d31e/auth_backends/backends.py#L66-L77
def _map_user_details(self, response): """Maps key/values from the response to key/values in the user model. Does not transfer any key/value that is empty or not present in the response. """ dest = {} for source_key, dest_key in self.CLAIMS_TO_DETAILS_KEY_MAP.items(): value = response.get(source_key) if value is not None: dest[dest_key] = value return dest
[ "def", "_map_user_details", "(", "self", ",", "response", ")", ":", "dest", "=", "{", "}", "for", "source_key", ",", "dest_key", "in", "self", ".", "CLAIMS_TO_DETAILS_KEY_MAP", ".", "items", "(", ")", ":", "value", "=", "response", ".", "get", "(", "sour...
Maps key/values from the response to key/values in the user model. Does not transfer any key/value that is empty or not present in the response.
[ "Maps", "key", "/", "values", "from", "the", "response", "to", "key", "/", "values", "in", "the", "user", "model", "." ]
python
train
deep-compute/deeputil
deeputil/streamcounter.py
https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/streamcounter.py#L47-L105
def add(self, item, count=1):
    '''
    When we receive stream of data, we add them in the
    chunk which has limit on the no. of items that it will store.

    >>> s = StreamCounter(5,5)
    >>> data_stream = ['a','b','c','d']
    >>> for item in data_stream:
    ...     s.add(item)
    >>> s.chunk_size
    5
    >>> s.n_items_seen
    4
    >>> s.n_chunk_items_seen
    4
    >>> s.n_chunks
    0
    >>> from pprint import pprint
    >>> pprint(s.chunked_counts.get(s.n_chunks, {}))
    {'a': 1, 'b': 1, 'c': 1, 'd': 1}
    >>> s.counts_total
    4
    >>> data_stream = ['a','b','c','d','e','f','g','e']
    >>> for item in data_stream:
    ...     s.add(item)
    >>> s.chunk_size
    5
    >>> s.n_items_seen
    12
    >>> s.n_chunk_items_seen
    2
    >>> s.n_chunks
    2
    >>> s.chunked_counts.get(s.n_chunks, {})
    {'g': 1, 'e': 1}
    '''
    self.n_items_seen += count
    self.n_chunk_items_seen += count

    # Fetch (or lazily create) the counter dict for the current chunk.
    current_chunk_id = self.n_chunks
    chunk = self.chunked_counts.setdefault(current_chunk_id, {})

    # Update the item's count in the current chunk's counter dict;
    # a brand-new item also grows the global count-entry tally.
    try:
        chunk[item] += count
    except KeyError:
        self.n_counts += 1
        chunk[item] = count

    # Close the current chunk once it has seen enough items.
    if self.n_chunk_items_seen >= self.chunk_size:
        self.n_chunks += 1
        self.n_chunk_items_seen = 0

    # In case we reached max capacity in count entries,
    # drop oldest chunks until we come back within limit.
    while self.n_counts >= self.max_counts:
        self._drop_oldest_chunk()
[ "def", "add", "(", "self", ",", "item", ",", "count", "=", "1", ")", ":", "self", ".", "n_items_seen", "+=", "count", "self", ".", "n_chunk_items_seen", "+=", "count", "# get current chunk", "chunk_id", "=", "self", ".", "n_chunks", "chunk", "=", "self", ...
When we receive stream of data, we add them in the chunk which has limit on the no. of items that it will store. >>> s = StreamCounter(5,5) >>> data_stream = ['a','b','c','d'] >>> for item in data_stream: ... s.add(item) >>> s.chunk_size 5 >>> s.n_items_seen 4 >>> s.n_chunk_items_seen 4 >>> s.n_chunks 0 >>> from pprint import pprint >>> pprint(s.chunked_counts.get(s.n_chunks, {})) {'a': 1, 'b': 1, 'c': 1, 'd': 1} >>> s.counts_total 4 >>> data_stream = ['a','b','c','d','e','f','g','e'] >>> for item in data_stream: ... s.add(item) >>> s.chunk_size 5 >>> s.n_items_seen 12 >>> s.n_chunk_items_seen 2 >>> s.n_chunks 2 >>> s.chunked_counts.get(s.n_chunks, {}) {'g': 1, 'e': 1}
[ "When", "we", "receive", "stream", "of", "data", "we", "add", "them", "in", "the", "chunk", "which", "has", "limit", "on", "the", "no", ".", "of", "items", "that", "it", "will", "store", ".", ">>>", "s", "=", "StreamCounter", "(", "5", "5", ")", ">...
python
train
rytilahti/python-songpal
songpal/device.py
https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/device.py#L259-L262
async def get_inputs(self) -> List[Input]:
    """Return list of available inputs.

    Queries the avContent service for all external terminals and filters
    out terminals whose ``meta`` marks them as zone outputs.  (The
    previous docstring incorrectly said "outputs".)
    """
    res = await self.services["avContent"]["getCurrentExternalTerminalsStatus"]()
    return [Input.make(services=self.services, **x)
            for x in res
            if 'meta:zone:output' not in x['meta']]
[ "async", "def", "get_inputs", "(", "self", ")", "->", "List", "[", "Input", "]", ":", "res", "=", "await", "self", ".", "services", "[", "\"avContent\"", "]", "[", "\"getCurrentExternalTerminalsStatus\"", "]", "(", ")", "return", "[", "Input", ".", "make",...
Return list of available outputs.
[ "Return", "list", "of", "available", "outputs", "." ]
python
train
DataONEorg/d1_python
lib_common/src/d1_common/checksum.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/checksum.py#L43-L62
def create_checksum_object_from_stream(
    f, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
    """Calculate the checksum of a stream.

    Args:
      f: file-like object
        Only requirement is a ``read()`` method that returns ``bytes``.

      algorithm: str
        Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.

    Returns:
      Populated Checksum PyXB object.
    """
    # Hash the stream first, then wrap the hex digest in a PyXB object.
    digest = calculate_checksum_on_stream(f, algorithm)
    checksum_obj = d1_common.types.dataoneTypes.checksum(digest)
    checksum_obj.algorithm = algorithm
    return checksum_obj
[ "def", "create_checksum_object_from_stream", "(", "f", ",", "algorithm", "=", "d1_common", ".", "const", ".", "DEFAULT_CHECKSUM_ALGORITHM", ")", ":", "checksum_str", "=", "calculate_checksum_on_stream", "(", "f", ",", "algorithm", ")", "checksum_pyxb", "=", "d1_common...
Calculate the checksum of a stream. Args: f: file-like object Only requirement is a ``read()`` method that returns ``bytes``. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated Checksum PyXB object.
[ "Calculate", "the", "checksum", "of", "a", "stream", "." ]
python
train
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/main.py
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/main.py#L17-L35
def load_mayaplugins():
    """Loads the maya plugins (not jukebox plugins) of the pipeline

    :returns: None
    :rtype: None
    :raises: None
    """
    mpp = os.environ.get('MAYA_PLUG_IN_PATH')
    if mpp is not None:
        # Bug fix: the joined string was previously discarded, so the
        # user's pre-existing plugin path was lost.  Keep the result so
        # the pipeline path is appended to the existing value.
        mpp = ';'.join([mpp, MAYA_PLUGIN_PATH])
    else:
        mpp = MAYA_PLUGIN_PATH
    # to simply load all plugins inside our plugin path, we override
    # pluginpath temporarly
    os.environ['MAYA_PLUG_IN_PATH'] = MAYA_PLUGIN_PATH
    cmds.loadPlugin(allPlugins=True)
    # then we set the MAYA_PLUG_IN_PATH to the combined value
    # NOTE: this ignores the order of paths in MAYA_PLUG_IN_PATH completely
    os.environ['MAYA_PLUG_IN_PATH'] = mpp
[ "def", "load_mayaplugins", "(", ")", ":", "mpp", "=", "os", ".", "environ", ".", "get", "(", "'MAYA_PLUG_IN_PATH'", ")", "if", "mpp", "is", "not", "None", ":", "';'", ".", "join", "(", "[", "mpp", ",", "MAYA_PLUGIN_PATH", "]", ")", "else", ":", "mpp"...
Loads the maya plugins (not jukebox plugins) of the pipeline :returns: None :rtype: None :raises: None
[ "Loads", "the", "maya", "plugins", "(", "not", "jukebox", "plugins", ")", "of", "the", "pipeline" ]
python
train
log2timeline/plaso
plaso/parsers/bsm.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/bsm.py#L218-L231
def _FormatArgToken(self, token_data): """Formats an argument token as a dictionary of values. Args: token_data (bsm_token_data_arg32|bsm_token_data_arg64): AUT_ARG32 or AUT_ARG64 token data. Returns: dict[str, str]: token values. """ return { 'string': token_data.argument_value.rstrip('\x00'), 'num_arg': token_data.argument_index, 'is': token_data.argument_name}
[ "def", "_FormatArgToken", "(", "self", ",", "token_data", ")", ":", "return", "{", "'string'", ":", "token_data", ".", "argument_value", ".", "rstrip", "(", "'\\x00'", ")", ",", "'num_arg'", ":", "token_data", ".", "argument_index", ",", "'is'", ":", "token_...
Formats an argument token as a dictionary of values. Args: token_data (bsm_token_data_arg32|bsm_token_data_arg64): AUT_ARG32 or AUT_ARG64 token data. Returns: dict[str, str]: token values.
[ "Formats", "an", "argument", "token", "as", "a", "dictionary", "of", "values", "." ]
python
train
pantsbuild/pants
src/python/pants/pantsd/pants_daemon.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/pants_daemon.py#L229-L250
def compute_invalidation_globs(bootstrap_options):
    """
    Combine --pythonpath and --pants_config_files(pants.ini) files that are
    in {buildroot} dir with those invalidation_globs provided by users

    :param bootstrap_options:
    :return: A list of invalidation_globs
    """
    buildroot = get_buildroot()
    invalidation_globs = []
    candidate_globs = (
        bootstrap_options.pythonpath +
        bootstrap_options.pants_config_files +
        bootstrap_options.pantsd_invalidation_globs
    )
    for glob in candidate_globs:
        glob_relpath = os.path.relpath(glob, buildroot)
        if glob_relpath and not glob_relpath.startswith("../"):
            # Watch both the path itself and everything beneath it.
            invalidation_globs.append(glob_relpath)
            invalidation_globs.append(glob_relpath + '/**')
        else:
            logging.getLogger(__name__).warning("Changes to {}, outside of the buildroot"
                                                ", will not be invalidated.".format(glob))
    return invalidation_globs
[ "def", "compute_invalidation_globs", "(", "bootstrap_options", ")", ":", "buildroot", "=", "get_buildroot", "(", ")", "invalidation_globs", "=", "[", "]", "globs", "=", "bootstrap_options", ".", "pythonpath", "+", "bootstrap_options", ".", "pants_config_files", "+", ...
Combine --pythonpath and --pants_config_files(pants.ini) files that are in {buildroot} dir with those invalidation_globs provided by users :param bootstrap_options: :return: A list of invalidation_globs
[ "Combine", "--", "pythonpath", "and", "--", "pants_config_files", "(", "pants", ".", "ini", ")", "files", "that", "are", "in", "{", "buildroot", "}", "dir", "with", "those", "invalidation_globs", "provided", "by", "users", ":", "param", "bootstrap_options", ":...
python
train
inasafe/inasafe
extras/data_audit.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/extras/data_audit.py#L77-L198
def IP_verified(directory,
                extensions_to_ignore=None,
                directories_to_ignore=None,
                files_to_ignore=None,
                verbose=False):
    """Find and audit potential data files that might violate IP

    This is the public function to be used to ascertain that
    all data in the specified directory tree has been audited according
    to the GA data IP tracking process.

    if IP_verified is False:
        # Stop and take remedial action
        ...
    else:
        # Proceed boldly with confidence

    verbose controls standard output.
    If verbose is False, only diagnostics about failed audits will appear.
    All files that check OK will pass silently.

    Optional arguments extensions_to_ignore, directories_to_ignore, and
    files_to_ignore are lists of things to skip.

    Examples are:
    extensions_to_ignore = ['.py','.c','.h', '.f'] # Ignore source code
    files_to_ignore = ['README.txt']
    directories_to_ignore = ['.svn', 'misc']

    None is also OK for these parameters.
    """
    # NOTE(review): this is Python 2 code (print statements,
    # `except Exc, e` syntax); it will not run under Python 3.

    # Identify data files
    oldpath = None
    all_files = 0
    ok_files = 0
    all_files_accounted_for = True
    for dirpath, filename in identify_datafiles(directory,
                                                extensions_to_ignore,
                                                directories_to_ignore,
                                                files_to_ignore):
        if oldpath != dirpath:
            # Decide if dir header needs to be printed
            oldpath = dirpath
            first_time_this_dir = True

        all_files += 1
        basename, ext = splitext(filename)
        # Each data file is expected to have a sibling XML license file
        # with the same basename and a .lic extension.
        license_filename = join(dirpath, basename + '.lic')

        # Look for a XML license file with the .lic
        status = 'OK'
        try:
            fid = open(license_filename)
        except IOError:
            status = 'NO LICENSE FILE'
            all_files_accounted_for = False
        else:
            fid.close()
            try:
                license_file_is_valid(license_filename,
                                      filename,
                                      dirpath,
                                      verbose=False)
            except audit_exceptions, e:
                all_files_accounted_for = False
                status = 'LICENSE FILE NOT VALID\n'
                status += 'REASON: %s\n' % e
                # Try to include the license file's content in the
                # diagnostic if it cannot even be parsed as XML.
                try:
                    doc = xml2object(license_filename)
                except:
                    status += 'XML file %s could not be read:' \
                              % license_filename
                    fid = open(license_filename)
                    status += fid.read()
                    fid.close()
                else:
                    pass
                    # if verbose is True:
                    #     status += str(doc)

        if status == 'OK':
            ok_files += 1
        else:
            # Only print status if there is a problem (no news is good news)
            if first_time_this_dir is True:
                print
                msg = ('Files without licensing info in dir: %s' % dirpath)
                print '.' * len(msg)
                print msg
                print '.' * len(msg)
                first_time_this_dir = False

            print filename + ' (Checksum = %s): ' \
                  % str(compute_checksum(join(dirpath, filename))), \
                  status

    if verbose is True:
        print
        print '---------------------'
        print 'Audit result for dir: %s:' % directory
        print '---------------------'
        print 'Number of files audited:  %d' % (all_files)
        print 'Number of files verified: %d' % (ok_files)
        print

    # Return result
    return all_files_accounted_for
[ "def", "IP_verified", "(", "directory", ",", "extensions_to_ignore", "=", "None", ",", "directories_to_ignore", "=", "None", ",", "files_to_ignore", "=", "None", ",", "verbose", "=", "False", ")", ":", "# Identify data files", "oldpath", "=", "None", "all_files", ...
Find and audit potential data files that might violate IP This is the public function to be used to ascertain that all data in the specified directory tree has been audited according to the GA data IP tracking process. if IP_verified is False: # Stop and take remedial action ... else: # Proceed boldly with confidence verbose controls standard output. If verbose is False, only diagnostics about failed audits will appear. All files that check OK will pass silently. Optional arguments extensions_to_ignore, directories_to_ignore, and files_to_ignore are lists of things to skip. Examples are: extensions_to_ignore = ['.py','.c','.h', '.f'] # Ignore source code files_to_ignore = ['README.txt'] directories_to_ignore = ['.svn', 'misc'] None is also OK for these parameters.
[ "Find", "and", "audit", "potential", "data", "files", "that", "might", "violate", "IP" ]
python
train
boriel/zxbasic
zxblex.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxblex.py#L448-L453
def t_preproc_ID(t):
    r'[_A-Za-z]+'
    # NOTE: the docstring above IS the PLY token regex -- do not change it.
    t.value = t.value.strip()
    # Preprocessor keywords get their own token type; anything else is a
    # plain identifier.
    t.type = preprocessor.get(t.value.lower(), 'ID')
    return t
[ "def", "t_preproc_ID", "(", "t", ")", ":", "t", ".", "value", "=", "t", ".", "value", ".", "strip", "(", ")", "t", ".", "type", "=", "preprocessor", ".", "get", "(", "t", ".", "value", ".", "lower", "(", ")", ",", "'ID'", ")", "return", "t" ]
r'[_A-Za-z]+
[ "r", "[", "_A", "-", "Za", "-", "z", "]", "+" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4506-L4520
def get_stp_mst_detail_output_msti_msti_bridge_id(self, **kwargs):
    """Auto Generated Code
    """
    # The request root element doubles as the config payload.
    root = ET.Element("get_stp_mst_detail")
    config = root
    msti = ET.SubElement(ET.SubElement(root, "output"), "msti")
    ET.SubElement(msti, "instance-id").text = kwargs.pop('instance_id')
    ET.SubElement(msti, "msti-bridge-id").text = kwargs.pop('msti_bridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_stp_mst_detail_output_msti_msti_bridge_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
i3visio/osrframework
osrframework/thirdparties/pipl_com/lib/search.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/search.py#L532-L540
def group_records_by_domain(self):
    """Return the records grouped by the domain they came from.

    The return value is a dict, a key in this dict is a domain and the
    value is a list of all the records with this domain.
    """
    def key_function(record):
        return record.source.domain
    return self.group_records(key_function)
[ "def", "group_records_by_domain", "(", "self", ")", ":", "key_function", "=", "lambda", "record", ":", "record", ".", "source", ".", "domain", "return", "self", ".", "group_records", "(", "key_function", ")" ]
Return the records grouped by the domain they came from. The return value is a dict, a key in this dict is a domain and the value is a list of all the records with this domain.
[ "Return", "the", "records", "grouped", "by", "the", "domain", "they", "came", "from", ".", "The", "return", "value", "is", "a", "dict", "a", "key", "in", "this", "dict", "is", "a", "domain", "and", "the", "value", "is", "a", "list", "of", "all", "the...
python
train
MaxStrange/AudioSegment
algorithms/asa.py
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L998-L1020
def _map_segmentation_mask_to_stft_domain(mask, times, frequencies, stft_times, stft_frequencies): """ Maps the given `mask`, which is in domain (`frequencies`, `times`) to the new domain (`stft_frequencies`, `stft_times`) and returns the result. """ assert mask.shape == (frequencies.shape[0], times.shape[0]), "Times is shape {} and frequencies is shape {}, but mask is shaped {}".format( times.shape, frequencies.shape, mask.shape ) result = np.zeros((stft_frequencies.shape[0], stft_times.shape[0])) if len(stft_times) > len(times): all_j = [j for j in range(len(stft_times))] idxs = [int(i) for i in np.linspace(0, len(times) - 1, num=len(stft_times))] all_i = [all_j[idx] for idx in idxs] else: all_i = [i for i in range(len(times))] idxs = [int(i) for i in np.linspace(0, len(stft_times) - 1, num=len(times))] all_j = [all_i[idx] for idx in idxs] for i, j in zip(all_i, all_j): result[:, j] = np.interp(stft_frequencies, frequencies, mask[:, i]) return result
[ "def", "_map_segmentation_mask_to_stft_domain", "(", "mask", ",", "times", ",", "frequencies", ",", "stft_times", ",", "stft_frequencies", ")", ":", "assert", "mask", ".", "shape", "==", "(", "frequencies", ".", "shape", "[", "0", "]", ",", "times", ".", "sh...
Maps the given `mask`, which is in domain (`frequencies`, `times`) to the new domain (`stft_frequencies`, `stft_times`) and returns the result.
[ "Maps", "the", "given", "mask", "which", "is", "in", "domain", "(", "frequencies", "times", ")", "to", "the", "new", "domain", "(", "stft_frequencies", "stft_times", ")", "and", "returns", "the", "result", "." ]
python
test
KE-works/pykechain
pykechain/models/customization.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/customization.py#L96-L126
def _save_customization(self, widgets): """ Save the complete customization to the activity. :param widgets: The complete set of widgets to be customized """ if len(widgets) > 0: # Get the current customization and only replace the 'ext' part of it customization = self.activity._json_data.get('customization', dict()) if customization: customization['ext'] = dict(widgets=widgets) else: customization = dict(ext=dict(widgets=widgets)) # Empty the customization if if the widgets list is empty else: customization = None # perform validation if customization: validate(customization, widgetconfig_json_schema) # Save to the activity and store the saved activity to self response = self._client._request("PUT", self._client._build_url("activity", activity_id=str(self.activity.id)), json=dict(customization=customization)) if response.status_code != requests.codes.ok: # pragma: no cover raise APIError("Could not save customization ({})".format(response)) else: # refresh the activity json self.activity = self._client.activity(pk=self.activity.id)
[ "def", "_save_customization", "(", "self", ",", "widgets", ")", ":", "if", "len", "(", "widgets", ")", ">", "0", ":", "# Get the current customization and only replace the 'ext' part of it", "customization", "=", "self", ".", "activity", ".", "_json_data", ".", "get...
Save the complete customization to the activity. :param widgets: The complete set of widgets to be customized
[ "Save", "the", "complete", "customization", "to", "the", "activity", "." ]
python
train
hanguokai/youku
youku/youku_playlists.py
https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_playlists.py#L21-L31
def find_playlist_by_id(self, playlist_id): """doc: http://open.youku.com/docs/doc?id=66 """ url = 'https://openapi.youku.com/v2/playlists/show.json' params = { 'client_id': self.client_id, 'playlist_id': playlist_id } r = requests.get(url, params=params) check_error(r) return r.json()
[ "def", "find_playlist_by_id", "(", "self", ",", "playlist_id", ")", ":", "url", "=", "'https://openapi.youku.com/v2/playlists/show.json'", "params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'playlist_id'", ":", "playlist_id", "}", "r", "=", "r...
doc: http://open.youku.com/docs/doc?id=66
[ "doc", ":", "http", ":", "//", "open", ".", "youku", ".", "com", "/", "docs", "/", "doc?id", "=", "66" ]
python
train
google/grumpy
third_party/stdlib/optparse.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L1024-L1052
def add_option(self, *args, **kwargs): """add_option(Option) add_option(opt_str, ..., kwarg=val, ...) """ if type(args[0]) in types.StringTypes: option = self.option_class(*args, **kwargs) elif len(args) == 1 and not kwargs: option = args[0] if not isinstance(option, Option): raise TypeError, "not an Option instance: %r" % option else: raise TypeError, "invalid arguments" self._check_conflict(option) self.option_list.append(option) option.container = self for opt in option._short_opts: self._short_opt[opt] = option for opt in option._long_opts: self._long_opt[opt] = option if option.dest is not None: # option has a dest, we need a default if option.default is not NO_DEFAULT: self.defaults[option.dest] = option.default elif option.dest not in self.defaults: self.defaults[option.dest] = None return option
[ "def", "add_option", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "type", "(", "args", "[", "0", "]", ")", "in", "types", ".", "StringTypes", ":", "option", "=", "self", ".", "option_class", "(", "*", "args", ",", "*", ...
add_option(Option) add_option(opt_str, ..., kwarg=val, ...)
[ "add_option", "(", "Option", ")", "add_option", "(", "opt_str", "...", "kwarg", "=", "val", "...", ")" ]
python
valid
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L1241-L1250
def p_static_scalar_namespace_name(p): '''static_scalar : namespace_name | NS_SEPARATOR namespace_name | NAMESPACE NS_SEPARATOR namespace_name''' if len(p) == 2: p[0] = ast.Constant(p[1], lineno=p.lineno(1)) elif len(p) == 3: p[0] = ast.Constant(p[1] + p[2], lineno=p.lineno(1)) else: p[0] = ast.Constant(p[1] + p[2] + p[3], lineno=p.lineno(1))
[ "def", "p_static_scalar_namespace_name", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "ast", ".", "Constant", "(", "p", "[", "1", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "el...
static_scalar : namespace_name | NS_SEPARATOR namespace_name | NAMESPACE NS_SEPARATOR namespace_name
[ "static_scalar", ":", "namespace_name", "|", "NS_SEPARATOR", "namespace_name", "|", "NAMESPACE", "NS_SEPARATOR", "namespace_name" ]
python
train
LionelR/pyair
pyair/stats.py
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/stats.py#L140-L143
def gmv(a, b): """Geometric mean variance """ return np.exp(np.square(np.log(a) - np.log(b)).mean())
[ "def", "gmv", "(", "a", ",", "b", ")", ":", "return", "np", ".", "exp", "(", "np", ".", "square", "(", "np", ".", "log", "(", "a", ")", "-", "np", ".", "log", "(", "b", ")", ")", ".", "mean", "(", ")", ")" ]
Geometric mean variance
[ "Geometric", "mean", "variance" ]
python
valid
ejeschke/ginga
ginga/rv/plugins/PlotTable.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/PlotTable.py#L237-L274
def plot_two_columns(self, reset_xlimits=False, reset_ylimits=False): """Simple line plot for two selected columns.""" self.clear_plot() if self.tab is None: # No table data to plot return plt_kw = { 'lw': self.settings.get('linewidth', 1), 'ls': self.settings.get('linestyle', '-'), 'color': self.settings.get('linecolor', 'blue'), 'ms': self.settings.get('markersize', 6), 'mew': self.settings.get('markerwidth', 0.5), 'mfc': self.settings.get('markercolor', 'red')} plt_kw['mec'] = plt_kw['mfc'] try: x_data, y_data, marker = self._get_plot_data() self.tab_plot.plot( x_data, y_data, xtitle=self._get_label('x'), ytitle=self._get_label('y'), marker=marker, **plt_kw) if reset_xlimits: self.set_ylim_cb() self.set_xlimits_widgets() if reset_ylimits: self.set_xlim_cb() self.set_ylimits_widgets() if not (reset_xlimits or reset_ylimits): self.set_xlim_cb(redraw=False) self.set_ylim_cb() except Exception as e: self.logger.error(str(e)) else: self.save_plot.set_enabled(True)
[ "def", "plot_two_columns", "(", "self", ",", "reset_xlimits", "=", "False", ",", "reset_ylimits", "=", "False", ")", ":", "self", ".", "clear_plot", "(", ")", "if", "self", ".", "tab", "is", "None", ":", "# No table data to plot", "return", "plt_kw", "=", ...
Simple line plot for two selected columns.
[ "Simple", "line", "plot", "for", "two", "selected", "columns", "." ]
python
train
apache/incubator-mxnet
python/mxnet/contrib/text/vocab.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/text/vocab.py#L162-L186
def to_indices(self, tokens): """Converts tokens to indices according to the vocabulary. Parameters ---------- tokens : str or list of strs A source token or tokens to be converted. Returns ------- int or list of ints A token index or a list of token indices according to the vocabulary. """ to_reduce = False if not isinstance(tokens, list): tokens = [tokens] to_reduce = True indices = [self.token_to_idx[token] if token in self.token_to_idx else C.UNKNOWN_IDX for token in tokens] return indices[0] if to_reduce else indices
[ "def", "to_indices", "(", "self", ",", "tokens", ")", ":", "to_reduce", "=", "False", "if", "not", "isinstance", "(", "tokens", ",", "list", ")", ":", "tokens", "=", "[", "tokens", "]", "to_reduce", "=", "True", "indices", "=", "[", "self", ".", "tok...
Converts tokens to indices according to the vocabulary. Parameters ---------- tokens : str or list of strs A source token or tokens to be converted. Returns ------- int or list of ints A token index or a list of token indices according to the vocabulary.
[ "Converts", "tokens", "to", "indices", "according", "to", "the", "vocabulary", "." ]
python
train
h2oai/h2o-3
h2o-py/h2o/group_by.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/group_by.py#L222-L252
def get_frame(self): """ Return the resulting H2OFrame containing the result(s) of aggregation(s) of the group by. The number of rows denote the number of groups generated by the group by operation. The number of columns depend on the number of aggregations performed, the number of columns specified in the col parameter. Generally, expect the number of columns to be (len(col) of aggregation 0 + len(col) of aggregation 1 +...+ len(col) of aggregation n) x (number of groups of the GroupBy object) +1 (for group-by group names). Note: - the count aggregation only generates one column; - if col is a str or int, len(col) = 1. """ if self._res is None: aggs = [] cols_operated = [] for k in self._aggs: aggs += (self._aggs[k]) col_used = self._aggs[k][1] if col_used not in cols_operated: cols_operated.append(col_used) for cind in cols_operated: if cind not in self._by: self._check_string_columns(cind) self._res = h2o.H2OFrame._expr(expr=ExprNode("GB", self._fr, self._by, *aggs)) return self._res
[ "def", "get_frame", "(", "self", ")", ":", "if", "self", ".", "_res", "is", "None", ":", "aggs", "=", "[", "]", "cols_operated", "=", "[", "]", "for", "k", "in", "self", ".", "_aggs", ":", "aggs", "+=", "(", "self", ".", "_aggs", "[", "k", "]",...
Return the resulting H2OFrame containing the result(s) of aggregation(s) of the group by. The number of rows denote the number of groups generated by the group by operation. The number of columns depend on the number of aggregations performed, the number of columns specified in the col parameter. Generally, expect the number of columns to be (len(col) of aggregation 0 + len(col) of aggregation 1 +...+ len(col) of aggregation n) x (number of groups of the GroupBy object) +1 (for group-by group names). Note: - the count aggregation only generates one column; - if col is a str or int, len(col) = 1.
[ "Return", "the", "resulting", "H2OFrame", "containing", "the", "result", "(", "s", ")", "of", "aggregation", "(", "s", ")", "of", "the", "group", "by", "." ]
python
test
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L3939-L3972
def flatten_dict(i): """ Any list item is converted to @number=value Any dict item is converted to #key=value # is always added at the beginning Input: { dict - python dictionary (prefix) - prefix (for recursion) (prune_keys) - list of keys to prune (can have wildcards) } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 dict - flattened dictionary } """ prefix='#' if i.get('prefix','')!='': prefix=str(i['prefix']) a=i['dict'] aa={} pk=i.get('prune_keys','') if pk=='': pk=[] flatten_dict_internal(a, aa, prefix, pk) return {'return':0, 'dict': aa}
[ "def", "flatten_dict", "(", "i", ")", ":", "prefix", "=", "'#'", "if", "i", ".", "get", "(", "'prefix'", ",", "''", ")", "!=", "''", ":", "prefix", "=", "str", "(", "i", "[", "'prefix'", "]", ")", "a", "=", "i", "[", "'dict'", "]", "aa", "=",...
Any list item is converted to @number=value Any dict item is converted to #key=value # is always added at the beginning Input: { dict - python dictionary (prefix) - prefix (for recursion) (prune_keys) - list of keys to prune (can have wildcards) } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 dict - flattened dictionary }
[ "Any", "list", "item", "is", "converted", "to", "@number", "=", "value", "Any", "dict", "item", "is", "converted", "to", "#key", "=", "value", "#", "is", "always", "added", "at", "the", "beginning" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_acm.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_acm.py#L280-L295
def nacm_rule_list_rule_context(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm") rule_list = ET.SubElement(nacm, "rule-list") name_key = ET.SubElement(rule_list, "name") name_key.text = kwargs.pop('name') rule = ET.SubElement(rule_list, "rule") name_key = ET.SubElement(rule, "name") name_key.text = kwargs.pop('name') context = ET.SubElement(rule, "context", xmlns="http://tail-f.com/yang/acm") context.text = kwargs.pop('context') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "nacm_rule_list_rule_context", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "nacm", "=", "ET", ".", "SubElement", "(", "config", ",", "\"nacm\"", ",", "xmlns", "=", "\"urn:ietf:params:x...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L3541-L3557
def dvhat(s1): """ Find the unit vector corresponding to a state vector and the derivative of the unit vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvhat_c.html :param s1: State to be normalized. :type s1: 6-Element Array of floats :return: Unit vector s1 / abs(s1), and its time derivative. :rtype: 6-Element Array of floats """ assert len(s1) is 6 s1 = stypes.toDoubleVector(s1) sout = stypes.emptyDoubleVector(6) libspice.dvhat_c(s1, sout) return stypes.cVectorToPython(sout)
[ "def", "dvhat", "(", "s1", ")", ":", "assert", "len", "(", "s1", ")", "is", "6", "s1", "=", "stypes", ".", "toDoubleVector", "(", "s1", ")", "sout", "=", "stypes", ".", "emptyDoubleVector", "(", "6", ")", "libspice", ".", "dvhat_c", "(", "s1", ",",...
Find the unit vector corresponding to a state vector and the derivative of the unit vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvhat_c.html :param s1: State to be normalized. :type s1: 6-Element Array of floats :return: Unit vector s1 / abs(s1), and its time derivative. :rtype: 6-Element Array of floats
[ "Find", "the", "unit", "vector", "corresponding", "to", "a", "state", "vector", "and", "the", "derivative", "of", "the", "unit", "vector", "." ]
python
train
allenai/allennlp
allennlp/common/configuration.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/configuration.py#L34-L62
def full_name(cla55: Optional[type]) -> str: """ Return the full name (including module) of the given class. """ # Special case to handle None: if cla55 is None: return "?" if issubclass(cla55, Initializer) and cla55 not in [Initializer, PretrainedModelInitializer]: init_fn = cla55()._init_function return f"{init_fn.__module__}.{init_fn.__name__}" origin = getattr(cla55, '__origin__', None) args = getattr(cla55, '__args__', ()) # Special handling for compound types if origin in (Dict, dict): key_type, value_type = args return f"""Dict[{full_name(key_type)}, {full_name(value_type)}]""" elif origin in (Tuple, tuple, List, list, Sequence, collections.abc.Sequence): return f"""{_remove_prefix(str(origin))}[{", ".join(full_name(arg) for arg in args)}]""" elif origin == Union: # Special special case to handle optional types: if len(args) == 2 and args[-1] == type(None): return f"""Optional[{full_name(args[0])}]""" else: return f"""Union[{", ".join(full_name(arg) for arg in args)}]""" else: return _remove_prefix(f"{cla55.__module__}.{cla55.__name__}")
[ "def", "full_name", "(", "cla55", ":", "Optional", "[", "type", "]", ")", "->", "str", ":", "# Special case to handle None:", "if", "cla55", "is", "None", ":", "return", "\"?\"", "if", "issubclass", "(", "cla55", ",", "Initializer", ")", "and", "cla55", "n...
Return the full name (including module) of the given class.
[ "Return", "the", "full", "name", "(", "including", "module", ")", "of", "the", "given", "class", "." ]
python
train
inveniosoftware/invenio-communities
invenio_communities/models.py
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L355-L365
def delete(self): """Mark the community for deletion. :param delete_time: DateTime after which to delete the community. :type delete_time: datetime.datetime :raises: CommunitiesError """ if self.deleted_at is not None: raise CommunitiesError(community=self) else: self.deleted_at = datetime.utcnow()
[ "def", "delete", "(", "self", ")", ":", "if", "self", ".", "deleted_at", "is", "not", "None", ":", "raise", "CommunitiesError", "(", "community", "=", "self", ")", "else", ":", "self", ".", "deleted_at", "=", "datetime", ".", "utcnow", "(", ")" ]
Mark the community for deletion. :param delete_time: DateTime after which to delete the community. :type delete_time: datetime.datetime :raises: CommunitiesError
[ "Mark", "the", "community", "for", "deletion", "." ]
python
train
majerteam/deform_extensions
deform_extensions/__init__.py
https://github.com/majerteam/deform_extensions/blob/fdad612e4889a40f1944611264b943866a3cb96e/deform_extensions/__init__.py#L51-L62
def grouper(iterable, items, fillvalue=None): """ Collect data into fixed-length chunks or blocks e.g: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx Got it from https://docs.python.org/2/library/itertools.html#recipes """ args = [iter(iterable)] * items return izip_longest(fillvalue=fillvalue, *args)
[ "def", "grouper", "(", "iterable", ",", "items", ",", "fillvalue", "=", "None", ")", ":", "args", "=", "[", "iter", "(", "iterable", ")", "]", "*", "items", "return", "izip_longest", "(", "fillvalue", "=", "fillvalue", ",", "*", "args", ")" ]
Collect data into fixed-length chunks or blocks e.g: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx Got it from https://docs.python.org/2/library/itertools.html#recipes
[ "Collect", "data", "into", "fixed", "-", "length", "chunks", "or", "blocks" ]
python
train
craffel/mir_eval
mir_eval/pattern.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L64-L79
def _n_onset_midi(patterns): """Computes the number of onset_midi objects in a pattern Parameters ---------- patterns : A list of patterns using the format returned by :func:`mir_eval.io.load_patterns()` Returns ------- n_onsets : int Number of onsets within the pattern. """ return len([o_m for pat in patterns for occ in pat for o_m in occ])
[ "def", "_n_onset_midi", "(", "patterns", ")", ":", "return", "len", "(", "[", "o_m", "for", "pat", "in", "patterns", "for", "occ", "in", "pat", "for", "o_m", "in", "occ", "]", ")" ]
Computes the number of onset_midi objects in a pattern Parameters ---------- patterns : A list of patterns using the format returned by :func:`mir_eval.io.load_patterns()` Returns ------- n_onsets : int Number of onsets within the pattern.
[ "Computes", "the", "number", "of", "onset_midi", "objects", "in", "a", "pattern" ]
python
train
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L4229-L4252
def capabilities(**kwargs): ''' Return the hypervisor connection capabilities. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.capabilities ''' conn = __get_conn(**kwargs) caps = ElementTree.fromstring(conn.getCapabilities()) conn.close() return { 'host': _parse_caps_host(caps.find('host')), 'guests': [_parse_caps_guest(guest) for guest in caps.findall('guest')] }
[ "def", "capabilities", "(", "*", "*", "kwargs", ")", ":", "conn", "=", "__get_conn", "(", "*", "*", "kwargs", ")", "caps", "=", "ElementTree", ".", "fromstring", "(", "conn", ".", "getCapabilities", "(", ")", ")", "conn", ".", "close", "(", ")", "ret...
Return the hypervisor connection capabilities. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.capabilities
[ "Return", "the", "hypervisor", "connection", "capabilities", "." ]
python
train
mattloper/chumpy
chumpy/ch.py
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/ch.py#L1088-L1110
def tree_iterator(self, visited=None, path=None): ''' Generator function that traverse the dr tree start from this node (self). ''' if visited is None: visited = set() if self not in visited: if path and isinstance(path, list): path.append(self) visited.add(self) yield self if not hasattr(self, 'dterms'): yield for dterm in self.dterms: if hasattr(self, dterm): child = getattr(self, dterm) if hasattr(child, 'dterms') or hasattr(child, 'terms'): for node in child.tree_iterator(visited): yield node
[ "def", "tree_iterator", "(", "self", ",", "visited", "=", "None", ",", "path", "=", "None", ")", ":", "if", "visited", "is", "None", ":", "visited", "=", "set", "(", ")", "if", "self", "not", "in", "visited", ":", "if", "path", "and", "isinstance", ...
Generator function that traverse the dr tree start from this node (self).
[ "Generator", "function", "that", "traverse", "the", "dr", "tree", "start", "from", "this", "node", "(", "self", ")", "." ]
python
train
apache/spark
python/pyspark/mllib/stat/KernelDensity.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/stat/KernelDensity.py#L48-L52
def setSample(self, sample): """Set sample points from the population. Should be a RDD""" if not isinstance(sample, RDD): raise TypeError("samples should be a RDD, received %s" % type(sample)) self._sample = sample
[ "def", "setSample", "(", "self", ",", "sample", ")", ":", "if", "not", "isinstance", "(", "sample", ",", "RDD", ")", ":", "raise", "TypeError", "(", "\"samples should be a RDD, received %s\"", "%", "type", "(", "sample", ")", ")", "self", ".", "_sample", "...
Set sample points from the population. Should be a RDD
[ "Set", "sample", "points", "from", "the", "population", ".", "Should", "be", "a", "RDD" ]
python
train
LogicalDash/LiSE
ELiDE/ELiDE/board/board.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/board/board.py#L267-L305
def on_touch_up(self, touch): """Delegate touch handling if possible, else select something.""" if hasattr(self, '_lasttouch') and self._lasttouch == touch: return self._lasttouch = touch touch.push() touch.apply_transform_2d(self.to_local) if hasattr(self, 'protodest'): Logger.debug("Board: on_touch_up making a portal") touch.ungrab(self) ret = self.portal_touch_up(touch) touch.pop() return ret if self.app.selection and hasattr(self.app.selection, 'on_touch_up'): self.app.selection.dispatch('on_touch_up', touch) for candidate in self.selection_candidates: if candidate == self.app.selection: continue if candidate.collide_point(*touch.pos): Logger.debug("Board: selecting " + repr(candidate)) if hasattr(candidate, 'selected'): candidate.selected = True if hasattr(self.app.selection, 'selected'): self.app.selection.selected = False self.app.selection = candidate self.keep_selection = True parent = candidate.parent parent.remove_widget(candidate) parent.add_widget(candidate) break if not self.keep_selection: Logger.debug("Board: deselecting " + repr(self.app.selection)) if hasattr(self.app.selection, 'selected'): self.app.selection.selected = False self.app.selection = None self.keep_selection = False touch.ungrab(self) touch.pop() return
[ "def", "on_touch_up", "(", "self", ",", "touch", ")", ":", "if", "hasattr", "(", "self", ",", "'_lasttouch'", ")", "and", "self", ".", "_lasttouch", "==", "touch", ":", "return", "self", ".", "_lasttouch", "=", "touch", "touch", ".", "push", "(", ")", ...
Delegate touch handling if possible, else select something.
[ "Delegate", "touch", "handling", "if", "possible", "else", "select", "something", "." ]
python
train
Kortemme-Lab/klab
klab/bio/bonsai.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/bonsai.py#L63-L65
def from_non_aligned_residue_IDs(Chain, StartResidueID, EndResidueID, Sequence = None): '''A more forgiving method that does not care about the padding of the residue IDs.''' return PDBSection(Chain, PDB.ResidueID2String(StartResidueID), PDB.ResidueID2String(EndResidueID), Sequence = Sequence)
[ "def", "from_non_aligned_residue_IDs", "(", "Chain", ",", "StartResidueID", ",", "EndResidueID", ",", "Sequence", "=", "None", ")", ":", "return", "PDBSection", "(", "Chain", ",", "PDB", ".", "ResidueID2String", "(", "StartResidueID", ")", ",", "PDB", ".", "Re...
A more forgiving method that does not care about the padding of the residue IDs.
[ "A", "more", "forgiving", "method", "that", "does", "not", "care", "about", "the", "padding", "of", "the", "residue", "IDs", "." ]
python
train
belbio/bel
bel/lang/bel_specification.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L125-L143
def get_bel_versions() -> List[str]: """Get BEL Language versions supported Get the list of all BEL Language versions supported. The file this depends on is generated by belspec_yaml2json and is kept up to date using `make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json` directly as it's added as a command by pip install. Returns: List[str]: list of versions """ spec_dir = config["bel"]["lang"]["specifications"] fn = f"{spec_dir}/versions.json" with open(fn, "r") as f: versions = json.load(f) return versions
[ "def", "get_bel_versions", "(", ")", "->", "List", "[", "str", "]", ":", "spec_dir", "=", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", "[", "\"specifications\"", "]", "fn", "=", "f\"{spec_dir}/versions.json\"", "with", "open", "(", "fn", ",", "\"r\...
Get BEL Language versions supported Get the list of all BEL Language versions supported. The file this depends on is generated by belspec_yaml2json and is kept up to date using `make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json` directly as it's added as a command by pip install. Returns: List[str]: list of versions
[ "Get", "BEL", "Language", "versions", "supported" ]
python
train
sgaynetdinov/py-vkontakte
vk/comment.py
https://github.com/sgaynetdinov/py-vkontakte/blob/c09654f89008b5847418bb66f1f9c408cd7aa128/vk/comment.py#L34-L39
def _get_comments_count(session, group_or_user_id, wall_id): """ https://vk.com/dev/wall.getComments """ response = session.fetch("wall.getComments", count=100, owner_id=group_or_user_id, post_id=wall_id) return response.get('count')
[ "def", "_get_comments_count", "(", "session", ",", "group_or_user_id", ",", "wall_id", ")", ":", "response", "=", "session", ".", "fetch", "(", "\"wall.getComments\"", ",", "count", "=", "100", ",", "owner_id", "=", "group_or_user_id", ",", "post_id", "=", "wa...
https://vk.com/dev/wall.getComments
[ "https", ":", "//", "vk", ".", "com", "/", "dev", "/", "wall", ".", "getComments" ]
python
train
uzumaxy/pyvalid
pyvalid/__accepts.py
https://github.com/uzumaxy/pyvalid/blob/74a1a64df1cc77cac55f12f0fe0f52292c6ae479/pyvalid/__accepts.py#L44-L51
def __wrap_accepted_val(self, value): """Wrap accepted value in the list if yet not wrapped. """ if isinstance(value, tuple): value = list(value) elif not isinstance(value, list): value = [value] return value
[ "def", "__wrap_accepted_val", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "tuple", ")", ":", "value", "=", "list", "(", "value", ")", "elif", "not", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "[",...
Wrap accepted value in the list if yet not wrapped.
[ "Wrap", "accepted", "value", "in", "the", "list", "if", "yet", "not", "wrapped", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/__init__.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/__init__.py#L103-L136
def graphql_to_gremlin(schema, graphql_query, parameters, type_equivalence_hints=None): """Compile the GraphQL input using the schema into a Gremlin query and associated metadata. Args: schema: GraphQL schema object describing the schema of the graph to be queried graphql_query: the GraphQL query to compile to Gremlin, as a string parameters: dict, mapping argument name to its value, for every parameter the query expects. type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: a CompilationResult object, containing: - query: string, the resulting compiled and parameterized query string - language: string, specifying the language to which the query was compiled - output_metadata: dict, output name -> OutputMetadata namedtuple object - input_metadata: dict, name of input variables -> inferred GraphQL type, based on use """ compilation_result = compile_graphql_to_gremlin( schema, graphql_query, type_equivalence_hints=type_equivalence_hints) return compilation_result._replace( query=insert_arguments_into_query(compilation_result, parameters))
[ "def", "graphql_to_gremlin", "(", "schema", ",", "graphql_query", ",", "parameters", ",", "type_equivalence_hints", "=", "None", ")", ":", "compilation_result", "=", "compile_graphql_to_gremlin", "(", "schema", ",", "graphql_query", ",", "type_equivalence_hints", "=", ...
Compile the GraphQL input using the schema into a Gremlin query and associated metadata. Args: schema: GraphQL schema object describing the schema of the graph to be queried graphql_query: the GraphQL query to compile to Gremlin, as a string parameters: dict, mapping argument name to its value, for every parameter the query expects. type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: a CompilationResult object, containing: - query: string, the resulting compiled and parameterized query string - language: string, specifying the language to which the query was compiled - output_metadata: dict, output name -> OutputMetadata namedtuple object - input_metadata: dict, name of input variables -> inferred GraphQL type, based on use
[ "Compile", "the", "GraphQL", "input", "using", "the", "schema", "into", "a", "Gremlin", "query", "and", "associated", "metadata", "." ]
python
train
apache/spark
python/pyspark/sql/types.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1068-L1077
def _has_nulltype(dt): """ Return whether there is NullType in `dt` or not """ if isinstance(dt, StructType): return any(_has_nulltype(f.dataType) for f in dt.fields) elif isinstance(dt, ArrayType): return _has_nulltype((dt.elementType)) elif isinstance(dt, MapType): return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType) else: return isinstance(dt, NullType)
[ "def", "_has_nulltype", "(", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "StructType", ")", ":", "return", "any", "(", "_has_nulltype", "(", "f", ".", "dataType", ")", "for", "f", "in", "dt", ".", "fields", ")", "elif", "isinstance", "(", "dt...
Return whether there is NullType in `dt` or not
[ "Return", "whether", "there", "is", "NullType", "in", "dt", "or", "not" ]
python
train
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L6851-L6877
def get_raw(self): """ Get the reconstructed code as bytearray :rtype: bytearray """ code_raw = self.code.get_raw() self.insns_size = (len(code_raw) // 2) + (len(code_raw) % 2) buff = bytearray() buff += pack("<H", self.registers_size) + \ pack("<H", self.ins_size) + \ pack("<H", self.outs_size) + \ pack("<H", self.tries_size) + \ pack("<I", self.debug_info_off) + \ pack("<I", self.insns_size) + \ code_raw if self.tries_size > 0: if (self.insns_size % 2 == 1): buff += pack("<H", self.padding) for i in self.tries: buff += i.get_raw() buff += self.handlers.get_raw() return buff
[ "def", "get_raw", "(", "self", ")", ":", "code_raw", "=", "self", ".", "code", ".", "get_raw", "(", ")", "self", ".", "insns_size", "=", "(", "len", "(", "code_raw", ")", "//", "2", ")", "+", "(", "len", "(", "code_raw", ")", "%", "2", ")", "bu...
Get the reconstructed code as bytearray :rtype: bytearray
[ "Get", "the", "reconstructed", "code", "as", "bytearray" ]
python
train
mickybart/python-atlasbroker
atlasbroker/service.py
https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/service.py#L141-L156
def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec: """Deprovision an instance see openbrokerapi documentation Raises: ErrInstanceDoesNotExist: Instance does not exist. """ # Find the instance instance = self._backend.find(instance_id) if not instance.isProvisioned(): # the instance does not exist raise ErrInstanceDoesNotExist() return self._backend.delete(instance)
[ "def", "deprovision", "(", "self", ",", "instance_id", ":", "str", ",", "details", ":", "DeprovisionDetails", ",", "async_allowed", ":", "bool", ")", "->", "DeprovisionServiceSpec", ":", "# Find the instance", "instance", "=", "self", ".", "_backend", ".", "find...
Deprovision an instance see openbrokerapi documentation Raises: ErrInstanceDoesNotExist: Instance does not exist.
[ "Deprovision", "an", "instance", "see", "openbrokerapi", "documentation", "Raises", ":", "ErrInstanceDoesNotExist", ":", "Instance", "does", "not", "exist", "." ]
python
train
fkarb/xltable
xltable/expression.py
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/expression.py#L59-L69
def value(self): """Set a calculated value for this Expression. Used when writing formulas using XlsxWriter to give cells an initial value when the sheet is loaded without being calculated. """ try: if isinstance(self.__value, Expression): return self.__value.value return self.__value except AttributeError: return 0
[ "def", "value", "(", "self", ")", ":", "try", ":", "if", "isinstance", "(", "self", ".", "__value", ",", "Expression", ")", ":", "return", "self", ".", "__value", ".", "value", "return", "self", ".", "__value", "except", "AttributeError", ":", "return", ...
Set a calculated value for this Expression. Used when writing formulas using XlsxWriter to give cells an initial value when the sheet is loaded without being calculated.
[ "Set", "a", "calculated", "value", "for", "this", "Expression", ".", "Used", "when", "writing", "formulas", "using", "XlsxWriter", "to", "give", "cells", "an", "initial", "value", "when", "the", "sheet", "is", "loaded", "without", "being", "calculated", "." ]
python
train
numenta/nupic
src/nupic/algorithms/temporal_memory.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/temporal_memory.py#L801-L837
def _adaptSegment(cls, connections, segment, prevActiveCells, permanenceIncrement, permanenceDecrement): """ Updates synapses on segment. Strengthens active synapses; weakens inactive synapses. :param connections: (Object) Connections instance for the tm :param segment: (int) Segment to adapt :param prevActiveCells: (list) Active cells in `t-1` :param permanenceIncrement: (float) Amount to increment active synapses :param permanenceDecrement: (float) Amount to decrement inactive synapses """ # Destroying a synapse modifies the set that we're iterating through. synapsesToDestroy = [] for synapse in connections.synapsesForSegment(segment): permanence = synapse.permanence if binSearch(prevActiveCells, synapse.presynapticCell) != -1: permanence += permanenceIncrement else: permanence -= permanenceDecrement # Keep permanence within min/max bounds permanence = max(0.0, min(1.0, permanence)) if permanence < EPSILON: synapsesToDestroy.append(synapse) else: connections.updateSynapsePermanence(synapse, permanence) for synapse in synapsesToDestroy: connections.destroySynapse(synapse) if connections.numSynapses(segment) == 0: connections.destroySegment(segment)
[ "def", "_adaptSegment", "(", "cls", ",", "connections", ",", "segment", ",", "prevActiveCells", ",", "permanenceIncrement", ",", "permanenceDecrement", ")", ":", "# Destroying a synapse modifies the set that we're iterating through.", "synapsesToDestroy", "=", "[", "]", "fo...
Updates synapses on segment. Strengthens active synapses; weakens inactive synapses. :param connections: (Object) Connections instance for the tm :param segment: (int) Segment to adapt :param prevActiveCells: (list) Active cells in `t-1` :param permanenceIncrement: (float) Amount to increment active synapses :param permanenceDecrement: (float) Amount to decrement inactive synapses
[ "Updates", "synapses", "on", "segment", ".", "Strengthens", "active", "synapses", ";", "weakens", "inactive", "synapses", "." ]
python
valid
rocky/python-filecache
pyficache/main.py
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L213-L224
def clear_file_format_cache(): """Remove syntax-formatted lines in the cache. Use this when you change the Pygments syntax or Token formatting and want to redo how files may have previously been syntax marked.""" for fname, cache_info in file_cache.items(): for format, lines in cache_info.lines.items(): if 'plain' == format: continue file_cache[fname].lines[format] = None pass pass pass
[ "def", "clear_file_format_cache", "(", ")", ":", "for", "fname", ",", "cache_info", "in", "file_cache", ".", "items", "(", ")", ":", "for", "format", ",", "lines", "in", "cache_info", ".", "lines", ".", "items", "(", ")", ":", "if", "'plain'", "==", "f...
Remove syntax-formatted lines in the cache. Use this when you change the Pygments syntax or Token formatting and want to redo how files may have previously been syntax marked.
[ "Remove", "syntax", "-", "formatted", "lines", "in", "the", "cache", ".", "Use", "this", "when", "you", "change", "the", "Pygments", "syntax", "or", "Token", "formatting", "and", "want", "to", "redo", "how", "files", "may", "have", "previously", "been", "s...
python
train
dictatorlib/dictator
dictator/__init__.py
https://github.com/dictatorlib/dictator/blob/b77b1709b6fff174f13b0f0c5dbe740b4c07d712/dictator/__init__.py#L328-L355
def iterkeys(self, match=None, count=1): """Return an iterator over the db's keys. ``match`` allows for filtering the keys by pattern. ``count`` allows for hint the minimum number of returns. >>> dc = Dictator() >>> dc['1'] = 'abc' >>> dc['2'] = 'def' >>> dc['3'] = 'ghi' >>> itr = dc.iterkeys() >>> type(itr) <type 'generator'> >>> list(reversed([item for item in itr])) ['1', '2', '3'] >>> dc.clear() :param match: pattern to filter keys :type match: str :param count: minimum number of returns :type count: int :return: iterator over key. :rtype: generator """ logger.debug('call iterkeys %s', match) if match is None: match = '*' for key in self._redis.scan_iter(match=match, count=count): yield key
[ "def", "iterkeys", "(", "self", ",", "match", "=", "None", ",", "count", "=", "1", ")", ":", "logger", ".", "debug", "(", "'call iterkeys %s'", ",", "match", ")", "if", "match", "is", "None", ":", "match", "=", "'*'", "for", "key", "in", "self", "....
Return an iterator over the db's keys. ``match`` allows for filtering the keys by pattern. ``count`` allows for hint the minimum number of returns. >>> dc = Dictator() >>> dc['1'] = 'abc' >>> dc['2'] = 'def' >>> dc['3'] = 'ghi' >>> itr = dc.iterkeys() >>> type(itr) <type 'generator'> >>> list(reversed([item for item in itr])) ['1', '2', '3'] >>> dc.clear() :param match: pattern to filter keys :type match: str :param count: minimum number of returns :type count: int :return: iterator over key. :rtype: generator
[ "Return", "an", "iterator", "over", "the", "db", "s", "keys", ".", "match", "allows", "for", "filtering", "the", "keys", "by", "pattern", ".", "count", "allows", "for", "hint", "the", "minimum", "number", "of", "returns", "." ]
python
train
sorgerlab/indra
indra/assemblers/cx/assembler.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/assembler.py#L160-L170
def save_model(self, file_name='model.cx'): """Save the assembled CX network in a file. Parameters ---------- file_name : Optional[str] The name of the file to save the CX network to. Default: model.cx """ with open(file_name, 'wt') as fh: cx_str = self.print_cx() fh.write(cx_str)
[ "def", "save_model", "(", "self", ",", "file_name", "=", "'model.cx'", ")", ":", "with", "open", "(", "file_name", ",", "'wt'", ")", "as", "fh", ":", "cx_str", "=", "self", ".", "print_cx", "(", ")", "fh", ".", "write", "(", "cx_str", ")" ]
Save the assembled CX network in a file. Parameters ---------- file_name : Optional[str] The name of the file to save the CX network to. Default: model.cx
[ "Save", "the", "assembled", "CX", "network", "in", "a", "file", "." ]
python
train
ajdavis/mongo-mockup-db
mockupdb/__init__.py
https://github.com/ajdavis/mongo-mockup-db/blob/ff8a3f793def59e9037397ef60607fbda6949dac/mockupdb/__init__.py#L2000-L2012
def raise_args_err(message='bad arguments', error_class=TypeError): """Throw an error with standard message, displaying function call. >>> def f(a, *args, **kwargs): ... raise_args_err() ... >>> f(1, 2, x='y') Traceback (most recent call last): ... TypeError: bad arguments: f(1, 2, x='y') """ frame = inspect.currentframe().f_back raise error_class(message + ': ' + format_call(frame))
[ "def", "raise_args_err", "(", "message", "=", "'bad arguments'", ",", "error_class", "=", "TypeError", ")", ":", "frame", "=", "inspect", ".", "currentframe", "(", ")", ".", "f_back", "raise", "error_class", "(", "message", "+", "': '", "+", "format_call", "...
Throw an error with standard message, displaying function call. >>> def f(a, *args, **kwargs): ... raise_args_err() ... >>> f(1, 2, x='y') Traceback (most recent call last): ... TypeError: bad arguments: f(1, 2, x='y')
[ "Throw", "an", "error", "with", "standard", "message", "displaying", "function", "call", "." ]
python
train
google/mobly
mobly/controllers/iperf_server.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/iperf_server.py#L125-L145
def start(self, extra_args="", tag=""): """Starts iperf server on specified port. Args: extra_args: A string representing extra arguments to start iperf server with. tag: Appended to log file name to identify logs from different iperf runs. """ if self.started: return utils.create_dir(self.log_path) if tag: tag = tag + ',' out_file_name = "IPerfServer,{},{}{}.log".format( self.port, tag, len(self.log_files)) full_out_path = os.path.join(self.log_path, out_file_name) cmd = '%s %s > %s' % (self.iperf_str, extra_args, full_out_path) self.iperf_process = utils.start_standing_subprocess(cmd, shell=True) self.log_files.append(full_out_path) self.started = True
[ "def", "start", "(", "self", ",", "extra_args", "=", "\"\"", ",", "tag", "=", "\"\"", ")", ":", "if", "self", ".", "started", ":", "return", "utils", ".", "create_dir", "(", "self", ".", "log_path", ")", "if", "tag", ":", "tag", "=", "tag", "+", ...
Starts iperf server on specified port. Args: extra_args: A string representing extra arguments to start iperf server with. tag: Appended to log file name to identify logs from different iperf runs.
[ "Starts", "iperf", "server", "on", "specified", "port", "." ]
python
train
saltstack/salt
salt/modules/keystoneng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystoneng.py#L530-L544
def user_create(auth=None, **kwargs): ''' Create a user CLI Example: .. code-block:: bash salt '*' keystoneng.user_create name=user1 salt '*' keystoneng.user_create name=user2 password=1234 enabled=False salt '*' keystoneng.user_create name=user3 domain_id=b62e76fbeeff4e8fb77073f591cf211e ''' cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_user(**kwargs)
[ "def", "user_create", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_openstack_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "keep_name", "=", "True", ",", "*", "*", "kwargs", ")", "return", "cloud", "....
Create a user CLI Example: .. code-block:: bash salt '*' keystoneng.user_create name=user1 salt '*' keystoneng.user_create name=user2 password=1234 enabled=False salt '*' keystoneng.user_create name=user3 domain_id=b62e76fbeeff4e8fb77073f591cf211e
[ "Create", "a", "user" ]
python
train
pandas-dev/pandas
pandas/core/arrays/datetimelike.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L68-L89
def _scalar_from_string( self, value: str, ) -> Union[Period, Timestamp, Timedelta, NaTType]: """ Construct a scalar type from a string. Parameters ---------- value : str Returns ------- Period, Timestamp, or Timedelta, or NaT Whatever the type of ``self._scalar_type`` is. Notes ----- This should call ``self._check_compatible_with`` before unboxing the result. """ raise AbstractMethodError(self)
[ "def", "_scalar_from_string", "(", "self", ",", "value", ":", "str", ",", ")", "->", "Union", "[", "Period", ",", "Timestamp", ",", "Timedelta", ",", "NaTType", "]", ":", "raise", "AbstractMethodError", "(", "self", ")" ]
Construct a scalar type from a string. Parameters ---------- value : str Returns ------- Period, Timestamp, or Timedelta, or NaT Whatever the type of ``self._scalar_type`` is. Notes ----- This should call ``self._check_compatible_with`` before unboxing the result.
[ "Construct", "a", "scalar", "type", "from", "a", "string", "." ]
python
train
KelSolaar/Manager
manager/components_manager.py
https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1270-L1298
def filter_components(self, pattern, category=None): """ Filters the Components using given regex pattern. Usage:: >>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",)) >>> manager.register_components() True >>> manager.filter_components("\w+A$") [u'core.tests_component_a'] :param pattern: Regex filtering pattern. :type pattern: unicode :param category: Category filter. :type category: unicode :return: Matching Components. :rtype: list """ filtered_components = [] for component, profile in self: if category: if profile.category != category: continue if re.search(pattern, component): filtered_components.append(component) return filtered_components
[ "def", "filter_components", "(", "self", ",", "pattern", ",", "category", "=", "None", ")", ":", "filtered_components", "=", "[", "]", "for", "component", ",", "profile", "in", "self", ":", "if", "category", ":", "if", "profile", ".", "category", "!=", "...
Filters the Components using given regex pattern. Usage:: >>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",)) >>> manager.register_components() True >>> manager.filter_components("\w+A$") [u'core.tests_component_a'] :param pattern: Regex filtering pattern. :type pattern: unicode :param category: Category filter. :type category: unicode :return: Matching Components. :rtype: list
[ "Filters", "the", "Components", "using", "given", "regex", "pattern", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_rally.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_rally.py#L72-L114
def cmd_rally_add(self, args): '''handle rally add''' if len(args) < 1: alt = self.settings.rallyalt else: alt = float(args[0]) if len(args) < 2: break_alt = self.settings.rally_breakalt else: break_alt = float(args[1]) if len(args) < 3: flag = self.settings.rally_flags else: flag = int(args[2]) #currently only supporting autoland values: #True (nonzero) and False (zero) if (flag != 0): flag = 2 if not self.have_list: print("Please list rally points first") return if (self.rallyloader.rally_count() > 4): print ("Only 5 rally points possible per flight plan.") return try: latlon = self.module('map').click_position except Exception: print("No map available") return if latlon is None: print("No map click position available") return land_hdg = 0.0 self.rallyloader.create_and_append_rally_point(latlon[0] * 1e7, latlon[1] * 1e7, alt, break_alt, land_hdg, flag) self.send_rally_points() print("Added Rally point at %s %f %f, autoland: %s" % (str(latlon), alt, break_alt, bool(flag & 2)))
[ "def", "cmd_rally_add", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "1", ":", "alt", "=", "self", ".", "settings", ".", "rallyalt", "else", ":", "alt", "=", "float", "(", "args", "[", "0", "]", ")", "if", "len", "(",...
handle rally add
[ "handle", "rally", "add" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/backend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L477-L485
def featureSetsGenerator(self, request): """ Returns a generator over the (featureSet, nextPageToken) pairs defined by the specified request. """ dataset = self.getDataRepository().getDataset(request.dataset_id) return self._topLevelObjectGenerator( request, dataset.getNumFeatureSets(), dataset.getFeatureSetByIndex)
[ "def", "featureSetsGenerator", "(", "self", ",", "request", ")", ":", "dataset", "=", "self", ".", "getDataRepository", "(", ")", ".", "getDataset", "(", "request", ".", "dataset_id", ")", "return", "self", ".", "_topLevelObjectGenerator", "(", "request", ",",...
Returns a generator over the (featureSet, nextPageToken) pairs defined by the specified request.
[ "Returns", "a", "generator", "over", "the", "(", "featureSet", "nextPageToken", ")", "pairs", "defined", "by", "the", "specified", "request", "." ]
python
train
python/performance
performance/compare.py
https://github.com/python/performance/blob/2a9524c0a5714e85106671bc61d750e800fe17db/performance/compare.py#L106-L123
def is_significant(sample1, sample2): """Determine whether two samples differ significantly. This uses a Student's two-sample, two-tailed t-test with alpha=0.95. Args: sample1: one sample. sample2: the other sample. Returns: (significant, t_score) where significant is a bool indicating whether the two samples differ significantly; t_score is the score from the two-sample T test. """ deg_freedom = len(sample1) + len(sample2) - 2 critical_value = tdist95conf_level(deg_freedom) t_score = tscore(sample1, sample2) return (abs(t_score) >= critical_value, t_score)
[ "def", "is_significant", "(", "sample1", ",", "sample2", ")", ":", "deg_freedom", "=", "len", "(", "sample1", ")", "+", "len", "(", "sample2", ")", "-", "2", "critical_value", "=", "tdist95conf_level", "(", "deg_freedom", ")", "t_score", "=", "tscore", "("...
Determine whether two samples differ significantly. This uses a Student's two-sample, two-tailed t-test with alpha=0.95. Args: sample1: one sample. sample2: the other sample. Returns: (significant, t_score) where significant is a bool indicating whether the two samples differ significantly; t_score is the score from the two-sample T test.
[ "Determine", "whether", "two", "samples", "differ", "significantly", "." ]
python
test
snare/voltron
voltron/plugin.py
https://github.com/snare/voltron/blob/4ee3cbe6f7c1e38303f5dc6114c48b60217253c3/voltron/plugin.py#L133-L144
def valid_web_plugin(self, plugin): """ Validate a web plugin, ensuring it is a web plugin and has the necessary fields present. `plugin` is a subclass of scruffy's Plugin class. """ if (issubclass(plugin, WebPlugin) and hasattr(plugin, 'plugin_type') and plugin.plugin_type == 'web' and hasattr(plugin, 'name') and plugin.name != None): return True return False
[ "def", "valid_web_plugin", "(", "self", ",", "plugin", ")", ":", "if", "(", "issubclass", "(", "plugin", ",", "WebPlugin", ")", "and", "hasattr", "(", "plugin", ",", "'plugin_type'", ")", "and", "plugin", ".", "plugin_type", "==", "'web'", "and", "hasattr"...
Validate a web plugin, ensuring it is a web plugin and has the necessary fields present. `plugin` is a subclass of scruffy's Plugin class.
[ "Validate", "a", "web", "plugin", "ensuring", "it", "is", "a", "web", "plugin", "and", "has", "the", "necessary", "fields", "present", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/srtm.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/srtm.py#L111-L117
def createFileList(self): """SRTM data is split into different directories, get a list of all of them and create a dictionary for easy lookup.""" global childFileListDownload if childFileListDownload is None or not childFileListDownload.is_alive(): childFileListDownload = multiprocessing.Process(target=self.createFileListHTTP) childFileListDownload.start()
[ "def", "createFileList", "(", "self", ")", ":", "global", "childFileListDownload", "if", "childFileListDownload", "is", "None", "or", "not", "childFileListDownload", ".", "is_alive", "(", ")", ":", "childFileListDownload", "=", "multiprocessing", ".", "Process", "("...
SRTM data is split into different directories, get a list of all of them and create a dictionary for easy lookup.
[ "SRTM", "data", "is", "split", "into", "different", "directories", "get", "a", "list", "of", "all", "of", "them", "and", "create", "a", "dictionary", "for", "easy", "lookup", "." ]
python
train
Clinical-Genomics/scout
scout/utils/requests.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/utils/requests.py#L144-L179
def fetch_ensembl_genes(build='37'): """Fetch the ensembl genes Args: build(str): ['37', '38'] """ if build == '37': url = 'http://grch37.ensembl.org' else: url = 'http://www.ensembl.org' LOG.info("Fetching ensembl genes from %s", url) dataset_name = 'hsapiens_gene_ensembl' dataset = pybiomart.Dataset(name=dataset_name, host=url) attributes = [ 'chromosome_name', 'start_position', 'end_position', 'ensembl_gene_id', 'hgnc_symbol', 'hgnc_id', ] filters = { 'chromosome_name': CHROMOSOMES, } result = dataset.query( attributes = attributes, filters = filters, use_attr_names=True, ) return result
[ "def", "fetch_ensembl_genes", "(", "build", "=", "'37'", ")", ":", "if", "build", "==", "'37'", ":", "url", "=", "'http://grch37.ensembl.org'", "else", ":", "url", "=", "'http://www.ensembl.org'", "LOG", ".", "info", "(", "\"Fetching ensembl genes from %s\"", ",",...
Fetch the ensembl genes Args: build(str): ['37', '38']
[ "Fetch", "the", "ensembl", "genes", "Args", ":", "build", "(", "str", ")", ":", "[", "37", "38", "]" ]
python
test
saltstack/salt
salt/modules/solr.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solr.py#L313-L335
def _get_admin_info(command, host=None, core_name=None): ''' PRIVATE METHOD Calls the _http_request method and passes the admin command to execute and stores the data. This data is fairly static but should be refreshed periodically to make sure everything this OK. The data object will contain the JSON response. command : str The admin command to execute. host : str (None) The solr host to query. __opts__['host'] is default core_name: str (None) The name of the solr core if using cores. Leave this blank if you are not using cores or if you want to check all cores. Return: dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} ''' url = _format_url("admin/{0}".format(command), host, core_name=core_name) resp = _http_request(url) return resp
[ "def", "_get_admin_info", "(", "command", ",", "host", "=", "None", ",", "core_name", "=", "None", ")", ":", "url", "=", "_format_url", "(", "\"admin/{0}\"", ".", "format", "(", "command", ")", ",", "host", ",", "core_name", "=", "core_name", ")", "resp"...
PRIVATE METHOD Calls the _http_request method and passes the admin command to execute and stores the data. This data is fairly static but should be refreshed periodically to make sure everything this OK. The data object will contain the JSON response. command : str The admin command to execute. host : str (None) The solr host to query. __opts__['host'] is default core_name: str (None) The name of the solr core if using cores. Leave this blank if you are not using cores or if you want to check all cores. Return: dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
[ "PRIVATE", "METHOD", "Calls", "the", "_http_request", "method", "and", "passes", "the", "admin", "command", "to", "execute", "and", "stores", "the", "data", ".", "This", "data", "is", "fairly", "static", "but", "should", "be", "refreshed", "periodically", "to"...
python
train
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L1447-L1468
def path(self, which=None): """Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: build_pxe_default /config_templates/build_pxe_default clone /config_templates/clone revision /config_templates/revision ``super`` is called otherwise. """ if which in ('build_pxe_default', 'clone', 'revision'): prefix = 'self' if which == 'clone' else 'base' return '{0}/{1}'.format( super(ConfigTemplate, self).path(prefix), which ) return super(ConfigTemplate, self).path(which)
[ "def", "path", "(", "self", ",", "which", "=", "None", ")", ":", "if", "which", "in", "(", "'build_pxe_default'", ",", "'clone'", ",", "'revision'", ")", ":", "prefix", "=", "'self'", "if", "which", "==", "'clone'", "else", "'base'", "return", "'{0}/{1}'...
Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: build_pxe_default /config_templates/build_pxe_default clone /config_templates/clone revision /config_templates/revision ``super`` is called otherwise.
[ "Extend", "nailgun", ".", "entity_mixins", ".", "Entity", ".", "path", "." ]
python
train
janpipek/physt
physt/histogram_base.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L406-L453
def merge_bins(self,
               amount: Optional[int] = None, *,
               min_frequency: Optional[float] = None,
               axis: Optional[AxisIdentifier] = None,
               inplace: bool = False) -> 'HistogramBase':
    """Reduce the number of bins and add their content:

    Parameters
    ----------
    amount: How many adjacent bins to join together.
    min_frequency: Try to have at least this value in each bin
        (this is not enforced e.g. for minima between high bins)
    axis: int or None
        On which axis to do this (None => all)
    inplace: Whether to modify this histogram or return a new one
    """
    if not inplace:
        # Work on a copy, merge there in place, and return the copy.
        histogram = self.copy()
        histogram.merge_bins(amount, min_frequency=min_frequency, axis=axis, inplace=True)
        return histogram
    elif axis is None:
        # No axis given: apply the merge to every axis in turn.
        for i in range(self.ndim):
            self.merge_bins(amount=amount, min_frequency=min_frequency, axis=i, inplace=True)
    else:
        axis = self._get_axis(axis)
        if amount is not None:
            # Fixed-size merging: every `amount` adjacent source bins map
            # onto one target bin (bin_map pairs are (old_index, new_index)).
            if not amount == int(amount):
                raise RuntimeError("Amount must be integer")
            bin_map = [(i, i // amount) for i in range(self.shape[axis])]
        elif min_frequency is not None:
            if self.ndim == 1:
                check = self.frequencies
            else:
                # Judge frequencies on the projection onto the merged axis.
                check = self.projection(axis).frequencies
            # Frequency-based merging: accumulate adjacent bins until the
            # running sum reaches min_frequency, then start a new target bin.
            bin_map = []
            current_new = 0
            current_sum = 0
            for i, freq in enumerate(check):
                if freq >= min_frequency and current_sum > 0:
                    current_sum = 0
                    current_new += 1
                bin_map.append((i, current_new))
                current_sum += freq
                if current_sum > min_frequency:
                    current_sum = 0
                    current_new += 1
        else:
            raise NotImplementedError("Not yet implemented.")
        # Rebuild the binning for this axis and remap stored frequencies.
        new_binning = self._binnings[axis].apply_bin_map(bin_map)
        self._change_binning(new_binning, bin_map, axis=axis)
    return self
[ "def", "merge_bins", "(", "self", ",", "amount", ":", "Optional", "[", "int", "]", "=", "None", ",", "*", ",", "min_frequency", ":", "Optional", "[", "float", "]", "=", "None", ",", "axis", ":", "Optional", "[", "AxisIdentifier", "]", "=", "None", ",...
Reduce the number of bins and add their content: Parameters ---------- amount: How many adjacent bins to join together. min_frequency: Try to have at least this value in each bin (this is not enforce e.g. for minima between high bins) axis: int or None On which axis to do this (None => all) inplace: Whether to modify this histogram or return a new one
[ "Reduce", "the", "number", "of", "bins", "and", "add", "their", "content", ":" ]
python
train
heuer/segno
segno/encoder.py
https://github.com/heuer/segno/blob/64d912a2bd17d0b5ff3e8b5d37098edfc663c2b3/segno/encoder.py#L919-L942
def calc_format_info(version, error, mask_pattern):
    """\
    Return the format information for the provided error level and mask
    pattern.

    ISO/IEC 18004:2015(E) -- 7.9 Format information (page 55)
    ISO/IEC 18004:2015(E) -- Table C.1 -- Valid format information bit
    sequences (page 80)

    :param int version: Version constant
    :param int error: Error level constant.
    :param int mask_pattern: Mask pattern number.
    """
    if version > 0:
        # Regular QR Code: the error level selects a fixed offset added to
        # the mask pattern index (unlisted levels add nothing).
        offsets = {consts.ERROR_LEVEL_L: 0x08,
                   consts.ERROR_LEVEL_H: 0x10,
                   consts.ERROR_LEVEL_Q: 0x18}
        index = mask_pattern + offsets.get(error, 0)
        return consts.FORMAT_INFO[index]
    # Micro QR Code: the offset depends on both version and error level.
    index = mask_pattern + (consts.ERROR_LEVEL_TO_MICRO_MAPPING[version][error] << 2)
    return consts.FORMAT_INFO_MICRO[index]
[ "def", "calc_format_info", "(", "version", ",", "error", ",", "mask_pattern", ")", ":", "fmt", "=", "mask_pattern", "if", "version", ">", "0", ":", "if", "error", "==", "consts", ".", "ERROR_LEVEL_L", ":", "fmt", "+=", "0x08", "elif", "error", "==", "con...
\ Returns the format information for the provided error level and mask patttern. ISO/IEC 18004:2015(E) -- 7.9 Format information (page 55) ISO/IEC 18004:2015(E) -- Table C.1 — Valid format information bit sequences (page 80) :param int version: Version constant :param int error: Error level constant. :param int mask_pattern: Mask pattern number.
[ "\\", "Returns", "the", "format", "information", "for", "the", "provided", "error", "level", "and", "mask", "patttern", "." ]
python
train
deepmipt/DeepPavlov
deeppavlov/core/data/utils.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/data/utils.py#L83-L125
def download(dest_file_path: [List[Union[str, Path]]], source_url: str, force_download=True):
    """Download a file from URL to one or several target locations

    Args:
        dest_file_path: path or list of paths to the file destination files (including file name)
        source_url: the source URL
        force_download: download file if it already exists, or not

    """
    # Normalise the destination argument to a list of Path objects.
    if isinstance(dest_file_path, list):
        dest_file_paths = [Path(path) for path in dest_file_path]
    else:
        dest_file_paths = [Path(dest_file_path).absolute()]

    if not force_download:
        # Keep only destinations that do not exist on disk yet.
        to_check = list(dest_file_paths)
        dest_file_paths = []
        for p in to_check:
            if p.exists():
                log.info(f'File already exists in {p}')
            else:
                dest_file_paths.append(p)

    if dest_file_paths:
        # When DP_CACHE_DIR is set, download into a shared cache keyed by
        # a hash of the URL so identical URLs are fetched only once.
        cache_dir = os.getenv('DP_CACHE_DIR')
        cached_exists = False
        if cache_dir:
            first_dest_path = Path(cache_dir) / md5(source_url.encode('utf8')).hexdigest()[:15]
            cached_exists = first_dest_path.exists()
        else:
            first_dest_path = dest_file_paths.pop()

        if not cached_exists:
            first_dest_path.parent.mkdir(parents=True, exist_ok=True)

            simple_download(source_url, first_dest_path)
        else:
            log.info(f'Found cached {source_url} in {first_dest_path}')

        # Fan the downloaded (or cached) file out to the remaining targets.
        for dest_path in dest_file_paths:
            dest_path.parent.mkdir(parents=True, exist_ok=True)

            shutil.copy(str(first_dest_path), str(dest_path))
[ "def", "download", "(", "dest_file_path", ":", "[", "List", "[", "Union", "[", "str", ",", "Path", "]", "]", "]", ",", "source_url", ":", "str", ",", "force_download", "=", "True", ")", ":", "if", "isinstance", "(", "dest_file_path", ",", "list", ")", ...
Download a file from URL to one or several target locations Args: dest_file_path: path or list of paths to the file destination files (including file name) source_url: the source URL force_download: download file if it already exists, or not
[ "Download", "a", "file", "from", "URL", "to", "one", "or", "several", "target", "locations" ]
python
test
yyuu/botornado
boto/iam/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/iam/connection.py#L867-L882
def deactivate_mfa_device(self, user_name, serial_number):
    """
    Deactivates the specified MFA device and removes it from
    association with the user.

    :type user_name: string
    :param user_name: The username of the user

    :type serial_number: string
    :param serial_number: The serial number which uniquely identifies
        the MFA device.

    """
    params = {'UserName' : user_name,
              'SerialNumber' : serial_number}
    return self.get_response('DeactivateMFADevice', params)
[ "def", "deactivate_mfa_device", "(", "self", ",", "user_name", ",", "serial_number", ")", ":", "params", "=", "{", "'UserName'", ":", "user_name", ",", "'SerialNumber'", ":", "serial_number", "}", "return", "self", ".", "get_response", "(", "'DeactivateMFADevice'"...
Deactivates the specified MFA device and removes it from association with the user. :type user_name: string :param user_name: The username of the user :type serial_number: string :param seriasl_number: The serial number which uniquely identifies the MFA device.
[ "Deactivates", "the", "specified", "MFA", "device", "and", "removes", "it", "from", "association", "with", "the", "user", "." ]
python
train
klen/muffin-admin
muffin_admin/plugin.py
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/plugin.py#L101-L113
def register(self, *handlers, **params):
    """ Register the given admin handlers with the application.

    A peewee model class is wrapped in a generated ``PWAdminHandler``
    subclass (named after its table, e.g. ``UsersAdmin``) and passed to
    ``self.app.register``; extra ``params`` become attributes of that
    generated class. Anything else is stored in ``self.handlers`` keyed
    by its lower-cased ``name``.
    """
    for handler in handlers:

        if issubclass(handler, PWModel):
            # Build an admin handler class on the fly for the model.
            handler = type(
                handler._meta.db_table.title() + 'Admin', (PWAdminHandler,),
                dict(model=handler, **params))
            self.app.register(handler)
            continue

        name = handler.name.lower()
        self.handlers[name] = handler
[ "def", "register", "(", "self", ",", "*", "handlers", ",", "*", "*", "params", ")", ":", "for", "handler", "in", "handlers", ":", "if", "issubclass", "(", "handler", ",", "PWModel", ")", ":", "handler", "=", "type", "(", "handler", ".", "_meta", ".",...
Ensure that handler is not registered.
[ "Ensure", "that", "handler", "is", "not", "registered", "." ]
python
train
Bogdanp/dramatiq
dramatiq/brokers/rabbitmq.py
https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/brokers/rabbitmq.py#L183-L224
def declare_queue(self, queue_name):
    """Declare a queue.  Has no effect if a queue with the given
    name already exists.

    Parameters:
      queue_name(str): The name of the new queue.

    Raises:
      ConnectionClosed: If the underlying channel or connection
        has been closed.
    """
    attempts = 1
    while True:
        try:
            if queue_name not in self.queues:
                # Declare the live queue plus its companion delayed (dq)
                # and dead-letter (xq) queues in one pass, emitting the
                # before/after broker events around each declaration.
                self.emit_before("declare_queue", queue_name)
                self._declare_queue(queue_name)
                self.queues.add(queue_name)
                self.emit_after("declare_queue", queue_name)

                delayed_name = dq_name(queue_name)
                self._declare_dq_queue(queue_name)
                self.delay_queues.add(delayed_name)
                self.emit_after("declare_delay_queue", delayed_name)

                self._declare_xq_queue(queue_name)

            break
        except (pika.exceptions.AMQPConnectionError,
                pika.exceptions.AMQPChannelError) as e:  # pragma: no cover
            # Delete the channel and the connection so that the next
            # caller may initiate new ones of each.
            del self.channel
            del self.connection

            attempts += 1
            if attempts > MAX_DECLARE_ATTEMPTS:
                raise ConnectionClosed(e) from None

            self.logger.debug(
                "Retrying declare due to closed connection. [%d/%d]",
                attempts, MAX_DECLARE_ATTEMPTS,
            )
[ "def", "declare_queue", "(", "self", ",", "queue_name", ")", ":", "attempts", "=", "1", "while", "True", ":", "try", ":", "if", "queue_name", "not", "in", "self", ".", "queues", ":", "self", ".", "emit_before", "(", "\"declare_queue\"", ",", "queue_name", ...
Declare a queue. Has no effect if a queue with the given name already exists. Parameters: queue_name(str): The name of the new queue. Raises: ConnectionClosed: If the underlying channel or connection has been closed.
[ "Declare", "a", "queue", ".", "Has", "no", "effect", "if", "a", "queue", "with", "the", "given", "name", "already", "exists", "." ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4292-L4297
def intSubset(self):
    """Get the internal subset (DTD) of a document."""
    raw_dtd = libxml2mod.xmlGetIntSubset(self._o)
    if raw_dtd is None:
        raise treeError('xmlGetIntSubset() failed')
    return xmlDtd(_obj=raw_dtd)
[ "def", "intSubset", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlGetIntSubset", "(", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlGetIntSubset() failed'", ")", "__tmp", "=", "xmlDtd", "(", "_obj", "...
Get the internal subset of a document
[ "Get", "the", "internal", "subset", "of", "a", "document" ]
python
train
olsoneric/pedemath
pedemath/vec3.py
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/vec3.py#L73-L76
def dot_v3(v, w):
    """Return the dot product of two vectors."""
    total = 0
    for component_a, component_b in zip(v, w):
        total += component_a * component_b
    return total
[ "def", "dot_v3", "(", "v", ",", "w", ")", ":", "return", "sum", "(", "[", "x", "*", "y", "for", "x", ",", "y", "in", "zip", "(", "v", ",", "w", ")", "]", ")" ]
Return the dotproduct of two vectors.
[ "Return", "the", "dotproduct", "of", "two", "vectors", "." ]
python
train
asobrien/randomOrg
randomorg/_rand_core.py
https://github.com/asobrien/randomOrg/blob/76c3f167c5689992d32cd1f827816254158160f7/randomorg/_rand_core.py#L111-L131
def string(num, length, digits=False, upper=True, lower=True, unique=False):
    """Fetch random strings from random.org.

    :param num: number of strings to fetch
    :param length: length of each string
    :param digits: allow digits (0-9) in the strings
    :param upper: allow upper-case letters
    :param lower: allow lower-case letters
    :param unique: require all returned strings to be distinct
    :return: list of strings (one per line of the HTTP response)
    """
    function = 'strings'

    # Convert arguments to random.org style
    # for a discussion on the method see: http://bit.ly/TKGkOF
    digits = convert(digits)
    upper = convert(upper)
    lower = convert(lower)
    unique = convert(unique)
    # Bug fix: 'unique' was converted but never added to the request
    # options, so the uniqueness setting was silently ignored by the API.
    opts = {'num': num, 'len': length, 'digits': digits,
            'upperalpha': upper, 'loweralpha': lower,
            'unique': unique, 'format': 'plain', 'rnd': 'new'}
    seq = get_http(RANDOM_URL, function, opts)
    seq = seq.strip().split('\n')  # convert to list
    # seq_arr = str_to_arr(seq)
    return seq
[ "def", "string", "(", "num", ",", "length", ",", "digits", "=", "False", ",", "upper", "=", "True", ",", "lower", "=", "True", ",", "unique", "=", "False", ")", ":", "function", "=", "'strings'", "# Convert arguments to random.org style", "# for a discussion o...
Random strings.
[ "Random", "strings", "." ]
python
train
liamw9534/bt-manager
bt_manager/interface.py
https://github.com/liamw9534/bt-manager/blob/51be2919394ce8134c698359649bfad09eedf4ec/bt_manager/interface.py#L193-L211
def set_property(self, name, value):
    """
    Helper to set a property value by name, translating to the
    correct dbus type.

    See also :py:meth:`get_property`

    :param str name: The property name in the object's dictionary
        whose value shall be set.
    :param value: Properties new value to be assigned.
    :return:
    :raises KeyError: if the property key is not found in the
        object's dictionary
    :raises dbus.Exception: org.bluez.Error.DoesNotExist
    :raises dbus.Exception: org.bluez.Error.InvalidArguments
    """
    # Use the current value's type as the template for the dbus
    # conversion of the new value.
    current_type = type(self.get_property(name))
    converted = translate_to_dbus_type(current_type, value)
    self._interface.SetProperty(name, converted)
[ "def", "set_property", "(", "self", ",", "name", ",", "value", ")", ":", "typeof", "=", "type", "(", "self", ".", "get_property", "(", "name", ")", ")", "self", ".", "_interface", ".", "SetProperty", "(", "name", ",", "translate_to_dbus_type", "(", "type...
Helper to set a property value by name, translating to correct dbus type See also :py:meth:`get_property` :param str name: The property name in the object's dictionary whose value shall be set. :param value: Properties new value to be assigned. :return: :raises KeyError: if the property key is not found in the object's dictionary :raises dbus.Exception: org.bluez.Error.DoesNotExist :raises dbus.Exception: org.bluez.Error.InvalidArguments
[ "Helper", "to", "set", "a", "property", "value", "by", "name", "translating", "to", "correct", "dbus", "type" ]
python
train
chrislit/abydos
abydos/phonetic/_beider_morse.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/phonetic/_beider_morse.py#L758-L786
def _language_index_from_code(self, code, name_mode):
    """Return the index value for a language code.

    This returns l_any if more than one code is specified or the code
    is out of bounds.

    Parameters
    ----------
    code : int
        The language code to interpret
    name_mode : str
        The name mode of the algorithm: ``gen`` (default),
        ``ash`` (Ashkenazi), or ``sep`` (Sephardic)

    Returns
    -------
    int
        Language code index

    """
    max_code = sum(_LANG_DICT[_] for _ in BMDATA[name_mode]['languages'])
    out_of_range = code < 1 or code > max_code
    # A valid single-language code is a power of two; `code & (code - 1)`
    # is nonzero exactly when more than one language bit is set.
    multiple_languages = code & (code - 1)
    if out_of_range or multiple_languages:
        return L_ANY
    return code
[ "def", "_language_index_from_code", "(", "self", ",", "code", ",", "name_mode", ")", ":", "if", "code", "<", "1", "or", "code", ">", "sum", "(", "_LANG_DICT", "[", "_", "]", "for", "_", "in", "BMDATA", "[", "name_mode", "]", "[", "'languages'", "]", ...
Return the index value for a language code. This returns l_any if more than one code is specified or the code is out of bounds. Parameters ---------- code : int The language code to interpret name_mode : str The name mode of the algorithm: ``gen`` (default), ``ash`` (Ashkenazi), or ``sep`` (Sephardic) Returns ------- int Language code index
[ "Return", "the", "index", "value", "for", "a", "language", "code", "." ]
python
valid
log2timeline/dfvfs
dfvfs/vfs/apfs_container_file_system.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/apfs_container_file_system.py#L75-L92
def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
      path_spec (PathSpec): a path specification.

    Returns:
      bool: True if the file entry exists.
    """
    volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)

    if volume_index is not None:
      return 0 <= volume_index < self._fsapfs_container.number_of_volumes

    # The virtual root file has no corresponding volume index but
    # should have a location equal to the root location.
    location = getattr(path_spec, 'location', None)
    return location is not None and location == self.LOCATION_ROOT
[ "def", "FileEntryExistsByPathSpec", "(", "self", ",", "path_spec", ")", ":", "volume_index", "=", "apfs_helper", ".", "APFSContainerPathSpecGetVolumeIndex", "(", "path_spec", ")", "# The virtual root file has not corresponding volume index but", "# should have a location.", "if",...
Determines if a file entry for a path specification exists. Args: path_spec (PathSpec): a path specification. Returns: bool: True if the file entry exists.
[ "Determines", "if", "a", "file", "entry", "for", "a", "path", "specification", "exists", "." ]
python
train
klmitch/tendril
tendril/connection.py
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/connection.py#L283-L307
def framers(self, value):
    """
    Set the framers in use for the connection.  The framer states
    will be reset next time their respective framer is used.

    :param value: Either a single ``tendril.Framer`` (used for both
                  directions) or a two-element sequence of
                  ``(send_framer, recv_framer)``.
    :raises ValueError: if a sequence does not contain exactly two
                        elements, or if any element is not a Framer.
    """
    # ``collections.Sequence`` was removed in Python 3.10; resolve the
    # ABC from ``collections.abc`` when available (Python 3), falling
    # back to ``collections`` itself (Python 2).
    sequence_abc = getattr(collections, 'abc', collections).Sequence

    # Handle sequence values
    if isinstance(value, sequence_abc):
        if len(value) != 2:
            raise ValueError('need exactly 2 values to unpack')
        elif (not isinstance(value[0], framers.Framer) or
              not isinstance(value[1], framers.Framer)):
            raise ValueError("framer must be an instance of "
                             "tendril.Framer")

        self._send_framer, self._recv_framer = value

    # If we have a single value, assume it's a framer
    else:
        if not isinstance(value, framers.Framer):
            raise ValueError("framer must be an instance of "
                             "tendril.Framer")

        self._send_framer = value
        self._recv_framer = value
[ "def", "framers", "(", "self", ",", "value", ")", ":", "# Handle sequence values", "if", "isinstance", "(", "value", ",", "collections", ".", "Sequence", ")", ":", "if", "len", "(", "value", ")", "!=", "2", ":", "raise", "ValueError", "(", "'need exactly 2...
Set the framers in use for the connection. The framer states will be reset next time their respective framer is used.
[ "Set", "the", "framers", "in", "use", "for", "the", "connection", ".", "The", "framer", "states", "will", "be", "reset", "next", "time", "their", "respective", "framer", "is", "used", "." ]
python
train
oriontvv/pyaspeller
pyaspeller/speller.py
https://github.com/oriontvv/pyaspeller/blob/9a76d1f1fb00c7eabfa006f8e0f145f764c7a8d6/pyaspeller/speller.py#L165-L170
def dictionary(self, value):
    """Set the dictionary; falsy values are replaced by an empty dict."""
    # NOTE: the attribute is assigned before validation, so it is set
    # even when the error below is raised (matches original behaviour).
    self._dictionary = value or {}
    if not isinstance(self._dictionary, dict):
        message = "dictionary must be dict: {}".format(self._dictionary)
        raise BadArgumentError(message)
[ "def", "dictionary", "(", "self", ",", "value", ")", ":", "self", ".", "_dictionary", "=", "value", "or", "{", "}", "if", "not", "isinstance", "(", "self", ".", "_dictionary", ",", "dict", ")", ":", "raise", "BadArgumentError", "(", "\"dictionary must be d...
Set dictionary
[ "Set", "dictionary" ]
python
test
ga4gh/ga4gh-server
ga4gh/server/datamodel/datasets.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/datasets.py#L79-L86
def addBiosample(self, biosample):
    """
    Adds the specified biosample to this dataset.
    """
    biosample_id = biosample.getId()
    # Keep the id map, the ordered id list and the name map in sync.
    self._biosampleIdMap[biosample_id] = biosample
    self._biosampleIds.append(biosample_id)
    self._biosampleNameMap[biosample.getName()] = biosample
[ "def", "addBiosample", "(", "self", ",", "biosample", ")", ":", "id_", "=", "biosample", ".", "getId", "(", ")", "self", ".", "_biosampleIdMap", "[", "id_", "]", "=", "biosample", "self", ".", "_biosampleIds", ".", "append", "(", "id_", ")", "self", "....
Adds the specified biosample to this dataset.
[ "Adds", "the", "specified", "biosample", "to", "this", "dataset", "." ]
python
train
annoviko/pyclustering
pyclustering/nnet/fsync.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/fsync.py#L268-L287
def __calculate(self, t, step, int_step):
    """!
    @brief Calculates new amplitudes for oscillators in the network in line with current step.
    
    @param[in] t (double): Time of simulation.
    @param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.
    @param[in] int_step (double): Step differentiation that is used for solving differential equation.
    
    @return (list) New states (phases) for oscillators.
    
    """
    
    next_amplitudes = [0.0] * self._num_osc;
    
    # Integrate each oscillator's complex amplitude independently over
    # [t - step, t).  odeint works on real arrays, so the complex state
    # is reinterpreted as a float pair for integration and converted
    # back to complex afterwards; only the final time point is kept.
    for index in range (0, self._num_osc, 1):
        z = numpy.array(self.__amplitude[index], dtype = numpy.complex128, ndmin = 1);
        result = odeint(self.__calculate_amplitude, z.view(numpy.float64), numpy.arange(t - step, t, int_step), (index , ));
        next_amplitudes[index] = (result[len(result) - 1]).view(numpy.complex128);
    
    return next_amplitudes;
[ "def", "__calculate", "(", "self", ",", "t", ",", "step", ",", "int_step", ")", ":", "next_amplitudes", "=", "[", "0.0", "]", "*", "self", ".", "_num_osc", "for", "index", "in", "range", "(", "0", ",", "self", ".", "_num_osc", ",", "1", ")", ":", ...
! @brief Calculates new amplitudes for oscillators in the network in line with current step. @param[in] t (double): Time of simulation. @param[in] step (double): Step of solution at the end of which states of oscillators should be calculated. @param[in] int_step (double): Step differentiation that is used for solving differential equation. @return (list) New states (phases) for oscillators.
[ "!" ]
python
valid
OpenMath/py-openmath
openmath/convert_pickle.py
https://github.com/OpenMath/py-openmath/blob/4906aa9ccf606f533675c28823772e07c30fd220/openmath/convert_pickle.py#L167-L178
def OMSymbol(self, module, name):
    r"""
    Helper function to build an OMS object

    EXAMPLES::

        >>> from openmath.convert_pickle import PickleConverter
        >>> converter = PickleConverter()
        >>> o = converter.OMSymbol(module="foo.bar", name="baz"); o
        OMSymbol(name='baz', cd='foo.bar', id=None, cdbase='http://python.org/')
    """
    # The converter's base URI scopes symbols coming from Python modules.
    symbol_kwargs = {'cdbase': self._cdbase, 'cd': module, 'name': name}
    return om.OMSymbol(**symbol_kwargs)
[ "def", "OMSymbol", "(", "self", ",", "module", ",", "name", ")", ":", "return", "om", ".", "OMSymbol", "(", "cdbase", "=", "self", ".", "_cdbase", ",", "cd", "=", "module", ",", "name", "=", "name", ")" ]
r""" Helper function to build an OMS object EXAMPLES:: >>> from openmath.convert_pickle import PickleConverter >>> converter = PickleConverter() >>> o = converter.OMSymbol(module="foo.bar", name="baz"); o OMSymbol(name='baz', cd='foo.bar', id=None, cdbase='http://python.org/')
[ "r", "Helper", "function", "to", "build", "an", "OMS", "object", "EXAMPLES", "::" ]
python
test
bpsmith/tia
tia/analysis/model/pos.py
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/pos.py#L138-L169
def plot_rets(self, ls=1, ax=None):
    """Plot each of the position returns

    :param ls: True, if positions should be broken into long/short
    :param ax: Axes on which to draw; defaults to the current Axes
    :return: the Axes the scatter was drawn on
    """
    import matplotlib.pyplot as plt
    from tia.util.mplot import AxesFormat
    if ax is None:
        ax = plt.gca()

    frame = self.frame
    if not ls:
        ax.scatter(frame.index, frame.ret, c='k', marker='o', label='All')
    else:
        if len(self.long_pids) > 0:
            # ``.ix`` was removed from pandas; use label/boolean ``.loc``.
            lframe = frame.loc[frame.index.isin(self.long_pids)]
            ax.scatter(lframe.index, lframe.ret, c='k', marker='o', label='Long')
        if len(self.short_pids) > 0:
            sframe = frame.loc[frame.index.isin(self.short_pids)]
            ax.scatter(sframe.index, sframe.ret, c='r', marker='o', label='Short')

    # set some boundaries
    AxesFormat().Y.percent().apply()
    ax.set_xlim(0, frame.index.max() + 3)
    ax.set_xlabel('pid')
    ax.set_ylabel('return')
    ax.legend(loc='upper left')
    return ax
[ "def", "plot_rets", "(", "self", ",", "ls", "=", "1", ",", "ax", "=", "None", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "from", "tia", ".", "util", ".", "mplot", "import", "AxesFormat", "if", "ax", "is", "None", ":", "ax", "=", ...
Plot each of the position returns :param ls: True, if positions should be broken into long/short :param ax: Axes :param regr: True, if regression line is shown
[ "Plot", "each", "of", "the", "position", "returns" ]
python
train
AlejandroFrias/case-conversion
case_conversion/case_parse.py
https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_parse.py#L126-L138
def _simple_acronym_detection(s, i, words, *args): """Detect acronyms based on runs of upper-case letters.""" # Combine each letter into a single string. acronym = ''.join(words[s:i]) # Remove original letters in word list. for _ in xrange(s, i): del words[s] # Replace them with new word grouping. words.insert(s, ''.join(acronym)) return s
[ "def", "_simple_acronym_detection", "(", "s", ",", "i", ",", "words", ",", "*", "args", ")", ":", "# Combine each letter into a single string.", "acronym", "=", "''", ".", "join", "(", "words", "[", "s", ":", "i", "]", ")", "# Remove original letters in word lis...
Detect acronyms based on runs of upper-case letters.
[ "Detect", "acronyms", "based", "on", "runs", "of", "upper", "-", "case", "letters", "." ]
python
train
Vital-Fernandez/dazer
bin/lib/CodeTools/various.py
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/CodeTools/various.py#L80-L82
def ufloatDict_stdev(self, ufloat_dict):
    'This gives us a dictionary of standard deviations from a dictionary of uncertainties'
    return OrderedDict(izip(ufloat_dict.keys(), map(lambda x: x.std_dev, ufloat_dict.values())))
[ "def", "ufloatDict_stdev", "(", "self", ",", "ufloat_dict", ")", ":", "return", "OrderedDict", "(", "izip", "(", "ufloat_dict", ".", "keys", "(", ")", ",", "map", "(", "lambda", "x", ":", "x", ".", "std_dev", ",", "ufloat_dict", ".", "values", "(", ")"...
This gives us a dictionary of nominal values from a dictionary of uncertainties
[ "This", "gives", "us", "a", "dictionary", "of", "nominal", "values", "from", "a", "dictionary", "of", "uncertainties" ]
python
train
rigetti/pyquil
pyquil/quilatom.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/quilatom.py#L195-L236
def format_parameter(element):
    """
    Formats a particular parameter. Essentially the same as built-in formatting except using 'i'
    instead of 'j' for the imaginary number.

    :param element: {int, float, long, complex, MemoryReference, Expression} Formats a parameter
        for Quil output.
    """
    if isinstance(element, integer_types) or isinstance(element, np.int_):
        return repr(element)
    elif isinstance(element, float):
        return _check_for_pi(element)
    elif isinstance(element, complex):
        out = ''
        r = element.real
        i = element.imag
        if i == 0:
            return repr(r)

        if r != 0:
            out += repr(r)

        if i == 1:
            assert np.isclose(r, 0, atol=1e-14)
            out = 'i'
        elif i == -1:
            assert np.isclose(r, 0, atol=1e-14)
            out = '-i'
        elif i < 0:
            out += repr(i) + 'i'
        elif r != 0:
            out += '+' + repr(i) + 'i'
        else:
            out += repr(i) + 'i'
        return out
    elif isinstance(element, MemoryReference):
        return str(element)
    elif isinstance(element, Expression):
        return _expression_to_string(element)
    # NOTE: a second, unreachable ``isinstance(element, MemoryReference)``
    # branch was removed here; it duplicated the earlier check exactly.
    assert False, "Invalid parameter: %r" % element
[ "def", "format_parameter", "(", "element", ")", ":", "if", "isinstance", "(", "element", ",", "integer_types", ")", "or", "isinstance", "(", "element", ",", "np", ".", "int_", ")", ":", "return", "repr", "(", "element", ")", "elif", "isinstance", "(", "e...
Formats a particular parameter. Essentially the same as built-in formatting except using 'i' instead of 'j' for the imaginary number. :param element: {int, float, long, complex, Parameter} Formats a parameter for Quil output.
[ "Formats", "a", "particular", "parameter", ".", "Essentially", "the", "same", "as", "built", "-", "in", "formatting", "except", "using", "i", "instead", "of", "j", "for", "the", "imaginary", "number", "." ]
python
train
crytic/slither
slither/detectors/statements/deprecated_calls.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/detectors/statements/deprecated_calls.py#L88-L105
def detect_deprecated_references_in_node(self, node):
    """ Detects if a node makes use of any deprecated standards.

    Returns: list of tuple: (detecting_signature, original_text, recommended_text)"""
    results = []

    # Inspect the node's expression, if any, for deprecated constructs.
    if node.expression:
        results.extend(self.detect_deprecation_in_expression(node.expression))

    # Match the node's type against the table of deprecated node types.
    results.extend(dep_node for dep_node in self.DEPRECATED_NODE_TYPES
                   if node.type == dep_node[0])

    return results
[ "def", "detect_deprecated_references_in_node", "(", "self", ",", "node", ")", ":", "# Define our results list", "results", "=", "[", "]", "# If this node has an expression, we check the underlying expression.", "if", "node", ".", "expression", ":", "results", "+=", "self", ...
Detects if a node makes use of any deprecated standards. Returns: list of tuple: (detecting_signature, original_text, recommended_text)
[ "Detects", "if", "a", "node", "makes", "use", "of", "any", "deprecated", "standards", "." ]
python
train
proycon/clam
clam/common/client.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/client.py#L530-L532
def upload(self,project, inputtemplate, sourcefile, **kwargs):
    """Alias for ``addinputfile()``; all arguments are passed through
    unchanged."""
    return self.addinputfile(project, inputtemplate,sourcefile, **kwargs)
[ "def", "upload", "(", "self", ",", "project", ",", "inputtemplate", ",", "sourcefile", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "addinputfile", "(", "project", ",", "inputtemplate", ",", "sourcefile", ",", "*", "*", "kwargs", ")" ]
Alias for ``addinputfile()``
[ "Alias", "for", "addinputfile", "()" ]
python
train
ReadabilityHoldings/python-readability-api
readability/clients.py
https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L119-L126
def get_article(self, article_id):
    """
    Get a single article represented by `article_id`.

    :param article_id: ID of the article to retrieve.
    """
    # Build the resource URL for the article, then issue the GET.
    article_url = self._generate_url('articles/{0}'.format(article_id))
    return self.get(article_url)
[ "def", "get_article", "(", "self", ",", "article_id", ")", ":", "url", "=", "self", ".", "_generate_url", "(", "'articles/{0}'", ".", "format", "(", "article_id", ")", ")", "return", "self", ".", "get", "(", "url", ")" ]
Get a single article represented by `article_id`. :param article_id: ID of the article to retrieve.
[ "Get", "a", "single", "article", "represented", "by", "article_id", "." ]
python
train
CEA-COSMIC/ModOpt
modopt/signal/svd.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/signal/svd.py#L63-L89
def calculate_svd(data):
    """Calculate Singular Value Decomposition

    This method calculates the Singular Value Decomposition (SVD) of the
    input data using SciPy.

    Parameters
    ----------
    data : np.ndarray
        Input data array, 2D matrix

    Returns
    -------
    tuple of left singular vector, singular values and right singular
    vector

    Raises
    ------
    TypeError
        For invalid data type

    """
    is_2d_array = isinstance(data, np.ndarray) and data.ndim == 2
    if not is_2d_array:
        raise TypeError('Input data must be a 2D np.ndarray.')

    # Thin SVD via the 'gesvd' LAPACK driver; finiteness check skipped
    # for speed.
    return svd(data, check_finite=False, lapack_driver='gesvd',
               full_matrices=False)
[ "def", "calculate_svd", "(", "data", ")", ":", "if", "(", "not", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ")", "or", "(", "data", ".", "ndim", "!=", "2", ")", ":", "raise", "TypeError", "(", "'Input data must be a 2D np.ndarray.'", ")",...
Calculate Singular Value Decomposition This method calculates the Singular Value Decomposition (SVD) of the input data using SciPy. Parameters ---------- data : np.ndarray Input data array, 2D matrix Returns ------- tuple of left singular vector, singular values and right singular vector Raises ------ TypeError For invalid data type
[ "Calculate", "Singular", "Value", "Decomposition" ]
python
train
SystemRDL/systemrdl-compiler
systemrdl/core/helpers.py
https://github.com/SystemRDL/systemrdl-compiler/blob/6ae64f2bb6ecbbe9db356e20e8ac94e85bdeed3a/systemrdl/core/helpers.py#L16-L27
def get_ID_text(token): """ Get the text from the ID token. Strips off leading slash escape if present """ if isinstance(token, CommonToken): text = token.text else: text = token.getText() text = text.lstrip('\\') return text
[ "def", "get_ID_text", "(", "token", ")", ":", "if", "isinstance", "(", "token", ",", "CommonToken", ")", ":", "text", "=", "token", ".", "text", "else", ":", "text", "=", "token", ".", "getText", "(", ")", "text", "=", "text", ".", "lstrip", "(", "...
Get the text from the ID token. Strips off leading slash escape if present
[ "Get", "the", "text", "from", "the", "ID", "token", ".", "Strips", "off", "leading", "slash", "escape", "if", "present" ]
python
train
olsoneric/pedemath
pedemath/vec3.py
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/vec3.py#L120-L125
def cross_v3(vec_a, vec_b): """Return the crossproduct between vec_a and vec_b.""" return Vec3(vec_a.y * vec_b.z - vec_a.z * vec_b.y, vec_a.z * vec_b.x - vec_a.x * vec_b.z, vec_a.x * vec_b.y - vec_a.y * vec_b.x)
[ "def", "cross_v3", "(", "vec_a", ",", "vec_b", ")", ":", "return", "Vec3", "(", "vec_a", ".", "y", "*", "vec_b", ".", "z", "-", "vec_a", ".", "z", "*", "vec_b", ".", "y", ",", "vec_a", ".", "z", "*", "vec_b", ".", "x", "-", "vec_a", ".", "x",...
Return the crossproduct between vec_a and vec_b.
[ "Return", "the", "crossproduct", "between", "vec_a", "and", "vec_b", "." ]
python
train
SCIP-Interfaces/PySCIPOpt
examples/finished/read_tsplib.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/read_tsplib.py#L216-L262
def read_atsplib(filename): "basic function for reading a ATSP problem on the TSPLIB format" "NOTE: only works for explicit matrices" if filename[-3:] == ".gz": f = gzip.open(filename, 'r') data = f.readlines() else: f = open(filename, 'r') data = f.readlines() for line in data: if line.find("DIMENSION") >= 0: n = int(line.split()[1]) break else: raise IOError("'DIMENSION' keyword not found in file '%s'" % filename) for line in data: if line.find("EDGE_WEIGHT_TYPE") >= 0: if line.split()[1] == "EXPLICIT": break else: raise IOError("'EDGE_WEIGHT_TYPE' is not 'EXPLICIT' in file '%s'" % filename) for k,line in enumerate(data): if line.find("EDGE_WEIGHT_SECTION") >= 0: break else: raise IOError("'EDGE_WEIGHT_SECTION' not found in file '%s'" % filename) c = {} # flatten list of distances dist = [] for line in data[k+1:]: if line.find("EOF") >= 0: break for val in line.split(): dist.append(int(val)) k = 0 for i in range(n): for j in range(n): c[i+1,j+1] = dist[k] k += 1 return n,c
[ "def", "read_atsplib", "(", "filename", ")", ":", "\"NOTE: only works for explicit matrices\"", "if", "filename", "[", "-", "3", ":", "]", "==", "\".gz\"", ":", "f", "=", "gzip", ".", "open", "(", "filename", ",", "'r'", ")", "data", "=", "f", ".", "read...
basic function for reading a ATSP problem on the TSPLIB format
[ "basic", "function", "for", "reading", "a", "ATSP", "problem", "on", "the", "TSPLIB", "format" ]
python
train
probcomp/crosscat
src/LocalEngine.py
https://github.com/probcomp/crosscat/blob/4a05bddb06a45f3b7b3e05e095720f16257d1535/src/LocalEngine.py#L322-L340
def simple_predictive_sample(self, M_c, X_L, X_D, Y, Q, seed, n=1): """Sample values from predictive distribution of the given latent state. :param Y: A list of constraints to apply when sampling. Each constraint is a triplet of (r, d, v): r is the row index, d is the column index and v is the value of the constraint :type Y: list of lists :param Q: A list of values to sample. Each value is doublet of (r, d): r is the row index, d is the column index :type Q: list of lists :param n: the number of samples to draw :type n: int :returns: list of floats. Samples in the same order specified by Q """ get_next_seed = make_get_next_seed(seed) samples = _do_simple_predictive_sample( M_c, X_L, X_D, Y, Q, n, get_next_seed) return samples
[ "def", "simple_predictive_sample", "(", "self", ",", "M_c", ",", "X_L", ",", "X_D", ",", "Y", ",", "Q", ",", "seed", ",", "n", "=", "1", ")", ":", "get_next_seed", "=", "make_get_next_seed", "(", "seed", ")", "samples", "=", "_do_simple_predictive_sample",...
Sample values from predictive distribution of the given latent state. :param Y: A list of constraints to apply when sampling. Each constraint is a triplet of (r, d, v): r is the row index, d is the column index and v is the value of the constraint :type Y: list of lists :param Q: A list of values to sample. Each value is doublet of (r, d): r is the row index, d is the column index :type Q: list of lists :param n: the number of samples to draw :type n: int :returns: list of floats. Samples in the same order specified by Q
[ "Sample", "values", "from", "predictive", "distribution", "of", "the", "given", "latent", "state", "." ]
python
train
jpoullet2000/atlasclient
atlasclient/models.py
https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/models.py#L128-L139
def create(self, **kwargs): """Create a new instance of this resource type. As a general rule, the identifier should have been provided, but in some subclasses the identifier is server-side-generated. Those classes have to overload this method to deal with that scenario. """ if self.primary_key in kwargs: del kwargs[self.primary_key] data = self._generate_input_dict(**kwargs) self.load(self.client.post('/'.join(self.url.split('/')[:-1]) + 's', data=data)) return self
[ "def", "create", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "primary_key", "in", "kwargs", ":", "del", "kwargs", "[", "self", ".", "primary_key", "]", "data", "=", "self", ".", "_generate_input_dict", "(", "*", "*", "kwargs", ...
Create a new instance of this resource type. As a general rule, the identifier should have been provided, but in some subclasses the identifier is server-side-generated. Those classes have to overload this method to deal with that scenario.
[ "Create", "a", "new", "instance", "of", "this", "resource", "type", "." ]
python
train
waqasbhatti/astrobase
astrobase/magnitudes.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/magnitudes.py#L221-L241
def jhk_to_rmag(jmag,hmag,kmag): '''Converts given J, H, Ks mags to an R magnitude value. Parameters ---------- jmag,hmag,kmag : float 2MASS J, H, Ks mags of the object. Returns ------- float The converted R band magnitude. ''' return convert_constants(jmag,hmag,kmag, RJHK, RJH, RJK, RHK, RJ, RH, RK)
[ "def", "jhk_to_rmag", "(", "jmag", ",", "hmag", ",", "kmag", ")", ":", "return", "convert_constants", "(", "jmag", ",", "hmag", ",", "kmag", ",", "RJHK", ",", "RJH", ",", "RJK", ",", "RHK", ",", "RJ", ",", "RH", ",", "RK", ")" ]
Converts given J, H, Ks mags to an R magnitude value. Parameters ---------- jmag,hmag,kmag : float 2MASS J, H, Ks mags of the object. Returns ------- float The converted R band magnitude.
[ "Converts", "given", "J", "H", "Ks", "mags", "to", "an", "R", "magnitude", "value", "." ]
python
valid
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/service_endpoint/service_endpoint_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/service_endpoint/service_endpoint_client.py#L202-L218
def update_service_endpoints(self, endpoints, project): """UpdateServiceEndpoints. [Preview API] Update the service endpoints. :param [ServiceEndpoint] endpoints: Names of the service endpoints to update. :param str project: Project ID or project name :rtype: [ServiceEndpoint] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(endpoints, '[ServiceEndpoint]') response = self._send(http_method='PUT', location_id='e85f1c62-adfc-4b74-b618-11a150fb195e', version='5.0-preview.2', route_values=route_values, content=content) return self._deserialize('[ServiceEndpoint]', self._unwrap_collection(response))
[ "def", "update_service_endpoints", "(", "self", ",", "endpoints", ",", "project", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", ...
UpdateServiceEndpoints. [Preview API] Update the service endpoints. :param [ServiceEndpoint] endpoints: Names of the service endpoints to update. :param str project: Project ID or project name :rtype: [ServiceEndpoint]
[ "UpdateServiceEndpoints", ".", "[", "Preview", "API", "]", "Update", "the", "service", "endpoints", ".", ":", "param", "[", "ServiceEndpoint", "]", "endpoints", ":", "Names", "of", "the", "service", "endpoints", "to", "update", ".", ":", "param", "str", "pro...
python
train
pandas-dev/pandas
pandas/io/pytables.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L4254-L4264
def get_attrs(self): """ retrieve our attributes """ self.non_index_axes = [] self.nan_rep = None self.levels = [] self.index_axes = [a.infer(self) for a in self.indexables if a.is_an_indexable] self.values_axes = [a.infer(self) for a in self.indexables if not a.is_an_indexable] self.data_columns = [a.name for a in self.values_axes]
[ "def", "get_attrs", "(", "self", ")", ":", "self", ".", "non_index_axes", "=", "[", "]", "self", ".", "nan_rep", "=", "None", "self", ".", "levels", "=", "[", "]", "self", ".", "index_axes", "=", "[", "a", ".", "infer", "(", "self", ")", "for", "...
retrieve our attributes
[ "retrieve", "our", "attributes" ]
python
train
aiogram/aiogram
aiogram/dispatcher/filters/filters.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/dispatcher/filters/filters.py#L247-L263
async def check(self, *args): """ All filters must return a positive result :param args: :return: """ data = {} for target in self.targets: result = await target(*args) if not result: return False if isinstance(result, dict): data.update(result) if not data: return True return data
[ "async", "def", "check", "(", "self", ",", "*", "args", ")", ":", "data", "=", "{", "}", "for", "target", "in", "self", ".", "targets", ":", "result", "=", "await", "target", "(", "*", "args", ")", "if", "not", "result", ":", "return", "False", "...
All filters must return a positive result :param args: :return:
[ "All", "filters", "must", "return", "a", "positive", "result" ]
python
train