repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
helixyte/everest
everest/resources/storing.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/resources/storing.py#L131-L138
def load_collection_from_url(resource, url, content_type=None):
    """Create a fresh staging collection for the registered resource and
    populate it from `url`.

    Delegates the actual loading to `load_into_collection_from_url` and
    returns the populated collection.
    """
    staging = create_staging_collection(resource)
    load_into_collection_from_url(staging, url, content_type=content_type)
    return staging
[ "def", "load_collection_from_url", "(", "resource", ",", "url", ",", "content_type", "=", "None", ")", ":", "coll", "=", "create_staging_collection", "(", "resource", ")", "load_into_collection_from_url", "(", "coll", ",", "url", ",", "content_type", "=", "content...
Creates a new collection for the registered resource and calls `load_into_collection_from_url` with it.
[ "Creates", "a", "new", "collection", "for", "the", "registered", "resource", "and", "calls", "load_into_collection_from_url", "with", "it", "." ]
python
train
KelSolaar/Foundations
foundations/guerilla.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/guerilla.py#L57-L77
def base_warfare(name, bases, attributes):
    """
    Adds any number of attributes to an existing class.

    :param name: Name.
    :type name: unicode
    :param bases: Bases.
    :type bases: list
    :param attributes: Attributes.
    :type attributes: dict
    :return: Base.
    :rtype: object
    """
    assert len(bases) == 1, "{0} | '{1}' object has multiple bases!".format(__name__, name)

    base = foundations.common.get_first_item(bases)
    # items() works on both Python 2 and 3 (iteritems() raised
    # AttributeError on Python 3); also use a dedicated loop variable so
    # the 'name' parameter is not clobbered, as the original code did.
    for attribute_name, value in attributes.items():
        if attribute_name != "__metaclass__":
            setattr(base, attribute_name, value)
    return base
[ "def", "base_warfare", "(", "name", ",", "bases", ",", "attributes", ")", ":", "assert", "len", "(", "bases", ")", "==", "1", ",", "\"{0} | '{1}' object has multiple bases!\"", ".", "format", "(", "__name__", ",", "name", ")", "base", "=", "foundations", "."...
Adds any number of attributes to an existing class. :param name: Name. :type name: unicode :param bases: Bases. :type bases: list :param attributes: Attributes. :type attributes: dict :return: Base. :rtype: object
[ "Adds", "any", "number", "of", "attributes", "to", "an", "existing", "class", "." ]
python
train
ojake/django-tracked-model
tracked_model/serializer.py
https://github.com/ojake/django-tracked-model/blob/19bc48874dd2e5fb5defedc6b8c5c3915cce1424/tracked_model/serializer.py#L15-L29
def _related_field_data(field, obj):
    """Returns relation ``field`` as a dict.

    Dict contains related pk info and some meta information
    for reconstructing objects.
    """
    remote_model = field.rel.to
    data = _basic_field_data(field, obj)
    data[Field.TYPE] = FieldType.REL
    data[Field.REL] = {
        Field.REL_DB_TABLE: remote_model._meta.db_table,
        Field.REL_APP: remote_model._meta.app_label,
        Field.REL_MODEL: remote_model.__name__,
    }
    return data
[ "def", "_related_field_data", "(", "field", ",", "obj", ")", ":", "data", "=", "_basic_field_data", "(", "field", ",", "obj", ")", "relation_info", "=", "{", "Field", ".", "REL_DB_TABLE", ":", "field", ".", "rel", ".", "to", ".", "_meta", ".", "db_table"...
Returns relation ``field`` as a dict. Dict contains related pk info and some meta information for reconstructing objects.
[ "Returns", "relation", "field", "as", "a", "dict", "." ]
python
train
opendatateam/udata
udata/features/transfer/actions.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L19-L31
def request_transfer(subject, recipient, comment):
    '''Initiate a transfer request'''
    TransferPermission(subject).test()

    current_owner = subject.organization or subject.owner
    if recipient == current_owner:
        raise ValueError(
            'Recipient should be different than the current owner')

    return Transfer.objects.create(
        owner=current_owner,
        recipient=recipient,
        subject=subject,
        comment=comment
    )
[ "def", "request_transfer", "(", "subject", ",", "recipient", ",", "comment", ")", ":", "TransferPermission", "(", "subject", ")", ".", "test", "(", ")", "if", "recipient", "==", "(", "subject", ".", "organization", "or", "subject", ".", "owner", ")", ":", ...
Initiate a transfer request
[ "Initiate", "a", "transfer", "request" ]
python
train
marrow/util
marrow/util/compat.py
https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/compat.py#L106-L115
def native(s, encoding='utf-8', fallback='iso-8859-1'):
    """Convert a given string into a native string."""
    if isinstance(s, str):
        return s
    # On Python 2 the native string type is the byte string; on Python 3
    # the compat layer aliases 'unicode' to 'str', so the identity test
    # distinguishes the two interpreters.
    if str is not unicode:
        return bytestring(s, encoding, fallback)
    return unicodestr(s, encoding, fallback)  # Python 3.x ->
[ "def", "native", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "fallback", "=", "'iso-8859-1'", ")", ":", "if", "isinstance", "(", "s", ",", "str", ")", ":", "return", "s", "if", "str", "is", "unicode", ":", "# Python 3.x ->", "return", "unicodestr", ...
Convert a given string into a native string.
[ "Convert", "a", "given", "string", "into", "a", "native", "string", "." ]
python
train
log2timeline/plaso
plaso/parsers/firefox_cache.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/firefox_cache.py#L156-L204
def _GetFirefoxConfig(self, file_object, display_name): """Determine cache file block size. Args: file_object (dfvfs.FileIO): a file-like object. display_name (str): display name. Returns: firefox_cache_config: namedtuple containing the block size and first record offset. Raises: UnableToParseFile: if no valid cache record could be found. """ # There ought to be a valid record within the first 4 MiB. We use this # limit to prevent reading large invalid files. to_read = min(file_object.get_size(), self._INITIAL_CACHE_FILE_SIZE) while file_object.get_offset() < to_read: offset = file_object.get_offset() try: cache_entry, _ = self._ReadCacheEntry( file_object, display_name, self._MINIMUM_BLOCK_SIZE) # We have not yet determined the block size, so we use the smallest # possible size. record_size = ( self._CACHE_ENTRY_HEADER_SIZE + cache_entry.request_size + cache_entry.information_size) if record_size >= 4096: # _CACHE_003_ block_size = 4096 elif record_size >= 1024: # _CACHE_002_ block_size = 1024 else: # _CACHE_001_ block_size = 256 return self.FIREFOX_CACHE_CONFIG(block_size, offset) except IOError: logger.debug('[{0:s}] {1:s}:{2:d}: Invalid record.'.format( self.NAME, display_name, offset)) raise errors.UnableToParseFile( 'Could not find a valid cache record. Not a Firefox cache file.')
[ "def", "_GetFirefoxConfig", "(", "self", ",", "file_object", ",", "display_name", ")", ":", "# There ought to be a valid record within the first 4 MiB. We use this", "# limit to prevent reading large invalid files.", "to_read", "=", "min", "(", "file_object", ".", "get_size", "...
Determine cache file block size. Args: file_object (dfvfs.FileIO): a file-like object. display_name (str): display name. Returns: firefox_cache_config: namedtuple containing the block size and first record offset. Raises: UnableToParseFile: if no valid cache record could be found.
[ "Determine", "cache", "file", "block", "size", "." ]
python
train
PythonCharmers/python-future
src/libfuturize/fixer_util.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/libfuturize/fixer_util.py#L502-L520
def wrap_in_fn_call(fn_name, args, prefix=None):
    """
    Example:
    >>> wrap_in_fn_call("oldstr", (arg,))
    oldstr(arg)

    >>> wrap_in_fn_call("olddiv", (arg1, arg2))
    olddiv(arg1, arg2)

    >>> wrap_in_fn_call("olddiv", [arg1, comma, arg2, comma, arg3])
    olddiv(arg1, arg2, arg3)
    """
    assert len(args) > 0
    if len(args) != 2:
        # one arg, or a pre-built list that already contains Comma nodes
        newargs = args
    else:
        first, second = args
        newargs = [first, Comma(), second]
    return Call(Name(fn_name), newargs, prefix=prefix)
[ "def", "wrap_in_fn_call", "(", "fn_name", ",", "args", ",", "prefix", "=", "None", ")", ":", "assert", "len", "(", "args", ")", ">", "0", "if", "len", "(", "args", ")", "==", "2", ":", "expr1", ",", "expr2", "=", "args", "newargs", "=", "[", "exp...
Example: >>> wrap_in_fn_call("oldstr", (arg,)) oldstr(arg) >>> wrap_in_fn_call("olddiv", (arg1, arg2)) olddiv(arg1, arg2) >>> wrap_in_fn_call("olddiv", [arg1, comma, arg2, comma, arg3]) olddiv(arg1, arg2, arg3)
[ "Example", ":", ">>>", "wrap_in_fn_call", "(", "oldstr", "(", "arg", "))", "oldstr", "(", "arg", ")" ]
python
train
seleniumbase/SeleniumBase
seleniumbase/fixtures/page_actions.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/page_actions.py#L136-L158
def hover_element_and_click(driver, element, click_selector,
                            click_by=By.CSS_SELECTOR,
                            timeout=settings.SMALL_TIMEOUT):
    """
    Similar to hover_and_click(), but assumes top element is already found.
    """
    start_ms = time.time() * 1000.0
    stop_ms = start_ms + (timeout * 1000.0)
    ActionChains(driver).move_to_element(element).perform()
    # Poll for the click target roughly 10 times per second until timeout.
    for _ in range(int(timeout * 10)):
        try:
            target = driver.find_element(by=click_by, value=click_selector)
            target.click()
            return target
        except Exception:
            if time.time() * 1000.0 >= stop_ms:
                break
            time.sleep(0.1)
    raise NoSuchElementException(
        "Element {%s} was not present after %s seconds!" % (
            click_selector, timeout))
[ "def", "hover_element_and_click", "(", "driver", ",", "element", ",", "click_selector", ",", "click_by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "settings", ".", "SMALL_TIMEOUT", ")", ":", "start_ms", "=", "time", ".", "time", "(", ")", "*", "...
Similar to hover_and_click(), but assumes top element is already found.
[ "Similar", "to", "hover_and_click", "()", "but", "assumes", "top", "element", "is", "already", "found", "." ]
python
train
gwastro/pycbc
pycbc/inference/sampler/base_mcmc.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base_mcmc.py#L372-L383
def p0(self):
    """A dictionary of the initial position of the walkers.

    This is set by using ``set_p0``. If not set yet, a ``ValueError`` is
    raised when the attribute is accessed.
    """
    if self._p0 is None:
        raise ValueError("initial positions not set; run set_p0")
    # map each sampling parameter to its slice of the position array
    return {param: self._p0[..., idx]
            for idx, param in enumerate(self.sampling_params)}
[ "def", "p0", "(", "self", ")", ":", "if", "self", ".", "_p0", "is", "None", ":", "raise", "ValueError", "(", "\"initial positions not set; run set_p0\"", ")", "# convert to dict", "p0", "=", "{", "param", ":", "self", ".", "_p0", "[", "...", ",", "k", "]...
A dictionary of the initial position of the walkers. This is set by using ``set_p0``. If not set yet, a ``ValueError`` is raised when the attribute is accessed.
[ "A", "dictionary", "of", "the", "initial", "position", "of", "the", "walkers", "." ]
python
train
shoeffner/cvloop
cvloop/functions.py
https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/functions.py#L188-L211
def find_faces(self, image, draw_box=False):
    """Uses a haarcascade to detect faces inside an image.

    Args:
        image: The image.
        draw_box: If True, the image will be marked with a rectangle.

    Return:
        The faces as returned by OpenCV's detectMultiScale method for
        cascades.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    faces = self.cascade.detectMultiScale(
        gray,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(50, 50),
        flags=0)
    if draw_box:
        # draw a green 2px rectangle around every detection, in place
        for left, top, width, height in faces:
            cv2.rectangle(image,
                          (left, top),
                          (left + width, top + height),
                          (0, 255, 0), 2)
    return faces
[ "def", "find_faces", "(", "self", ",", "image", ",", "draw_box", "=", "False", ")", ":", "frame_gray", "=", "cv2", ".", "cvtColor", "(", "image", ",", "cv2", ".", "COLOR_RGB2GRAY", ")", "faces", "=", "self", ".", "cascade", ".", "detectMultiScale", "(", ...
Uses a haarcascade to detect faces inside an image. Args: image: The image. draw_box: If True, the image will be marked with a rectangle. Return: The faces as returned by OpenCV's detectMultiScale method for cascades.
[ "Uses", "a", "haarcascade", "to", "detect", "faces", "inside", "an", "image", "." ]
python
train
pyopenapi/pyswagger
pyswagger/primitives/render.py
https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/primitives/render.py#L266-L285
def render(self, obj, opt=None):
    """ render a Schema/Parameter

    :param obj Schema/Parameter: the swagger spec object
    :param opt dict: render option
    :return: values that can be passed to Operation.__call__
    :rtype: depends on type of 'obj'
    """
    # 'is None' rather than '== None': identity is the correct idiomatic
    # test for a missing option (PEP 8 E711); '==' may invoke custom __eq__.
    opt = self.default() if opt is None else opt
    if not isinstance(opt, dict):
        raise ValueError('Not a dict: {0}'.format(opt))

    if isinstance(obj, Parameter):
        # body parameters carry their actual spec in 'schema'
        if getattr(obj, 'in', None) == 'body':
            return self._generate(obj.schema, opt)
        return self._generate(obj, opt=opt)
    elif isinstance(obj, Schema):
        return self._generate(obj, opt)
    else:
        raise ValueError('Not a Schema/Parameter: {0}'.format(obj))
[ "def", "render", "(", "self", ",", "obj", ",", "opt", "=", "None", ")", ":", "opt", "=", "self", ".", "default", "(", ")", "if", "opt", "==", "None", "else", "opt", "if", "not", "isinstance", "(", "opt", ",", "dict", ")", ":", "raise", "ValueErro...
render a Schema/Parameter :param obj Schema/Parameter: the swagger spec object :param opt dict: render option :return: values that can be passed to Operation.__call__ :rtype: depends on type of 'obj'
[ "render", "a", "Schema", "/", "Parameter" ]
python
train
Miserlou/SoundScrape
soundscrape/soundscrape.py
https://github.com/Miserlou/SoundScrape/blob/efc63b99ce7e78b352e2ba22d5e51f83445546d7/soundscrape/soundscrape.py#L515-L529
def get_hard_track_url(item_id):
    """
    Hard-scrapes a track.

    Returns the 128kbps MP3 stream URL for the given track id, or None
    when the API does not answer with HTTP 200.
    """
    streams_url = "https://api.soundcloud.com/i1/tracks/%s/streams/?client_id=%s&app_version=%s" % (
        item_id, AGGRESSIVE_CLIENT_ID, APP_VERSION)
    response = requests.get(streams_url)
    # Only parse the body after confirming success: error responses are
    # often not JSON at all, and response.json() would raise on them
    # instead of letting us return None.
    if response.status_code == 200:
        json_response = response.json()
        return json_response['http_mp3_128_url']
    return None
[ "def", "get_hard_track_url", "(", "item_id", ")", ":", "streams_url", "=", "\"https://api.soundcloud.com/i1/tracks/%s/streams/?client_id=%s&app_version=%s\"", "%", "(", "item_id", ",", "AGGRESSIVE_CLIENT_ID", ",", "APP_VERSION", ")", "response", "=", "requests", ".", "get",...
Hard-scrapes a track.
[ "Hard", "-", "scrapes", "a", "track", "." ]
python
train
savoirfairelinux/num2words
num2words/lang_JA.py
https://github.com/savoirfairelinux/num2words/blob/f4b2bac098ae8e4850cf2f185f6ff52a5979641f/num2words/lang_JA.py#L25-L42
def select_text(text, reading=False, prefer=None):
    """Select the correct text from the Japanese number, reading
    and alternatives"""
    # choose between the kanji spelling (index 0) and the kana reading
    text = text[1] if reading else text[0]

    # if several alternatives remain, pick the preferred one when it is
    # unambiguous, otherwise fall back to the first alternative
    if not isinstance(text, strtype):
        preferred = set(text) & set(prefer or set())
        text = preferred.pop() if len(preferred) == 1 else text[0]
    return text
[ "def", "select_text", "(", "text", ",", "reading", "=", "False", ",", "prefer", "=", "None", ")", ":", "# select kanji number or kana reading", "if", "reading", ":", "text", "=", "text", "[", "1", "]", "else", ":", "text", "=", "text", "[", "0", "]", "...
Select the correct text from the Japanese number, reading and alternatives
[ "Select", "the", "correct", "text", "from", "the", "Japanese", "number", "reading", "and", "alternatives" ]
python
test
senaite/senaite.core
bika/lims/exportimport/instruments/resultsimport.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/resultsimport.py#L857-L892
def calculateTotalResults(self, objid, analysis):
    """ If an AR(objid) has an analysis that has a calculation
    then check if param analysis is used on the calculations formula.
    Here we are dealing with two types of analysis.
    1. Calculated Analysis - Results are calculated.
    2. Analysis - Results are captured and not calculated
    :param objid: AR ID or Worksheet's Reference Sample IDs
    :param analysis: Analysis Object
    """
    analysis_keyword = analysis.getKeyword()
    for calc_analysis in self._getZODBAnalyses(objid):
        # Only analyses whose result comes from a calculation are relevant.
        calculation = calc_analysis.getCalculation()
        if not calculation:
            continue
        # If the keyword of the analysis we are on does not appear in the
        # calculation formula, this calculated analysis does not depend
        # on it and can be skipped.
        formula = calculation.getMinifiedFormula()
        if analysis_keyword not in formula:
            continue
        calc_passed = calc_analysis.calculateResult(
            override=self._override[1])
        if calc_passed:
            api.do_transition_for(calc_analysis, "submit")
            self.log(
                "${request_id}: calculated result for "
                "'${analysis_keyword}': '${analysis_result}'",
                mapping={"request_id": objid,
                         "analysis_keyword": calc_analysis.getKeyword(),
                         "analysis_result": str(calc_analysis.getResult())}
            )
[ "def", "calculateTotalResults", "(", "self", ",", "objid", ",", "analysis", ")", ":", "analyses", "=", "self", ".", "_getZODBAnalyses", "(", "objid", ")", "# Filter Analyses With Calculation", "analyses_with_calculation", "=", "filter", "(", "lambda", "an", ":", "...
If an AR(objid) has an analysis that has a calculation then check if param analysis is used on the calculations formula. Here we are dealing with two types of analysis. 1. Calculated Analysis - Results are calculated. 2. Analysis - Results are captured and not calculated :param objid: AR ID or Worksheet's Reference Sample IDs :param analysis: Analysis Object
[ "If", "an", "AR", "(", "objid", ")", "has", "an", "analysis", "that", "has", "a", "calculation", "then", "check", "if", "param", "analysis", "is", "used", "on", "the", "calculations", "formula", ".", "Here", "we", "are", "dealing", "with", "two", "types"...
python
train
Chilipp/sphinx-nbexamples
sphinx_nbexamples/__init__.py
https://github.com/Chilipp/sphinx-nbexamples/blob/08e0319ff3c70f8a931dfa8890caf48add4d0470/sphinx_nbexamples/__init__.py#L786-L875
def recursive_processing(self, base_dir, target_dir, it):
    """Method to recursivly process the notebooks in the `base_dir`

    Parameters
    ----------
    base_dir: str
        Path to the base example directory (see the `examples_dir`
        parameter for the :class:`Gallery` class)
    target_dir: str
        Path to the output directory for the rst files (see the
        `gallery_dirs` parameter for the :class:`Gallery` class)
    it: iterable
        The iterator over the subdirectories and files in `base_dir`
        generated by the :func:`os.walk` function"""
    # Pull the next directory from the os.walk iterator; an exhausted
    # iterator means there is nothing (more) to process.
    try:
        file_dir, dirs, files = next(it)
    except StopIteration:
        return '', []
    readme_files = {'README.md', 'README.rst', 'README.txt'}
    if readme_files.intersection(files):
        # Mirror the source directory layout under the target directory.
        foutdir = file_dir.replace(base_dir, target_dir)
        create_dirs(foutdir)
        # One NotebookProcessor per notebook matching self.pattern; the
        # per-file flags resolve "process everything" (True) vs. explicit
        # per-file opt-in/opt-out lists.
        this_nbps = [
            NotebookProcessor(
                infile=f,
                outfile=os.path.join(foutdir, os.path.basename(f)),
                disable_warnings=self.disable_warnings,
                preprocess=(
                    (self.preprocess is True or f in self.preprocess) and
                    not (self.dont_preprocess is True or
                         f in self.dont_preprocess)),
                clear=((self.clear is True or f in self.clear) and not
                       (self.dont_clear is True or f in self.dont_clear)),
                code_example=self.code_examples.get(f),
                supplementary_files=self.supplementary_files.get(f),
                other_supplementary_files=self.osf.get(f),
                thumbnail_figure=self.thumbnail_figures.get(f),
                url=self.get_url(f.replace(base_dir, '')),
                **self._nbp_kws)
            for f in map(lambda f: os.path.join(file_dir, f),
                         filter(self.pattern.match, files))]
        readme_file = next(iter(readme_files.intersection(files)))
    else:
        # Directories without a README are not galleries; skip them.
        return '', []
    labels = OrderedDict()
    # Derive a unique sphinx label from the output path.
    this_label = 'gallery_' + foutdir.replace(os.path.sep, '_')
    if this_label.endswith('_'):
        this_label = this_label[:-1]
    # Recurse into the subdirectories (consuming the shared iterator) and
    # collect their labels and notebook processors.
    for d in dirs:
        label, nbps = self.recursive_processing(
            base_dir, target_dir, it)
        if label:
            labels[label] = nbps
    # Build the index.rst content: label anchor, README text, toctree,
    # then the thumbnail/code gallery for this directory and each child.
    s = ".. _%s:\n\n" % this_label
    with open(os.path.join(file_dir, readme_file)) as f:
        s += f.read().rstrip() + '\n\n'
    s += "\n\n.. toctree::\n\n"
    s += ''.join('    %s\n' % os.path.splitext(os.path.basename(
        nbp.get_out_file()))[0] for nbp in this_nbps)
    for d in dirs:
        findex = os.path.join(d, 'index.rst')
        if os.path.exists(os.path.join(foutdir, findex)):
            s += '    %s\n' % os.path.splitext(findex)[0]
    s += '\n'
    for nbp in this_nbps:
        code_div = nbp.code_div
        if code_div is not None:
            s += code_div + '\n'
        else:
            s += nbp.thumbnail_div + '\n'
    s += "\n.. raw:: html\n\n    <div style='clear:both'></div>\n"
    for label, nbps in labels.items():
        s += '\n.. only:: html\n\n    .. rubric:: :ref:`%s`\n\n' % (
            label)
        for nbp in nbps:
            code_div = nbp.code_div
            if code_div is not None:
                s += code_div + '\n'
            else:
                s += nbp.thumbnail_div + '\n'
        s += "\n.. raw:: html\n\n    <div style='clear:both'></div>\n"
    s += '\n'
    with open(os.path.join(foutdir, 'index.rst'), 'w') as f:
        f.write(s)
    return this_label, list(chain(this_nbps, *labels.values()))
[ "def", "recursive_processing", "(", "self", ",", "base_dir", ",", "target_dir", ",", "it", ")", ":", "try", ":", "file_dir", ",", "dirs", ",", "files", "=", "next", "(", "it", ")", "except", "StopIteration", ":", "return", "''", ",", "[", "]", "readme_...
Method to recursivly process the notebooks in the `base_dir` Parameters ---------- base_dir: str Path to the base example directory (see the `examples_dir` parameter for the :class:`Gallery` class) target_dir: str Path to the output directory for the rst files (see the `gallery_dirs` parameter for the :class:`Gallery` class) it: iterable The iterator over the subdirectories and files in `base_dir` generated by the :func:`os.walk` function
[ "Method", "to", "recursivly", "process", "the", "notebooks", "in", "the", "base_dir" ]
python
test
istresearch/scrapy-cluster
crawler/crawling/meta_passthrough_middleware.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/meta_passthrough_middleware.py#L10-L31
def setup(self, settings):
    '''
    Does the actual setup of the middleware
    '''
    # gather the scrapy-cluster logging settings, falling back to defaults
    log_conf = {
        'level': settings.get('SC_LOG_LEVEL', 'INFO'),
        'name': settings.get('SC_LOGGER_NAME', 'sc-logger'),
        'stdout': settings.get('SC_LOG_STDOUT', True),
        'json': settings.get('SC_LOG_JSON', False),
        'dir': settings.get('SC_LOG_DIR', 'logs'),
        'bytes': settings.get('SC_LOG_MAX_BYTES', '10MB'),
        'file': settings.get('SC_LOG_FILE', 'main.log'),
        'backups': settings.get('SC_LOG_BACKUPS', 5),
    }
    self.logger = LogFactory.get_instance(**log_conf)
[ "def", "setup", "(", "self", ",", "settings", ")", ":", "# set up the default sc logger", "my_level", "=", "settings", ".", "get", "(", "'SC_LOG_LEVEL'", ",", "'INFO'", ")", "my_name", "=", "settings", ".", "get", "(", "'SC_LOGGER_NAME'", ",", "'sc-logger'", "...
Does the actual setup of the middleware
[ "Does", "the", "actual", "setup", "of", "the", "middleware" ]
python
train
pytroll/pyspectral
pyspectral/config.py
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/pyspectral/config.py#L71-L87
def get_config():
    """Get the configuration from file"""
    configfile = BUILTIN_CONFIG_FILE if CONFIG_FILE is None else CONFIG_FILE

    config = {}
    with open(configfile, 'r') as fp_:
        # NOTE(review): UnsafeLoader executes arbitrary YAML tags — fine
        # for a trusted local config file, but never point this at
        # untrusted input.
        config = recursive_dict_update(
            config, yaml.load(fp_, Loader=UnsafeLoader))

    user_datadir = AppDirs('pyspectral', 'pytroll').user_data_dir
    # default both data directories to the per-user app data dir
    for key in ('rsr_dir', 'rayleigh_dir'):
        config[key] = expanduser(config.get(key, user_datadir))

    return config
[ "def", "get_config", "(", ")", ":", "if", "CONFIG_FILE", "is", "not", "None", ":", "configfile", "=", "CONFIG_FILE", "else", ":", "configfile", "=", "BUILTIN_CONFIG_FILE", "config", "=", "{", "}", "with", "open", "(", "configfile", ",", "'r'", ")", "as", ...
Get the configuration from file
[ "Get", "the", "configuration", "from", "file" ]
python
train
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L690-L700
def bsp_traverse_post_order(
    node: tcod.bsp.BSP,
    callback: Callable[[tcod.bsp.BSP, Any], None],
    userData: Any = 0,
) -> None:
    """Traverse this nodes hierarchy with a callback.

    .. deprecated:: 2.0
        Use :any:`BSP.post_order` instead.
    """
    nodes = node.post_order()
    _bsp_traverse(nodes, callback, userData)
[ "def", "bsp_traverse_post_order", "(", "node", ":", "tcod", ".", "bsp", ".", "BSP", ",", "callback", ":", "Callable", "[", "[", "tcod", ".", "bsp", ".", "BSP", ",", "Any", "]", ",", "None", "]", ",", "userData", ":", "Any", "=", "0", ",", ")", "-...
Traverse this nodes hierarchy with a callback. .. deprecated:: 2.0 Use :any:`BSP.post_order` instead.
[ "Traverse", "this", "nodes", "hierarchy", "with", "a", "callback", "." ]
python
train
Azure/blobxfer
blobxfer/util.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/util.py#L146-L157
def join_thread(thr):
    # type: (threading.Thread) -> None
    """Join a thread
    :type threading.Thread thr: thread to join
    """
    if on_python2():
        # Poll with a timeout so the main thread stays responsive to
        # signals on Python 2 (a bare join() blocks KeyboardInterrupt).
        while True:
            thr.join(timeout=1)
            # is_alive() instead of the camelCase isAlive() alias: the
            # alias was deprecated and removed in Python 3.9, while
            # is_alive() exists since Python 2.6.
            if not thr.is_alive():
                break
    else:
        thr.join()
[ "def", "join_thread", "(", "thr", ")", ":", "# type: (threading.Thread) -> None", "if", "on_python2", "(", ")", ":", "while", "True", ":", "thr", ".", "join", "(", "timeout", "=", "1", ")", "if", "not", "thr", ".", "isAlive", "(", ")", ":", "break", "e...
Join a thread :type threading.Thread thr: thread to join
[ "Join", "a", "thread", ":", "type", "threading", ".", "Thread", "thr", ":", "thread", "to", "join" ]
python
train
biolink/ontobio
ontobio/sim/api/owlsim2.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim2.py#L249-L266
def filtered_search(self,
                    id_list: List,
                    negated_classes: List,
                    limit: Optional[int] = 100,
                    taxon_filter: Optional[int] = None,
                    category_filter: Optional[str] = None,
                    method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult:
    """
    Owlsim2 filtered search, resolves taxon and category to a namespace,
    calls search_by_attribute_set, and converts to SimResult object
    """
    # owlsim2 has no notion of negation; warn rather than fail
    if negated_classes:
        logging.warning("Owlsim2 does not support negation, ignoring neg classes")

    namespace_filter = self._get_namespace_filter(
        taxon_filter, category_filter)
    owlsim_results = search_by_attribute_set(
        self.url, tuple(id_list), limit, namespace_filter)
    return self._simsearch_to_simresult(owlsim_results, method)
[ "def", "filtered_search", "(", "self", ",", "id_list", ":", "List", ",", "negated_classes", ":", "List", ",", "limit", ":", "Optional", "[", "int", "]", "=", "100", ",", "taxon_filter", ":", "Optional", "[", "int", "]", "=", "None", ",", "category_filter...
Owlsim2 filtered search, resolves taxon and category to a namespace, calls search_by_attribute_set, and converts to SimResult object
[ "Owlsim2", "filtered", "search", "resolves", "taxon", "and", "category", "to", "a", "namespace", "calls", "search_by_attribute_set", "and", "converts", "to", "SimResult", "object" ]
python
train
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L3510-L3534
def onMessageUnsent(
    self,
    mid=None,
    author_id=None,
    thread_id=None,
    thread_type=None,
    ts=None,
    msg=None,
):
    """
    Called when the client is listening, and someone unsends (deletes for everyone) a message

    :param mid: ID of the unsent message
    :param author_id: The ID of the person who unsent the message
    :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
    :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
    :param ts: A timestamp of the action
    :param msg: A full set of the data received
    :type thread_type: models.ThreadType
    """
    message = "{} unsent the message {} in {} ({}) at {}s".format(
        author_id, repr(mid), thread_id, thread_type.name, ts / 1000
    )
    log.info(message)
[ "def", "onMessageUnsent", "(", "self", ",", "mid", "=", "None", ",", "author_id", "=", "None", ",", "thread_id", "=", "None", ",", "thread_type", "=", "None", ",", "ts", "=", "None", ",", "msg", "=", "None", ",", ")", ":", "log", ".", "info", "(", ...
Called when the client is listening, and someone unsends (deletes for everyone) a message :param mid: ID of the unsent message :param author_id: The ID of the person who unsent the message :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads` :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads` :param ts: A timestamp of the action :param msg: A full set of the data recieved :type thread_type: models.ThreadType
[ "Called", "when", "the", "client", "is", "listening", "and", "someone", "unsends", "(", "deletes", "for", "everyone", ")", "a", "message" ]
python
train
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/manage.py
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/manage.py#L44-L83
def load(name, filepath, separator="---"):
    """Load given file into knowledge base.

    Simply load data into an existing knowledge base:

    .. code-block:: console

        $ inveniomanage knowledge load mykb /path/to/file.kb

    The file is expected to have a mapping with values:
    ``foo<seperator>bar`` (per line).

    ``<separator>`` is by default set to **---**, but can be overridden
    with ``-s someseperator`` or ``--sep someseperator``.
    """
    current_app.logger.info(
        ">>> Going to load knowledge base {0} into '{1}'...".format(
            filepath, name
        )
    )
    if not os.path.isfile(filepath):
        # Logger.error() takes no 'file' keyword (that belongs to print());
        # the original file=sys.stderr argument raised a TypeError instead
        # of logging. The trailing newline is likewise a print() leftover.
        current_app.logger.error("Path to non-existing file")
        sys.exit(1)
    try:
        get_kb_by_name(name)
    except NoResultFound:
        current_app.logger.error("KB does not exist")
        sys.exit(1)
    num_added = load_kb_mappings_file(name, filepath, separator)
    current_app.logger.info(
        ">>> Knowledge '{0}' updated successfully with {1} entries.".format(
            name, num_added
        )
    )
[ "def", "load", "(", "name", ",", "filepath", ",", "separator", "=", "\"---\"", ")", ":", "current_app", ".", "logger", ".", "info", "(", "\">>> Going to load knowledge base {0} into '{1}'...\"", ".", "format", "(", "filepath", ",", "name", ")", ")", "if", "not...
Load given file into knowledge base. Simply load data into an existing knowledge base: .. code-block:: console $ inveniomanage knowledge load mykb /path/to/file.kb The file is expected to have a mapping with values: ``foo<seperator>bar`` (per line). ``<separator>`` is by default set to **---**, but can be overridden with ``-s someseperator`` or ``--sep someseperator``.
[ "Load", "given", "file", "into", "knowledge", "base", "." ]
python
train
Gandi/gandi.cli
gandi/cli/modules/network.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/network.py#L324-L335
def usable_id(cls, id):
    """ Retrieve id from input which can be num or id."""
    # accept anything int() can parse; anything else is not usable
    try:
        qry_id = int(id)
    except Exception:
        qry_id = None

    if not qry_id:
        cls.error('unknown identifier %s' % id)

    return qry_id
[ "def", "usable_id", "(", "cls", ",", "id", ")", ":", "try", ":", "qry_id", "=", "int", "(", "id", ")", "except", "Exception", ":", "qry_id", "=", "None", "if", "not", "qry_id", ":", "msg", "=", "'unknown identifier %s'", "%", "id", "cls", ".", "error...
Retrieve id from input which can be num or id.
[ "Retrieve", "id", "from", "input", "which", "can", "be", "num", "or", "id", "." ]
python
train
keon/algorithms
algorithms/backtrack/pattern_match.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/backtrack/pattern_match.py#L17-L42
def pattern_match(pattern, string): """ :type pattern: str :type string: str :rtype: bool """ def backtrack(pattern, string, dic): if len(pattern) == 0 and len(string) > 0: return False if len(pattern) == len(string) == 0: return True for end in range(1, len(string)-len(pattern)+2): if pattern[0] not in dic and string[:end] not in dic.values(): dic[pattern[0]] = string[:end] if backtrack(pattern[1:], string[end:], dic): return True del dic[pattern[0]] elif pattern[0] in dic and dic[pattern[0]] == string[:end]: if backtrack(pattern[1:], string[end:], dic): return True return False return backtrack(pattern, string, {})
[ "def", "pattern_match", "(", "pattern", ",", "string", ")", ":", "def", "backtrack", "(", "pattern", ",", "string", ",", "dic", ")", ":", "if", "len", "(", "pattern", ")", "==", "0", "and", "len", "(", "string", ")", ">", "0", ":", "return", "False...
:type pattern: str :type string: str :rtype: bool
[ ":", "type", "pattern", ":", "str", ":", "type", "string", ":", "str", ":", "rtype", ":", "bool" ]
python
train
bukun/TorCMS
helper_scripts/script_meta_xlsx_import_v2.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/helper_scripts/script_meta_xlsx_import_v2.py#L29-L59
def get_meta(catid, sig): ''' Get metadata of dataset via ID. ''' meta_base = './static/dataset_list' if os.path.exists(meta_base): pass else: return False pp_data = {'logo': '', 'kind': '9'} for wroot, wdirs, wfiles in os.walk(meta_base): for wdir in wdirs: if wdir.lower().endswith(sig): # Got the dataset of certain ID. ds_base = pathlib.Path(os.path.join(wroot, wdir)) for uu in ds_base.iterdir(): if uu.name.endswith('.xlsx'): meta_dic = chuli_meta('u' + sig[2:], uu) pp_data['title'] = meta_dic['title'] pp_data['cnt_md'] = meta_dic['anytext'] pp_data['user_name'] = 'admin' pp_data['def_cat_uid'] = catid pp_data['gcat0'] = catid pp_data['def_cat_pid'] = catid[:2] + '00' pp_data['extinfo'] = {} elif uu.name.startswith('thumbnail_'): pp_data['logo'] = os.path.join(wroot, wdir, uu.name).strip('.') return pp_data
[ "def", "get_meta", "(", "catid", ",", "sig", ")", ":", "meta_base", "=", "'./static/dataset_list'", "if", "os", ".", "path", ".", "exists", "(", "meta_base", ")", ":", "pass", "else", ":", "return", "False", "pp_data", "=", "{", "'logo'", ":", "''", ",...
Get metadata of dataset via ID.
[ "Get", "metadata", "of", "dataset", "via", "ID", "." ]
python
train
tsroten/dragonmapper
dragonmapper/transcriptions.py
https://github.com/tsroten/dragonmapper/blob/68eaf43c32725f4b4923c01284cfc0112079e8ab/dragonmapper/transcriptions.py#L76-L80
def _accented_vowel_to_numbered(vowel): """Convert an accented Pinyin vowel to a numbered Pinyin vowel.""" for numbered_vowel, accented_vowel in _PINYIN_TONES.items(): if vowel == accented_vowel: return tuple(numbered_vowel)
[ "def", "_accented_vowel_to_numbered", "(", "vowel", ")", ":", "for", "numbered_vowel", ",", "accented_vowel", "in", "_PINYIN_TONES", ".", "items", "(", ")", ":", "if", "vowel", "==", "accented_vowel", ":", "return", "tuple", "(", "numbered_vowel", ")" ]
Convert an accented Pinyin vowel to a numbered Pinyin vowel.
[ "Convert", "an", "accented", "Pinyin", "vowel", "to", "a", "numbered", "Pinyin", "vowel", "." ]
python
train
saltstack/salt
salt/modules/mssql.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mssql.py#L260-L279
def role_remove(role, **kwargs): ''' Remove a database role. CLI Example: .. code-block:: bash salt minion mssql.role_create role=test_role01 ''' try: conn = _get_connection(**kwargs) conn.autocommit(True) cur = conn.cursor() cur.execute('DROP ROLE {0}'.format(role)) conn.autocommit(True) conn.close() return True except Exception as e: return 'Could not create the role: {0}'.format(e)
[ "def", "role_remove", "(", "role", ",", "*", "*", "kwargs", ")", ":", "try", ":", "conn", "=", "_get_connection", "(", "*", "*", "kwargs", ")", "conn", ".", "autocommit", "(", "True", ")", "cur", "=", "conn", ".", "cursor", "(", ")", "cur", ".", ...
Remove a database role. CLI Example: .. code-block:: bash salt minion mssql.role_create role=test_role01
[ "Remove", "a", "database", "role", "." ]
python
train
aouyar/PyMunin
pysysinfo/asterisk.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/asterisk.py#L137-L150
def _connect(self): """Connect to Asterisk Manager Interface.""" try: if sys.version_info[:2] >= (2,6): self._conn = telnetlib.Telnet(self._amihost, self._amiport, connTimeout) else: self._conn = telnetlib.Telnet(self._amihost, self._amiport) except: raise Exception( "Connection to Asterisk Manager Interface on " "host %s and port %s failed." % (self._amihost, self._amiport) )
[ "def", "_connect", "(", "self", ")", ":", "try", ":", "if", "sys", ".", "version_info", "[", ":", "2", "]", ">=", "(", "2", ",", "6", ")", ":", "self", ".", "_conn", "=", "telnetlib", ".", "Telnet", "(", "self", ".", "_amihost", ",", "self", "....
Connect to Asterisk Manager Interface.
[ "Connect", "to", "Asterisk", "Manager", "Interface", "." ]
python
train
fronzbot/blinkpy
blinkpy/blinkpy.py
https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L250-L285
def download_videos(self, path, since=None, camera='all', stop=10): """ Download all videos from server since specified time. :param path: Path to write files. /path/<cameraname>_<recorddate>.mp4 :param since: Date and time to get videos from. Ex: "2018/07/28 12:33:00" to retrieve videos since July 28th 2018 at 12:33:00 :param camera: Camera name to retrieve. Defaults to "all". Use a list for multiple cameras. :param stop: Page to stop on (~25 items per page. Default page 10). """ if since is None: since_epochs = self.last_refresh else: parsed_datetime = parse(since, fuzzy=True) since_epochs = parsed_datetime.timestamp() formatted_date = get_time(time_to_convert=since_epochs) _LOGGER.info("Retrieving videos since %s", formatted_date) if not isinstance(camera, list): camera = [camera] for page in range(1, stop): response = api.request_videos(self, time=since_epochs, page=page) _LOGGER.debug("Processing page %s", page) try: result = response['videos'] if not result: raise IndexError except (KeyError, IndexError): _LOGGER.info("No videos found on page %s. Exiting.", page) break self._parse_downloaded_items(result, camera, path)
[ "def", "download_videos", "(", "self", ",", "path", ",", "since", "=", "None", ",", "camera", "=", "'all'", ",", "stop", "=", "10", ")", ":", "if", "since", "is", "None", ":", "since_epochs", "=", "self", ".", "last_refresh", "else", ":", "parsed_datet...
Download all videos from server since specified time. :param path: Path to write files. /path/<cameraname>_<recorddate>.mp4 :param since: Date and time to get videos from. Ex: "2018/07/28 12:33:00" to retrieve videos since July 28th 2018 at 12:33:00 :param camera: Camera name to retrieve. Defaults to "all". Use a list for multiple cameras. :param stop: Page to stop on (~25 items per page. Default page 10).
[ "Download", "all", "videos", "from", "server", "since", "specified", "time", "." ]
python
train
ultrabug/py3status
py3status/py3.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/py3.py#L192-L214
def _thresholds_init(self): """ Initiate and check any thresholds set """ thresholds = getattr(self._py3status_module, "thresholds", []) self._thresholds = {} if isinstance(thresholds, list): try: thresholds.sort() except TypeError: pass self._thresholds[None] = [(x[0], self._get_color(x[1])) for x in thresholds] return elif isinstance(thresholds, dict): for key, value in thresholds.items(): if isinstance(value, list): try: value.sort() except TypeError: pass self._thresholds[key] = [ (x[0], self._get_color(x[1])) for x in value ]
[ "def", "_thresholds_init", "(", "self", ")", ":", "thresholds", "=", "getattr", "(", "self", ".", "_py3status_module", ",", "\"thresholds\"", ",", "[", "]", ")", "self", ".", "_thresholds", "=", "{", "}", "if", "isinstance", "(", "thresholds", ",", "list",...
Initiate and check any thresholds set
[ "Initiate", "and", "check", "any", "thresholds", "set" ]
python
train
optimizely/python-sdk
optimizely/helpers/condition.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L345-L367
def loads(conditions_string): """ Deserializes the conditions property into its corresponding components: the condition_structure and the condition_list. Args: conditions_string: String defining valid and/or conditions. Returns: A tuple of (condition_structure, condition_list). condition_structure: nested list of operators and placeholders for operands. condition_list: list of conditions whose index correspond to the values of the placeholders. """ decoder = ConditionDecoder(_audience_condition_deserializer) # Create a custom JSONDecoder using the ConditionDecoder's object_hook method # to create the condition_structure as well as populate the condition_list json_decoder = json.JSONDecoder(object_hook=decoder.object_hook) # Perform the decoding condition_structure = json_decoder.decode(conditions_string) condition_list = decoder.condition_list return (condition_structure, condition_list)
[ "def", "loads", "(", "conditions_string", ")", ":", "decoder", "=", "ConditionDecoder", "(", "_audience_condition_deserializer", ")", "# Create a custom JSONDecoder using the ConditionDecoder's object_hook method", "# to create the condition_structure as well as populate the condition_list...
Deserializes the conditions property into its corresponding components: the condition_structure and the condition_list. Args: conditions_string: String defining valid and/or conditions. Returns: A tuple of (condition_structure, condition_list). condition_structure: nested list of operators and placeholders for operands. condition_list: list of conditions whose index correspond to the values of the placeholders.
[ "Deserializes", "the", "conditions", "property", "into", "its", "corresponding", "components", ":", "the", "condition_structure", "and", "the", "condition_list", "." ]
python
train
trevisanj/f311
f311/explorer/gui/a_XFileMainWindow.py
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/explorer/gui/a_XFileMainWindow.py#L165-L176
def _add_log_tab(self): """Adds element to pages and new tab""" # text_tab = "Log (Alt+&{})".format(len(self.pages)+1) text_tab = "Log" self.pages.append(MyPage(text_tab=text_tab)) # ### Log tab te = self.textEdit_log = self.keep_ref(QTextEdit()) te.setReadOnly(True) self.tabWidget.addTab(te, text_tab)
[ "def", "_add_log_tab", "(", "self", ")", ":", "# text_tab = \"Log (Alt+&{})\".format(len(self.pages)+1)\r", "text_tab", "=", "\"Log\"", "self", ".", "pages", ".", "append", "(", "MyPage", "(", "text_tab", "=", "text_tab", ")", ")", "# ### Log tab\r", "te", "=", "s...
Adds element to pages and new tab
[ "Adds", "element", "to", "pages", "and", "new", "tab" ]
python
train
limpyd/redis-limpyd
limpyd/contrib/collection.py
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/contrib/collection.py#L561-L603
def _add_filters(self, **filters): """ In addition to the normal _add_filters, this one accept RedisField objects on the right part of a filter. The value will be fetched from redis when calling the collection. The filter value can also be a model instance, in which case its PK will be fetched when calling the collection, too. """ string_filters = filters.copy() for key, value in filters.items(): is_extended = False if isinstance(value, RedisField): # we will fetch the value when running the collection if (not isinstance(value, SingleValueField) or getattr(value, '_instance', None) is None): raise ValueError('If a field is used as a filter value, it ' 'must be a simple value field attached to ' 'an instance') is_extended = True elif isinstance(value, RedisModel): # we will fetch the PK when running the collection is_extended = True if is_extended: if self._field_is_pk(key): # create an RawFilter which will be used in _get_pk raw_filter = RawFilter(key, value) self._lazy_collection['pks'].add(raw_filter) else: # create an ParsedFilter which will be used in _prepare_sets index, suffix, extra_field_parts = self._parse_filter_key(key) parsed_filter = ParsedFilter(index, suffix, extra_field_parts, value) self._lazy_collection['sets'].append(parsed_filter) string_filters.pop(key) super(ExtendedCollectionManager, self)._add_filters(**string_filters) return self
[ "def", "_add_filters", "(", "self", ",", "*", "*", "filters", ")", ":", "string_filters", "=", "filters", ".", "copy", "(", ")", "for", "key", ",", "value", "in", "filters", ".", "items", "(", ")", ":", "is_extended", "=", "False", "if", "isinstance", ...
In addition to the normal _add_filters, this one accept RedisField objects on the right part of a filter. The value will be fetched from redis when calling the collection. The filter value can also be a model instance, in which case its PK will be fetched when calling the collection, too.
[ "In", "addition", "to", "the", "normal", "_add_filters", "this", "one", "accept", "RedisField", "objects", "on", "the", "right", "part", "of", "a", "filter", ".", "The", "value", "will", "be", "fetched", "from", "redis", "when", "calling", "the", "collection...
python
train
twisted/epsilon
epsilon/hotfixes/trial_assertwarns.py
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/trial_assertwarns.py#L8-L57
def failUnlessWarns(self, category, message, filename, f, *args, **kwargs): """ Fail if the given function doesn't generate the specified warning when called. It calls the function, checks the warning, and forwards the result of the function if everything is fine. @param category: the category of the warning to check. @param message: the output message of the warning to check. @param filename: the filename where the warning should come from. @param f: the function which is supposed to generate the warning. @type f: any callable. @param args: the arguments to C{f}. @param kwargs: the keywords arguments to C{f}. @return: the result of the original function C{f}. """ warningsShown = [] def warnExplicit(*args): warningsShown.append(args) origExplicit = warnings.warn_explicit try: warnings.warn_explicit = warnExplicit result = f(*args, **kwargs) finally: warnings.warn_explicit = origExplicit if not warningsShown: self.fail("No warnings emitted") first = warningsShown[0] for other in warningsShown[1:]: if other[:2] != first[:2]: self.fail("Can't handle different warnings") gotMessage, gotCategory, gotFilename, lineno = first[:4] self.assertEqual(gotMessage, message) self.assertIdentical(gotCategory, category) # Use starts with because of .pyc/.pyo issues. self.failUnless( filename.startswith(gotFilename), 'Warning in %r, expected %r' % (gotFilename, filename)) # It would be nice to be able to check the line number as well, but # different configurations actually end up reporting different line # numbers (generally the variation is only 1 line, but that's enough # to fail the test erroneously...). # self.assertEqual(lineno, xxx) return result
[ "def", "failUnlessWarns", "(", "self", ",", "category", ",", "message", ",", "filename", ",", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warningsShown", "=", "[", "]", "def", "warnExplicit", "(", "*", "args", ")", ":", "warningsShown", ...
Fail if the given function doesn't generate the specified warning when called. It calls the function, checks the warning, and forwards the result of the function if everything is fine. @param category: the category of the warning to check. @param message: the output message of the warning to check. @param filename: the filename where the warning should come from. @param f: the function which is supposed to generate the warning. @type f: any callable. @param args: the arguments to C{f}. @param kwargs: the keywords arguments to C{f}. @return: the result of the original function C{f}.
[ "Fail", "if", "the", "given", "function", "doesn", "t", "generate", "the", "specified", "warning", "when", "called", ".", "It", "calls", "the", "function", "checks", "the", "warning", "and", "forwards", "the", "result", "of", "the", "function", "if", "everyt...
python
train
AntagonistHQ/openprovider.py
openprovider/modules/ssl.py
https://github.com/AntagonistHQ/openprovider.py/blob/5871c3d5b3661e23667f147f49f20389c817a0a4/openprovider/modules/ssl.py#L127-L135
def retrieve_approver_email_list(self, domain, product_id): """Retrieve the list of allowed approver email addresses.""" response = self.request(E.retrieveApproverEmailListSslCertRequest( E.domain(domain), E.productId(product_id) )) return [str(i) for i in response.data.array[0].item]
[ "def", "retrieve_approver_email_list", "(", "self", ",", "domain", ",", "product_id", ")", ":", "response", "=", "self", ".", "request", "(", "E", ".", "retrieveApproverEmailListSslCertRequest", "(", "E", ".", "domain", "(", "domain", ")", ",", "E", ".", "pr...
Retrieve the list of allowed approver email addresses.
[ "Retrieve", "the", "list", "of", "allowed", "approver", "email", "addresses", "." ]
python
train
spyder-ide/spyder
spyder/plugins/outlineexplorer/widgets.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/outlineexplorer/widgets.py#L131-L143
def item_at_line(root_item, line): """ Find and return the item of the outline explorer under which is located the specified 'line' of the editor. """ previous_item = root_item item = root_item for item in get_item_children(root_item): if item.line > line: return previous_item previous_item = item else: return item
[ "def", "item_at_line", "(", "root_item", ",", "line", ")", ":", "previous_item", "=", "root_item", "item", "=", "root_item", "for", "item", "in", "get_item_children", "(", "root_item", ")", ":", "if", "item", ".", "line", ">", "line", ":", "return", "previ...
Find and return the item of the outline explorer under which is located the specified 'line' of the editor.
[ "Find", "and", "return", "the", "item", "of", "the", "outline", "explorer", "under", "which", "is", "located", "the", "specified", "line", "of", "the", "editor", "." ]
python
train
msmbuilder/msmbuilder
msmbuilder/featurizer/featurizer.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/featurizer/featurizer.py#L1508-L1546
def partial_transform(self, traj): """Featurize an MD trajectory into a vector space with the raw cartesian coordinates. Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samples, n_features) A featurized trajectory is a 2D array of shape `(length_of_trajectory x n_features)` where each `features[i]` vector is computed by applying the featurization function to the `i`th snapshot of the input trajectory. Notes ----- If you requested superposition (gave `ref_traj` in __init__) the input trajectory will be modified. See Also -------- transform : simultaneously featurize a collection of MD trajectories """ # Optionally take only certain atoms if self.atom_indices is not None: p_traj = traj.atom_slice(self.atom_indices) else: p_traj = traj # Optionally superpose to a reference trajectory. if self.ref_traj is not None: p_traj.superpose(self.ref_traj, parallel=False) # Get the positions and reshape. value = p_traj.xyz.reshape(len(p_traj), -1) return value
[ "def", "partial_transform", "(", "self", ",", "traj", ")", ":", "# Optionally take only certain atoms", "if", "self", ".", "atom_indices", "is", "not", "None", ":", "p_traj", "=", "traj", ".", "atom_slice", "(", "self", ".", "atom_indices", ")", "else", ":", ...
Featurize an MD trajectory into a vector space with the raw cartesian coordinates. Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, dtype=float, shape=(n_samples, n_features) A featurized trajectory is a 2D array of shape `(length_of_trajectory x n_features)` where each `features[i]` vector is computed by applying the featurization function to the `i`th snapshot of the input trajectory. Notes ----- If you requested superposition (gave `ref_traj` in __init__) the input trajectory will be modified. See Also -------- transform : simultaneously featurize a collection of MD trajectories
[ "Featurize", "an", "MD", "trajectory", "into", "a", "vector", "space", "with", "the", "raw", "cartesian", "coordinates", "." ]
python
train
michaelaye/pyciss
pyciss/io.py
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/io.py#L81-L86
def get_db_root(): "Read dbroot folder from config and mkdir if required." d = get_config() dbroot = Path(d['pyciss_db']['path']) dbroot.mkdir(exist_ok=True) return dbroot
[ "def", "get_db_root", "(", ")", ":", "d", "=", "get_config", "(", ")", "dbroot", "=", "Path", "(", "d", "[", "'pyciss_db'", "]", "[", "'path'", "]", ")", "dbroot", ".", "mkdir", "(", "exist_ok", "=", "True", ")", "return", "dbroot" ]
Read dbroot folder from config and mkdir if required.
[ "Read", "dbroot", "folder", "from", "config", "and", "mkdir", "if", "required", "." ]
python
train
chaoss/grimoirelab-kingarthur
arthur/utils.py
https://github.com/chaoss/grimoirelab-kingarthur/blob/9d6a638bee68d5e5c511f045eeebf06340fd3252/arthur/utils.py#L69-L74
def writer_acquire(self): """Acquire the lock to write""" self._order_mutex.acquire() self._access_mutex.acquire() self._order_mutex.release()
[ "def", "writer_acquire", "(", "self", ")", ":", "self", ".", "_order_mutex", ".", "acquire", "(", ")", "self", ".", "_access_mutex", ".", "acquire", "(", ")", "self", ".", "_order_mutex", ".", "release", "(", ")" ]
Acquire the lock to write
[ "Acquire", "the", "lock", "to", "write" ]
python
test
materialsproject/pymatgen
pymatgen/analysis/chemenv/utils/coordination_geometry_utils.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/utils/coordination_geometry_utils.py#L170-L253
def diamond_functions(xx, yy, y_x0, x_y0): """ Method that creates two upper and lower functions based on points xx and yy as well as intercepts defined by y_x0 and x_y0. The resulting functions form kind of a distorted diamond-like structure aligned from point xx to point yy. Schematically : xx is symbolized by x, yy is symbolized by y, y_x0 is equal to the distance from x to a, x_y0 is equal to the distance from x to b, the lines a-p and b-q are parallel to the line x-y such that points p and q are obtained automatically. In case of an increasing diamond the lower function is x-b-q and the upper function is a-p-y while in case of a decreasing diamond, the lower function is a-p-y and the upper function is x-b-q. Increasing diamond | Decreasing diamond p--y x----b / /| |\ \ / / | | \ q / / | a \ | a / | \ \ | | / q \ \ | |/ / \ \| x----b p--y Args: xx: First point yy: Second point Returns: A dictionary with the lower and upper diamond functions. """ npxx = np.array(xx) npyy = np.array(yy) if np.any(npxx == npyy): raise RuntimeError('Invalid points for diamond_functions') if np.all(npxx < npyy) or np.all(npxx > npyy): if npxx[0] < npyy[0]: p1 = npxx p2 = npyy else: p1 = npyy p2 = npxx else: if npxx[0] < npyy[0]: p1 = npxx p2 = npyy else: p1 = npyy p2 = npxx slope = (p2[1]-p1[1]) / (p2[0]- p1[0]) if slope > 0.0: x_bpoint = p1[0] + x_y0 myy = p1[1] bq_intercept = myy - slope*x_bpoint myx = p1[0] myy = p1[1] + y_x0 ap_intercept = myy - slope*myx x_ppoint = (p2[1] - ap_intercept) / slope def lower(x): return np.where(x <= x_bpoint, p1[1] * np.ones_like(x), slope * x + bq_intercept) def upper(x): return np.where(x >= x_ppoint, p2[1] * np.ones_like(x), slope * x + ap_intercept) else: x_bpoint = p1[0] + x_y0 myy = p1[1] bq_intercept = myy - slope * x_bpoint myx = p1[0] myy = p1[1] - y_x0 ap_intercept = myy - slope * myx x_ppoint = (p2[1] - ap_intercept) / slope def lower(x): return np.where(x >= x_ppoint, p2[1] * np.ones_like(x), slope * x + ap_intercept) def upper(x): 
return np.where(x <= x_bpoint, p1[1] * np.ones_like(x), slope * x + bq_intercept) return {'lower': lower, 'upper': upper}
[ "def", "diamond_functions", "(", "xx", ",", "yy", ",", "y_x0", ",", "x_y0", ")", ":", "npxx", "=", "np", ".", "array", "(", "xx", ")", "npyy", "=", "np", ".", "array", "(", "yy", ")", "if", "np", ".", "any", "(", "npxx", "==", "npyy", ")", ":...
Method that creates two upper and lower functions based on points xx and yy as well as intercepts defined by y_x0 and x_y0. The resulting functions form kind of a distorted diamond-like structure aligned from point xx to point yy. Schematically : xx is symbolized by x, yy is symbolized by y, y_x0 is equal to the distance from x to a, x_y0 is equal to the distance from x to b, the lines a-p and b-q are parallel to the line x-y such that points p and q are obtained automatically. In case of an increasing diamond the lower function is x-b-q and the upper function is a-p-y while in case of a decreasing diamond, the lower function is a-p-y and the upper function is x-b-q. Increasing diamond | Decreasing diamond p--y x----b / /| |\ \ / / | | \ q / / | a \ | a / | \ \ | | / q \ \ | |/ / \ \| x----b p--y Args: xx: First point yy: Second point Returns: A dictionary with the lower and upper diamond functions.
[ "Method", "that", "creates", "two", "upper", "and", "lower", "functions", "based", "on", "points", "xx", "and", "yy", "as", "well", "as", "intercepts", "defined", "by", "y_x0", "and", "x_y0", ".", "The", "resulting", "functions", "form", "kind", "of", "a",...
python
train
ryanpetrello/python-zombie
zombie/proxy/client.py
https://github.com/ryanpetrello/python-zombie/blob/638916572d8ee5ebbdb2dcfc5000a952e99f280f/zombie/proxy/client.py#L168-L179
def wait(self, method, *args): """ Call a method on the zombie.js Browser instance and wait on a callback. :param method: the method to call, e.g., html() :param args: one of more arguments for the method """ methodargs = encode_args(args, extra=True) js = """ %s(%s wait_callback); """ % (method, methodargs) self._send(js)
[ "def", "wait", "(", "self", ",", "method", ",", "*", "args", ")", ":", "methodargs", "=", "encode_args", "(", "args", ",", "extra", "=", "True", ")", "js", "=", "\"\"\"\n %s(%s wait_callback);\n \"\"\"", "%", "(", "method", ",", "methodargs", "...
Call a method on the zombie.js Browser instance and wait on a callback. :param method: the method to call, e.g., html() :param args: one of more arguments for the method
[ "Call", "a", "method", "on", "the", "zombie", ".", "js", "Browser", "instance", "and", "wait", "on", "a", "callback", "." ]
python
train
wickman/compactor
compactor/process.py
https://github.com/wickman/compactor/blob/52714be3d84aa595a212feccb4d92ec250cede2a/compactor/process.py#L49-L71
def install(cls, mbox): """A decorator to indicate a remotely callable method on a process. .. code-block:: python from compactor.process import Process class PingProcess(Process): @Process.install('ping') def ping(self, from_pid, body): # do something The installed method should take ``from_pid`` and ``body`` parameters. ``from_pid`` is the process calling the method. ``body`` is a ``bytes`` stream that was delivered with the message, possibly empty. :param mbox: Incoming messages to this "mailbox" will be dispatched to this method. :type mbox: ``str`` """ def wrap(fn): setattr(fn, cls.INSTALL_ATTRIBUTE, mbox) return fn return wrap
[ "def", "install", "(", "cls", ",", "mbox", ")", ":", "def", "wrap", "(", "fn", ")", ":", "setattr", "(", "fn", ",", "cls", ".", "INSTALL_ATTRIBUTE", ",", "mbox", ")", "return", "fn", "return", "wrap" ]
A decorator to indicate a remotely callable method on a process. .. code-block:: python from compactor.process import Process class PingProcess(Process): @Process.install('ping') def ping(self, from_pid, body): # do something The installed method should take ``from_pid`` and ``body`` parameters. ``from_pid`` is the process calling the method. ``body`` is a ``bytes`` stream that was delivered with the message, possibly empty. :param mbox: Incoming messages to this "mailbox" will be dispatched to this method. :type mbox: ``str``
[ "A", "decorator", "to", "indicate", "a", "remotely", "callable", "method", "on", "a", "process", "." ]
python
train
Neurita/boyle
boyle/files/file_tree_map.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/file_tree_map.py#L403-L423
def count_node_match(self, pattern, adict=None): """ Return the number of nodes that match the pattern. :param pattern: :param adict: :return: int """ mydict = self._filetree if adict is None else adict k = 0 if isinstance(mydict, dict): names = mydict.keys() k += len(filter_list(names, pattern)) for nom in names: k += self.count_node_match(pattern, mydict[nom]) else: k = len(filter_list(mydict, pattern)) return k
[ "def", "count_node_match", "(", "self", ",", "pattern", ",", "adict", "=", "None", ")", ":", "mydict", "=", "self", ".", "_filetree", "if", "adict", "is", "None", "else", "adict", "k", "=", "0", "if", "isinstance", "(", "mydict", ",", "dict", ")", ":...
Return the number of nodes that match the pattern. :param pattern: :param adict: :return: int
[ "Return", "the", "number", "of", "nodes", "that", "match", "the", "pattern", "." ]
python
valid
objectrocket/python-client
objectrocket/instances/mongodb.py
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/instances/mongodb.py#L48-L60
def compaction(self, request_compaction=False): """Retrieve a report on, or request compaction for this instance. :param bool request_compaction: A boolean indicating whether or not to request compaction. """ url = self._service_url + 'compaction/' if request_compaction: response = requests.post(url, **self._instances._default_request_kwargs) else: response = requests.get(url, **self._instances._default_request_kwargs) return response.json()
[ "def", "compaction", "(", "self", ",", "request_compaction", "=", "False", ")", ":", "url", "=", "self", ".", "_service_url", "+", "'compaction/'", "if", "request_compaction", ":", "response", "=", "requests", ".", "post", "(", "url", ",", "*", "*", "self"...
Retrieve a report on, or request compaction for this instance. :param bool request_compaction: A boolean indicating whether or not to request compaction.
[ "Retrieve", "a", "report", "on", "or", "request", "compaction", "for", "this", "instance", "." ]
python
train
alpha-xone/xbbg
xbbg/io/files.py
https://github.com/alpha-xone/xbbg/blob/70226eb19a72a08144b5d8cea9db4913200f7bc5/xbbg/io/files.py#L191-L201
def file_modified_time(file_name) -> pd.Timestamp: """ File modified time in python Args: file_name: file name Returns: pd.Timestamp """ return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name)))
[ "def", "file_modified_time", "(", "file_name", ")", "->", "pd", ".", "Timestamp", ":", "return", "pd", ".", "to_datetime", "(", "time", ".", "ctime", "(", "os", ".", "path", ".", "getmtime", "(", "filename", "=", "file_name", ")", ")", ")" ]
File modified time in python Args: file_name: file name Returns: pd.Timestamp
[ "File", "modified", "time", "in", "python" ]
python
valid
ZeitOnline/briefkasten
application/briefkasten/dropbox.py
https://github.com/ZeitOnline/briefkasten/blob/ce6b6eeb89196014fe21d68614c20059d02daa11/application/briefkasten/dropbox.py#L268-L307
def _create_encrypted_zip(self, source='dirty', fs_target_dir=None):
    """ creates a zip file from the drop and encrypts it to the editors.
        the encrypted archive is created inside fs_target_dir

    :param source: which attachment set to archive, 'dirty' or 'clean'
    :param fs_target_dir: directory to place the encrypted archive in;
        when None the drop's own directory is used
    :returns: path of the encrypted archive, or the error status string
        when no editor has a usable key
    """
    # only encrypt to editors whose keys pass the recipient check
    backup_recipients = [r for r in self.editors if checkRecipient(self.gpg_context, r)]

    # this will be handled by watchdog, no need to send for each drop
    if not backup_recipients:
        self.status = u'500 no valid keys at all'
        return self.status

    # calculate paths
    fs_backup = join(self.fs_path, '%s.zip' % source)
    if fs_target_dir is None:
        fs_backup_pgp = join(self.fs_path, '%s.zip.pgp' % source)
    else:
        # NOTE(review): when a target dir is given, the archive is named
        # after the drop id instead of the source set
        fs_backup_pgp = join(fs_target_dir, '%s.zip.pgp' % self.drop_id)
    fs_source = dict(
        dirty=self.fs_dirty_attachments,
        clean=self.fs_cleansed_attachments
    )

    # create archive (message file first, then each attachment flattened
    # to its basename)
    with ZipFile(fs_backup, 'w', ZIP_STORED) as backup:
        if exists(join(self.fs_path, 'message')):
            backup.write(join(self.fs_path, 'message'), arcname='message')
        for fs_attachment in fs_source[source]:
            backup.write(fs_attachment, arcname=split(fs_attachment)[-1])

    # encrypt archive
    with open(fs_backup, "rb") as backup:
        self.gpg_context.encrypt_file(
            backup,
            backup_recipients,
            always_trust=True,
            output=fs_backup_pgp
        )

    # cleanup: the plaintext zip is removed, only the .pgp remains
    remove(fs_backup)
    return fs_backup_pgp
[ "def", "_create_encrypted_zip", "(", "self", ",", "source", "=", "'dirty'", ",", "fs_target_dir", "=", "None", ")", ":", "backup_recipients", "=", "[", "r", "for", "r", "in", "self", ".", "editors", "if", "checkRecipient", "(", "self", ".", "gpg_context", ...
creates a zip file from the drop and encrypts it to the editors. the encrypted archive is created inside fs_target_dir
[ "creates", "a", "zip", "file", "from", "the", "drop", "and", "encrypts", "it", "to", "the", "editors", ".", "the", "encrypted", "archive", "is", "created", "inside", "fs_target_dir" ]
python
valid
zimeon/iiif
iiif/generators/mandlebrot_100k.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/generators/mandlebrot_100k.py#L55-L63
def mpixel(self, z, n=0):
    """Iteration in Mandlebrot coordinate z."""
    # iterate z -> z*z + c until escape or the iteration budget runs out
    while True:
        z = z * z + self.c
        if abs(z) > 2.0:
            # escaped the radius-2 disc: colour by iteration count
            return self.color(n)
        n += 1
        if n > self.max_iter:
            # stayed bounded: treated as inside the set
            return None
[ "def", "mpixel", "(", "self", ",", "z", ",", "n", "=", "0", ")", ":", "z", "=", "z", "*", "z", "+", "self", ".", "c", "if", "(", "abs", "(", "z", ")", ">", "2.0", ")", ":", "return", "self", ".", "color", "(", "n", ")", "n", "+=", "1", ...
Iteration in Mandlebrot coordinate z.
[ "Iteration", "in", "Mandlebrot", "coordinate", "z", "." ]
python
train
manrajgrover/halo
halo/halo.py
https://github.com/manrajgrover/halo/blob/0ac5149dea965b27b09f0776df9095ebf013fb4d/halo/halo.py#L455-L465
def succeed(self, text=None):
    """Shows and persists success symbol and text and exits.

    Parameters
    ----------
    text : None, optional
        Text to be shown alongside success symbol.

    Returns
    -------
    self
    """
    success_symbol = LogSymbols.SUCCESS.value
    return self.stop_and_persist(symbol=success_symbol, text=text)
[ "def", "succeed", "(", "self", ",", "text", "=", "None", ")", ":", "return", "self", ".", "stop_and_persist", "(", "symbol", "=", "LogSymbols", ".", "SUCCESS", ".", "value", ",", "text", "=", "text", ")" ]
Shows and persists success symbol and text and exits. Parameters ---------- text : None, optional Text to be shown alongside success symbol. Returns ------- self
[ "Shows", "and", "persists", "success", "symbol", "and", "text", "and", "exits", ".", "Parameters", "----------", "text", ":", "None", "optional", "Text", "to", "be", "shown", "alongside", "success", "symbol", ".", "Returns", "-------", "self" ]
python
train
budacom/trading-bots
trading_bots/contrib/clients.py
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L436-L438
def fetch_closed_orders(self, limit: int) -> List[Order]:
    """Fetch the latest closed orders; a limit must be provided."""
    closed = self._closed_orders
    return self._fetch_orders_limit(closed, limit)
[ "def", "fetch_closed_orders", "(", "self", ",", "limit", ":", "int", ")", "->", "List", "[", "Order", "]", ":", "return", "self", ".", "_fetch_orders_limit", "(", "self", ".", "_closed_orders", ",", "limit", ")" ]
Fetch latest closed orders, must provide a limit.
[ "Fetch", "latest", "closed", "orders", "must", "provide", "a", "limit", "." ]
python
train
yyuu/botornado
boto/ec2/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/connection.py#L1235-L1255
def disassociate_address(self, public_ip=None, association_id=None):
    """
    Disassociate an Elastic IP address from a currently running instance.

    :type public_ip: string
    :param public_ip: The public IP address for EC2 elastic IPs.

    :type association_id: string
    :param association_id: The association ID for a VPC based elastic ip.

    :rtype: bool
    :return: True if successful
    """
    # exactly one identifier is sent; public_ip takes precedence when both
    # are supplied, matching classic-EC2 vs VPC addressing
    if public_ip is not None:
        params = {'PublicIp': public_ip}
    elif association_id is not None:
        params = {'AssociationId': association_id}
    else:
        params = {}
    return self.get_status('DisassociateAddress', params, verb='POST')
[ "def", "disassociate_address", "(", "self", ",", "public_ip", "=", "None", ",", "association_id", "=", "None", ")", ":", "params", "=", "{", "}", "if", "public_ip", "is", "not", "None", ":", "params", "[", "'PublicIp'", "]", "=", "public_ip", "elif", "as...
Disassociate an Elastic IP address from a currently running instance. :type public_ip: string :param public_ip: The public IP address for EC2 elastic IPs. :type association_id: string :param association_id: The association ID for a VPC based elastic ip. :rtype: bool :return: True if successful
[ "Disassociate", "an", "Elastic", "IP", "address", "from", "a", "currently", "running", "instance", "." ]
python
train
universalcore/unicore.distribute
unicore/distribute/utils.py
https://github.com/universalcore/unicore.distribute/blob/f3216fefd9df5aef31b3d1b666eb3f79db032d98/unicore/distribute/utils.py#L310-L326
def format_content_type_object(repo, content_type, uuid):
    """
    Return a content object from a repository for a given content_type
    and uuid.

    :param Repo repo: The git repository.
    :param str content_type: The content type to look up.
    :param uuid: identifier of the object to fetch.
    :returns: dict
    :raises NotFound: when the object does not exist in the repository.
    """
    try:
        manager = StorageManager(repo)
        model = load_model_class(repo, content_type)
        return dict(manager.get(model, uuid))
    except GitCommandError:
        # a failing git lookup means the object is simply absent
        raise NotFound('Object does not exist.')
[ "def", "format_content_type_object", "(", "repo", ",", "content_type", ",", "uuid", ")", ":", "try", ":", "storage_manager", "=", "StorageManager", "(", "repo", ")", "model_class", "=", "load_model_class", "(", "repo", ",", "content_type", ")", "return", "dict",...
Return a content object from a repository for a given content_type and uuid :param Repo repo: The git repository. :param str content_type: The content type to list :returns: dict
[ "Return", "a", "content", "object", "from", "a", "repository", "for", "a", "given", "content_type", "and", "uuid" ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L14063-L14095
def get_cpuid_leaf_by_ordinal(self, ordinal):
    """Used to enumerate CPUID information override values.

    in ordinal of type int
        The ordinal number of the leaf to get.

    out idx of type int
        CPUID leaf index.

    out idx_sub of type int
        CPUID leaf sub-index.

    out val_eax / val_ebx / val_ecx / val_edx of type int
        CPUID leaf values for registers eax, ebx, ecx and edx.

    raises :class:`OleErrorInvalidarg`
        Invalid ordinal number is out of range.
    """
    if not isinstance(ordinal, baseinteger):
        raise TypeError("ordinal can only be an instance of type baseinteger")
    result = self._call("getCPUIDLeafByOrdinal", in_p=[ordinal])
    # unpack to validate arity before handing the tuple back
    (idx, idx_sub, val_eax, val_ebx, val_ecx, val_edx) = result
    return (idx, idx_sub, val_eax, val_ebx, val_ecx, val_edx)
[ "def", "get_cpuid_leaf_by_ordinal", "(", "self", ",", "ordinal", ")", ":", "if", "not", "isinstance", "(", "ordinal", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"ordinal can only be an instance of type baseinteger\"", ")", "(", "idx", ",", "idx_sub",...
Used to enumerate CPUID information override values. in ordinal of type int The ordinal number of the leaf to get. out idx of type int CPUID leaf index. out idx_sub of type int CPUID leaf sub-index. out val_eax of type int CPUID leaf value for register eax. out val_ebx of type int CPUID leaf value for register ebx. out val_ecx of type int CPUID leaf value for register ecx. out val_edx of type int CPUID leaf value for register edx. raises :class:`OleErrorInvalidarg` Invalid ordinal number is out of range.
[ "Used", "to", "enumerate", "CPUID", "information", "override", "values", "." ]
python
train
jonhadfield/creds
lib/creds/users.py
https://github.com/jonhadfield/creds/blob/b2053b43516cf742c6e4c2b79713bc625592f47c/lib/creds/users.py#L172-L179
def from_json(cls, file_path=None):
    """Create collection from a JSON file.

    :param file_path: path of the UTF-8 encoded JSON file to read.
    :returns: user collection built from the file's ``users`` entry.
    :raises ValueError: when the file does not contain valid JSON.
    """
    with io.open(file_path, encoding=text_type('utf-8')) as stream:
        try:
            parsed = json.load(stream)
        except ValueError:
            raise ValueError('No JSON object could be decoded')
    return cls.construct_user_list(raw_users=parsed.get('users'))
[ "def", "from_json", "(", "cls", ",", "file_path", "=", "None", ")", ":", "with", "io", ".", "open", "(", "file_path", ",", "encoding", "=", "text_type", "(", "'utf-8'", ")", ")", "as", "stream", ":", "try", ":", "users_json", "=", "json", ".", "load"...
Create collection from a JSON file.
[ "Create", "collection", "from", "a", "JSON", "file", "." ]
python
train
JoaoFelipe/pyposast
pyposast/visitor.py
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L245-L251
def visit_Constant(self, node):
    """PEP 511: Constants are generated by optimizers

    Annotates *node* with source-position attributes taken from its
    paired node in the companion tree.
    """
    nnode = self.dnode(node)
    # start position comes from the paired node, translated from byte
    # offsets to utf-8 column positions
    node.first_line, node.first_col = ast_pos(nnode, self.bytes_pos_to_utf8)
    node.last_line = node.first_line
    # assumes the constant does not span lines: end column is start column
    # plus the printed width of the value — TODO confirm for multiline strings
    node.last_col = node.first_col + len(repr(node.value))
    node.uid = (node.last_line, node.last_col)
[ "def", "visit_Constant", "(", "self", ",", "node", ")", ":", "nnode", "=", "self", ".", "dnode", "(", "node", ")", "node", ".", "first_line", ",", "node", ".", "first_col", "=", "ast_pos", "(", "nnode", ",", "self", ".", "bytes_pos_to_utf8", ")", "node...
PEP 511: Constants are generated by optimizers
[ "PEP", "511", ":", "Constants", "are", "generated", "by", "optimizers" ]
python
train
DataONEorg/d1_python
client_cli/src/d1_cli/impl/command_parser.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L441-L450
def do_updatereplication(self, line):
    """updatereplication <identifier> [identifier ...] Update the Replication Policy on one or more existing Science Data Objects."""
    pids = self._split_args(line, 1, -1)
    self._command_processor.update_replication_policy(pids)
    message = "Added replication policy update operation for identifiers {} to write queue".format(
        ", ".join(pids)
    )
    self._print_info_if_verbose(message)
[ "def", "do_updatereplication", "(", "self", ",", "line", ")", ":", "pids", "=", "self", ".", "_split_args", "(", "line", ",", "1", ",", "-", "1", ")", "self", ".", "_command_processor", ".", "update_replication_policy", "(", "pids", ")", "self", ".", "_p...
updatereplication <identifier> [identifier ...] Update the Replication Policy on one or more existing Science Data Objects.
[ "updatereplication", "<identifier", ">", "[", "identifier", "...", "]", "Update", "the", "Replication", "Policy", "on", "one", "or", "more", "existing", "Science", "Data", "Objects", "." ]
python
train
scanny/python-pptx
pptx/oxml/chart/axis.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/chart/axis.py#L149-L157
def maximum(self, value):
    """
    Set the ``<c:max>`` child element to the float *value*; remove the
    max element entirely when *value* is |None|.
    """
    # always drop the existing element first, then re-add if a value is set
    self._remove_max()
    if value is not None:
        self._add_max(val=value)
[ "def", "maximum", "(", "self", ",", "value", ")", ":", "self", ".", "_remove_max", "(", ")", "if", "value", "is", "None", ":", "return", "self", ".", "_add_max", "(", "val", "=", "value", ")" ]
Set the value of the ``<c:max>`` child element to the float *value*, or remove the max element if *value* is |None|.
[ "Set", "the", "value", "of", "the", "<c", ":", "max", ">", "child", "element", "to", "the", "float", "*", "value", "*", "or", "remove", "the", "max", "element", "if", "*", "value", "*", "is", "|None|", "." ]
python
train
openai/pachi-py
pachi_py/pachi/tools/sgflib/sgflib.py
https://github.com/openai/pachi-py/blob/65f29fdd28747d34f2c3001f4016913e4aaeb8fc/pachi_py/pachi/tools/sgflib/sgflib.py#L338-L343
def mainline(self):
    """ Returns the main line of the game (variation A) as a 'GameTree'."""
    if not self.variations:
        return self
    # prepend this tree's nodes to the main line of the first variation
    return GameTree(self.data + self.variations[0].mainline())
[ "def", "mainline", "(", "self", ")", ":", "if", "self", ".", "variations", ":", "return", "GameTree", "(", "self", ".", "data", "+", "self", ".", "variations", "[", "0", "]", ".", "mainline", "(", ")", ")", "else", ":", "return", "self" ]
Returns the main line of the game (variation A) as a 'GameTree'.
[ "Returns", "the", "main", "line", "of", "the", "game", "(", "variation", "A", ")", "as", "a", "GameTree", "." ]
python
train
jmvrbanac/Specter
specter/expect.py
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/expect.py#L41-L53
def serialize(self):
    """Serializes the ExpectAssert object for collection.

    Warning, this only captures the information available at call time;
    it is strongly recommended to call this once all specs and tests
    have completed.
    """
    return {
        'success': self.success,
        'assertion': str(self),
        'required': self.required,
    }
[ "def", "serialize", "(", "self", ")", ":", "converted_dict", "=", "{", "'success'", ":", "self", ".", "success", ",", "'assertion'", ":", "str", "(", "self", ")", ",", "'required'", ":", "self", ".", "required", "}", "return", "converted_dict" ]
Serializes the ExpectAssert object for collection. Warning, this will only grab the available information. It is strongly that you only call this once all specs and tests have completed.
[ "Serializes", "the", "ExpectAssert", "object", "for", "collection", "." ]
python
train
admiralobvious/flask-simpleldap
flask_simpleldap/__init__.py
https://github.com/admiralobvious/flask-simpleldap/blob/c0554780f84c42866b241c0d68fd76aaaf065e02/flask_simpleldap/__init__.py#L158-L201
def get_object_details(self, user=None, group=None, dn_only=False):
    """Returns a ``dict`` with the object's (user or group) details.

    :param str user: Username of the user object you want details for.
    :param str group: Name of the group object you want details for.
    :param bool dn_only: If we should only retrieve the object's
        distinguished name or not. Default: ``False``.
    """
    query = None
    fields = None
    # build the LDAP filter for either a user or a group lookup; when only
    # the DN is wanted, no attribute list is requested from the server
    if user is not None:
        if not dn_only:
            fields = current_app.config['LDAP_USER_FIELDS']
        query = ldap_filter.filter_format(
            current_app.config['LDAP_USER_OBJECT_FILTER'], (user,))
    elif group is not None:
        if not dn_only:
            fields = current_app.config['LDAP_GROUP_FIELDS']
        query = ldap_filter.filter_format(
            current_app.config['LDAP_GROUP_OBJECT_FILTER'], (group,))
    conn = self.bind
    try:
        records = conn.search_s(current_app.config['LDAP_BASE_DN'],
                                ldap.SCOPE_SUBTREE,
                                query,
                                fields)
        conn.unbind_s()
        result = {}
        if records:
            if dn_only:
                if current_app.config['LDAP_OPENLDAP']:
                    # OpenLDAP: the DN is the first element of the record
                    if records:
                        return records[0][0]
                else:
                    # non-OpenLDAP servers keep the DN in a configured attribute
                    if current_app.config['LDAP_OBJECTS_DN'] \
                            in records[0][1]:
                        dn = records[0][1][
                            current_app.config['LDAP_OBJECTS_DN']]
                        return dn[0]
            # full details: copy every attribute of the first record
            for k, v in list(records[0][1].items()):
                result[k] = v
        return result
    except ldap.LDAPError as e:
        raise LDAPException(self.error(e.args))
[ "def", "get_object_details", "(", "self", ",", "user", "=", "None", ",", "group", "=", "None", ",", "dn_only", "=", "False", ")", ":", "query", "=", "None", "fields", "=", "None", "if", "user", "is", "not", "None", ":", "if", "not", "dn_only", ":", ...
Returns a ``dict`` with the object's (user or group) details. :param str user: Username of the user object you want details for. :param str group: Name of the group object you want details for. :param bool dn_only: If we should only retrieve the object's distinguished name or not. Default: ``False``.
[ "Returns", "a", "dict", "with", "the", "object", "s", "(", "user", "or", "group", ")", "details", "." ]
python
train
skorch-dev/skorch
skorch/net.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/net.py#L273-L288
def notify(self, method_name, **cb_kwargs):
    """Call the callback method specified in ``method_name`` with
    parameters specified in ``cb_kwargs``.

    Method names can be one of:
    * on_train_begin
    * on_train_end
    * on_epoch_begin
    * on_epoch_end
    * on_batch_begin
    * on_batch_end
    """
    # the net itself is notified first, then every registered callback
    receivers = [self] + [cb for _, cb in self.callbacks_]
    for receiver in receivers:
        getattr(receiver, method_name)(self, **cb_kwargs)
[ "def", "notify", "(", "self", ",", "method_name", ",", "*", "*", "cb_kwargs", ")", ":", "getattr", "(", "self", ",", "method_name", ")", "(", "self", ",", "*", "*", "cb_kwargs", ")", "for", "_", ",", "cb", "in", "self", ".", "callbacks_", ":", "get...
Call the callback method specified in ``method_name`` with parameters specified in ``cb_kwargs``. Method names can be one of: * on_train_begin * on_train_end * on_epoch_begin * on_epoch_end * on_batch_begin * on_batch_end
[ "Call", "the", "callback", "method", "specified", "in", "method_name", "with", "parameters", "specified", "in", "cb_kwargs", "." ]
python
train
gtaylor/paypal-python
paypal/interface.py
https://github.com/gtaylor/paypal-python/blob/aa7a987ea9e9b7f37bcd8a8b54a440aad6c871b1/paypal/interface.py#L456-L466
def generate_cart_upload_redirect_url(self, **kwargs):
    """https://www.sandbox.paypal.com/webscr
    ?cmd=_cart
    &upload=1

    Builds a cart-upload redirect URL. ``business``, ``item_name_1``,
    ``amount_1`` and ``quantity_1`` are required keyword arguments.
    """
    self._check_required(
        ('business', 'item_name_1', 'amount_1', 'quantity_1'), **kwargs)
    base = "%s?cmd=_cart&upload=1" % self.config.PAYPAL_URL_BASE
    query = urlencode(self._encode_utf8(**kwargs))
    return "%s&%s" % (base, query)
[ "def", "generate_cart_upload_redirect_url", "(", "self", ",", "*", "*", "kwargs", ")", ":", "required_vals", "=", "(", "'business'", ",", "'item_name_1'", ",", "'amount_1'", ",", "'quantity_1'", ")", "self", ".", "_check_required", "(", "required_vals", ",", "*"...
https://www.sandbox.paypal.com/webscr ?cmd=_cart &upload=1
[ "https", ":", "//", "www", ".", "sandbox", ".", "paypal", ".", "com", "/", "webscr", "?cmd", "=", "_cart", "&upload", "=", "1" ]
python
train
ella/ella
ella/core/views.py
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/views.py#L538-L562
def export(request, count, name='', content_type=None):
    """
    Export banners.

    :Parameters:
        - `count`: number of objects to pass into the template
        - `name`: name of the template ( page/export/banner.html is default )
        - `models`: list of Model classes to include
    """
    # named template (if any) takes precedence, generic banner is the fallback
    templates = []
    if name:
        templates.append('page/export/%s.html' % name)
    templates.append('page/export/banner.html')

    try:
        cat = Category.objects.get_by_tree_path('')
    except Category.DoesNotExist:
        raise Http404()

    listing = Listing.objects.get_listing(count=count, category=cat)
    context = {'category': cat, 'listing': listing}
    return render(request, templates, context, content_type=content_type)
[ "def", "export", "(", "request", ",", "count", ",", "name", "=", "''", ",", "content_type", "=", "None", ")", ":", "t_list", "=", "[", "]", "if", "name", ":", "t_list", ".", "append", "(", "'page/export/%s.html'", "%", "name", ")", "t_list", ".", "ap...
Export banners. :Parameters: - `count`: number of objects to pass into the template - `name`: name of the template ( page/export/banner.html is default ) - `models`: list of Model classes to include
[ "Export", "banners", "." ]
python
train
Qiskit/qiskit-terra
qiskit/tools/qcvv/fitters.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/tools/qcvv/fitters.py#L29-L32
def osc_fit_fun(x, a, tau, f, phi, c):
    """Function used to fit the decay cosine."""
    # pylint: disable=invalid-name
    envelope = a * np.exp(-x / tau)
    oscillation = np.cos(2 * np.pi * f * x + phi)
    return envelope * oscillation + c
[ "def", "osc_fit_fun", "(", "x", ",", "a", ",", "tau", ",", "f", ",", "phi", ",", "c", ")", ":", "# pylint: disable=invalid-name", "return", "a", "*", "np", ".", "exp", "(", "-", "x", "/", "tau", ")", "*", "np", ".", "cos", "(", "2", "*", "np", ...
Function used to fit the decay cosine.
[ "Function", "used", "to", "fit", "the", "decay", "cosine", "." ]
python
test
taskcluster/taskcluster-client.py
taskcluster/aio/queue.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/queue.py#L335-L346
async def reportCompleted(self, *args, **kwargs):
    """
    Report Run Completed

    Report a task completed, resolving the run as `completed`.

    This method gives output: ``v1/task-status-response.json#``

    This method is ``stable``
    """
    func_info = self.funcinfo["reportCompleted"]
    return await self._makeApiCall(func_info, *args, **kwargs)
[ "async", "def", "reportCompleted", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"reportCompleted\"", "]", ",", "*", "args", ",", "*", "*", "kwargs...
Report Run Completed Report a task completed, resolving the run as `completed`. This method gives output: ``v1/task-status-response.json#`` This method is ``stable``
[ "Report", "Run", "Completed" ]
python
train
PolyJIT/benchbuild
benchbuild/utils/wrapping.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L43-L65
def strip_path_prefix(ipath, prefix):
    """
    Strip prefix from path.

    Args:
        ipath: input path
        prefix: the prefix to remove, if it is found in :ipath:

    Examples:
        >>> strip_path_prefix("/foo/bar", "/bar")
        '/foo/bar'
        >>> strip_path_prefix("/foo/bar", "/")
        'foo/bar'
        >>> strip_path_prefix("/foo/bar", "/foo")
        '/bar'
        >>> strip_path_prefix("/foo/bar", "None")
        '/foo/bar'
    """
    # no prefix, or prefix absent at the start: return the path untouched
    if prefix is None or not ipath.startswith(prefix):
        return ipath
    return ipath[len(prefix):]
[ "def", "strip_path_prefix", "(", "ipath", ",", "prefix", ")", ":", "if", "prefix", "is", "None", ":", "return", "ipath", "return", "ipath", "[", "len", "(", "prefix", ")", ":", "]", "if", "ipath", ".", "startswith", "(", "prefix", ")", "else", "ipath" ...
Strip prefix from path. Args: ipath: input path prefix: the prefix to remove, if it is found in :ipath: Examples: >>> strip_path_prefix("/foo/bar", "/bar") '/foo/bar' >>> strip_path_prefix("/foo/bar", "/") 'foo/bar' >>> strip_path_prefix("/foo/bar", "/foo") '/bar' >>> strip_path_prefix("/foo/bar", "None") '/foo/bar'
[ "Strip", "prefix", "from", "path", "." ]
python
train
csparpa/pyowm
pyowm/weatherapi25/forecaster.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/forecaster.py#L430-L444
def get_weather_at(self, timeobject):
    """
    Gives the *Weather* item in the forecast that is closest in time to
    the time value conveyed by the parameter.

    :param timeobject: may be a UNIX time, a ``datetime.datetime`` object
        or an ISO8601-formatted string in the format
        ``YYYY-MM-DD HH:MM:SS+00``
    :type timeobject: long/int, ``datetime.datetime`` or str
    :returns: a *Weather* object
    """
    target_time = timeformatutils.to_UNIXtime(timeobject)
    return weatherutils.find_closest_weather(
        self._forecast.get_weathers(), target_time)
[ "def", "get_weather_at", "(", "self", ",", "timeobject", ")", ":", "return", "weatherutils", ".", "find_closest_weather", "(", "self", ".", "_forecast", ".", "get_weathers", "(", ")", ",", "timeformatutils", ".", "to_UNIXtime", "(", "timeobject", ")", ")" ]
Gives the *Weather* item in the forecast that is closest in time to the time value conveyed by the parameter :param timeobject: may be a UNIX time, a ``datetime.datetime`` object or an ISO8601-formatted string in the format ``YYYY-MM-DD HH:MM:SS+00`` :type timeobject: long/int, ``datetime.datetime`` or str) :returns: a *Weather* object
[ "Gives", "the", "*", "Weather", "*", "item", "in", "the", "forecast", "that", "is", "closest", "in", "time", "to", "the", "time", "value", "conveyed", "by", "the", "parameter" ]
python
train
shaunduncan/nosqlite
nosqlite.py
https://github.com/shaunduncan/nosqlite/blob/3033c029b7c8290c66a8b36dc512e560505d4c85/nosqlite.py#L203-L287
def _apply_query(self, query, document):
    """
    Applies a query to a document. Returns True if the document
    meets the criteria of the supplied query. The ``query`` argument
    generally follows mongodb style syntax and consists of the following
    logical checks and operators.

    Logical: $and, $or, $nor, $not
    Operators: $eq, $ne, $gt, $gte, $lt, $lte, $mod, $in, $nin, $all

    If no logical operator is supplied, it assumed that all field
    checks must pass. For example, these are equivalent:

        {'foo': 'bar', 'baz': 'qux'}
        {'$and': [{'foo': 'bar'}, {'baz': 'qux'}]}

    Both logical and operational queries can be nested in a complex fashion:

        {
            'bar': 'baz',
            '$or': [
                {
                    'foo': {
                        '$gte': 0,
                        '$lte': 10,
                        '$mod': [2, 0]
                    }
                },
                {
                    'foo': {
                        '$gt': 10,
                        '$mod': [2, 1]
                    }
                },
            ]
        }

    In the previous example, this will return any document where the 'bar'
    key is equal to 'baz' and either the 'foo' key is an even number
    between 0 and 10 or is an odd number greater than 10.
    """
    matches = []  # A list of booleans
    reapply = lambda q: self._apply_query(q, document)

    for field, value in query.items():
        # A more complex query type $and, $or, etc
        if field == '$and':
            matches.append(all(map(reapply, value)))
        elif field == '$or':
            matches.append(any(map(reapply, value)))
        elif field == '$nor':
            matches.append(not any(map(reapply, value)))
        elif field == '$not':
            matches.append(not self._apply_query(value, document))

        # Invoke a query operator
        elif isinstance(value, dict):
            for operator, arg in value.items():
                if not self._get_operator_fn(operator)(field, arg, document):
                    matches.append(False)
                    break
            else:
                # every operator in the sub-query passed
                matches.append(True)

        # Standard equality check failed at the top level
        elif value != document.get(field, None):
            # check if field contains a dot: dotted fields are resolved by
            # walking nested dicts one path segment at a time
            if '.' in field:
                nodes = field.split('.')
                document_section = document
                try:
                    for path in nodes[:-1]:
                        document_section = document_section.get(path, None)
                except AttributeError:
                    # an intermediate value was not a dict
                    document_section = None

                if document_section is None:
                    matches.append(False)
                else:
                    if value != document_section.get(nodes[-1], None):
                        matches.append(False)
            else:
                matches.append(False)

    # the document matches only when every recorded check passed
    return all(matches)
[ "def", "_apply_query", "(", "self", ",", "query", ",", "document", ")", ":", "matches", "=", "[", "]", "# A list of booleans", "reapply", "=", "lambda", "q", ":", "self", ".", "_apply_query", "(", "q", ",", "document", ")", "for", "field", ",", "value", ...
Applies a query to a document. Returns True if the document meets the criteria of the supplied query. The ``query`` argument generally follows mongodb style syntax and consists of the following logical checks and operators. Logical: $and, $or, $nor, $not Operators: $eq, $ne, $gt, $gte, $lt, $lte, $mod, $in, $nin, $all If no logical operator is supplied, it assumed that all field checks must pass. For example, these are equivalent: {'foo': 'bar', 'baz': 'qux'} {'$and': [{'foo': 'bar'}, {'baz': 'qux'}]} Both logical and operational queries can be nested in a complex fashion: { 'bar': 'baz', '$or': [ { 'foo': { '$gte': 0, '$lte': 10, '$mod': [2, 0] } }, { 'foo': { '$gt': 10, '$mod': [2, 1] } }, ] } In the previous example, this will return any document where the 'bar' key is equal to 'baz' and either the 'foo' key is an even number between 0 and 10 or is an odd number greater than 10.
[ "Applies", "a", "query", "to", "a", "document", ".", "Returns", "True", "if", "the", "document", "meets", "the", "criteria", "of", "the", "supplied", "query", ".", "The", "query", "argument", "generally", "follows", "mongodb", "style", "syntax", "and", "cons...
python
train
push-things/django-th
th_wallabag/my_wallabag.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_wallabag/my_wallabag.py#L191-L207
def auth(self, request):
    """
    let's auth the user to the Service

    :param request: request object
    :return: callback url
    :rtype: string that contains the url to redirect after auth
    """
    service = UserService.objects.get(user=request.user,
                                      name='ServiceWallabag')
    callback_url = '%s://%s%s' % (request.scheme,
                                  request.get_host(),
                                  reverse('wallabag_callback'))
    credentials = {
        'username': service.username,
        'password': service.password,
        'client_id': service.client_id,
        'client_secret': service.client_secret,
    }
    # the token is stashed in the session for the callback view to pick up
    request.session['oauth_token'] = Wall.get_token(host=service.host,
                                                    **credentials)
    return callback_url
[ "def", "auth", "(", "self", ",", "request", ")", ":", "service", "=", "UserService", ".", "objects", ".", "get", "(", "user", "=", "request", ".", "user", ",", "name", "=", "'ServiceWallabag'", ")", "callback_url", "=", "'%s://%s%s'", "%", "(", "request"...
let's auth the user to the Service :param request: request object :return: callback url :rtype: string that contains the url to redirect after auth
[ "let", "s", "auth", "the", "user", "to", "the", "Service", ":", "param", "request", ":", "request", "object", ":", "return", ":", "callback", "url", ":", "rtype", ":", "string", "that", "contains", "the", "url", "to", "redirect", "after", "auth" ]
python
train
jelmer/python-fastimport
fastimport/parser.py
https://github.com/jelmer/python-fastimport/blob/5cef9e037b7d7b37f58f522ac9ea4e343e6a1dff/fastimport/parser.py#L446-L457
def _get_from(self, required_for=None): """Parse a from section.""" line = self.next_line() if line is None: return None elif line.startswith(b'from '): return line[len(b'from '):] elif required_for: self.abort(errors.MissingSection, required_for, 'from') else: self.push_line(line) return None
[ "def", "_get_from", "(", "self", ",", "required_for", "=", "None", ")", ":", "line", "=", "self", ".", "next_line", "(", ")", "if", "line", "is", "None", ":", "return", "None", "elif", "line", ".", "startswith", "(", "b'from '", ")", ":", "return", "...
Parse a from section.
[ "Parse", "a", "from", "section", "." ]
python
train
jmbhughes/suvi-trainer
suvitrainer/gui.py
https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/gui.py#L459-L476
def disable_multicolor(self):
    """ swap from the multicolor image to the single color image """
    # grey out and disable every multicolor control
    for color in ('red', 'green', 'blue'):
        for widget in (self.multicolorscales[color],
                       self.multicolordropdowns[color],
                       self.multicolorminscale[color],
                       self.multicolormaxscale[color]):
            widget.config(state=tk.DISABLED, bg='grey')
        self.multicolorframes[color].config(bg='grey')
        self.multicolorlabels[color].config(bg='grey')

    # re-enable the single color controls in the theme color
    theme = self.single_color_theme
    for widget in (self.singlecolorscale,
                   self.singlecolordropdown,
                   self.singlecolorminscale,
                   self.singlecolormaxscale):
        widget.config(state=tk.NORMAL, bg=theme)
    self.singlecolorframe.config(bg=theme)
    self.singlecolorlabel.config(bg=theme)
[ "def", "disable_multicolor", "(", "self", ")", ":", "# disable the multicolor image", "for", "color", "in", "[", "'red'", ",", "'green'", ",", "'blue'", "]", ":", "self", ".", "multicolorscales", "[", "color", "]", ".", "config", "(", "state", "=", "tk", "...
swap from the multicolor image to the single color image
[ "swap", "from", "the", "multicolor", "image", "to", "the", "single", "color", "image" ]
python
train
singularityhub/sregistry-cli
sregistry/main/dropbox/__init__.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/dropbox/__init__.py#L40-L60
def _get_metadata(self, image_file=None, dbx_metadata=None): '''this is a wrapper around the main client.get_metadata to first parse a Dropbox FileMetadata into a dicionary, then pass it on to the primary get_metadata function. Parameters ========== image_file: the full path to the image file that had metadata extracted metadata: the Dropbox FileMetadata to parse. ''' metadata = dict() if dbx_metadata is not None: for key in dbx_metadata.__dir__(): value = getattr(dbx_metadata, key) if type(value) in [str, datetime.datetime, bool, int, float]: metadata[key.strip('_')] = value return self.get_metadata(image_file, names=metadata)
[ "def", "_get_metadata", "(", "self", ",", "image_file", "=", "None", ",", "dbx_metadata", "=", "None", ")", ":", "metadata", "=", "dict", "(", ")", "if", "dbx_metadata", "is", "not", "None", ":", "for", "key", "in", "dbx_metadata", ".", "__dir__", "(", ...
this is a wrapper around the main client.get_metadata to first parse a Dropbox FileMetadata into a dicionary, then pass it on to the primary get_metadata function. Parameters ========== image_file: the full path to the image file that had metadata extracted metadata: the Dropbox FileMetadata to parse.
[ "this", "is", "a", "wrapper", "around", "the", "main", "client", ".", "get_metadata", "to", "first", "parse", "a", "Dropbox", "FileMetadata", "into", "a", "dicionary", "then", "pass", "it", "on", "to", "the", "primary", "get_metadata", "function", "." ]
python
test
Opentrons/opentrons
update-server/otupdate/buildroot/name_management.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/update-server/otupdate/buildroot/name_management.py#L98-L146
async def setup_hostname() -> str: """ Intended to be run when the server starts. Sets the machine hostname. The machine hostname is set from the systemd-generated machine-id, which changes at every boot. Once the hostname is set, we restart avahi. This is a separate task from establishing and changing the opentrons machine name, which is UTF-8 and stored in /etc/machine-info as the PRETTY_HOSTNAME and used in the avahi service name. :returns: the hostname """ machine_id = open('/etc/machine-id').read().strip() hostname = machine_id[:6] with open('/etc/hostname', 'w') as ehn: ehn.write(f'{hostname}\n') # First, we run hostnamed which will set the transient hostname # and loaded static hostname from the value we just wrote to # /etc/hostname LOG.debug("Setting hostname") proc = await asyncio.create_subprocess_exec( 'hostname', '-F', '/etc/hostname', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) stdout, stderr = await proc.communicate() ret = proc.returncode if ret != 0: LOG.error( f'Error starting hostname: {ret} ' f'stdout: {stdout} stderr: {stderr}') raise RuntimeError("Couldn't run hostname") # Then, with the hostname set, we can restart avahi LOG.debug("Restarting avahi") proc = await asyncio.create_subprocess_exec( 'systemctl', 'restart', 'avahi-daemon', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) stdout, stderr = await proc.communicate() ret = proc.returncode if ret != 0: LOG.error( f'Error restarting avahi-daemon: {ret} ' f'stdout: {stdout} stderr: {stderr}') raise RuntimeError("Error restarting avahi") LOG.debug("Updated hostname and restarted avahi OK") return hostname
[ "async", "def", "setup_hostname", "(", ")", "->", "str", ":", "machine_id", "=", "open", "(", "'/etc/machine-id'", ")", ".", "read", "(", ")", ".", "strip", "(", ")", "hostname", "=", "machine_id", "[", ":", "6", "]", "with", "open", "(", "'/etc/hostna...
Intended to be run when the server starts. Sets the machine hostname. The machine hostname is set from the systemd-generated machine-id, which changes at every boot. Once the hostname is set, we restart avahi. This is a separate task from establishing and changing the opentrons machine name, which is UTF-8 and stored in /etc/machine-info as the PRETTY_HOSTNAME and used in the avahi service name. :returns: the hostname
[ "Intended", "to", "be", "run", "when", "the", "server", "starts", ".", "Sets", "the", "machine", "hostname", "." ]
python
train
fastai/fastai
fastai/vision/image.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L589-L592
def _get_crop_target(target_px:Union[int,TensorImageSize], mult:int=None)->Tuple[int,int]: "Calc crop shape of `target_px` to nearest multiple of `mult`." target_r,target_c = tis2hw(target_px) return _round_multiple(target_r,mult),_round_multiple(target_c,mult)
[ "def", "_get_crop_target", "(", "target_px", ":", "Union", "[", "int", ",", "TensorImageSize", "]", ",", "mult", ":", "int", "=", "None", ")", "->", "Tuple", "[", "int", ",", "int", "]", ":", "target_r", ",", "target_c", "=", "tis2hw", "(", "target_px"...
Calc crop shape of `target_px` to nearest multiple of `mult`.
[ "Calc", "crop", "shape", "of", "target_px", "to", "nearest", "multiple", "of", "mult", "." ]
python
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2004-L2078
def assessments(self, words=[], negation=True): """ Returns a list of (chunk, polarity, subjectivity, label)-tuples for the given list of words: where chunk is a list of successive words: a known word optionally preceded by a modifier ("very good") or a negation ("not good"). """ a = [] m = None # Preceding modifier (i.e., adverb or adjective). n = None # Preceding negation (e.g., "not beautiful"). for w, pos in words: # Only assess known words, preferably by part-of-speech tag. # Including unknown words (polarity 0.0 and subjectivity 0.0) lowers the average. if w is None: continue if w in self and pos in self[w]: p, s, i = self[w][pos] # Known word not preceded by a modifier ("good"). if m is None: a.append(dict(w=[w], p=p, s=s, i=i, n=1, x=self.labeler.get(w))) # Known word preceded by a modifier ("really good"). if m is not None: a[-1]["w"].append(w) a[-1]["p"] = max(-1.0, min(p * a[-1]["i"], +1.0)) a[-1]["s"] = max(-1.0, min(s * a[-1]["i"], +1.0)) a[-1]["i"] = i a[-1]["x"] = self.labeler.get(w) # Known word preceded by a negation ("not really good"). if n is not None: a[-1]["w"].insert(0, n) a[-1]["i"] = 1.0 / a[-1]["i"] a[-1]["n"] = -1 # Known word may be a negation. # Known word may be modifying the next word (i.e., it is a known adverb). m = None n = None if pos and pos in self.modifiers or any(map(self[w].__contains__, self.modifiers)): m = (w, pos) if negation and w in self.negations: n = w else: # Unknown word may be a negation ("not good"). if negation and w in self.negations: n = w # Unknown word. Retain negation across small words ("not a good"). elif n and len(w.strip("'")) > 1: n = None # Unknown word may be a negation preceded by a modifier ("really not good"). if n is not None and m is not None and (pos in self.modifiers or self.modifier(m[0])): a[-1]["w"].append(n) a[-1]["n"] = -1 n = None # Unknown word. Retain modifier across small words ("really is a good"). elif m and len(w) > 2: m = None # Exclamation marks boost previous word. if w == "!" 
and len(a) > 0: a[-1]["w"].append("!") a[-1]["p"] = max(-1.0, min(a[-1]["p"] * 1.25, +1.0)) # Exclamation marks in parentheses indicate sarcasm. if w == "(!)": a.append(dict(w=[w], p=0.0, s=1.0, i=1.0, n=1, x=IRONY)) # EMOTICONS: {("grin", +1.0): set((":-D", ":D"))} if w.isalpha() is False and len(w) <= 5 and w not in PUNCTUATION: # speedup for (type, p), e in EMOTICONS.items(): if w in map(lambda e: e.lower(), e): a.append(dict(w=[w], p=p, s=1.0, i=1.0, n=1, x=MOOD)) break for i in range(len(a)): w = a[i]["w"] p = a[i]["p"] s = a[i]["s"] n = a[i]["n"] x = a[i]["x"] # "not good" = slightly bad, "not bad" = slightly good. a[i] = (w, p * -0.5 if n < 0 else p, s, x) return a
[ "def", "assessments", "(", "self", ",", "words", "=", "[", "]", ",", "negation", "=", "True", ")", ":", "a", "=", "[", "]", "m", "=", "None", "# Preceding modifier (i.e., adverb or adjective).", "n", "=", "None", "# Preceding negation (e.g., \"not beautiful\").", ...
Returns a list of (chunk, polarity, subjectivity, label)-tuples for the given list of words: where chunk is a list of successive words: a known word optionally preceded by a modifier ("very good") or a negation ("not good").
[ "Returns", "a", "list", "of", "(", "chunk", "polarity", "subjectivity", "label", ")", "-", "tuples", "for", "the", "given", "list", "of", "words", ":", "where", "chunk", "is", "a", "list", "of", "successive", "words", ":", "a", "known", "word", "optional...
python
train
hydraplatform/hydra-base
hydra_base/lib/template.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/template.py#L1708-L1722
def delete_typeattr(typeattr,**kwargs): """ Remove an typeattr from an existing type """ tmpltype = get_templatetype(typeattr.type_id, user_id=kwargs.get('user_id')) ta = db.DBSession.query(TypeAttr).filter(TypeAttr.type_id == typeattr.type_id, TypeAttr.attr_id == typeattr.attr_id).one() tmpltype.typeattrs.remove(ta) db.DBSession.flush() return 'OK'
[ "def", "delete_typeattr", "(", "typeattr", ",", "*", "*", "kwargs", ")", ":", "tmpltype", "=", "get_templatetype", "(", "typeattr", ".", "type_id", ",", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", ")", "ta", "=", "db", ".", "DBSession", ...
Remove an typeattr from an existing type
[ "Remove", "an", "typeattr", "from", "an", "existing", "type" ]
python
train
frmdstryr/enamlx
enamlx/widgets/table_view.py
https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/widgets/table_view.py#L63-L69
def _update_proxy(self, change): """ An observer which sends state change to the proxy. """ if change['name'] == 'items': self._update_visible_area() super(TableView, self)._update_proxy(change)
[ "def", "_update_proxy", "(", "self", ",", "change", ")", ":", "if", "change", "[", "'name'", "]", "==", "'items'", ":", "self", ".", "_update_visible_area", "(", ")", "super", "(", "TableView", ",", "self", ")", ".", "_update_proxy", "(", "change", ")" ]
An observer which sends state change to the proxy.
[ "An", "observer", "which", "sends", "state", "change", "to", "the", "proxy", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/slugs.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/slugs.py#L7883-L7899
def slugs_navigation_encode(self, u_m, phi_c, theta_c, psiDot_c, ay_body, totalDist, dist2Go, fromWP, toWP, h_c): ''' Data used in the navigation algorithm. u_m : Measured Airspeed prior to the nav filter in m/s (float) phi_c : Commanded Roll (float) theta_c : Commanded Pitch (float) psiDot_c : Commanded Turn rate (float) ay_body : Y component of the body acceleration (float) totalDist : Total Distance to Run on this leg of Navigation (float) dist2Go : Remaining distance to Run on this leg of Navigation (float) fromWP : Origin WP (uint8_t) toWP : Destination WP (uint8_t) h_c : Commanded altitude in 0.1 m (uint16_t) ''' return MAVLink_slugs_navigation_message(u_m, phi_c, theta_c, psiDot_c, ay_body, totalDist, dist2Go, fromWP, toWP, h_c)
[ "def", "slugs_navigation_encode", "(", "self", ",", "u_m", ",", "phi_c", ",", "theta_c", ",", "psiDot_c", ",", "ay_body", ",", "totalDist", ",", "dist2Go", ",", "fromWP", ",", "toWP", ",", "h_c", ")", ":", "return", "MAVLink_slugs_navigation_message", "(", "...
Data used in the navigation algorithm. u_m : Measured Airspeed prior to the nav filter in m/s (float) phi_c : Commanded Roll (float) theta_c : Commanded Pitch (float) psiDot_c : Commanded Turn rate (float) ay_body : Y component of the body acceleration (float) totalDist : Total Distance to Run on this leg of Navigation (float) dist2Go : Remaining distance to Run on this leg of Navigation (float) fromWP : Origin WP (uint8_t) toWP : Destination WP (uint8_t) h_c : Commanded altitude in 0.1 m (uint16_t)
[ "Data", "used", "in", "the", "navigation", "algorithm", "." ]
python
train
dpmcmlxxvi/pixelscan
pixelscan/pixelscan.py
https://github.com/dpmcmlxxvi/pixelscan/blob/d641207b13a8fc5bf7ac9964b982971652bb0a7e/pixelscan/pixelscan.py#L52-L61
def hilbertrot(n, x, y, rx, ry): """Rotates and flips a quadrant appropriately for the Hilbert scan generator. See https://en.wikipedia.org/wiki/Hilbert_curve. """ if ry == 0: if rx == 1: x = n - 1 - x y = n - 1 - y return y, x return x, y
[ "def", "hilbertrot", "(", "n", ",", "x", ",", "y", ",", "rx", ",", "ry", ")", ":", "if", "ry", "==", "0", ":", "if", "rx", "==", "1", ":", "x", "=", "n", "-", "1", "-", "x", "y", "=", "n", "-", "1", "-", "y", "return", "y", ",", "x", ...
Rotates and flips a quadrant appropriately for the Hilbert scan generator. See https://en.wikipedia.org/wiki/Hilbert_curve.
[ "Rotates", "and", "flips", "a", "quadrant", "appropriately", "for", "the", "Hilbert", "scan", "generator", ".", "See", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Hilbert_curve", "." ]
python
train
marcharper/python-ternary
ternary/colormapping.py
https://github.com/marcharper/python-ternary/blob/a4bef393ec9df130d4b55707293c750498a01843/ternary/colormapping.py#L73-L101
def colorbar_hack(ax, vmin, vmax, cmap, scientific=False, cbarlabel=None, **kwargs): """ Colorbar hack to insert colorbar on ternary plot. Called by heatmap, not intended for direct usage. Parameters ---------- vmin: float Minimum value to portray in colorbar vmax: float Maximum value to portray in colorbar cmap: Matplotlib colormap Matplotlib colormap to use """ # http://stackoverflow.com/questions/8342549/matplotlib-add-colorbar-to-a-sequence-of-line-plots norm = plt.Normalize(vmin=vmin, vmax=vmax) sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) sm._A = [] cb = plt.colorbar(sm, ax=ax, **kwargs) if cbarlabel is not None: cb.set_label(cbarlabel) if scientific: cb.locator = matplotlib.ticker.LinearLocator(numticks=7) cb.formatter = matplotlib.ticker.ScalarFormatter() cb.formatter.set_powerlimits((0, 0)) cb.update_ticks()
[ "def", "colorbar_hack", "(", "ax", ",", "vmin", ",", "vmax", ",", "cmap", ",", "scientific", "=", "False", ",", "cbarlabel", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# http://stackoverflow.com/questions/8342549/matplotlib-add-colorbar-to-a-sequence-of-line-plo...
Colorbar hack to insert colorbar on ternary plot. Called by heatmap, not intended for direct usage. Parameters ---------- vmin: float Minimum value to portray in colorbar vmax: float Maximum value to portray in colorbar cmap: Matplotlib colormap Matplotlib colormap to use
[ "Colorbar", "hack", "to", "insert", "colorbar", "on", "ternary", "plot", ".", "Called", "by", "heatmap", "not", "intended", "for", "direct", "usage", ".", "Parameters", "----------", "vmin", ":", "float", "Minimum", "value", "to", "portray", "in", "colorbar", ...
python
train
helto4real/python-packages
smhi/smhi/smhi_lib.py
https://github.com/helto4real/python-packages/blob/8b65342eea34e370ea6fc5abdcb55e544c51fec5/smhi/smhi/smhi_lib.py#L258-L306
def _get_forecast(api_result: dict) -> List[SmhiForecast]: """Converts results fråm API to SmhiForeCast list""" forecasts = [] # Need the ordered dict to get # the days in order in next stage forecasts_ordered = OrderedDict() forecasts_ordered = _get_all_forecast_from_api(api_result) # Used to calc the daycount day_nr = 1 for day in forecasts_ordered: forecasts_day = forecasts_ordered[day] if day_nr == 1: # Add the most recent forecast forecasts.append(copy.deepcopy(forecasts_day[0])) total_precipitation = float(0.0) forecast_temp_max = -100.0 forecast_temp_min = 100.0 forecast = None for forcast_day in forecasts_day: temperature = forcast_day.temperature if forecast_temp_min > temperature: forecast_temp_min = temperature if forecast_temp_max < temperature: forecast_temp_max = temperature if forcast_day.valid_time.hour == 12: forecast = copy.deepcopy(forcast_day) total_precipitation = total_precipitation + \ forcast_day._total_precipitation if forecast is None: # We passed 12 noon, set to current forecast = forecasts_day[0] forecast._temperature_max = forecast_temp_max forecast._temperature_min = forecast_temp_min forecast._total_precipitation = total_precipitation forecast._mean_precipitation = total_precipitation/24 forecasts.append(forecast) day_nr = day_nr + 1 return forecasts
[ "def", "_get_forecast", "(", "api_result", ":", "dict", ")", "->", "List", "[", "SmhiForecast", "]", ":", "forecasts", "=", "[", "]", "# Need the ordered dict to get\r", "# the days in order in next stage\r", "forecasts_ordered", "=", "OrderedDict", "(", ")", "forecas...
Converts results fråm API to SmhiForeCast list
[ "Converts", "results", "fråm", "API", "to", "SmhiForeCast", "list" ]
python
train
pmacosta/pexdoc
pexdoc/exh.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/exh.py#L1025-L1063
def _get_exceptions_db(self): """Return a list of dictionaries suitable to be used with ptrie module.""" template = "{extype} ({exmsg}){raised}" if not self._full_cname: # When full callable name is not used the calling path is # irrelevant and there is no function associated with an # exception ret = [] for _, fdict in self._ex_dict.items(): for key in fdict.keys(): ret.append( { "name": fdict[key]["name"], "data": template.format( extype=_ex_type_str(key[0]), exmsg=key[1], raised="*" if fdict[key]["raised"][0] else "", ), } ) return ret # When full callable name is used, all calling paths are saved ret = [] for fdict in self._ex_dict.values(): for key in fdict.keys(): for func_name in fdict[key]["function"]: rindex = fdict[key]["function"].index(func_name) raised = fdict[key]["raised"][rindex] ret.append( { "name": self.decode_call(func_name), "data": template.format( extype=_ex_type_str(key[0]), exmsg=key[1], raised="*" if raised else "", ), } ) return ret
[ "def", "_get_exceptions_db", "(", "self", ")", ":", "template", "=", "\"{extype} ({exmsg}){raised}\"", "if", "not", "self", ".", "_full_cname", ":", "# When full callable name is not used the calling path is", "# irrelevant and there is no function associated with an", "# exception...
Return a list of dictionaries suitable to be used with ptrie module.
[ "Return", "a", "list", "of", "dictionaries", "suitable", "to", "be", "used", "with", "ptrie", "module", "." ]
python
train
smdabdoub/phylotoast
bin/transform_biom.py
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/transform_biom.py#L78-L88
def arcsin_sqrt(biom_tbl): """ Applies the arcsine square root transform to the given BIOM-format table """ arcsint = lambda data, id_, md: np.arcsin(np.sqrt(data)) tbl_relabd = relative_abd(biom_tbl) tbl_asin = tbl_relabd.transform(arcsint, inplace=False) return tbl_asin
[ "def", "arcsin_sqrt", "(", "biom_tbl", ")", ":", "arcsint", "=", "lambda", "data", ",", "id_", ",", "md", ":", "np", ".", "arcsin", "(", "np", ".", "sqrt", "(", "data", ")", ")", "tbl_relabd", "=", "relative_abd", "(", "biom_tbl", ")", "tbl_asin", "=...
Applies the arcsine square root transform to the given BIOM-format table
[ "Applies", "the", "arcsine", "square", "root", "transform", "to", "the", "given", "BIOM", "-", "format", "table" ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L730-L734
def chat_delete(self, chat_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/chat/chats#delete-chat" api_path = "/api/v2/chats/{chat_id}" api_path = api_path.format(chat_id=chat_id) return self.call(api_path, method="DELETE", **kwargs)
[ "def", "chat_delete", "(", "self", ",", "chat_id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/chats/{chat_id}\"", "api_path", "=", "api_path", ".", "format", "(", "chat_id", "=", "chat_id", ")", "return", "self", ".", "call", "(", "api_...
https://developer.zendesk.com/rest_api/docs/chat/chats#delete-chat
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "chat", "/", "chats#delete", "-", "chat" ]
python
train
peterdemin/pip-compile-multi
pipcompilemulti/cli_v2.py
https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/cli_v2.py#L49-L60
def skipper(func): """Decorator that memorizes base_dir, in_ext and out_ext from OPTIONS and skips execution for duplicates.""" @functools.wraps(func) def wrapped(): """Dummy docstring to make pylint happy.""" key = (OPTIONS['base_dir'], OPTIONS['in_ext'], OPTIONS['out_ext']) if key not in seen: seen[key] = func() return seen[key] seen = {} return wrapped
[ "def", "skipper", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", ")", ":", "\"\"\"Dummy docstring to make pylint happy.\"\"\"", "key", "=", "(", "OPTIONS", "[", "'base_dir'", "]", ",", "OPTIONS", "[", "'in_ext...
Decorator that memorizes base_dir, in_ext and out_ext from OPTIONS and skips execution for duplicates.
[ "Decorator", "that", "memorizes", "base_dir", "in_ext", "and", "out_ext", "from", "OPTIONS", "and", "skips", "execution", "for", "duplicates", "." ]
python
train
SoftwareDefinedBuildings/XBOS
apps/Data_quality_analysis/Clean_Data.py
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L308-L331
def _utc_to_local(self, data, local_zone="America/Los_Angeles"): """ Adjust index of dataframe according to timezone that is requested by user. Parameters ---------- data : pd.DataFrame() Pandas dataframe of json timeseries response from server. local_zone : str pytz.timezone string of specified local timezone to change index to. Returns ------- pd.DataFrame() Pandas dataframe with timestamp index adjusted for local timezone. """ # Accounts for localtime shift data.index = data.index.tz_localize(pytz.utc).tz_convert(local_zone) # Gets rid of extra offset information so can compare with csv data data.index = data.index.tz_localize(None) return data
[ "def", "_utc_to_local", "(", "self", ",", "data", ",", "local_zone", "=", "\"America/Los_Angeles\"", ")", ":", "# Accounts for localtime shift", "data", ".", "index", "=", "data", ".", "index", ".", "tz_localize", "(", "pytz", ".", "utc", ")", ".", "tz_convert...
Adjust index of dataframe according to timezone that is requested by user. Parameters ---------- data : pd.DataFrame() Pandas dataframe of json timeseries response from server. local_zone : str pytz.timezone string of specified local timezone to change index to. Returns ------- pd.DataFrame() Pandas dataframe with timestamp index adjusted for local timezone.
[ "Adjust", "index", "of", "dataframe", "according", "to", "timezone", "that", "is", "requested", "by", "user", "." ]
python
train
eallik/spinoff
spinoff/actor/cell.py
https://github.com/eallik/spinoff/blob/06b00d6b86c7422c9cb8f9a4b2915906e92b7d52/spinoff/actor/cell.py#L94-L108
def lookup_cell(self, uri): """Looks up a local actor by its location relative to this actor.""" steps = uri.steps if steps[0] == '': found = self.root steps.popleft() else: found = self for step in steps: assert step != '' found = found.get_child(step) if not found: break found = found._cell return found
[ "def", "lookup_cell", "(", "self", ",", "uri", ")", ":", "steps", "=", "uri", ".", "steps", "if", "steps", "[", "0", "]", "==", "''", ":", "found", "=", "self", ".", "root", "steps", ".", "popleft", "(", ")", "else", ":", "found", "=", "self", ...
Looks up a local actor by its location relative to this actor.
[ "Looks", "up", "a", "local", "actor", "by", "its", "location", "relative", "to", "this", "actor", "." ]
python
train
edeposit/edeposit.amqp.harvester
src/edeposit/amqp/harvester/scrappers/grada_cz.py
https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/grada_cz.py#L148-L182
def _parse_format_pages_isbn(html_chunk): """ Parse format, number of pages and ISBN. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: tuple: (format, pages, isbn), all as string. """ ppi = get_first_content( html_chunk.find("div", {"class": "price-overflow"}) ) if not ppi: return None, None, None # all information this function should parse are at one line ppi = filter(lambda x: x.strip(), ppi.split("<br />"))[0] # parse isbn isbn = dhtmlparser.parseString(ppi) isbn = isbn.find("b") isbn = isbn[0].getContent() if isbn else None # parse pages and format pages = None book_format = None details = ppi.split("|") if len(details) >= 2: book_format = details[0].strip() pages = details[1].strip() return book_format, pages, isbn
[ "def", "_parse_format_pages_isbn", "(", "html_chunk", ")", ":", "ppi", "=", "get_first_content", "(", "html_chunk", ".", "find", "(", "\"div\"", ",", "{", "\"class\"", ":", "\"price-overflow\"", "}", ")", ")", "if", "not", "ppi", ":", "return", "None", ",", ...
Parse format, number of pages and ISBN. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: tuple: (format, pages, isbn), all as string.
[ "Parse", "format", "number", "of", "pages", "and", "ISBN", "." ]
python
train
pudo/normality
normality/encoding.py
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/encoding.py#L8-L19
def normalize_encoding(encoding, default=DEFAULT_ENCODING): """Normalize the encoding name, replace ASCII w/ UTF-8.""" if encoding is None: return default encoding = encoding.lower().strip() if encoding in ['', 'ascii']: return default try: codecs.lookup(encoding) return encoding except LookupError: return default
[ "def", "normalize_encoding", "(", "encoding", ",", "default", "=", "DEFAULT_ENCODING", ")", ":", "if", "encoding", "is", "None", ":", "return", "default", "encoding", "=", "encoding", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "encoding", "in", ...
Normalize the encoding name, replace ASCII w/ UTF-8.
[ "Normalize", "the", "encoding", "name", "replace", "ASCII", "w", "/", "UTF", "-", "8", "." ]
python
train
nudomarinero/wquantiles
wquantiles.py
https://github.com/nudomarinero/wquantiles/blob/55120dfd9730688c9dc32020563e53ebb902e7f0/wquantiles.py#L11-L54
def quantile_1D(data, weights, quantile): """ Compute the weighted quantile of a 1D numpy array. Parameters ---------- data : ndarray Input array (one dimension). weights : ndarray Array with the weights of the same size of `data`. quantile : float Quantile to compute. It must have a value between 0 and 1. Returns ------- quantile_1D : float The output value. """ # Check the data if not isinstance(data, np.matrix): data = np.asarray(data) if not isinstance(weights, np.matrix): weights = np.asarray(weights) nd = data.ndim if nd != 1: raise TypeError("data must be a one dimensional array") ndw = weights.ndim if ndw != 1: raise TypeError("weights must be a one dimensional array") if data.shape != weights.shape: raise TypeError("the length of data and weights must be the same") if ((quantile > 1.) or (quantile < 0.)): raise ValueError("quantile must have a value between 0. and 1.") # Sort the data ind_sorted = np.argsort(data) sorted_data = data[ind_sorted] sorted_weights = weights[ind_sorted] # Compute the auxiliary arrays Sn = np.cumsum(sorted_weights) # TODO: Check that the weights do not sum zero #assert Sn != 0, "The sum of the weights must not be zero" Pn = (Sn-0.5*sorted_weights)/Sn[-1] # Get the value of the weighted median return np.interp(quantile, Pn, sorted_data)
[ "def", "quantile_1D", "(", "data", ",", "weights", ",", "quantile", ")", ":", "# Check the data", "if", "not", "isinstance", "(", "data", ",", "np", ".", "matrix", ")", ":", "data", "=", "np", ".", "asarray", "(", "data", ")", "if", "not", "isinstance"...
Compute the weighted quantile of a 1D numpy array. Parameters ---------- data : ndarray Input array (one dimension). weights : ndarray Array with the weights of the same size of `data`. quantile : float Quantile to compute. It must have a value between 0 and 1. Returns ------- quantile_1D : float The output value.
[ "Compute", "the", "weighted", "quantile", "of", "a", "1D", "numpy", "array", "." ]
python
train
Pixelapse/pyglass
pyglass/api.py
https://github.com/Pixelapse/pyglass/blob/83cd0ff2b0b7cdaf4ec6f54559a626e67455cd33/pyglass/api.py#L9-L23
def preview(src_path): ''' Generates a preview of src_path in the requested format. :returns: A list of preview paths, one for each page. ''' previews = [] if sketch.is_sketchfile(src_path): previews = sketch.preview(src_path) if not previews: previews = quicklook.preview(src_path) previews = [safely_decode(preview) for preview in previews] return previews
[ "def", "preview", "(", "src_path", ")", ":", "previews", "=", "[", "]", "if", "sketch", ".", "is_sketchfile", "(", "src_path", ")", ":", "previews", "=", "sketch", ".", "preview", "(", "src_path", ")", "if", "not", "previews", ":", "previews", "=", "qu...
Generates a preview of src_path in the requested format. :returns: A list of preview paths, one for each page.
[ "Generates", "a", "preview", "of", "src_path", "in", "the", "requested", "format", ".", ":", "returns", ":", "A", "list", "of", "preview", "paths", "one", "for", "each", "page", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L347-L354
def webify_file(srcfilename: str, destfilename: str) -> None: """ Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it in the process. """ with open(srcfilename) as infile, open(destfilename, 'w') as ofile: for line_ in infile: ofile.write(escape(line_))
[ "def", "webify_file", "(", "srcfilename", ":", "str", ",", "destfilename", ":", "str", ")", "->", "None", ":", "with", "open", "(", "srcfilename", ")", "as", "infile", ",", "open", "(", "destfilename", ",", "'w'", ")", "as", "ofile", ":", "for", "line_...
Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it in the process.
[ "Rewrites", "a", "file", "from", "srcfilename", "to", "destfilename", "HTML", "-", "escaping", "it", "in", "the", "process", "." ]
python
train
tensorflow/mesh
mesh_tensorflow/ops.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4014-L4032
def multiply(x1, x2, output_shape=None, name=None): """Binary multiplication with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor """ if not isinstance(x2, Tensor): return ScalarMultiplyOperation(x1, x2).outputs[0] with tf.name_scope(name, default_name="mul"): x1, x2 = binary_arguments_to_tensors(x1, x2) return einsum( [x1, x2], output_shape=_infer_binary_broadcast_shape( x1.shape, x2.shape, output_shape))
[ "def", "multiply", "(", "x1", ",", "x2", ",", "output_shape", "=", "None", ",", "name", "=", "None", ")", ":", "if", "not", "isinstance", "(", "x2", ",", "Tensor", ")", ":", "return", "ScalarMultiplyOperation", "(", "x1", ",", "x2", ")", ".", "output...
Binary multiplication with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
[ "Binary", "multiplication", "with", "broadcasting", "." ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/address/dependent_phone_number.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/address/dependent_phone_number.py#L147-L161
def get_instance(self, payload):
    """
    Build an instance of DependentPhoneNumberInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance
    :rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance
    """
    # The account/address context comes from the list's solution mapping.
    solution = self._solution
    return DependentPhoneNumberInstance(
        self._version,
        payload,
        account_sid=solution['account_sid'],
        address_sid=solution['address_sid'],
    )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "DependentPhoneNumberInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "address_sid", "=", "self",...
Build an instance of DependentPhoneNumberInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance :rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance
[ "Build", "an", "instance", "of", "DependentPhoneNumberInstance" ]
python
train
uw-it-aca/uw-restclients-sws
uw_sws/section.py
https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L221-L233
def get_section_by_url(url, include_instructor_not_on_time_schedule=True):
    """
    Returns a uw_sws.models.Section object for the passed section url.
    """
    # Reject anything that is not a well-formed course resource URL
    # before touching the network.
    if course_url_pattern.match(url) is None:
        raise InvalidSectionURL(url)
    section_json = get_resource(url)
    return _json_to_section(
        section_json,
        include_instructor_not_on_time_schedule=(
            include_instructor_not_on_time_schedule))
[ "def", "get_section_by_url", "(", "url", ",", "include_instructor_not_on_time_schedule", "=", "True", ")", ":", "if", "not", "course_url_pattern", ".", "match", "(", "url", ")", ":", "raise", "InvalidSectionURL", "(", "url", ")", "return", "_json_to_section", "(",...
Returns a uw_sws.models.Section object for the passed section url.
[ "Returns", "a", "uw_sws", ".", "models", ".", "Section", "object", "for", "the", "passed", "section", "url", "." ]
python
train
twisted/txacme
src/txacme/endpoint.py
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/endpoint.py#L131-L154
def load_or_create_client_key(pem_path):
    """
    Load the client key from a directory, creating it if it does not exist.

    .. note:: The client key that will be created will be a 2048-bit RSA key.

    :type pem_path: ``twisted.python.filepath.FilePath``
    :param pem_path: The certificate directory to use, as with the endpoint.
    """
    key_file = pem_path.asTextMode().child(u'client.key')
    if not key_file.exists():
        # First run: mint a fresh RSA key and persist it as PEM so later
        # calls reuse the same identity.
        new_key = generate_private_key(u'rsa')
        pem_bytes = new_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption())
        key_file.setContent(pem_bytes)
        return JWKRSA(key=new_key)
    loaded_key = serialization.load_pem_private_key(
        key_file.getContent(),
        password=None,
        backend=default_backend())
    return JWKRSA(key=loaded_key)
[ "def", "load_or_create_client_key", "(", "pem_path", ")", ":", "acme_key_file", "=", "pem_path", ".", "asTextMode", "(", ")", ".", "child", "(", "u'client.key'", ")", "if", "acme_key_file", ".", "exists", "(", ")", ":", "key", "=", "serialization", ".", "loa...
Load the client key from a directory, creating it if it does not exist. .. note:: The client key that will be created will be a 2048-bit RSA key. :type pem_path: ``twisted.python.filepath.FilePath`` :param pem_path: The certificate directory to use, as with the endpoint.
[ "Load", "the", "client", "key", "from", "a", "directory", "creating", "it", "if", "it", "does", "not", "exist", "." ]
python
train
andsor/pypercolate
percolate/percolate.py
https://github.com/andsor/pypercolate/blob/92478c1fc4d4ff5ae157f7607fd74f6f9ec360ac/percolate/percolate.py#L638-L705
def _microcanonical_average_moments(moments, alpha): """ Compute the average moments of the cluster size distributions Helper function for :func:`microcanonical_averages` Parameters ---------- moments : 2-D :py:class:`numpy.ndarray` of int ``moments.shape[1] == 5`. Each array ``moments[r, :]`` is the ``moments`` field of the output of :func:`sample_states`: The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster size distribution. alpha: float Significance level. Returns ------- ret : dict Moment statistics ret['moments'] : 1-D :py:class:`numpy.ndarray` of float, size 5 The ``k``-th entry is the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. ret['moments_ci'] : 2-D :py:class:`numpy.ndarray` of float, shape (5,2) ``ret['moments_ci'][k]`` are the lower and upper bounds of the normal confidence interval of the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. See Also -------- sample_states : computation of moments microcanonical_averages : moment statistics """ ret = dict() runs = moments.shape[0] sqrt_n = np.sqrt(runs) moments_sample_mean = moments.mean(axis=0) ret['moments'] = moments_sample_mean moments_sample_std = moments.std(axis=0, ddof=1) ret['moments_ci'] = np.empty((5, 2)) for k in range(5): if moments_sample_std[k]: old_settings = np.seterr(all='raise') ret['moments_ci'][k] = scipy.stats.t.interval( 1 - alpha, df=runs - 1, loc=moments_sample_mean[k], scale=moments_sample_std[k] / sqrt_n ) np.seterr(**old_settings) else: ret['moments_ci'][k] = ( moments_sample_mean[k] * np.ones(2) ) return ret
[ "def", "_microcanonical_average_moments", "(", "moments", ",", "alpha", ")", ":", "ret", "=", "dict", "(", ")", "runs", "=", "moments", ".", "shape", "[", "0", "]", "sqrt_n", "=", "np", ".", "sqrt", "(", "runs", ")", "moments_sample_mean", "=", "moments"...
Compute the average moments of the cluster size distributions Helper function for :func:`microcanonical_averages` Parameters ---------- moments : 2-D :py:class:`numpy.ndarray` of int ``moments.shape[1] == 5`. Each array ``moments[r, :]`` is the ``moments`` field of the output of :func:`sample_states`: The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster size distribution. alpha: float Significance level. Returns ------- ret : dict Moment statistics ret['moments'] : 1-D :py:class:`numpy.ndarray` of float, size 5 The ``k``-th entry is the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. ret['moments_ci'] : 2-D :py:class:`numpy.ndarray` of float, shape (5,2) ``ret['moments_ci'][k]`` are the lower and upper bounds of the normal confidence interval of the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. See Also -------- sample_states : computation of moments microcanonical_averages : moment statistics
[ "Compute", "the", "average", "moments", "of", "the", "cluster", "size", "distributions" ]
python
valid
rgalanakis/goless
goless/backends.py
https://github.com/rgalanakis/goless/blob/286cd69482ae5a56c899a0c0d5d895772d96e83d/goless/backends.py#L227-L268
def calculate_backend(name_from_env, backends=None):
    """
    Calculates which backend to use with the following algorithm:

    - Try to read the GOLESS_BACKEND environment variable.
      Usually 'gevent' or 'stackless'.
      If a value is set but no backend is available
      or it fails to be created, this function will error.
    - Determine the default backend (gevent for PyPy,
      stackless for Python).
      If no default can be determined or created, continue.
    - Try to create all the runtimes and choose the first one
      to create successfully.
    - If no runtime can be created, return a NullBackend,
      which will error when accessed.

    The "default" backend is the less-easy backend for a runtime.
    Since PyPy has stackless by default, gevent is intentional.
    Since Stackless is a separate interpreter for CPython,
    that is more intentional than gevent.
    We feel this is a good default behavior.

    :param name_from_env: Explicit backend name (normally read from the
        GOLESS_BACKEND environment variable), or a falsy value to
        auto-detect.
    :param backends: Optional mapping of backend name -> factory callable.
        Defaults to the module-level registry.
    :raises RuntimeError: If ``name_from_env`` names a backend that is not
        in ``backends``.
    """
    if backends is None:
        backends = _default_backends
    if name_from_env:
        if name_from_env not in backends:
            # BUG FIX: report the valid names from the mapping actually in
            # use, not the module-level default registry (the two differ
            # when a caller supplies a custom ``backends``).  Sorted so the
            # message is deterministic.
            raise RuntimeError(
                'Invalid backend %r specified. Valid backends are: %s'
                % (name_from_env, sorted(backends)))
        # Allow this to raise, since it was explicitly set from the environment
        # noinspection PyCallingNonCallable
        return backends[name_from_env]()
    try:
        return _calc_default(backends)
    except SystemError:
        # No recognizable default for this runtime; fall through and
        # probe each backend in turn.
        pass
    for maker in backends.values():
        # Best-effort probing: any failure just means "try the next one".
        # noinspection PyBroadException
        try:
            return maker()
        except Exception:
            pass
    return NullBackend()
[ "def", "calculate_backend", "(", "name_from_env", ",", "backends", "=", "None", ")", ":", "if", "backends", "is", "None", ":", "backends", "=", "_default_backends", "if", "name_from_env", ":", "if", "name_from_env", "not", "in", "backends", ":", "raise", "Runt...
Calculates which backend to use with the following algorithm: - Try to read the GOLESS_BACKEND environment variable. Usually 'gevent' or 'stackless'. If a value is set but no backend is available or it fails to be created, this function will error. - Determine the default backend (gevent for PyPy, stackless for Python). If no default can be determined or created, continue. - Try to create all the runtimes and choose the first one to create successfully. - If no runtime can be created, return a NullBackend, which will error when accessed. The "default" backend is the less-easy backend for a runtime. Since PyPy has stackless by default, gevent is intentional. Since Stackless is a separate interpreter for CPython, that is more intentional than gevent. We feel this is a good default behavior.
[ "Calculates", "which", "backend", "to", "use", "with", "the", "following", "algorithm", ":" ]
python
train
joowani/quadriga
quadriga/book.py
https://github.com/joowani/quadriga/blob/412f88f414ef0cb53efa6d5841b9674eb9718359/quadriga/book.py#L37-L47
def get_ticker(self):
    """Return the latest ticker information.

    :return: Latest ticker information.
    :rtype: dict
    """
    self._log('get ticker')
    request_params = {'book': self.name}
    return self._rest_client.get(endpoint='/ticker', params=request_params)
[ "def", "get_ticker", "(", "self", ")", ":", "self", ".", "_log", "(", "'get ticker'", ")", "return", "self", ".", "_rest_client", ".", "get", "(", "endpoint", "=", "'/ticker'", ",", "params", "=", "{", "'book'", ":", "self", ".", "name", "}", ")" ]
Return the latest ticker information. :return: Latest ticker information. :rtype: dict
[ "Return", "the", "latest", "ticker", "information", "." ]
python
train
ibm-watson-iot/iot-python
src/wiotp/sdk/api/dsc/connectors.py
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/src/wiotp/sdk/api/dsc/connectors.py#L159-L185
def find(self, nameFilter=None, typeFilter=None, enabledFilter=None, serviceId=None):
    """
    Gets the list of Historian connectors, they are used to configure
    the Watson IoT Platform to store IoT data in compatible services.

    Parameters:

        - nameFilter(string) -      Filter the results by the specified name
        - typeFilter(string) -      Filter the results by the specified type,
                                    Available values : cloudant, eventstreams
        - enabledFilter(boolean) -  Filter the results by the enabled flag
        - serviceId(string) -       Filter the results by the service id
        - limit(number) -           Max number of results returned, defaults 25
        - bookmark(string) -        used for paging through results

    Throws APIException on failure.
    """
    # Only truthy filters are forwarded as query parameters.
    filters = {}
    for param_name, param_value in (("name", nameFilter),
                                    ("type", typeFilter),
                                    ("enabled", enabledFilter),
                                    ("serviceId", serviceId)):
        if param_value:
            filters[param_name] = param_value

    return IterableConnectorList(self._apiClient, filters=filters)
[ "def", "find", "(", "self", ",", "nameFilter", "=", "None", ",", "typeFilter", "=", "None", ",", "enabledFilter", "=", "None", ",", "serviceId", "=", "None", ")", ":", "queryParms", "=", "{", "}", "if", "nameFilter", ":", "queryParms", "[", "\"name\"", ...
Gets the list of Historian connectors, they are used to configure the Watson IoT Platform to store IoT data in compatible services. Parameters: - nameFilter(string) - Filter the results by the specified name - typeFilter(string) - Filter the results by the specified type, Available values : cloudant, eventstreams - enabledFilter(boolean) - Filter the results by the enabled flag - serviceId(string) - Filter the results by the service id - limit(number) - Max number of results returned, defaults 25 - bookmark(string) - used for paging through results Throws APIException on failure.
[ "Gets", "the", "list", "of", "Historian", "connectors", "they", "are", "used", "to", "configure", "the", "Watson", "IoT", "Platform", "to", "store", "IoT", "data", "in", "compatible", "services", ".", "Parameters", ":", "-", "nameFilter", "(", "string", ")",...
python
test
opendns/pyinvestigate
investigate/investigate.py
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L123-L129
def cooccurrences(self, domain):
    '''Get the cooccurrences of the given domain.

    For details, see
    https://investigate.umbrella.com/docs/api#co-occurrences
    '''
    uri_template = self._uris["cooccurrences"]
    return self.get_parse(uri_template.format(domain))
[ "def", "cooccurrences", "(", "self", ",", "domain", ")", ":", "uri", "=", "self", ".", "_uris", "[", "\"cooccurrences\"", "]", ".", "format", "(", "domain", ")", "return", "self", ".", "get_parse", "(", "uri", ")" ]
Get the cooccurrences of the given domain. For details, see https://investigate.umbrella.com/docs/api#co-occurrences
[ "Get", "the", "cooccurrences", "of", "the", "given", "domain", "." ]
python
train