Schema of the records below (column name, type, value statistics):

    repo               string, length 7 to 54
    path               string, length 4 to 192
    url                string, length 87 to 284
    code               string, length 78 to 104k
    code_tokens        list
    docstring          string, length 1 to 46.9k
    docstring_tokens   list
    language           string, 1 distinct value
    partition          string, 3 distinct values
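A minimal sketch of reading records with this schema, assuming the dump originates from a dataset published on the Hugging Face Hub; the dataset name below is a placeholder (the dump does not identify its source), and the field names are taken from the schema above:

    from datasets import load_dataset  # pip install datasets

    # "user/code-docstring-corpus" is a placeholder -- the dump does not name its source dataset.
    ds = load_dataset("user/code-docstring-corpus", split="train")

    for record in ds.select(range(3)):
        print(record["repo"], record["path"])   # e.g. "VonStruddle/PyHunter", "pyhunter/pyhunter.py"
        print(record["url"])                    # permalink to the function's source lines
        print(record["docstring"][:80])         # leading characters of the docstring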
VonStruddle/PyHunter
pyhunter/pyhunter.py
https://github.com/VonStruddle/PyHunter/blob/e14882d22527102515458cddeb8e0aa1c02da549/pyhunter/pyhunter.py#L463-L478
def delete_leads_list(self, leads_list_id):
    """
    Delete a leads list.

    :param leads_list_id: The id of the list to delete.

    :return: 204 Response.
    """
    params = self.base_params
    endpoint = self.base_endpoint.format(
        'leads_lists/' + str(leads_list_id)
    )
    return self._query_hunter(endpoint, params, 'delete')
[ "def", "delete_leads_list", "(", "self", ",", "leads_list_id", ")", ":", "params", "=", "self", ".", "base_params", "endpoint", "=", "self", ".", "base_endpoint", ".", "format", "(", "'leads_lists/'", "+", "str", "(", "leads_list_id", ")", ")", "return", "se...
Delete a leads list. :param leads_list_id: The id of the list to delete. :return: 204 Response.
[ "Delete", "a", "leads", "list", "." ]
python
train
sdispater/poetry
get-poetry.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/get-poetry.py#L466-L490
def make_lib(self, version):
    """
    Packs everything into a single lib/ directory.
    """
    if os.path.exists(POETRY_LIB_BACKUP):
        shutil.rmtree(POETRY_LIB_BACKUP)

    # Backup the current installation
    if os.path.exists(POETRY_LIB):
        shutil.copytree(POETRY_LIB, POETRY_LIB_BACKUP)
        shutil.rmtree(POETRY_LIB)

    try:
        self._make_lib(version)
    except Exception:
        if not os.path.exists(POETRY_LIB_BACKUP):
            raise

        shutil.copytree(POETRY_LIB_BACKUP, POETRY_LIB)
        shutil.rmtree(POETRY_LIB_BACKUP)

        raise
    finally:
        if os.path.exists(POETRY_LIB_BACKUP):
            shutil.rmtree(POETRY_LIB_BACKUP)
[ "def", "make_lib", "(", "self", ",", "version", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "POETRY_LIB_BACKUP", ")", ":", "shutil", ".", "rmtree", "(", "POETRY_LIB_BACKUP", ")", "# Backup the current installation", "if", "os", ".", "path", ".", ...
Packs everything into a single lib/ directory.
[ "Packs", "everything", "into", "a", "single", "lib", "/", "directory", "." ]
python
train
saltstack/salt
salt/modules/omapi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/omapi.py#L102-L130
def delete_host(mac=None, name=None):
    '''
    Delete the host with the given mac or name.

    CLI Examples:

    .. code-block:: bash

        salt dhcp-server omapi.delete_host name=host1
        salt dhcp-server omapi.delete_host mac=ab:ab:ab:ab:ab:ab
    '''
    if not (mac or name):
        raise TypeError('At least one argument is required')
    o = _conn()
    msg = omapi.OmapiMessage.open(b'host')
    if mac:
        msg.obj.append((b'hardware-address', omapi.pack_mac(mac)))
        msg.obj.append((b'hardware-type', struct.pack(b'!I', 1)))
    if name:
        msg.obj.append((b'name', salt.utils.stringutils.to_bytes(name)))
    response = o.query_server(msg)
    if response.opcode != omapi.OMAPI_OP_UPDATE:
        return None
    if response.handle == 0:
        return False
    response = o.query_server(omapi.OmapiMessage.delete(response.handle))
    if response.opcode != omapi.OMAPI_OP_STATUS:
        return False
    return True
[ "def", "delete_host", "(", "mac", "=", "None", ",", "name", "=", "None", ")", ":", "if", "not", "(", "mac", "or", "name", ")", ":", "raise", "TypeError", "(", "'At least one argument is required'", ")", "o", "=", "_conn", "(", ")", "msg", "=", "omapi",...
Delete the host with the given mac or name. CLI Examples: .. code-block:: bash salt dhcp-server omapi.delete_host name=host1 salt dhcp-server omapi.delete_host mac=ab:ab:ab:ab:ab:ab
[ "Delete", "the", "host", "with", "the", "given", "mac", "or", "name", "." ]
python
train
elifesciences/elife-tools
elifetools/parseJATS.py
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L879-L906
def inline_graphics(soup):
    """
    inline-graphic tags
    """
    inline_graphics = []

    inline_graphic_tags = raw_parser.inline_graphic(soup)
    position = 1
    for tag in inline_graphic_tags:
        item = {}

        copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')

        # Get the tag type
        nodenames = ["sub-article"]
        details = tag_details(tag, nodenames)
        copy_attribute(details, 'type', item)

        # Increment the position
        item['position'] = position
        position += 1  # advance for the next tag; otherwise every item would get position 1

        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)

        inline_graphics.append(item)

    return inline_graphics
[ "def", "inline_graphics", "(", "soup", ")", ":", "inline_graphics", "=", "[", "]", "inline_graphic_tags", "=", "raw_parser", ".", "inline_graphic", "(", "soup", ")", "position", "=", "1", "for", "tag", "in", "inline_graphic_tags", ":", "item", "=", "{", "}",...
inline-graphic tags
[ "inline", "-", "graphic", "tags" ]
python
train
pybel/pybel-tools
src/pybel_tools/utils.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/utils.py#L140-L159
def calculate_global_tanimoto_set_distances(dict_of_sets: Mapping[X, Set]) -> Mapping[X, Mapping[X, float]]:
    r"""Calculate an alternative distance matrix based on the following equation.

    .. math:: distance(A, B)=1- \|A \cup B\| / \| \cup_{s \in S} s\|

    :param dict_of_sets: A dict of {x: set of y}
    :return: A similarity matrix based on the alternative tanimoto distance as a dict of dicts
    """
    universe = set(itt.chain.from_iterable(dict_of_sets.values()))
    universe_size = len(universe)

    result: Dict[X, Dict[X, float]] = defaultdict(dict)

    for x, y in itt.combinations(dict_of_sets, 2):
        result[x][y] = result[y][x] = 1.0 - len(dict_of_sets[x] | dict_of_sets[y]) / universe_size

    for x in dict_of_sets:
        # Self-distance uses the size of the set, per the formula above (not the length of the key)
        result[x][x] = 1.0 - len(dict_of_sets[x]) / universe_size

    return dict(result)
[ "def", "calculate_global_tanimoto_set_distances", "(", "dict_of_sets", ":", "Mapping", "[", "X", ",", "Set", "]", ")", "->", "Mapping", "[", "X", ",", "Mapping", "[", "X", ",", "float", "]", "]", ":", "universe", "=", "set", "(", "itt", ".", "chain", "...
r"""Calculate an alternative distance matrix based on the following equation. .. math:: distance(A, B)=1- \|A \cup B\| / \| \cup_{s \in S} s\| :param dict_of_sets: A dict of {x: set of y} :return: A similarity matrix based on the alternative tanimoto distance as a dict of dicts
[ "r", "Calculate", "an", "alternative", "distance", "matrix", "based", "on", "the", "following", "equation", "." ]
python
valid
dw/mitogen
mitogen/fork.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/fork.py#L92-L105
def on_fork():
    """
    Should be called by any program integrating Mitogen each time the process
    is forked, in the context of the new child.
    """
    reset_logging_framework()  # Must be first!
    fixup_prngs()
    mitogen.core.Latch._on_fork()
    mitogen.core.Side._on_fork()
    mitogen.core.ExternalContext.service_stub_lock = threading.Lock()

    mitogen__service = sys.modules.get('mitogen.service')
    if mitogen__service:
        mitogen__service._pool_lock = threading.Lock()
[ "def", "on_fork", "(", ")", ":", "reset_logging_framework", "(", ")", "# Must be first!", "fixup_prngs", "(", ")", "mitogen", ".", "core", ".", "Latch", ".", "_on_fork", "(", ")", "mitogen", ".", "core", ".", "Side", ".", "_on_fork", "(", ")", "mitogen", ...
Should be called by any program integrating Mitogen each time the process is forked, in the context of the new child.
[ "Should", "be", "called", "by", "any", "program", "integrating", "Mitogen", "each", "time", "the", "process", "is", "forked", "in", "the", "context", "of", "the", "new", "child", "." ]
python
train
kivy/python-for-android
pythonforandroid/toolchain.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/toolchain.py#L818-L833
def clean_builds(self, _args):
    """Delete all build caches for each recipe, python-install, java code
    and compiled libs collection.

    This does *not* delete the package download cache or the final
    distributions. You can also use clean_recipe_build to delete the build
    of a specific recipe.
    """
    ctx = self.ctx
    if exists(ctx.build_dir):
        shutil.rmtree(ctx.build_dir)
    if exists(ctx.python_installs_dir):
        shutil.rmtree(ctx.python_installs_dir)
    libs_dir = join(self.ctx.build_dir, 'libs_collections')
    if exists(libs_dir):
        shutil.rmtree(libs_dir)
[ "def", "clean_builds", "(", "self", ",", "_args", ")", ":", "ctx", "=", "self", ".", "ctx", "if", "exists", "(", "ctx", ".", "build_dir", ")", ":", "shutil", ".", "rmtree", "(", "ctx", ".", "build_dir", ")", "if", "exists", "(", "ctx", ".", "python...
Delete all build caches for each recipe, python-install, java code and compiled libs collection. This does *not* delete the package download cache or the final distributions. You can also use clean_recipe_build to delete the build of a specific recipe.
[ "Delete", "all", "build", "caches", "for", "each", "recipe", "python", "-", "install", "java", "code", "and", "compiled", "libs", "collection", "." ]
python
train
opendatateam/udata
udata/search/__init__.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/__init__.py#L167-L170
def unindex_model_on_delete(sender, document, **kwargs):
    '''Unindex Mongo document on post_delete'''
    if current_app.config.get('AUTO_INDEX'):
        unindex.delay(document)
[ "def", "unindex_model_on_delete", "(", "sender", ",", "document", ",", "*", "*", "kwargs", ")", ":", "if", "current_app", ".", "config", ".", "get", "(", "'AUTO_INDEX'", ")", ":", "unindex", ".", "delay", "(", "document", ")" ]
Unindex Mongo document on post_delete
[ "Unindex", "Mongo", "document", "on", "post_delete" ]
python
train
ic-labs/django-icekit
icekit/content_collections/page_type_plugins.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/content_collections/page_type_plugins.py#L13-L18
def get_context(self, request, page, **kwargs):
    """ Include in context items to be visible on listing page """
    context = super(ListingPagePlugin, self).get_context(
        request, page, **kwargs)
    context['items_to_list'] = page.get_items_to_list(request)
    return context
[ "def", "get_context", "(", "self", ",", "request", ",", "page", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", "ListingPagePlugin", ",", "self", ")", ".", "get_context", "(", "request", ",", "page", ",", "*", "*", "kwargs", ")", "co...
Include in context items to be visible on listing page
[ "Include", "in", "context", "items", "to", "be", "visible", "on", "listing", "page" ]
python
train
patarapolw/AnkiTools
AnkiTools/tools/sampling.py
https://github.com/patarapolw/AnkiTools/blob/fab6836dfd9cf5171d9cbff5c55fbb14d2786f05/AnkiTools/tools/sampling.py#L15-L90
def get_representative_json(file_input=None, formatted=False, annotate_is_json=False,
                            sampling_substitution_regex=('(.+)', '\\1_sample'),
                            do_not_sample=('sqlite_stat1', ),
                            sampling_limits=None):
    """
    :param None|str file_input:
    :param bool formatted:
    :param bool annotate_is_json:
    :param tuple sampling_substitution_regex: to shorten string by one,
        try ('(.+).{1}', '\\1') or ('(.+)s', '\\1')
    :param list|tuple do_not_sample:
    :param None|dict sampling_limits:
    :return:
    """
    if file_input is None:
        file_input = get_collection_path()

    source = file_input

    if sampling_limits is None:
        sampling_limits = {
            'notes': 10,
            'cards': 10
        }

    if os.path.splitext(file_input)[1] == '.apkg':
        from AnkiTools.convert import anki_convert

        tempdir = mkdtemp()
        temp_anki2 = os.path.join(tempdir, 'temp.anki2')

        anki_convert(file_input, out_file=temp_anki2)
        file_input = temp_anki2

    output_json = OrderedDict(
        _meta=OrderedDict(
            generated=datetime.fromtimestamp(datetime.now().timestamp()).isoformat(),
            source=os.path.abspath(source),
            data=OrderedDict()
        )
    )

    with sqlite3.connect(file_input) as conn:
        cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
        for row in cursor:
            table_name = row[0]
            key = table_name

            output = list(read_anki_table(conn, table_name))

            if table_name not in output_json['_meta']['data'].keys():
                output_json['_meta']['data'][table_name] = OrderedDict()
            output_json['_meta']['data'][table_name]['number_of_entries'] = len(output)

            if len(output) >= 1:
                if len(output) > 1:
                    if table_name in do_not_sample:
                        output_json[key] = output
                    else:
                        re_match, re_replace = sampling_substitution_regex
                        key = re.sub(re_match, re_replace, key)
                        output_json[key] = random.sample(output, sampling_limits.get(table_name, 10))
                else:
                    output_json[key] = output[0]

                if formatted:
                    to_format = output_json[key]
                    if isinstance(output_json[key], (dict, OrderedDict)):
                        _format_representative_json(to_format, annotate_is_json)
                    else:
                        for item in to_format:
                            _format_representative_json(item, annotate_is_json)
            else:
                output_json[key] = None

    return output_json
[ "def", "get_representative_json", "(", "file_input", "=", "None", ",", "formatted", "=", "False", ",", "annotate_is_json", "=", "False", ",", "sampling_substitution_regex", "=", "(", "'(.+)'", ",", "'\\\\1_sample'", ")", ",", "do_not_sample", "=", "(", "'sqlite_st...
:param None|str file_input: :param bool formatted: :param bool annotate_is_json: :param tuple sampling_substitution_regex: to shorten string by one, try ('(.+).{1}', '\\1') or ('(.+)s', '\\1') :param list|tuple do_not_sample: :param None|dict sampling_limits: :return:
[ ":", "param", "None|str", "file_input", ":", ":", "param", "bool", "formatted", ":", ":", "param", "bool", "annotate_is_json", ":", ":", "param", "tuple", "sampling_substitution_regex", ":", "to", "shorten", "string", "by", "one", "try", "(", "(", ".", "+", ...
python
train
Clinical-Genomics/scout
scout/adapter/mongo/case.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/case.py#L142-L161
def nr_cases(self, institute_id=None):
    """Return the number of cases

    This function will change when we migrate to 3.7.1

    Args:
        collaborator(str): Institute id

    Returns:
        nr_cases(int)
    """
    query = {}

    if institute_id:
        query['collaborators'] = institute_id

    LOG.debug("Fetch all cases with query {0}".format(query))
    nr_cases = self.case_collection.find(query).count()

    return nr_cases
[ "def", "nr_cases", "(", "self", ",", "institute_id", "=", "None", ")", ":", "query", "=", "{", "}", "if", "institute_id", ":", "query", "[", "'collaborators'", "]", "=", "institute_id", "LOG", ".", "debug", "(", "\"Fetch all cases with query {0}\"", ".", "fo...
Return the number of cases This function will change when we migrate to 3.7.1 Args: collaborator(str): Institute id Returns: nr_cases(int)
[ "Return", "the", "number", "of", "cases" ]
python
test
openstax/cnx-archive
cnxarchive/database.py
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L174-L189
def get_minor_version(module_ident, plpy):
    """Retrieve minor version only given module_ident."""
    # Make sure to always return the max minor version that is already in the
    # database, in case the given module_ident is not the latest version
    plan = plpy.prepare('''\
        WITH t AS (
            SELECT uuid, major_version
            FROM modules
            WHERE module_ident = $1
        )
        SELECT MAX(m.minor_version) AS minor_version
        FROM modules m, t
        WHERE m.uuid = t.uuid
          AND m.major_version = t.major_version
        ''', ('integer',))
    results = plpy.execute(plan, (module_ident,), 1)
    return results[0]['minor_version']
[ "def", "get_minor_version", "(", "module_ident", ",", "plpy", ")", ":", "# Make sure to always return the max minor version that is already in the", "# database, in case the given module_ident is not the latest version", "plan", "=", "plpy", ".", "prepare", "(", "'''\\\n WITH ...
Retrieve minor version only given module_ident.
[ "Retrieve", "minor", "version", "only", "given", "module_ident", "." ]
python
train
metric-learn/metric-learn
metric_learn/mmc.py
https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/mmc.py#L305-L316
def _fS1(self, pos_pairs, A):
    """The gradient of the similarity constraint function w.r.t. A.

    f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij'
    df/dA = d(d_ij*A*d_ij')/dA

    Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
    so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
    """
    dim = pos_pairs.shape[2]
    diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :]
    return np.einsum('ij,ik->jk', diff, diff)
[ "def", "_fS1", "(", "self", ",", "pos_pairs", ",", "A", ")", ":", "dim", "=", "pos_pairs", ".", "shape", "[", "2", "]", "diff", "=", "pos_pairs", "[", ":", ",", "0", ",", ":", "]", "-", "pos_pairs", "[", ":", ",", "1", ",", ":", "]", "return"...
The gradient of the similarity constraint function w.r.t. A. f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij' df/dA = d(d_ij*A*d_ij')/dA Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A) so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
[ "The", "gradient", "of", "the", "similarity", "constraint", "function", "w", ".", "r", ".", "t", ".", "A", "." ]
python
train
xray7224/PyPump
pypump/models/person.py
https://github.com/xray7224/PyPump/blob/f921f691c39fe021f4fd124b6bc91718c9e49b4a/pypump/models/person.py#L106-L120
def favorites(self):
    """ :class:`Feed <pypump.models.feed.Feed>` with all objects
    liked/favorited by the person.

    Example:
        >>> for like in pump.me.favorites[:3]:
        ...     print(like)
        ...
        note by alice@example.org
        image by bob@example.org
        comment by evan@e14n.com
    """
    if self._favorites is None:
        self._favorites = Favorites(self.links['favorites'], pypump=self._pump)
    return self._favorites
[ "def", "favorites", "(", "self", ")", ":", "if", "self", ".", "_favorites", "is", "None", ":", "self", ".", "_favorites", "=", "Favorites", "(", "self", ".", "links", "[", "'favorites'", "]", ",", "pypump", "=", "self", ".", "_pump", ")", "return", "...
:class:`Feed <pypump.models.feed.Feed>` with all objects liked/favorited by the person. Example: >>> for like in pump.me.favorites[:3]: ... print(like) ... note by alice@example.org image by bob@example.org comment by evan@e14n.com
[ ":", "class", ":", "Feed", "<pypump", ".", "models", ".", "feed", ".", "Feed", ">", "with", "all", "objects", "liked", "/", "favorited", "by", "the", "person", "." ]
python
train
nephila/djangocms-installer
djangocms_installer/install/__init__.py
https://github.com/nephila/djangocms-installer/blob/9fec66d5f8b1e9a0f3c0ec66dd777db578fab07e/djangocms_installer/install/__init__.py#L15-L79
def check_install(config_data):
    """
    Here we do some **really** basic environment sanity checks.

    Basically we test for the more delicate and failing-prone dependencies:

    * database driver
    * Pillow image format support

    Many other errors will go undetected
    """
    errors = []

    # PIL tests
    try:
        from PIL import Image

        try:
            im = Image.open(os.path.join(os.path.dirname(__file__), '../share/test_image.png'))
            im.load()
        except IOError:  # pragma: no cover
            errors.append(
                'Pillow is not compiled with PNG support, see "Libraries installation issues" '
                'documentation section: https://djangocms-installer.readthedocs.io/en/latest/'
                'libraries.html.'
            )

        try:
            im = Image.open(os.path.join(os.path.dirname(__file__), '../share/test_image.jpg'))
            im.load()
        except IOError:  # pragma: no cover
            errors.append(
                'Pillow is not compiled with JPEG support, see "Libraries installation issues" '
                'documentation section: https://djangocms-installer.readthedocs.io/en/latest/'
                'libraries.html'
            )
    except ImportError:  # pragma: no cover
        errors.append(
            'Pillow is not installed check for installation errors and see "Libraries installation'
            ' issues" documentation section: https://djangocms-installer.readthedocs.io/en/latest/'
            'libraries.html'
        )

    # PostgreSQL test
    if config_data.db_driver == 'psycopg2' and not config_data.no_db_driver:  # pragma: no cover
        try:
            import psycopg2  # NOQA
        except ImportError:
            errors.append(
                'PostgreSQL driver is not installed, but you configured a PostgreSQL database, '
                'please check your installation and see "Libraries installation issues" '
                'documentation section: https://djangocms-installer.readthedocs.io/en/latest/'
                'libraries.html'
            )

    # MySQL test
    if config_data.db_driver == 'mysqlclient' and not config_data.no_db_driver:  # pragma: no cover  # NOQA
        try:
            import MySQLdb  # NOQA
        except ImportError:
            errors.append(
                'MySQL driver is not installed, but you configured a MySQL database, please check '
                'your installation and see "Libraries installation issues" documentation section: '
                'https://djangocms-installer.readthedocs.io/en/latest/libraries.html'
            )

    if errors:  # pragma: no cover
        raise EnvironmentError('\n'.join(errors))
[ "def", "check_install", "(", "config_data", ")", ":", "errors", "=", "[", "]", "# PIL tests", "try", ":", "from", "PIL", "import", "Image", "try", ":", "im", "=", "Image", ".", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", "....
Here we do some **really** basic environment sanity checks. Basically we test for the more delicate and failing-prone dependencies: * database driver * Pillow image format support Many other errors will go undetected
[ "Here", "we", "do", "some", "**", "really", "**", "basic", "environment", "sanity", "checks", "." ]
python
valid
hid-io/layouts-python
layouts/__init__.py
https://github.com/hid-io/layouts-python/blob/b347578bfb4198fd812ecd7a2d9c7e551a856280/layouts/__init__.py#L413-L456
def compose(self, text, minimal_clears=False, no_clears=False):
    '''
    Returns the sequence of combinations necessary to compose given text.
    If the text expression is not possible with the given layout, a
    ComposeException is thrown.

    Iterate over the string, converting each character into a key sequence.
    Between each character, an empty combo is inserted to handle duplicate
    strings (and USB HID codes between characters)

    @param text: Input UTF-8 string
    @param minimal_clears: Set to True to minimize the number of code clears.
                           False (default) includes a clear after every character.
    @param no_clears: Set to True to not add any code clears (useful for
                      input sequences). False (default) to include code clears.

    @returns: Sequence of combinations needed to generate the given text string
    '''
    sequence = []

    clear = self.json_data['to_hid_keyboard']['0x00']  # No Event

    for char in text:
        # Make sure the composition element is available
        if char not in self.json_data['composition']:
            raise ComposeException("'{}' is not defined as a composition in the layout '{}'".format(char, self.name))

        # Lookup the sequence to handle this character
        lookup = self.json_data['composition'][char]

        # If using minimal clears, check to see if we need to re-use any codes
        # Only need to check the most recent addition with the first combo
        if sequence and set(tuple(lookup[0])) & set(tuple(sequence[-1])) and not no_clears:
            sequence.extend([[clear]])

        # Add to overall sequence
        sequence.extend(lookup)

        # Add empty combo for sequence splitting
        if not minimal_clears and not no_clears:
            # Blindly add a clear combo between characters
            sequence.extend([[clear]])

    # When using minimal clears, we still need to add a final clear
    if minimal_clears and not no_clears:
        sequence.extend([[clear]])

    return sequence
[ "def", "compose", "(", "self", ",", "text", ",", "minimal_clears", "=", "False", ",", "no_clears", "=", "False", ")", ":", "sequence", "=", "[", "]", "clear", "=", "self", ".", "json_data", "[", "'to_hid_keyboard'", "]", "[", "'0x00'", "]", "# No Event",...
Returns the sequence of combinations necessary to compose given text. If the text expression is not possible with the given layout an ComposeException is thrown. Iterate over the string, converting each character into a key sequence. Between each character, an empty combo is inserted to handle duplicate strings (and USB HID codes between characters) @param text: Input UTF-8 string @param minimal_clears: Set to True to minimize the number of code clears. False (default) includes a clear after every character. @param no_clears: Set to True to not add any code clears (useful for input sequences). False (default) to include code clears. @returns: Sequence of combinations needed to generate the given text string
[ "Returns", "the", "sequence", "of", "combinations", "necessary", "to", "compose", "given", "text", "." ]
python
train
edx/ease
ease/feature_extractor.py
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/feature_extractor.py#L75-L95
def get_good_pos_ngrams(self):
    """
    Gets a set of grammatically correct part of speech sequences from an
    input file called essaycorpus.txt
    Returns the set and caches the file
    """
    if os.path.isfile(NGRAM_PATH):
        good_pos_ngrams = pickle.load(open(NGRAM_PATH, 'rb'))
    elif os.path.isfile(ESSAY_CORPUS_PATH):
        essay_corpus = open(ESSAY_CORPUS_PATH).read()
        essay_corpus = util_functions.sub_chars(essay_corpus)
        good_pos_ngrams = util_functions.regenerate_good_tokens(essay_corpus)
        pickle.dump(good_pos_ngrams, open(NGRAM_PATH, 'wb'))
    else:
        # Hard coded list in case the needed files cannot be found
        good_pos_ngrams = ['NN PRP', 'NN PRP .', 'NN PRP . DT', 'PRP .', 'PRP . DT',
                           'PRP . DT NNP', '. DT', '. DT NNP', '. DT NNP NNP',
                           'DT NNP', 'DT NNP NNP', 'DT NNP NNP NNP', 'NNP NNP',
                           'NNP NNP NNP', 'NNP NNP NNP NNP', 'NNP NNP NNP .',
                           'NNP NNP .', 'NNP NNP . TO', 'NNP .', 'NNP . TO',
                           'NNP . TO NNP', '. TO', '. TO NNP', '. TO NNP NNP',
                           'TO NNP', 'TO NNP NNP']

    return set(good_pos_ngrams)
[ "def", "get_good_pos_ngrams", "(", "self", ")", ":", "if", "(", "os", ".", "path", ".", "isfile", "(", "NGRAM_PATH", ")", ")", ":", "good_pos_ngrams", "=", "pickle", ".", "load", "(", "open", "(", "NGRAM_PATH", ",", "'rb'", ")", ")", "elif", "os", "....
Gets a set of gramatically correct part of speech sequences from an input file called essaycorpus.txt Returns the set and caches the file
[ "Gets", "a", "set", "of", "gramatically", "correct", "part", "of", "speech", "sequences", "from", "an", "input", "file", "called", "essaycorpus", ".", "txt", "Returns", "the", "set", "and", "caches", "the", "file" ]
python
valid
raiden-network/raiden
raiden/utils/cli.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/utils/cli.py#L354-L424
def apply_config_file(
    command_function: Union[click.Command, click.Group],
    cli_params: Dict[str, Any],
    ctx,
    config_file_option_name='config_file',
):
    """ Applies all options set in the config file to `cli_params` """
    paramname_to_param = {param.name: param for param in command_function.params}
    path_params = {
        param.name
        for param in command_function.params
        if isinstance(param.type, (click.Path, click.File))
    }

    config_file_path = Path(cli_params[config_file_option_name])
    config_file_values = dict()
    try:
        with config_file_path.open() as config_file:
            config_file_values = load(config_file)
    except OSError as ex:
        # Silently ignore if 'file not found' and the config file path is the default
        config_file_param = paramname_to_param[config_file_option_name]
        config_file_default_path = Path(
            config_file_param.type.expand_default(config_file_param.get_default(ctx), cli_params),
        )
        default_config_missing = (
            ex.errno == errno.ENOENT and
            config_file_path.resolve() == config_file_default_path.resolve()
        )
        if default_config_missing:
            cli_params['config_file'] = None
        else:
            click.secho(f"Error opening config file: {ex}", fg='red')
            sys.exit(1)
    except TomlError as ex:
        click.secho(f'Error loading config file: {ex}', fg='red')
        sys.exit(1)

    for config_name, config_value in config_file_values.items():
        config_name_int = config_name.replace('-', '_')

        if config_name_int not in paramname_to_param:
            click.secho(
                f"Unknown setting '{config_name}' found in config file - ignoring.",
                fg='yellow',
            )
            continue

        if config_name_int in path_params:
            # Allow users to use `~` in paths in the config file
            config_value = os.path.expanduser(config_value)

        if config_name_int == LOG_CONFIG_OPTION_NAME:
            # Uppercase log level names
            config_value = {k: v.upper() for k, v in config_value.items()}
        else:
            # Pipe config file values through cli converter to ensure correct types
            # We exclude `log-config` because it already is a dict when loading from toml
            try:
                config_value = paramname_to_param[config_name_int].type.convert(
                    config_value,
                    paramname_to_param[config_name_int],
                    ctx,
                )
            except click.BadParameter as ex:
                click.secho(f"Invalid config file setting '{config_name}': {ex}", fg='red')
                sys.exit(1)

        # Use the config file value if the value from the command line is the default
        if cli_params[config_name_int] == paramname_to_param[config_name_int].get_default(ctx):
            cli_params[config_name_int] = config_value
[ "def", "apply_config_file", "(", "command_function", ":", "Union", "[", "click", ".", "Command", ",", "click", ".", "Group", "]", ",", "cli_params", ":", "Dict", "[", "str", ",", "Any", "]", ",", "ctx", ",", "config_file_option_name", "=", "'config_file'", ...
Applies all options set in the config file to `cli_params`
[ "Applies", "all", "options", "set", "in", "the", "config", "file", "to", "cli_params" ]
python
train
h2oai/h2o-3
scripts/addjavamessage2ignore.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/scripts/addjavamessage2ignore.py#L273-L285
def save_dict():
    """
    Save the ignored java message dict stored in g_ok_java_messages into a
    pickle file for future use.

    :return: none
    """
    global g_ok_java_messages
    global g_save_java_message_filename
    global g_dict_changed

    if g_dict_changed:
        with open(g_save_java_message_filename, 'wb') as ofile:
            pickle.dump(g_ok_java_messages, ofile)
[ "def", "save_dict", "(", ")", ":", "global", "g_ok_java_messages", "global", "g_save_java_message_filename", "global", "g_dict_changed", "if", "g_dict_changed", ":", "with", "open", "(", "g_save_java_message_filename", ",", "'wb'", ")", "as", "ofile", ":", "pickle", ...
Save the ignored java message dict stored in g_ok_java_messages into a pickle file for future use. :return: none
[ "Save", "the", "ignored", "java", "message", "dict", "stored", "in", "g_ok_java_messages", "into", "a", "pickle", "file", "for", "future", "use", "." ]
python
test
awslabs/sockeye
sockeye/utils.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/utils.py#L791-L800
def read_metrics_file(path: str) -> List[Dict[str, Any]]:
    """
    Reads lines metrics file and returns list of mappings of key and values.

    :param path: File to read metric values from.
    :return: Dictionary of metric names (e.g. perplexity-train) mapping to a list of values.
    """
    with open(path) as fin:
        metrics = [parse_metrics_line(i, line.strip()) for i, line in enumerate(fin, 1)]
    return metrics
[ "def", "read_metrics_file", "(", "path", ":", "str", ")", "->", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "with", "open", "(", "path", ")", "as", "fin", ":", "metrics", "=", "[", "parse_metrics_line", "(", "i", ",", "line", ".", ...
Reads lines metrics file and returns list of mappings of key and values. :param path: File to read metric values from. :return: Dictionary of metric names (e.g. perplexity-train) mapping to a list of values.
[ "Reads", "lines", "metrics", "file", "and", "returns", "list", "of", "mappings", "of", "key", "and", "values", "." ]
python
train
tammoippen/geohash-hilbert
geohash_hilbert/_int2str.py
https://github.com/tammoippen/geohash-hilbert/blob/b74f0fc1bff0234d8ff367e4129c3324676b0b36/geohash_hilbert/_int2str.py#L52-L71
def decode_int(tag, bits_per_char=6):
    """Decode string into int assuming encoding with `encode_int()`

    It is using 2, 4 or 6 bits per coding character (default 6).

    Parameters:
        tag: str            Encoded integer.
        bits_per_char: int  The number of bits per coding character.

    Returns:
        int: the decoded string
    """
    if bits_per_char == 6:
        return _decode_int64(tag)
    if bits_per_char == 4:
        return _decode_int16(tag)
    if bits_per_char == 2:
        return _decode_int4(tag)

    raise ValueError('`bits_per_char` must be in {6, 4, 2}')
[ "def", "decode_int", "(", "tag", ",", "bits_per_char", "=", "6", ")", ":", "if", "bits_per_char", "==", "6", ":", "return", "_decode_int64", "(", "tag", ")", "if", "bits_per_char", "==", "4", ":", "return", "_decode_int16", "(", "tag", ")", "if", "bits_p...
Decode string into int assuming encoding with `encode_int()` It is using 2, 4 or 6 bits per coding character (default 6). Parameters: tag: str Encoded integer. bits_per_char: int The number of bits per coding character. Returns: int: the decoded string
[ "Decode", "string", "into", "int", "assuming", "encoding", "with", "encode_int", "()" ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/journal/consensus/consensus_factory.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/journal/consensus/consensus_factory.py#L59-L78
def get_configured_consensus_module(block_id, state_view):
    """Returns the consensus_module based on the consensus module set by
    the "sawtooth_settings" transaction family.

    Args:
        block_id (str): the block id associated with the current state_view
        state_view (:obj:`StateView`): the current state view to use for
            setting values
    Raises:
        UnknownConsensusModuleError: Thrown when an invalid consensus
            module has been configured.
    """
    settings_view = SettingsView(state_view)

    default_consensus = \
        'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode'
    consensus_module_name = settings_view.get_setting(
        'sawtooth.consensus.algorithm', default_value=default_consensus)
    return ConsensusFactory.get_consensus_module(
        consensus_module_name)
[ "def", "get_configured_consensus_module", "(", "block_id", ",", "state_view", ")", ":", "settings_view", "=", "SettingsView", "(", "state_view", ")", "default_consensus", "=", "'genesis'", "if", "block_id", "==", "NULL_BLOCK_IDENTIFIER", "else", "'devmode'", "consensus_...
Returns the consensus_module based on the consensus module set by the "sawtooth_settings" transaction family. Args: block_id (str): the block id associated with the current state_view state_view (:obj:`StateView`): the current state view to use for setting values Raises: UnknownConsensusModuleError: Thrown when an invalid consensus module has been configured.
[ "Returns", "the", "consensus_module", "based", "on", "the", "consensus", "module", "set", "by", "the", "sawtooth_settings", "transaction", "family", "." ]
python
train
CI-WATER/gsshapy
gsshapy/orm/wms_dataset.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L144-L180
def write(self, session, directory, name, maskMap):
    """
    Write from database to file.

    *session* = SQLAlchemy session object\n
    *directory* = to which directory will the files be written (e.g.: '/example/path')\n
    *name* = name of file that will be written (e.g.: 'my_project.ext')\n
    """
    # Assemble Path to file
    name_split = name.split('.')
    name = name_split[0]

    # Default extension
    extension = ''

    if len(name_split) >= 2:
        extension = name_split[-1]

    # Run name preprocessor method if present
    try:
        name = self._namePreprocessor(name)
    except Exception:
        pass  # no preprocessor defined; keep the name as-is

    if extension == '':
        filename = '{0}.{1}'.format(name, self.fileExtension)
    else:
        filename = '{0}.{1}'.format(name, extension)

    filePath = os.path.join(directory, filename)

    with open(filePath, 'w') as openFile:
        # Write Lines
        self._write(session=session, openFile=openFile, maskMap=maskMap)
[ "def", "write", "(", "self", ",", "session", ",", "directory", ",", "name", ",", "maskMap", ")", ":", "# Assemble Path to file", "name_split", "=", "name", ".", "split", "(", "'.'", ")", "name", "=", "name_split", "[", "0", "]", "# Default extension", "ext...
Write from database to file. *session* = SQLAlchemy session object\n *directory* = to which directory will the files be written (e.g.: '/example/path')\n *name* = name of file that will be written (e.g.: 'my_project.ext')\n
[ "Write", "from", "database", "to", "file", "." ]
python
train
googleapis/google-cloud-python
dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py#L180-L278
def create_cluster(
    self,
    project_id,
    region,
    cluster,
    request_id=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Creates a cluster in a project.

    Example:
        >>> from google.cloud import dataproc_v1beta2
        >>>
        >>> client = dataproc_v1beta2.ClusterControllerClient()
        >>>
        >>> # TODO: Initialize `project_id`:
        >>> project_id = ''
        >>>
        >>> # TODO: Initialize `region`:
        >>> region = ''
        >>>
        >>> # TODO: Initialize `cluster`:
        >>> cluster = {}
        >>>
        >>> response = client.create_cluster(project_id, region, cluster)
        >>>
        >>> def callback(operation_future):
        ...     # Handle result.
        ...     result = operation_future.result()
        >>>
        >>> response.add_done_callback(callback)
        >>>
        >>> # Handle metadata.
        >>> metadata = response.metadata()

    Args:
        project_id (str): Required. The ID of the Google Cloud Platform project
            that the cluster belongs to.
        region (str): Required. The Cloud Dataproc region in which to handle the
            request.
        cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required.
            The cluster to create.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.dataproc_v1beta2.types.Cluster`
        request_id (str): Optional. A unique id used to identify the request. If the
            server receives two ``CreateClusterRequest`` requests with the same id,
            then the second request will be ignored and the first
            ``google.longrunning.Operation`` created and stored in the backend is
            returned.

            It is recommended to always set this value to a
            `UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.

            The id must contain only letters (a-z, A-Z), numbers (0-9),
            underscores (\_), and hyphens (-). The maximum length is 40 characters.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "create_cluster" not in self._inner_api_calls:
        self._inner_api_calls[
            "create_cluster"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.create_cluster,
            default_retry=self._method_configs["CreateCluster"].retry,
            default_timeout=self._method_configs["CreateCluster"].timeout,
            client_info=self._client_info,
        )

    request = clusters_pb2.CreateClusterRequest(
        project_id=project_id, region=region, cluster=cluster, request_id=request_id
    )
    operation = self._inner_api_calls["create_cluster"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
    return google.api_core.operation.from_gapic(
        operation,
        self.transport._operations_client,
        clusters_pb2.Cluster,
        metadata_type=proto_operations_pb2.ClusterOperationMetadata,
    )
[ "def", "create_cluster", "(", "self", ",", "project_id", ",", "region", ",", "cluster", ",", "request_id", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api...
Creates a cluster in a project. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # TODO: Initialize `cluster`: >>> cluster = {} >>> >>> response = client.create_cluster(project_id, region, cluster) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The cluster to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.Cluster` request_id (str): Optional. A unique id used to identify the request. If the server receives two ``CreateClusterRequest`` requests with the same id, then the second request will be ignored and the first ``google.longrunning.Operation`` created and stored in the backend is returned. It is recommended to always set this value to a `UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (\_), and hyphens (-). The maximum length is 40 characters. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Creates", "a", "cluster", "in", "a", "project", "." ]
python
train
vtkiorg/vtki
vtki/filters.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/filters.py#L153-L196
def slice(dataset, normal='x', origin=None, generate_triangles=False,
          contour=False):
    """Slice a dataset by a plane at the specified origin and normal vector
    orientation. If no origin is specified, the center of the input dataset
    will be used.

    Parameters
    ----------
    normal : tuple(float) or str
        Length 3 tuple for the normal vector direction. Can also be
        specified as a string conventional direction such as ``'x'`` for
        ``(1,0,0)`` or ``'-x'`` for ``(-1,0,0)``, etc.

    origin : tuple(float)
        The center (x,y,z) coordinate of the plane on which the slice occurs

    generate_triangles: bool, optional
        If this is enabled (``False`` by default), the output will be
        triangles otherwise, the output will be the intersection polygons.

    contour : bool, optional
        If True, apply a ``contour`` filter after slicing
    """
    if isinstance(normal, str):
        normal = NORMALS[normal.lower()]
    # find center of data if origin not specified
    if origin is None:
        origin = dataset.center
    if not is_inside_bounds(origin, dataset.bounds):
        raise AssertionError('Slice is outside data bounds.')
    # create the plane for clipping
    plane = _generate_plane(normal, origin)
    # create slice
    alg = vtk.vtkCutter()  # Construct the cutter object
    alg.SetInputDataObject(dataset)  # Use the grid as the data we desire to cut
    alg.SetCutFunction(plane)  # Set the cutter to use the plane we made
    if not generate_triangles:
        alg.GenerateTrianglesOff()
    alg.Update()  # Perform the Cut
    output = _get_output(alg)
    if contour:
        return output.contour()
    return output
[ "def", "slice", "(", "dataset", ",", "normal", "=", "'x'", ",", "origin", "=", "None", ",", "generate_triangles", "=", "False", ",", "contour", "=", "False", ")", ":", "if", "isinstance", "(", "normal", ",", "str", ")", ":", "normal", "=", "NORMALS", ...
Slice a dataset by a plane at the specified origin and normal vector orientation. If no origin is specified, the center of the input dataset will be used. Parameters ---------- normal : tuple(float) or str Length 3 tuple for the normal vector direction. Can also be specified as a string conventional direction such as ``'x'`` for ``(1,0,0)`` or ``'-x'`` for ``(-1,0,0)```, etc. origin : tuple(float) The center (x,y,z) coordinate of the plane on which the slice occurs generate_triangles: bool, optional If this is enabled (``False`` by default), the output will be triangles otherwise, the output will be the intersection polygons. contour : bool, optional If True, apply a ``contour`` filter after slicing
[ "Slice", "a", "dataset", "by", "a", "plane", "at", "the", "specified", "origin", "and", "normal", "vector", "orientation", ".", "If", "no", "origin", "is", "specified", "the", "center", "of", "the", "input", "dataset", "will", "be", "used", "." ]
python
train
mbedmicro/pyOCD
pyocd/__main__.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/__main__.py#L308-L340
def run(self, args=None):
    """! @brief Main entry point for command line processing."""
    try:
        self._args = self.build_parser().parse_args(args)

        # Running without a subcommand will print usage.
        if self._args.cmd is None:
            if self._args.help_options:
                self.show_options_help()
            else:
                self._parser.print_help()
            return 1

        # The default log level differs for some subcommands.
        self._default_log_level = DEFAULT_CMD_LOG_LEVEL[self._args.cmd]
        self._setup_logging()

        # Pass any options to DAPAccess.
        if hasattr(self._args, 'daparg'):
            DAPAccess.set_args(self._args.daparg)

        # Invoke subcommand.
        self._COMMANDS[self._args.cmd](self)

        # Successful exit.
        return 0
    except KeyboardInterrupt:
        return 0
    except exceptions.Error as e:
        LOG.error(e, exc_info=Session.get_current().log_tracebacks)
    except Exception as e:
        LOG.error("uncaught exception: %s", e, exc_info=Session.get_current().log_tracebacks)
        return 1
[ "def", "run", "(", "self", ",", "args", "=", "None", ")", ":", "try", ":", "self", ".", "_args", "=", "self", ".", "build_parser", "(", ")", ".", "parse_args", "(", "args", ")", "# Running without a subcommand will print usage.", "if", "self", ".", "_args"...
! @brief Main entry point for command line processing.
[ "!" ]
python
train
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L1355-L1365
def mixin(self):
    """
    Add your own custom functions to the Underscore object, ensuring that
    they're correctly added to the OOP wrapper as well.
    """
    methods = self.obj
    for i, k in enumerate(methods):
        setattr(underscore, k, methods[k])

    self.makeStatic()
    return self._wrap(self.obj)
[ "def", "mixin", "(", "self", ")", ":", "methods", "=", "self", ".", "obj", "for", "i", ",", "k", "in", "enumerate", "(", "methods", ")", ":", "setattr", "(", "underscore", ",", "k", ",", "methods", "[", "k", "]", ")", "self", ".", "makeStatic", "...
Add your own custom functions to the Underscore object, ensuring that they're correctly added to the OOP wrapper as well.
[ "Add", "your", "own", "custom", "functions", "to", "the", "Underscore", "object", "ensuring", "that", "they", "re", "correctly", "added", "to", "the", "OOP", "wrapper", "as", "well", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/util.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/util.py#L1171-L1219
def base_taskname(taskname, packagename=None):
    """
    Extract the base name of the task.

    Many tasks in the `drizzlepac` have "compound" names such as
    'drizzlepac.sky'. This function will search for the presence of a dot
    in the input `taskname` and if found, it will return the string
    to the right of the right-most dot. If a dot is not found, it will
    return the input string.

    Parameters
    ----------
    taskname : str, None
        Full task name. If it is `None`, :py:func:`base_taskname` will
        return `None`\ .

    packagename : str, None (Default = None)
        Package name. It is assumed that a compound task name is formed by
        concatenating `packagename` + '.' + `taskname`\ . If `packagename`
        is not `None`, :py:func:`base_taskname` will check that the string
        to the left of the right-most dot matches `packagename` and will
        raise an `AssertionError` if the package name derived from the
        input `taskname` does not match the supplied `packagename`\ . This
        is intended as a check for discrepancies that may arise during the
        development of the tasks. If `packagename` is `None`, no such check
        will be performed.

    Raises
    ------
    AssertionError
        Raised when package name derived from the input `taskname` does not
        match the supplied `packagename`
    """
    if not isinstance(taskname, str):
        return taskname

    indx = taskname.rfind('.')

    if indx >= 0:
        base_taskname = taskname[(indx+1):]
        pkg_name = taskname[:indx]
    else:
        base_taskname = taskname
        pkg_name = ''

    assert(True if packagename is None else (packagename == pkg_name))

    return base_taskname
[ "def", "base_taskname", "(", "taskname", ",", "packagename", "=", "None", ")", ":", "if", "not", "isinstance", "(", "taskname", ",", "str", ")", ":", "return", "taskname", "indx", "=", "taskname", ".", "rfind", "(", "'.'", ")", "if", "indx", ">=", "0",...
Extract the base name of the task. Many tasks in the `drizzlepac` have "compound" names such as 'drizzlepac.sky'. This function will search for the presence of a dot in the input `taskname` and if found, it will return the string to the right of the right-most dot. If a dot is not found, it will return the input string. Parameters ---------- taskname : str, None Full task name. If it is `None`, :py:func:`base_taskname` will return `None`\ . packagename : str, None (Default = None) Package name. It is assumed that a compound task name is formed by concatenating `packagename` + '.' + `taskname`\ . If `packagename` is not `None`, :py:func:`base_taskname` will check that the string to the left of the right-most dot matches `packagename` and will raise an `AssertionError` if the package name derived from the input `taskname` does not match the supplied `packagename`\ . This is intended as a check for discrepancies that may arise during the development of the tasks. If `packagename` is `None`, no such check will be performed. Raises ------ AssertionError Raised when package name derived from the input `taskname` does not match the supplied `packagename`
[ "Extract", "the", "base", "name", "of", "the", "task", "." ]
python
train
ninja-build/ninja
misc/ninja_syntax.py
https://github.com/ninja-build/ninja/blob/2e64645749ff91eff2f999f03f55da360ae5913d/misc/ninja_syntax.py#L116-L150
def _line(self, text, indent=0):
    """Write 'text' word-wrapped at self.width characters."""
    leading_space = '  ' * indent
    while len(leading_space) + len(text) > self.width:
        # The text is too wide; wrap if possible.

        # Find the rightmost space that would obey our width constraint and
        # that's not an escaped space.
        available_space = self.width - len(leading_space) - len(' $')
        space = available_space
        while True:
            space = text.rfind(' ', 0, space)
            if (space < 0 or
                self._count_dollars_before_index(text, space) % 2 == 0):
                break

        if space < 0:
            # No such space; just use the first unescaped space we can find.
            space = available_space - 1
            while True:
                space = text.find(' ', space + 1)
                if (space < 0 or
                    self._count_dollars_before_index(text, space) % 2 == 0):
                    break
        if space < 0:
            # Give up on breaking.
            break

        self.output.write(leading_space + text[0:space] + ' $\n')
        text = text[space+1:]

        # Subsequent lines are continuations, so indent them.
        leading_space = '  ' * (indent+2)

    self.output.write(leading_space + text + '\n')
[ "def", "_line", "(", "self", ",", "text", ",", "indent", "=", "0", ")", ":", "leading_space", "=", "' '", "*", "indent", "while", "len", "(", "leading_space", ")", "+", "len", "(", "text", ")", ">", "self", ".", "width", ":", "# The text is too wide; ...
Write 'text' word-wrapped at self.width characters.
[ "Write", "text", "word", "-", "wrapped", "at", "self", ".", "width", "characters", "." ]
python
train
coinbase/coinbase-python
coinbase/wallet/client.py
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L421-L426
def create_report(self, **params):
    """https://developers.coinbase.com/api/v2#generate-a-new-report"""
    if 'type' not in params and 'email' not in params:
        raise ValueError("Missing required parameter: 'type' or 'email'")
    response = self._post('v2', 'reports', data=params)
    return self._make_api_object(response, Report)
[ "def", "create_report", "(", "self", ",", "*", "*", "params", ")", ":", "if", "'type'", "not", "in", "params", "and", "'email'", "not", "in", "params", ":", "raise", "ValueError", "(", "\"Missing required parameter: 'type' or 'email'\"", ")", "response", "=", ...
https://developers.coinbase.com/api/v2#generate-a-new-report
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#generate", "-", "a", "-", "new", "-", "report" ]
python
train
DataDog/integrations-core
nginx/datadog_checks/nginx/nginx.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/nginx/datadog_checks/nginx/nginx.py#L275-L320
def _flatten_json(cls, metric_base, val, tags):
    """
    Recursively flattens the nginx json object. Returns the following:
    [(metric_name, value, tags)]
    """
    output = []
    if isinstance(val, dict):
        # Pull out the server as a tag instead of trying to read as a metric
        if 'server' in val and val['server']:
            server = 'server:%s' % val.pop('server')
            if tags is None:
                tags = [server]
            else:
                tags = tags + [server]
        for key, val2 in iteritems(val):
            if key in TAGGED_KEYS:
                metric_name = '%s.%s' % (metric_base, TAGGED_KEYS[key])
                for tag_val, data in iteritems(val2):
                    tag = '%s:%s' % (TAGGED_KEYS[key], tag_val)
                    output.extend(cls._flatten_json(metric_name, data, tags + [tag]))
            else:
                metric_name = '%s.%s' % (metric_base, key)
                output.extend(cls._flatten_json(metric_name, val2, tags))

    elif isinstance(val, list):
        for val2 in val:
            output.extend(cls._flatten_json(metric_base, val2, tags))

    elif isinstance(val, bool):
        output.append((metric_base, int(val), tags, 'gauge'))

    elif isinstance(val, (int, float, long)):
        output.append((metric_base, val, tags, 'gauge'))

    elif isinstance(val, (text_type, str)):
        if val[-1] == "Z":
            try:
                # In the new Plus API, timestamps are now formatted
                # strings, some include microseconds, some don't...
                timestamp = fromisoformat(val[:19])
            except ValueError:
                pass
            else:
                output.append((metric_base, int((timestamp - EPOCH).total_seconds()), tags, 'gauge'))
    return output
[ "def", "_flatten_json", "(", "cls", ",", "metric_base", ",", "val", ",", "tags", ")", ":", "output", "=", "[", "]", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "# Pull out the server as a tag instead of trying to read as a metric", "if", "'server'", "...
Recursively flattens the nginx json object. Returns the following: [(metric_name, value, tags)]
[ "Recursively", "flattens", "the", "nginx", "json", "object", ".", "Returns", "the", "following", ":", "[", "(", "metric_name", "value", "tags", ")", "]" ]
python
train
nok/sklearn-porter
sklearn_porter/estimator/classifier/MLPClassifier/__init__.py
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/estimator/classifier/MLPClassifier/__init__.py#L89-L156
def export(self, class_name, method_name, export_data=False,
           export_dir='.', export_filename='data.json',
           export_append_checksum=False, **kwargs):
    """
    Port a trained estimator to the syntax of a chosen programming language.

    Parameters
    ----------
    :param class_name : string
        The name of the class in the returned result.

    :param method_name : string
        The name of the method in the returned result.

    :param export_data : bool, default: False
        Whether the model data should be saved or not.

    :param export_dir : string, default: '.' (current directory)
        The directory where the model data should be saved.

    :param export_filename : string, default: 'data.json'
        The filename of the exported model data.

    :param export_append_checksum : bool, default: False
        Whether to append the checksum to the filename or not.

    Returns
    -------
    :return : string
        The transpiled algorithm with the defined placeholders.
    """
    # Arguments:
    self.class_name = class_name
    self.method_name = method_name

    # Estimator:
    est = self.estimator

    self.output_activation = est.out_activation_
    self.hidden_activation = est.activation

    self.n_layers = est.n_layers_
    self.n_hidden_layers = est.n_layers_ - 2

    self.n_inputs = len(est.coefs_[0])
    self.n_outputs = est.n_outputs_

    self.hidden_layer_sizes = est.hidden_layer_sizes
    if isinstance(self.hidden_layer_sizes, int):
        self.hidden_layer_sizes = [self.hidden_layer_sizes]
    self.hidden_layer_sizes = list(self.hidden_layer_sizes)

    self.layer_units = \
        [self.n_inputs] + self.hidden_layer_sizes + [est.n_outputs_]

    # Weights:
    self.coefficients = est.coefs_

    # Bias:
    self.intercepts = est.intercepts_

    # Binary or multiclass classifier?
    self.is_binary = self.n_outputs == 1
    self.prefix = 'binary' if self.is_binary else 'multi'

    if self.target_method == 'predict':
        # Exported:
        if export_data and os.path.isdir(export_dir):
            self.export_data(export_dir, export_filename,
                             export_append_checksum)
            return self.predict('exported')
        # Separated:
        return self.predict('separated')
[ "def", "export", "(", "self", ",", "class_name", ",", "method_name", ",", "export_data", "=", "False", ",", "export_dir", "=", "'.'", ",", "export_filename", "=", "'data.json'", ",", "export_append_checksum", "=", "False", ",", "*", "*", "kwargs", ")", ":", ...
Port a trained estimator to the syntax of a chosen programming language. Parameters ---------- :param class_name : string The name of the class in the returned result. :param method_name : string The name of the method in the returned result. :param export_data : bool, default: False Whether the model data should be saved or not. :param export_dir : string, default: '.' (current directory) The directory where the model data should be saved. :param export_filename : string, default: 'data.json' The filename of the exported model data. :param export_append_checksum : bool, default: False Whether to append the checksum to the filename or not. Returns ------- :return : string The transpiled algorithm with the defined placeholders.
[ "Port", "a", "trained", "estimator", "to", "the", "syntax", "of", "a", "chosen", "programming", "language", "." ]
python
train
jut-io/jut-python-tools
jut/api/data_engine.py
https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L79-L89
def get_juttle_data_url(deployment_name, token_manager=None, app_url=defaults.APP_URL): """ return the juttle data url """ return get_data_url(deployment_name, endpoint_type='juttle', app_url=app_url, token_manager=token_manager)
[ "def", "get_juttle_data_url", "(", "deployment_name", ",", "token_manager", "=", "None", ",", "app_url", "=", "defaults", ".", "APP_URL", ")", ":", "return", "get_data_url", "(", "deployment_name", ",", "endpoint_type", "=", "'juttle'", ",", "app_url", "=", "app...
return the juttle data url
[ "return", "the", "juttle", "data", "url" ]
python
train
vaexio/vaex
packages/vaex-core/vaex/functions.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1080-L1110
def str_repeat(x, repeats): """Duplicate each string in a column. :param int repeats: number of times each string sample is to be duplicated. :returns: an expression containing the duplicated strings Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.repeat(3) Expression = str_repeat(text, 3) Length: 5 dtype: str (expression) --------------------------------- 0 SomethingSomethingSomething 1 very prettyvery prettyvery pretty 2 is comingis comingis coming 3 ourourour 4 way.way.way. """ sl = _to_string_sequence(x).repeat(repeats) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "def", "str_repeat", "(", "x", ",", "repeats", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "repeat", "(", "repeats", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ...
Duplicate each string in a column. :param int repeats: number of times each string sample is to be duplicated. :returns: an expression containing the duplicated strings Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.repeat(3) Expression = str_repeat(text, 3) Length: 5 dtype: str (expression) --------------------------------- 0 SomethingSomethingSomething 1 very prettyvery prettyvery pretty 2 is comingis comingis coming 3 ourourour 4 way.way.way.
[ "Duplicate", "each", "string", "in", "a", "column", "." ]
python
test
intuition-io/intuition
intuition/api/algorithm.py
https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/api/algorithm.py#L130-L139
def _call_one_middleware(self, middleware): ''' Evaluate arguments and execute the middleware function ''' args = {} for arg in middleware['args']: if hasattr(self, arg): # same as eval() but safer for arbitrary code execution args[arg] = reduce(getattr, arg.split('.'), self) self.logger.debug('calling middleware event {}' .format(middleware['name'])) middleware['call'](**args)
[ "def", "_call_one_middleware", "(", "self", ",", "middleware", ")", ":", "args", "=", "{", "}", "for", "arg", "in", "middleware", "[", "'args'", "]", ":", "if", "hasattr", "(", "self", ",", "arg", ")", ":", "# same as eval() but safer for arbitrary code execut...
Evaluate arguments and execute the middleware function
[ "Evaluate", "arguments", "and", "execute", "the", "middleware", "function" ]
python
train
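The reduce(getattr, arg.split('.'), self) idiom above resolves a dotted attribute path one hop at a time, which is why it is safer than eval(). A standalone illustration (the class and attribute names are made up for the demo):

from functools import reduce

class Portfolio:
    class cash:          # nested attribute purely for illustration
        amount = 100.0

# reduce(getattr, ['cash', 'amount'], obj) == getattr(getattr(obj, 'cash'), 'amount')
value = reduce(getattr, 'cash.amount'.split('.'), Portfolio)
print(value)  # 100.0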
minus7/asif
asif/bot.py
https://github.com/minus7/asif/blob/0d8acc5306ba93386ec679f69d466b56f099b877/asif/bot.py#L450-L457
def _bg(self, coro: coroutine) -> asyncio.Task: """Run coro in background, log errors""" async def runner(): try: await coro except: self._log.exception("async: Coroutine raised exception") return asyncio.ensure_future(runner())
[ "def", "_bg", "(", "self", ",", "coro", ":", "coroutine", ")", "->", "asyncio", ".", "Task", ":", "async", "def", "runner", "(", ")", ":", "try", ":", "await", "coro", "except", ":", "self", ".", "_log", ".", "exception", "(", "\"async: Coroutine raise...
Run coro in background, log errors
[ "Run", "coro", "in", "background", "log", "errors" ]
python
train
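The same wrap-and-log pattern works outside the bot. A runnable sketch, with the stdlib logger standing in for the bot's self._log:

import asyncio
import logging

log = logging.getLogger("bg")

def bg(coro):
    """Schedule coro in the background; exceptions are logged, not lost."""
    async def runner():
        try:
            await coro
        except Exception:
            log.exception("async: Coroutine raised exception")
    return asyncio.ensure_future(runner())

async def boom():
    raise RuntimeError("oops")

async def main():
    task = bg(boom())
    await task  # completes normally: the exception was caught and logged

logging.basicConfig()
asyncio.run(main())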
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L513-L520
def get_mem_total(self): """Calculate the total memory in the current service unit.""" with open('/proc/meminfo') as meminfo_file: for line in meminfo_file: key, mem = line.split(':', 2) if key == 'MemTotal': mtot, modifier = mem.strip().split(' ') return '%s%s' % (mtot, modifier[0].upper())
[ "def", "get_mem_total", "(", "self", ")", ":", "with", "open", "(", "'/proc/meminfo'", ")", "as", "meminfo_file", ":", "for", "line", "in", "meminfo_file", ":", "key", ",", "mem", "=", "line", ".", "split", "(", "':'", ",", "2", ")", "if", "key", "==...
Calculate the total memory in the current service unit.
[ "Calculate", "the", "total", "memory", "in", "the", "current", "service", "unit", "." ]
python
train
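The same parse can be reproduced with nothing but the stdlib on any Linux host (/proc/meminfo does not exist on macOS or Windows, so treat this as Linux-only):

def mem_total(path='/proc/meminfo'):
    # Lines look like "MemTotal:       16314828 kB"; one colon per line,
    # so maxsplit=1 is equivalent to the charm's split(':', 2).
    with open(path) as meminfo_file:
        for line in meminfo_file:
            key, mem = line.split(':', 1)
            if key == 'MemTotal':
                mtot, modifier = mem.strip().split(' ')
                return '%s%s' % (mtot, modifier[0].upper())

print(mem_total())  # e.g. '16314828K'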
pandas-dev/pandas
pandas/core/dtypes/missing.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/missing.py#L157-L181
def _use_inf_as_na(key): """Option change callback for na/inf behaviour Choose which replacement for numpy.isnan / -numpy.isfinite is used. Parameters ---------- flag: bool True means treat None, NaN, INF, -INF as null (old way), False means None and NaN are null, but INF, -INF are not null (new way). Notes ----- This approach to setting global module values is discussed and approved here: * http://stackoverflow.com/questions/4859217/ programmatically-creating-variables-in-python/4859312#4859312 """ from pandas._config import get_option flag = get_option(key) if flag: globals()['_isna'] = _isna_old else: globals()['_isna'] = _isna_new
[ "def", "_use_inf_as_na", "(", "key", ")", ":", "from", "pandas", ".", "_config", "import", "get_option", "flag", "=", "get_option", "(", "key", ")", "if", "flag", ":", "globals", "(", ")", "[", "'_isna'", "]", "=", "_isna_old", "else", ":", "globals", ...
Option change callback for na/inf behaviour Choose which replacement for numpy.isnan / -numpy.isfinite is used. Parameters ---------- flag: bool True means treat None, NaN, INF, -INF as null (old way), False means None and NaN are null, but INF, -INF are not null (new way). Notes ----- This approach to setting global module values is discussed and approved here: * http://stackoverflow.com/questions/4859217/ programmatically-creating-variables-in-python/4859312#4859312
[ "Option", "change", "callback", "for", "na", "/", "inf", "behaviour", "Choose", "which", "replacement", "for", "numpy", ".", "isnan", "/", "-", "numpy", ".", "isfinite", "is", "used", "." ]
python
train
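The option this callback backs can be observed directly. A short demonstration, assuming a pandas version in which 'mode.use_inf_as_na' is still available (it was deprecated in later releases):

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.inf, np.nan])
print(pd.isna(s).tolist())              # [False, False, True]  (new way)

with pd.option_context('mode.use_inf_as_na', True):
    print(pd.isna(s).tolist())          # [False, True, True]   (old way)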
RJT1990/pyflux
pyflux/families/cauchy.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/cauchy.py#L188-L212
def markov_blanket(y, mean, scale, shape, skewness): """ Markov blanket for each likelihood term - used for state space models Parameters ---------- y : np.ndarray univariate time series mean : np.ndarray array of location parameters for the Cauchy distribution scale : float scale parameter for the Cauchy distribution shape : float tail thickness parameter for the Cauchy distribution skewness : float skewness parameter for the Cauchy distribution Returns ---------- - Markov blanket of the Cauchy family """ return ss.cauchy.logpdf(y, loc=mean, scale=scale)
[ "def", "markov_blanket", "(", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "ss", ".", "cauchy", ".", "logpdf", "(", "y", ",", "loc", "=", "mean", ",", "scale", "=", "scale", ")" ]
Markov blanket for each likelihood term - used for state space models Parameters ---------- y : np.ndarray univariate time series mean : np.ndarray array of location parameters for the Cauchy distribution scale : float scale parameter for the Cauchy distribution shape : float tail thickness parameter for the Cauchy distribution skewness : float skewness parameter for the Cauchy distribution Returns ---------- - Markov blanket of the Cauchy family
[ "Markov", "blanket", "for", "each", "likelihood", "term", "-", "used", "for", "state", "space", "models" ]
python
train
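Because shape and skewness are ignored for the Cauchy family, the blanket reduces to an elementwise log-density, one likelihood term per observation:

import numpy as np
import scipy.stats as ss

y = np.array([0.1, -0.4, 2.0])
mean = np.zeros(3)          # one location parameter per state
print(ss.cauchy.logpdf(y, loc=mean, scale=1.0))
# array of three log-likelihood contributions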
Capitains/flask-capitains-nemo
flask_nemo/chunker.py
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/chunker.py#L5-L17
def default_chunker(text, getreffs): """ This is the default chunker which will resolve the reference giving a callback (getreffs) and a text object with its metadata :param text: Text Object representing either an edition or a translation :type text: MyCapytains.resources.inventory.Text :param getreffs: callback function which retrieves a list of references :type getreffs: function :return: List of urn references with their human readable version :rtype: [(str, str)] """ level = len(text.citation) return [tuple([reff.split(":")[-1]]*2) for reff in getreffs(level=level)]
[ "def", "default_chunker", "(", "text", ",", "getreffs", ")", ":", "level", "=", "len", "(", "text", ".", "citation", ")", "return", "[", "tuple", "(", "[", "reff", ".", "split", "(", "\":\"", ")", "[", "-", "1", "]", "]", "*", "2", ")", "for", ...
This is the default chunker which will resolve the reference giving a callback (getreffs) and a text object with its metadata :param text: Text Object representing either an edition or a translation :type text: MyCapytains.resources.inventory.Text :param getreffs: callback function which retrieves a list of references :type getreffs: function :return: List of urn references with their human readable version :rtype: [(str, str)]
[ "This", "is", "the", "default", "chunker", "which", "will", "resolve", "the", "reference", "giving", "a", "callback", "(", "getreffs", ")", "and", "a", "text", "object", "with", "its", "metadata" ]
python
valid
uchicago-cs/deepdish
deepdish/util/padding.py
https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/util/padding.py#L160-L197
def pad_repeat_border_corner(data, shape): """ Similar to `pad_repeat_border`, except the padding is always done on the upper end of each axis and the target size is specified. Parameters ---------- data : ndarray Numpy array of any dimension and type. shape : tuple Final shape of padded array. Should be tuple of length ``data.ndim``. If it has to pad unevenly, it will pad one more at the end of the axis than at the beginning. Examples -------- >>> import deepdish as dd >>> import numpy as np Pad an array by repeating its upper borders. >>> shape = (3, 4) >>> x = np.arange(np.prod(shape)).reshape(shape) >>> dd.util.pad_repeat_border_corner(x, (5, 5)) array([[ 0., 1., 2., 3., 3.], [ 4., 5., 6., 7., 7.], [ 8., 9., 10., 11., 11.], [ 8., 9., 10., 11., 11.], [ 8., 9., 10., 11., 11.]]) """ new_data = np.empty(shape) new_data[[slice(upper) for upper in data.shape]] = data for i in range(len(shape)): selection = [slice(None)]*i + [slice(data.shape[i], None)] selection2 = [slice(None)]*i + [slice(data.shape[i]-1, data.shape[i])] new_data[selection] = new_data[selection2] return new_data
[ "def", "pad_repeat_border_corner", "(", "data", ",", "shape", ")", ":", "new_data", "=", "np", ".", "empty", "(", "shape", ")", "new_data", "[", "[", "slice", "(", "upper", ")", "for", "upper", "in", "data", ".", "shape", "]", "]", "=", "data", "for"...
Similar to `pad_repeat_border`, except the padding is always done on the upper end of each axis and the target size is specified. Parameters ---------- data : ndarray Numpy array of any dimension and type. shape : tuple Final shape of padded array. Should be tuple of length ``data.ndim``. If it has to pad unevenly, it will pad one more at the end of the axis than at the beginning. Examples -------- >>> import deepdish as dd >>> import numpy as np Pad an array by repeating its upper borders. >>> shape = (3, 4) >>> x = np.arange(np.prod(shape)).reshape(shape) >>> dd.util.pad_repeat_border_corner(x, (5, 5)) array([[ 0., 1., 2., 3., 3.], [ 4., 5., 6., 7., 7.], [ 8., 9., 10., 11., 11.], [ 8., 9., 10., 11., 11.], [ 8., 9., 10., 11., 11.]])
[ "Similar", "to", "pad_repeat_border", "except", "the", "padding", "is", "always", "done", "on", "the", "upper", "end", "of", "each", "axis", "and", "the", "target", "size", "is", "specified", "." ]
python
train
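The border repeat relies on NumPy broadcasting: the slice just inside an edge has extent 1 along that axis, so assigning it into the wider padded region repeats it. A sketch that reproduces the docstring example; it indexes with tuple(...) because list-of-slices indexing is an error on modern NumPy:

import numpy as np

def pad_corner(data, shape):
    out = np.empty(shape, dtype=float)
    out[tuple(slice(n) for n in data.shape)] = data          # copy the original block
    for axis in range(len(shape)):
        src = [slice(None)] * axis + [slice(data.shape[axis] - 1, data.shape[axis])]
        dst = [slice(None)] * axis + [slice(data.shape[axis], None)]
        out[tuple(dst)] = out[tuple(src)]                    # broadcast-repeat the border
    return out

x = np.arange(12).reshape(3, 4)
print(pad_corner(x, (5, 5)))   # matches the docstring output above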
PolyJIT/benchbuild
benchbuild/container.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L48-L81
def setup_container(builddir, _container): """Prepare the container and returns the path where it can be found.""" build_dir = local.path(builddir) in_dir = build_dir / "container-in" container_path = local.path(_container) with local.cwd(builddir): container_bin = container_path.basename container_in = in_dir / container_bin download.Copy(_container, container_in) uchrt = uchroot.no_args() with local.cwd("container-in"): uchrt = uchrt["-E", "-A", "-u", "0", "-g", "0", "-C", "-r", "/", "-w", os.path.abspath("."), "--"] # Check, if we need erlent support for this archive. has_erlent = bash[ "-c", "tar --list -f './{0}' | grep --silent '.erlent'".format( container_in)] has_erlent = (has_erlent & TF) # Unpack input container to: container-in if not has_erlent: cmd = local["/bin/tar"]["xf"] cmd = uchrt[cmd[container_bin]] else: cmd = tar["xf"] cmd = cmd[container_in] with local.cwd("container-in"): cmd("--exclude=dev/*") rm(container_in) return in_dir
[ "def", "setup_container", "(", "builddir", ",", "_container", ")", ":", "build_dir", "=", "local", ".", "path", "(", "builddir", ")", "in_dir", "=", "build_dir", "/", "\"container-in\"", "container_path", "=", "local", ".", "path", "(", "_container", ")", "w...
Prepare the container and return the path where it can be found.
[ "Prepare", "the", "container", "and", "returns", "the", "path", "where", "it", "can", "be", "found", "." ]
python
train
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L361-L367
def _draw_placeholder(self): """To be used in QTreeView""" if self.model().rowCount() == 0: painter = QPainter(self.viewport()) painter.setFont(_custom_font(is_italic=True)) painter.drawText(self.rect().adjusted(0, 0, -5, -5), Qt.AlignCenter | Qt.TextWordWrap, self.PLACEHOLDER)
[ "def", "_draw_placeholder", "(", "self", ")", ":", "if", "self", ".", "model", "(", ")", ".", "rowCount", "(", ")", "==", "0", ":", "painter", "=", "QPainter", "(", "self", ".", "viewport", "(", ")", ")", "painter", ".", "setFont", "(", "_custom_font...
To be used in QTreeView
[ "To", "be", "used", "in", "QTreeView" ]
python
train
pycontribs/pyrax
pyrax/clouddatabases.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L509-L516
def enable_root_user(self): """ Enables login from any host for the root user and provides the user with a generated root password. """ uri = "/instances/%s/root" % self.id resp, body = self.manager.api.method_post(uri) return body["user"]["password"]
[ "def", "enable_root_user", "(", "self", ")", ":", "uri", "=", "\"/instances/%s/root\"", "%", "self", ".", "id", "resp", ",", "body", "=", "self", ".", "manager", ".", "api", ".", "method_post", "(", "uri", ")", "return", "body", "[", "\"user\"", "]", "...
Enables login from any host for the root user and provides the user with a generated root password.
[ "Enables", "login", "from", "any", "host", "for", "the", "root", "user", "and", "provides", "the", "user", "with", "a", "generated", "root", "password", "." ]
python
train
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_lingpipe.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_lingpipe.py#L249-L254
def get_sentences(self, ner_dom): '''parse the sentences and tokens out of the XML''' lp_parser = LingPipeParser(self.config) lp_parser.set(ner_dom) sentences = list( lp_parser.sentences() ) return sentences, lp_parser.relations, lp_parser.attributes
[ "def", "get_sentences", "(", "self", ",", "ner_dom", ")", ":", "lp_parser", "=", "LingPipeParser", "(", "self", ".", "config", ")", "lp_parser", ".", "set", "(", "ner_dom", ")", "sentences", "=", "list", "(", "lp_parser", ".", "sentences", "(", ")", ")",...
parse the sentences and tokens out of the XML
[ "parse", "the", "sentences", "and", "tokens", "out", "of", "the", "XML" ]
python
test
datamachine/twx.botapi
twx/botapi/botapi.py
https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L4334-L4336
def edit_message_live_location(self, *args, **kwargs): """See :func:`edit_message_live_location`""" return edit_message_live_location(*args, **self._merge_overrides(**kwargs)).run()
[ "def", "edit_message_live_location", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "edit_message_live_location", "(", "*", "args", ",", "*", "*", "self", ".", "_merge_overrides", "(", "*", "*", "kwargs", ")", ")", ".", "run"...
See :func:`edit_message_live_location`
[ "See", ":", "func", ":", "edit_message_live_location" ]
python
train
mitsei/dlkit
dlkit/handcar/osid/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/osid/sessions.py#L295-L316
def get_effective_agent(self):
        """Gets the effective agent in use by this session.

        If is_authenticated() is true, then the effective agent may be
        the same as the agent returned by get_authenticated_agent(). If
        is_authenticated() is false, then the effective agent may be a
        default agent used for authorization by an unknown or anonymous
        user.

        return: (osid.authentication.Agent) - the effective agent
        raise: OperationFailed - unable to complete request
        compliance: mandatory - This method must be implemented.

        """
        if self._proxy is not None and self._proxy.has_authentication():
            return self._proxy.get_authentication().get_agent()
        elif self._proxy is not None and self._proxy.has_effective_agent():
            return Agent(identifier=self._proxy.get_effective_agent_id().get_identifier(),
                         namespace=self._proxy.get_effective_agent_id().get_namespace(),
                         authority=self._proxy.get_effective_agent_id().get_authority())
        else:
            return Agent(identifier='MC3GUE$T@MIT.EDU',
                         namespace='osid.agent.Agent',
                         authority='MIT-OEIT')
[ "def", "get_effective_agent", "(", "self", ")", ":", "if", "self", ".", "_proxy", "is", "not", "None", "and", "self", ".", "_proxy", ".", "has_authentication", "(", ")", ":", "return", "self", ".", "_proxy", ".", "get_authentication", "(", ")", ".", "get...
Gets the effective agent in use by this session.

If is_authenticated() is true, then the effective agent may be
the same as the agent returned by get_authenticated_agent(). If
is_authenticated() is false, then the effective agent may be a
default agent used for authorization by an unknown or anonymous
user.

return: (osid.authentication.Agent) - the effective agent
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
[ "Gets", "the", "effective", "agent", "in", "use", "by", "this", "session", ".", "If", "is_authenticated", "()", "is", "true", "then", "the", "effective", "agent", "may", "be", "the", "same", "as", "the", "agent", "returned", "by", "get_authenticated_agent", ...
python
train
discontinue/django-secure-js-login
secure_js_login/utils/crypt.py
https://github.com/discontinue/django-secure-js-login/blob/4bfc592c48f381de115e592e721f31d2eb915968/secure_js_login/utils/crypt.py#L91-L101
def hexlify_pbkdf2(password, salt, iterations, length, digest=hashlib.sha1): """ >>> hash = hexlify_pbkdf2("not secret", "a salt value", iterations=100, length=16) >>> hash == '0b919231515dde16f76364666cf07107' True """ # log.debug("hexlify_pbkdf2 with iterations=%i", iterations) hash = crypto.pbkdf2(password, salt, iterations=iterations, dklen=length, digest=digest) hash = binascii.hexlify(hash) hash = six.text_type(hash, "ascii") return hash
[ "def", "hexlify_pbkdf2", "(", "password", ",", "salt", ",", "iterations", ",", "length", ",", "digest", "=", "hashlib", ".", "sha1", ")", ":", "# log.debug(\"hexlify_pbkdf2 with iterations=%i\", iterations)", "hash", "=", "crypto", ".", "pbkdf2", "(", "password", ...
>>> hash = hexlify_pbkdf2("not secret", "a salt value", iterations=100, length=16) >>> hash == '0b919231515dde16f76364666cf07107' True
[ ">>>", "hash", "=", "hexlify_pbkdf2", "(", "not", "secret", "a", "salt", "value", "iterations", "=", "100", "length", "=", "16", ")", ">>>", "hash", "==", "0b919231515dde16f76364666cf07107", "True" ]
python
train
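Django's crypto.pbkdf2 wraps the standard PBKDF2-HMAC primitive, so the doctest value can be cross-checked with hashlib alone; assuming the doctest above is accurate, this assertion holds:

import binascii
import hashlib

def hexlify_pbkdf2_stdlib(password, salt, iterations, length):
    # Same derivation as the helper above, without the Django dependency.
    raw = hashlib.pbkdf2_hmac('sha1', password.encode(), salt.encode(),
                              iterations, dklen=length)
    return binascii.hexlify(raw).decode('ascii')

assert hexlify_pbkdf2_stdlib("not secret", "a salt value",
                             iterations=100, length=16) == '0b919231515dde16f76364666cf07107'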
ktbyers/netmiko
netmiko/cisco/cisco_wlc_ssh.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/cisco/cisco_wlc_ssh.py#L15-L42
def special_login_handler(self, delay_factor=1): """WLC presents with the following on login (in certain OS versions) login as: user (Cisco Controller) User: user Password:**** """ delay_factor = self.select_delay_factor(delay_factor) i = 0 time.sleep(delay_factor * 0.5) output = "" while i <= 12: output = self.read_channel() if output: if "login as" in output or "User:" in output: self.write_channel(self.username + self.RETURN) elif "Password" in output: self.write_channel(self.password + self.RETURN) break time.sleep(delay_factor * 1) else: self.write_channel(self.RETURN) time.sleep(delay_factor * 1.5) i += 1
[ "def", "special_login_handler", "(", "self", ",", "delay_factor", "=", "1", ")", ":", "delay_factor", "=", "self", ".", "select_delay_factor", "(", "delay_factor", ")", "i", "=", "0", "time", ".", "sleep", "(", "delay_factor", "*", "0.5", ")", "output", "=...
WLC presents with the following on login (in certain OS versions) login as: user (Cisco Controller) User: user Password:****
[ "WLC", "presents", "with", "the", "following", "on", "login", "(", "in", "certain", "OS", "versions", ")" ]
python
train
JarryShaw/PyPCAPKit
src/protocols/internet/hopopt.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/hopopt.py#L974-L1014
def _read_opt_home(self, code, *, desc): """Read HOPOPT Home Address option. Structure of HOPOPT Home Address option [RFC 6275]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Option Type | Option Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + + | | + Home Address + | | + + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 hopopt.home.type Option Type 0 0 hopopt.home.type.value Option Number 0 0 hopopt.home.type.action Action (11) 0 2 hopopt.home.type.change Change Flag (0) 1 8 hopopt.home.length Length of Option Data 2 16 hopopt.home.ip Home Address """ _type = self._read_opt_type(code) _size = self._read_unpack(1) if _size != 16: raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format') _addr = self._read_fileng(16) opt = dict( desc=desc, type=_type, length=_size + 2, ip=ipaddress.ip_address(_addr), ) return opt
[ "def", "_read_opt_home", "(", "self", ",", "code", ",", "*", ",", "desc", ")", ":", "_type", "=", "self", ".", "_read_opt_type", "(", "code", ")", "_size", "=", "self", ".", "_read_unpack", "(", "1", ")", "if", "_size", "!=", "16", ":", "raise", "P...
Read HOPOPT Home Address option. Structure of HOPOPT Home Address option [RFC 6275]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Option Type | Option Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + + | | + Home Address + | | + + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 hopopt.home.type Option Type 0 0 hopopt.home.type.value Option Number 0 0 hopopt.home.type.action Action (11) 0 2 hopopt.home.type.change Change Flag (0) 1 8 hopopt.home.length Length of Option Data 2 16 hopopt.home.ip Home Address
[ "Read", "HOPOPT", "Home", "Address", "option", "." ]
python
train
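The wire format is small enough to assemble by hand. A stdlib-only sketch that builds one Home Address option (type 0xC9 per RFC 6275: action 11, change 0, number 9) around a sample address and decodes it the way the parser does:

import ipaddress
import struct

addr = ipaddress.ip_address('2001:db8::1')            # sample home address
option = struct.pack('!BB', 0xC9, 16) + addr.packed   # type, length=16, address

opt_type, opt_len = option[0], option[1]
assert opt_len == 16, 'invalid format'                # same check as the parser
home = ipaddress.ip_address(option[2:18])
print(opt_type, opt_len, home)                        # 201 16 2001:db8::1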
briancappello/flask-unchained
flask_unchained/bundles/controller/utils.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/controller/utils.py#L164-L182
def join(*args, trailing_slash=False): """ Return a url path joined from the arguments. It correctly handles blank/None arguments, and removes back-to-back slashes, eg:: assert join('/', 'foo', None, 'bar', '', 'baz') == '/foo/bar/baz' assert join('/', '/foo', '/', '/bar/') == '/foo/bar' Note that it removes trailing slashes by default, so if you want to keep those, then you need to pass the ``trailing_slash`` keyword argument:: assert join('/foo', 'baz', None, trailing_slash=True) == '/foo/baz/' """ dirty_path = '/'.join(map(lambda x: x and x or '', args)) path = re.sub(r'/+', '/', dirty_path) if path in {'', '/'}: return '/' path = path.rstrip('/') return path if not trailing_slash else path + '/'
[ "def", "join", "(", "*", "args", ",", "trailing_slash", "=", "False", ")", ":", "dirty_path", "=", "'/'", ".", "join", "(", "map", "(", "lambda", "x", ":", "x", "and", "x", "or", "''", ",", "args", ")", ")", "path", "=", "re", ".", "sub", "(", ...
Return a url path joined from the arguments. It correctly handles blank/None arguments, and removes back-to-back slashes, eg:: assert join('/', 'foo', None, 'bar', '', 'baz') == '/foo/bar/baz' assert join('/', '/foo', '/', '/bar/') == '/foo/bar' Note that it removes trailing slashes by default, so if you want to keep those, then you need to pass the ``trailing_slash`` keyword argument:: assert join('/foo', 'baz', None, trailing_slash=True) == '/foo/baz/'
[ "Return", "a", "url", "path", "joined", "from", "the", "arguments", ".", "It", "correctly", "handles", "blank", "/", "None", "arguments", "and", "removes", "back", "-", "to", "-", "back", "slashes", "eg", "::" ]
python
train
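Since the function is pure and depends only on re, the docstring examples can be executed verbatim as a quick self-check:

import re

def join(*args, trailing_slash=False):
    dirty_path = '/'.join(map(lambda x: x and x or '', args))
    path = re.sub(r'/+', '/', dirty_path)
    if path in {'', '/'}:
        return '/'
    path = path.rstrip('/')
    return path if not trailing_slash else path + '/'

assert join('/', 'foo', None, 'bar', '', 'baz') == '/foo/bar/baz'
assert join('/', '/foo', '/', '/bar/') == '/foo/bar'
assert join('/foo', 'baz', None, trailing_slash=True) == '/foo/baz/'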
eaton-lab/toytree
toytree/Toytree.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Toytree.py#L433-L447
def collapse_nodes(self, min_dist=1e-6, min_support=0): """ Returns a copy of the tree where internal nodes with dist <= min_dist are deleted, resulting in a collapsed tree. e.g.: newtre = tre.collapse_nodes(min_dist=0.001) newtre = tre.collapse_nodes(min_support=50) """ nself = self.copy() for node in nself.treenode.traverse(): if not node.is_leaf(): if (node.dist <= min_dist) | (node.support < min_support): node.delete() nself._coords.update() return nself
[ "def", "collapse_nodes", "(", "self", ",", "min_dist", "=", "1e-6", ",", "min_support", "=", "0", ")", ":", "nself", "=", "self", ".", "copy", "(", ")", "for", "node", "in", "nself", ".", "treenode", ".", "traverse", "(", ")", ":", "if", "not", "no...
Returns a copy of the tree where internal nodes with dist <= min_dist are deleted, resulting in a collapsed tree. e.g.: newtre = tre.collapse_nodes(min_dist=0.001) newtre = tre.collapse_nodes(min_support=50)
[ "Returns", "a", "copy", "of", "the", "tree", "where", "internal", "nodes", "with", "dist", "<", "=", "min_dist", "are", "deleted", "resulting", "in", "a", "collapsed", "tree", ".", "e", ".", "g", ".", ":" ]
python
train
poldracklab/niworkflows
niworkflows/interfaces/mni.py
https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/interfaces/mni.py#L436-L502
def create_cfm(in_file, lesion_mask=None, global_mask=True, out_path=None): """ Create a mask to constrain registration. Parameters ---------- in_file : str Path to an existing image (usually a mask). If global_mask = True, this is used as a size/dimension reference. out_path : str Path/filename for the new cost function mask. lesion_mask : str, optional Path to an existing binary lesion mask. global_mask : bool Create a whole-image mask (True) or limit to reference mask (False) A whole image-mask is 1 everywhere Returns ------- str Absolute path of the new cost function mask. Notes ----- in_file and lesion_mask must be in the same image space and have the same dimensions """ import os import numpy as np import nibabel as nb from nipype.utils.filemanip import fname_presuffix if out_path is None: out_path = fname_presuffix(in_file, suffix='_cfm', newpath=os.getcwd()) else: out_path = os.path.abspath(out_path) if not global_mask and not lesion_mask: NIWORKFLOWS_LOG.warning( 'No lesion mask was provided and global_mask not requested, ' 'therefore the original mask will not be modified.') # Load the input image in_img = nb.load(in_file) # If we want a global mask, create one based on the input image. data = np.ones(in_img.shape, dtype=np.uint8) if global_mask else in_img.get_data() if set(np.unique(data)) - {0, 1}: raise ValueError("`global_mask` must be true if `in_file` is not a binary mask") # If a lesion mask was provided, combine it with the secondary mask. if lesion_mask is not None: # Reorient the lesion mask and get the data. lm_img = nb.as_closest_canonical(nb.load(lesion_mask)) # Subtract lesion mask from secondary mask, set negatives to 0 data = np.fmax(data - lm_img.get_data(), 0) # Cost function mask will be created from subtraction # Otherwise, CFM will be created from global mask cfm_img = nb.Nifti1Image(data, in_img.affine, in_img.header) # Save the cost function mask. cfm_img.set_data_dtype(np.uint8) cfm_img.to_filename(out_path) return out_path
[ "def", "create_cfm", "(", "in_file", ",", "lesion_mask", "=", "None", ",", "global_mask", "=", "True", ",", "out_path", "=", "None", ")", ":", "import", "os", "import", "numpy", "as", "np", "import", "nibabel", "as", "nb", "from", "nipype", ".", "utils",...
Create a mask to constrain registration.

Parameters
----------
in_file : str
    Path to an existing image (usually a mask).
    If global_mask = True, this is used as a size/dimension reference.
out_path : str
    Path/filename for the new cost function mask.
lesion_mask : str, optional
    Path to an existing binary lesion mask.
global_mask : bool
    Create a whole-image mask (True) or limit to reference mask (False).
    A whole-image mask is 1 everywhere.

Returns
-------
str
    Absolute path of the new cost function mask.

Notes
-----
in_file and lesion_mask must be in the same image space and have the
same dimensions.
[ "Create", "a", "mask", "to", "constrain", "registration", "." ]
python
train
NuGrid/NuGridPy
nugridpy/utils.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/utils.py#L789-L804
def isoratio_init(self,isos): ''' This file returns the isotopic ratio of two isotopes specified as iso1 and iso2. The isotopes are given as, e.g., ['Fe',56,'Fe',58] or ['Fe-56','Fe-58'] (for compatibility) -> list. ''' if len(isos) == 2: dumb = [] dumb = isos[0].split('-') dumb.append(isos[1].split('-')[0]) dumb.append(isos[1].split('-')[1]) isos = dumb ssratio = old_div(self.habu[isos[0].ljust(2).lower() + str(int(isos[1])).rjust(3)], self.habu[isos[2].ljust(2).lower() + str(int(isos[3])).rjust(3)]) return ssratio
[ "def", "isoratio_init", "(", "self", ",", "isos", ")", ":", "if", "len", "(", "isos", ")", "==", "2", ":", "dumb", "=", "[", "]", "dumb", "=", "isos", "[", "0", "]", ".", "split", "(", "'-'", ")", "dumb", ".", "append", "(", "isos", "[", "1",...
This function returns the isotopic ratio of two isotopes specified
as iso1 and iso2. The isotopes are given as, e.g.,
['Fe',56,'Fe',58] or ['Fe-56','Fe-58'] (for compatibility)
-> list.
[ "This", "file", "returns", "the", "isotopic", "ratio", "of", "two", "isotopes", "specified", "as", "iso1", "and", "iso2", ".", "The", "isotopes", "are", "given", "as", "e", ".", "g", ".", "[", "Fe", "56", "Fe", "58", "]", "or", "[", "Fe", "-", "56"...
python
train
cbclab/MOT
mot/lib/utils.py
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L603-L653
def split_cl_function(cl_str): """Split an CL function into a return type, function name, parameters list and the body. Args: cl_str (str): the CL code to parse and plit into components Returns: tuple: string elements for the return type, function name, parameter list and the body """ class Semantics: def __init__(self): self._return_type = '' self._function_name = '' self._parameter_list = [] self._cl_body = '' def result(self, ast): return self._return_type, self._function_name, self._parameter_list, self._cl_body def address_space(self, ast): self._return_type = ast.strip() + ' ' return ast def data_type(self, ast): self._return_type += ''.join(ast).strip() return ast def function_name(self, ast): self._function_name = ast.strip() return ast def arglist(self, ast): if ast != '()': self._parameter_list = ast return ast def body(self, ast): def join(items): result = '' for item in items: if isinstance(item, str): result += item else: result += join(item) return result self._cl_body = join(ast).strip()[1:-1] return ast return _split_cl_function_parser.parse(cl_str, semantics=Semantics())
[ "def", "split_cl_function", "(", "cl_str", ")", ":", "class", "Semantics", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_return_type", "=", "''", "self", ".", "_function_name", "=", "''", "self", ".", "_parameter_list", "=", "[", "]", "se...
Split a CL function into a return type, function name, parameters list and the body.

Args:
    cl_str (str): the CL code to parse and split into components

Returns:
    tuple: string elements for the return type, function name, parameter list and the body
[ "Split", "an", "CL", "function", "into", "a", "return", "type", "function", "name", "parameters", "list", "and", "the", "body", "." ]
python
train
noxdafox/clipspy
clips/environment.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/environment.py#L188-L202
def define_function(self, function, name=None): """Define the Python function within the CLIPS environment. If a name is given, it will be the function name within CLIPS. Otherwise, the name of the Python function will be used. The Python function will be accessible within CLIPS via its name as if it was defined via the `deffunction` construct. """ name = name if name is not None else function.__name__ ENVIRONMENT_DATA[self._env].user_functions[name] = function self.build(DEFFUNCTION.format(name))
[ "def", "define_function", "(", "self", ",", "function", ",", "name", "=", "None", ")", ":", "name", "=", "name", "if", "name", "is", "not", "None", "else", "function", ".", "__name__", "ENVIRONMENT_DATA", "[", "self", ".", "_env", "]", ".", "user_functio...
Define the Python function within the CLIPS environment. If a name is given, it will be the function name within CLIPS. Otherwise, the name of the Python function will be used. The Python function will be accessible within CLIPS via its name as if it was defined via the `deffunction` construct.
[ "Define", "the", "Python", "function", "within", "the", "CLIPS", "environment", "." ]
python
train
angr/pyvex
pyvex/lifting/util/instr_helper.py
https://github.com/angr/pyvex/blob/c418edc1146982b2a0579bf56e5993c1c7046b19/pyvex/lifting/util/instr_helper.py#L239-L253
def get(self, reg, ty): """ Load a value from a machine register into a VEX temporary register. All values must be loaded out of registers before they can be used with operations, etc and stored back into them when the instruction is over. See Put(). :param reg: Register number as an integer, or register string name :param ty: The Type to use. :return: A VexValue of the gotten value. """ offset = self.lookup_register(self.irsb_c.irsb.arch, reg) if offset == self.irsb_c.irsb.arch.ip_offset: return self.constant(self.addr, ty) rdt = self.irsb_c.rdreg(offset, ty) return VexValue(self.irsb_c, rdt)
[ "def", "get", "(", "self", ",", "reg", ",", "ty", ")", ":", "offset", "=", "self", ".", "lookup_register", "(", "self", ".", "irsb_c", ".", "irsb", ".", "arch", ",", "reg", ")", "if", "offset", "==", "self", ".", "irsb_c", ".", "irsb", ".", "arch...
Load a value from a machine register into a VEX temporary register. All values must be loaded out of registers before they can be used with operations, etc and stored back into them when the instruction is over. See Put(). :param reg: Register number as an integer, or register string name :param ty: The Type to use. :return: A VexValue of the gotten value.
[ "Load", "a", "value", "from", "a", "machine", "register", "into", "a", "VEX", "temporary", "register", ".", "All", "values", "must", "be", "loaded", "out", "of", "registers", "before", "they", "can", "be", "used", "with", "operations", "etc", "and", "store...
python
train
bukun/TorCMS
torcms/handlers/user_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/user_handler.py#L540-L582
def login(self): ''' user login. ''' post_data = self.get_post_data() if 'next' in post_data: next_url = post_data['next'] else: next_url = '/' u_name = post_data['user_name'] u_pass = post_data['user_pass'] result = MUser.check_user_by_name(u_name, u_pass) # Todo: the kwd should remove from the codes. if result == 1: self.set_secure_cookie("user", u_name) MUser.update_time_login(u_name) self.redirect(next_url) elif result == 0: self.set_status(401) self.render('user/user_relogin.html', cfg=config.CMS_CFG, kwd={ 'info': '密码验证出错,请重新登陆。', 'link': '/user/login', }, userinfo=self.userinfo) elif result == -1: self.set_status(401) self.render('misc/html/404.html', cfg=config.CMS_CFG, kwd={ 'info': '没有这个用户', 'link': '/user/login', }, userinfo=self.userinfo) else: self.set_status(305) self.redirect("{0}".format(next_url))
[ "def", "login", "(", "self", ")", ":", "post_data", "=", "self", ".", "get_post_data", "(", ")", "if", "'next'", "in", "post_data", ":", "next_url", "=", "post_data", "[", "'next'", "]", "else", ":", "next_url", "=", "'/'", "u_name", "=", "post_data", ...
user login.
[ "user", "login", "." ]
python
train
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L422-L450
def _is_running(self, tries=10): """ Return if the server is running according to pg_ctl. """ # We can't possibly be running if our base_pathname isn't defined. if not self.base_pathname: return False if tries < 1: raise ValueError('tries must be > 0') cmd = [ PostgresFinder.find_root() / 'pg_ctl', 'status', '-D', self.base_pathname, ] votes = 0 while abs(votes) < tries: time.sleep(0.1) running = (subprocess.call(cmd, stdout=DEV_NULL) == 0) if running and votes >= 0: votes += 1 elif not running and votes <= 0: votes -= 1 else: votes = 0 return votes > 0
[ "def", "_is_running", "(", "self", ",", "tries", "=", "10", ")", ":", "# We can't possibly be running if our base_pathname isn't defined.", "if", "not", "self", ".", "base_pathname", ":", "return", "False", "if", "tries", "<", "1", ":", "raise", "ValueError", "(",...
Return whether the server is running according to pg_ctl.
[ "Return", "if", "the", "server", "is", "running", "according", "to", "pg_ctl", "." ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/install.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/install.py#L129-L147
def copyFuncVersionedLib(dest, source, env): """Install a versioned library into a destination by copying, (including copying permission/mode bits) and then creating required symlinks.""" if os.path.isdir(source): raise SCons.Errors.UserError("cannot install directory `%s' as a version library" % str(source) ) else: # remove the link if it is already there try: os.remove(dest) except: pass shutil.copy2(source, dest) st = os.stat(source) os.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE) installShlibLinks(dest, source, env) return 0
[ "def", "copyFuncVersionedLib", "(", "dest", ",", "source", ",", "env", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "source", ")", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "\"cannot install directory `%s' as a version library\"", "...
Install a versioned library into a destination by copying (including copying permission/mode bits) and then creating required symlinks.
[ "Install", "a", "versioned", "library", "into", "a", "destination", "by", "copying", "(", "including", "copying", "permission", "/", "mode", "bits", ")", "and", "then", "creating", "required", "symlinks", "." ]
python
train
ets-labs/python-dependency-injector
examples/providers/dependency.py
https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/providers/dependency.py#L40-L44
def get_by_id(self, id): """Return user info by user id.""" with contextlib.closing(self.database.cursor()) as cursor: cursor.execute('SELECT id, name FROM users WHERE id=?', (id,)) return cursor.fetchone()
[ "def", "get_by_id", "(", "self", ",", "id", ")", ":", "with", "contextlib", ".", "closing", "(", "self", ".", "database", ".", "cursor", "(", ")", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "'SELECT id, name FROM users WHERE id=?'", ",", "(",...
Return user info by user id.
[ "Return", "user", "info", "by", "user", "id", "." ]
python
train
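contextlib.closing guarantees the cursor is closed even when the query raises. A runnable sqlite3 illustration (the users table and its row are made up for the demo):

import contextlib
import sqlite3

database = sqlite3.connect(':memory:')
database.execute('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)')
database.execute("INSERT INTO users VALUES (1, 'Ada')")

def get_by_id(id):
    with contextlib.closing(database.cursor()) as cursor:  # cursor.close() is guaranteed
        cursor.execute('SELECT id, name FROM users WHERE id=?', (id,))
        return cursor.fetchone()

print(get_by_id(1))  # (1, 'Ada')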
pymacaron/pymacaron
pymacaron/__init__.py
https://github.com/pymacaron/pymacaron/blob/af244f203f8216108b39d374d46bf8e1813f13d5/pymacaron/__init__.py#L177-L279
def start(self, serve=[]): """Load all apis, either as local apis served by the flask app, or as remote apis to be called from whithin the app's endpoints, then start the app server""" # Check arguments if type(serve) is str: serve = [serve] elif type(serve) is list: pass else: raise Exception("'serve' should be an api name or a list of api names") if len(serve) == 0: raise Exception("You must specify at least one api to serve") for api_name in serve: if api_name not in self.apis: raise Exception("Can't find %s.yaml (swagger file) in the api directory %s" % (api_name, self.path_apis)) app = self.app app.secret_key = os.urandom(24) # Initialize JWT config conf = get_config() if hasattr(conf, 'jwt_secret'): log.info("Set JWT parameters to issuer=%s audience=%s secret=%s***" % ( conf.jwt_issuer, conf.jwt_audience, conf.jwt_secret[0:8], )) # Always serve the ping api serve.append('ping') # Let's compress returned data when possible compress = Compress() compress.init_app(app) # All apis that are not served locally are not persistent not_persistent = [] for api_name in self.apis.keys(): if api_name in serve: pass else: not_persistent.append(api_name) # Now load those apis into the ApiPool for api_name, api_path in self.apis.items(): host = None port = None if api_name in serve: # We are serving this api locally: override the host:port specified in the swagger spec host = self.host port = self.port do_persist = True if api_name not in not_persistent else False local = True if api_name in serve else False log.info("Loading api %s from %s (persist: %s)" % (api_name, api_path, do_persist)) ApiPool.add( api_name, yaml_path=api_path, timeout=self.timeout, error_callback=self.error_callback, formats=self.formats, do_persist=do_persist, host=host, port=port, local=local, ) ApiPool.merge() # Now spawn flask routes for all endpoints for api_name in self.apis.keys(): if api_name in serve: log.info("Spawning api %s" % api_name) api = getattr(ApiPool, api_name) # Spawn api and wrap every endpoint in a crash handler that # catches replies and reports errors api.spawn_api(app, decorator=generate_crash_handler_decorator(self.error_decorator)) log.debug("Argv is [%s]" % ' '.join(sys.argv)) if 'celery' in sys.argv[0].lower(): # This code is loading in a celery server - Don't start the actual flask app. log.info("Running in a Celery worker - Not starting the Flask app") return # Initialize monitoring, if any is defined monitor_init(app=app, config=conf) if os.path.basename(sys.argv[0]) == 'gunicorn': # Gunicorn takes care of spawning workers log.info("Running in Gunicorn - Not starting the Flask app") return # Debug mode is the default when not running via gunicorn app.debug = self.debug app.run(host='0.0.0.0', port=self.port)
[ "def", "start", "(", "self", ",", "serve", "=", "[", "]", ")", ":", "# Check arguments", "if", "type", "(", "serve", ")", "is", "str", ":", "serve", "=", "[", "serve", "]", "elif", "type", "(", "serve", ")", "is", "list", ":", "pass", "else", ":"...
Load all apis, either as local apis served by the flask app, or as remote apis to be called from within the app's endpoints, then start the app server
[ "Load", "all", "apis", "either", "as", "local", "apis", "served", "by", "the", "flask", "app", "or", "as", "remote", "apis", "to", "be", "called", "from", "whithin", "the", "app", "s", "endpoints", "then", "start", "the", "app", "server" ]
python
train
zetaops/zengine
zengine/management_commands.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/management_commands.py#L586-L599
def check_redis():
    """
    Checks the Redis connection. It displays on the screen
    whether or not you have a connection.
    """
    from pyoko.db.connection import cache
    from redis.exceptions import ConnectionError
    try:
        cache.ping()
        # Fill the colour placeholders instead of concatenating an unformatted template.
        print(__(u"{0}Redis is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
    except ConnectionError as e:
        print(__(u"{0}Redis is not working{1} ").format(CheckList.FAIL, CheckList.ENDC),
              e.message)
[ "def", "check_redis", "(", ")", ":", "from", "pyoko", ".", "db", ".", "connection", "import", "cache", "from", "redis", ".", "exceptions", "import", "ConnectionError", "try", ":", "cache", ".", "ping", "(", ")", "print", "(", "CheckList", ".", "OKGREEN", ...
Checks the Redis connection. It displays on the screen whether or not you have a connection.
[ "Redis", "checks", "the", "connection", "It", "displays", "on", "the", "screen", "whether", "or", "not", "you", "have", "a", "connection", "." ]
python
train
google/grr
grr/server/grr_response_server/client_index.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/client_index.py#L531-L539
def RemoveAllClientLabels(self, client_id): """Removes all labels for a given client. Args: client_id: The client_id. """ labels_to_remove = set( [l.name for l in data_store.REL_DB.ReadClientLabels(client_id)]) self.RemoveClientLabels(client_id, labels_to_remove)
[ "def", "RemoveAllClientLabels", "(", "self", ",", "client_id", ")", ":", "labels_to_remove", "=", "set", "(", "[", "l", ".", "name", "for", "l", "in", "data_store", ".", "REL_DB", ".", "ReadClientLabels", "(", "client_id", ")", "]", ")", "self", ".", "Re...
Removes all labels for a given client. Args: client_id: The client_id.
[ "Removes", "all", "labels", "for", "a", "given", "client", "." ]
python
train
eng-tools/sfsimodels
sfsimodels/scores.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/scores.py#L4-L21
def lc_score(value): """ Evaluates the accuracy of a predictive measure (e.g. r-squared) :param value: float, between 0.0 and 1.0. :return: """ rebased = 2 * (value - 0.5) if rebased == 0: return 0 elif rebased > 0: compliment = 1.0 - rebased score = - np.log2(compliment) else: compliment = 1.0 + rebased score = np.log2(compliment) return score
[ "def", "lc_score", "(", "value", ")", ":", "rebased", "=", "2", "*", "(", "value", "-", "0.5", ")", "if", "rebased", "==", "0", ":", "return", "0", "elif", "rebased", ">", "0", ":", "compliment", "=", "1.0", "-", "rebased", "score", "=", "-", "np...
Evaluates the accuracy of a predictive measure (e.g. r-squared) :param value: float, between 0.0 and 1.0. :return:
[ "Evaluates", "the", "accuracy", "of", "a", "predictive", "measure", "(", "e", ".", "g", ".", "r", "-", "squared", ")" ]
python
train
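The rebasing makes the score antisymmetric about 0.5: a value of 0.75 halves the error and scores +1, while 0.25 doubles it and scores -1. A compact restatement that runs directly:

import numpy as np

def lc_score(value):
    rebased = 2 * (value - 0.5)
    if rebased == 0:
        return 0
    if rebased > 0:
        return -np.log2(1.0 - rebased)
    return np.log2(1.0 + rebased)

print(lc_score(0.5))    # 0
print(lc_score(0.75))   # 1.0  == -log2(0.5)
print(lc_score(0.25))   # -1.0, the mirror image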
genialis/resolwe
resolwe/flow/migrations/0012_recreate_empty_parents.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/migrations/0012_recreate_empty_parents.py#L10-L31
def recreate_parent_dependencies(apps, schema_editor): """Create empty dependency relation if parent has been deleted.""" Data = apps.get_model('flow', 'Data') DataDependency = apps.get_model('flow', 'DataDependency') def process_dependency(data, parent): if not Data.objects.filter(pk=parent).exists(): DataDependency.objects.create( child=data, parent=None, kind='io' ) for data in Data.objects.all(): for field_schema, fields in iterate_fields(data.input, data.process.input_schema): name = field_schema['name'] value = fields[name] if field_schema.get('type', '').startswith('data:'): process_dependency(data, value) elif field_schema.get('type', '').startswith('list:data:'): for parent in value: process_dependency(data, parent)
[ "def", "recreate_parent_dependencies", "(", "apps", ",", "schema_editor", ")", ":", "Data", "=", "apps", ".", "get_model", "(", "'flow'", ",", "'Data'", ")", "DataDependency", "=", "apps", ".", "get_model", "(", "'flow'", ",", "'DataDependency'", ")", "def", ...
Create empty dependency relation if parent has been deleted.
[ "Create", "empty", "dependency", "relation", "if", "parent", "has", "been", "deleted", "." ]
python
train
great-expectations/great_expectations
great_expectations/data_asset/file_data_asset.py
https://github.com/great-expectations/great_expectations/blob/08385c40529d4f14a1c46916788aecc47f33ee9d/great_expectations/data_asset/file_data_asset.py#L259-L333
def expect_file_line_regex_match_count_to_equal(self, regex, expected_count=0, skip=None, mostly=None, nonnull_lines_regex=r"^\s*$", result_format=None, include_config=False, catch_exceptions=None, meta=None, _lines=None): """ Expect the number of times a regular expression appears on each line of a file to be between a maximum and minimum value. Args: regex: \ A string that can be compiled as valid regular expression to match expected_count (None or nonnegative integer): \ Specifies the number of times regex is expected to appear on each line of the file Keyword Args: skip (None or nonnegative integer): \ Integer specifying the first lines in the file the method should skip before assessing expectations mostly (None or number between 0 and 1): \ Specifies an acceptable error for expectations. If the percentage of unexpected lines is less than mostly, the method still returns true even if all lines don't match the expectation criteria. null_lines_regex (valid regular expression or None): \ If not none, a regex to skip lines as null. Defaults to empty or whitespace-only lines. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`. _lines (list): \ The lines over which to operate (provided by the file_lines_map_expectation decorator) Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. """ try: comp_regex = re.compile(regex) except: raise ValueError("Must enter valid regular expression for regex") try: assert float(expected_count).is_integer() assert float(expected_count) >= 0 except: raise ValueError("expected_count must be a non-negative integer") return [True if(len(comp_regex.findall(line)) == expected_count) else False \ for line in _lines]
[ "def", "expect_file_line_regex_match_count_to_equal", "(", "self", ",", "regex", ",", "expected_count", "=", "0", ",", "skip", "=", "None", ",", "mostly", "=", "None", ",", "nonnull_lines_regex", "=", "r\"^\\s*$\"", ",", "result_format", "=", "None", ",", "inclu...
Expect the number of times a regular expression appears on each line of
a file to equal the given expected count.

Args:
    regex: \
        A string that can be compiled as valid regular expression to match
    expected_count (None or nonnegative integer): \
        Specifies the number of times regex is expected to appear on each line of the file

Keyword Args:
    skip (None or nonnegative integer): \
        Integer specifying the first lines in the file the method should skip before assessing expectations
    mostly (None or number between 0 and 1): \
        Specifies an acceptable error for expectations. If the percentage of unexpected lines is less than mostly,
        the method still returns true even if all lines don't match the expectation criteria.
    nonnull_lines_regex (valid regular expression or None): \
        If not none, a regex to skip lines as null. Defaults to empty or whitespace-only lines.

Other Parameters:
    result_format (str or None): \
        Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
        For more detail, see :ref:`result_format <result_format>`.
    include_config (boolean): \
        If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`.
    catch_exceptions (boolean or None): \
        If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`.
    meta (dict or None): \
        A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`.
    _lines (list): \
        The lines over which to operate (provided by the file_lines_map_expectation decorator)

Returns:
    A JSON-serializable expectation result object.

    Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
    :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
[ "Expect", "the", "number", "of", "times", "a", "regular", "expression", "appears", "on", "each", "line", "of", "a", "file", "to", "be", "between", "a", "maximum", "and", "minimum", "value", "." ]
python
train
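Stripped of the expectation plumbing (the decorator, result_format, mostly), the core check is a per-line regex count compared against expected_count:

import re

def line_match_counts_equal(lines, regex, expected_count):
    comp_regex = re.compile(regex)
    return [len(comp_regex.findall(line)) == expected_count for line in lines]

lines = ['a-b-c', 'x-y', 'plain']
print(line_match_counts_equal(lines, r'-', 2))  # [True, False, False]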
pymupdf/PyMuPDF
fitz/utils.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/utils.py#L1100-L1128
def insertPage( doc, pno, text=None, fontsize=11, width=595, height=842, fontname="helv", fontfile=None, color=None, ): """ Create a new PDF page and insert some text. Notes: Function combining Document.newPage() and Page.insertText(). For parameter details see these methods. """ page = doc.newPage(pno=pno, width=width, height=height) if not bool(text): return 0 rc = page.insertText( (50, 72), text, fontsize=fontsize, fontname=fontname, fontfile=fontfile, color=color, ) return rc
[ "def", "insertPage", "(", "doc", ",", "pno", ",", "text", "=", "None", ",", "fontsize", "=", "11", ",", "width", "=", "595", ",", "height", "=", "842", ",", "fontname", "=", "\"helv\"", ",", "fontfile", "=", "None", ",", "color", "=", "None", ",", ...
Create a new PDF page and insert some text. Notes: Function combining Document.newPage() and Page.insertText(). For parameter details see these methods.
[ "Create", "a", "new", "PDF", "page", "and", "insert", "some", "text", "." ]
python
train
aarongarrett/inspyred
inspyred/ec/archivers.py
https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/archivers.py#L121-L256
def adaptive_grid_archiver(random, population, archive, args): """Archive only the best individual(s) using a fixed size grid. This function archives the best solutions by using a fixed-size grid to determine which existing solutions should be removed in order to make room for new ones. This archiver is designed specifically for use with the Pareto Archived Evolution Strategy (PAES). .. Arguments: random -- the random number generator object population -- the population of individuals archive -- the current archive of individuals args -- a dictionary of keyword arguments Optional keyword arguments in args: - *max_archive_size* -- the maximum number of individuals in the archive (default len(population)) - *num_grid_divisions* -- the number of grid divisions (default 1) """ def get_grid_location(fitness, num_grid_divisions, global_smallest, global_largest): loc = 0 n = 1 num_objectives = len(fitness) inc = [0 for _ in range(num_objectives)] width = [0 for _ in range(num_objectives)] local_smallest = global_smallest[:] for i, f in enumerate(fitness): if f < local_smallest[i] or f > local_smallest[i] + global_largest[i] - global_smallest[i]: return -1 for i in range(num_objectives): inc[i] = n n *= 2 width[i] = global_largest[i] - global_smallest[i] for d in range(num_grid_divisions): for i, f in enumerate(fitness): if f < width[i] / 2.0 + local_smallest[i]: loc += inc[i] else: local_smallest[i] += width[i] / 2.0 for i in range(num_objectives): inc[i] *= num_objectives * 2 width[i] /= 2.0 return loc def update_grid(individual, archive, num_grid_divisions, global_smallest, global_largest, grid_population): if len(archive) == 0: num_objectives = len(individual.fitness) smallest = [individual.fitness[o] for o in range(num_objectives)] largest = [individual.fitness[o] for o in range(num_objectives)] else: num_objectives = min(min([len(a.fitness) for a in archive]), len(individual.fitness)) smallest = [min(min([a.fitness[o] for a in archive]), individual.fitness[o]) for o in range(num_objectives)] largest = [max(max([a.fitness[o] for a in archive]), individual.fitness[o]) for o in range(num_objectives)] for i in range(num_objectives): global_smallest[i] = smallest[i] - abs(0.2 * smallest[i]) global_largest[i] = largest[i] + abs(0.2 * largest[i]) for i in range(len(grid_population)): grid_population[i] = 0 for a in archive: loc = get_grid_location(a.fitness, num_grid_divisions, global_smallest, global_largest) a.grid_location = loc grid_population[loc] += 1 loc = get_grid_location(individual.fitness, num_grid_divisions, global_smallest, global_largest) individual.grid_location = loc grid_population[loc] += 1 max_archive_size = args.setdefault('max_archive_size', len(population)) num_grid_divisions = args.setdefault('num_grid_divisions', 1) if not 'grid_population' in dir(adaptive_grid_archiver): adaptive_grid_archiver.grid_population = [0 for _ in range(2**(min([len(p.fitness) for p in population]) * num_grid_divisions))] if not 'global_smallest' in dir(adaptive_grid_archiver): adaptive_grid_archiver.global_smallest = [0 for _ in range(min([len(p.fitness) for p in population]))] if not 'global_largest' in dir(adaptive_grid_archiver): adaptive_grid_archiver.global_largest = [0 for _ in range(min([len(p.fitness) for p in population]))] new_archive = archive for ind in population: update_grid(ind, new_archive, num_grid_divisions, adaptive_grid_archiver.global_smallest, adaptive_grid_archiver.global_largest, adaptive_grid_archiver.grid_population) should_be_added = True for a in new_archive: if ind == 
a or a > ind: should_be_added = False if should_be_added: if len(new_archive) == 0: new_archive.append(ind) else: join = False nondominated = True removal_set = [] for i, a in enumerate(new_archive): if ind > a and not join: new_archive[i] = ind join = True elif ind > a: if not a in removal_set: removal_set.append(a) # Otherwise, the individual is nondominated against this archive member. # We can't use set difference because Individual objects are not hashable. # We'd like to say... # new_archive = list(set(new_archive) - set(removal_set)) # So this code gets that same result without using sets. temp_archive = [] for ind in new_archive: if ind not in removal_set: temp_archive.append(ind) new_archive = temp_archive if not join and nondominated: if len(new_archive) == max_archive_size: replaced_index = 0 found_replacement = False loc = get_grid_location(ind.fitness, num_grid_divisions, adaptive_grid_archiver.global_smallest, adaptive_grid_archiver.global_largest) ind.grid_location = loc if ind.grid_location >= 0: most = adaptive_grid_archiver.grid_population[ind.grid_location] else: most = -1 for i, a in enumerate(new_archive): pop_at_a = adaptive_grid_archiver.grid_population[a.grid_location] if pop_at_a > most: most = pop_at_a replaced_index = i found_replacement = True if found_replacement: new_archive[replaced_index] = ind else: new_archive.append(ind) return new_archive
[ "def", "adaptive_grid_archiver", "(", "random", ",", "population", ",", "archive", ",", "args", ")", ":", "def", "get_grid_location", "(", "fitness", ",", "num_grid_divisions", ",", "global_smallest", ",", "global_largest", ")", ":", "loc", "=", "0", "n", "=",...
Archive only the best individual(s) using a fixed size grid.

This function archives the best solutions by using a fixed-size grid
to determine which existing solutions should be removed in order to
make room for new ones. This archiver is designed specifically for
use with the Pareto Archived Evolution Strategy (PAES).

.. Arguments:
   random -- the random number generator object
   population -- the population of individuals
   archive -- the current archive of individuals
   args -- a dictionary of keyword arguments

Optional keyword arguments in args:

- *max_archive_size* -- the maximum number of individuals in the archive
  (default len(population))
- *num_grid_divisions* -- the number of grid divisions (default 1)
[ "Archive", "only", "the", "best", "individual", "(", "s", ")", "using", "a", "fixed", "size", "grid", ".", "This", "function", "archives", "the", "best", "solutions", "by", "using", "a", "fixed", "-", "size", "grid", "to", "determine", "which", "existing",...
python
train
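The core trick in `adaptive_grid_archiver` is hashing a fitness vector into a cell of a recursively halved hypergrid. A simplified, self-contained variant of that cell computation (the bit assignment differs from inspyred's, but the recursive-bisection idea is the same):

def grid_location(fitness, divisions, lo, hi):
    """Return the index of the hypergrid cell containing `fitness`.

    `lo`/`hi` are per-objective bounds; each division halves every axis.
    """
    loc, cell = 0, 1
    lo = list(lo)
    width = [h - l for l, h in zip(lo, hi)]
    for _ in range(divisions):
        for i, f in enumerate(fitness):
            if f >= lo[i] + width[i] / 2.0:   # upper half of axis i at this depth
                loc += cell
                lo[i] += width[i] / 2.0
            cell *= 2
        width = [w / 2.0 for w in width]
    return loc

# One division of a 2-objective space gives 4 cells, indexed 0..3:
for f in [(0.2, 0.2), (0.7, 0.2), (0.2, 0.7), (0.7, 0.7)]:
    print(f, grid_location(f, 1, (0.0, 0.0), (1.0, 1.0)))   # 0, 1, 2, 3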
roclark/sportsreference
sportsreference/nhl/boxscore.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/boxscore.py#L581-L664
def _parse_game_data(self, uri): """ Parses a value for every attribute. This function looks through every attribute and retrieves the value according to the parsing scheme and index of the attribute from the passed HTML data. Once the value is retrieved, the attribute's value is updated with the returned result. Note that this method is called directly once Boxscore is invoked and does not need to be called manually. Parameters ---------- uri : string The relative link to the boxscore HTML page, such as '201802040nwe'. """ boxscore = self._retrieve_html_page(uri) # If the boxscore is None, the game likely hasn't been played yet and # no information can be gathered. As there is nothing to grab, the # class instance should just be empty. if not boxscore: return fields_to_special_parse = [ 'away_even_strength_assists', 'away_power_play_assists', 'away_short_handed_assists', 'away_game_winning_goals', 'away_saves', 'away_save_percentage', 'away_shutout', 'home_even_strength_assists', 'home_power_play_assists', 'home_short_handed_assists', 'home_game_winning_goals', 'home_saves', 'home_save_percentage', 'home_shutout' ] for field in self.__dict__: # Remove the '_' from the name short_field = str(field)[1:] if short_field == 'winner' or \ short_field == 'winning_name' or \ short_field == 'winning_abbr' or \ short_field == 'losing_name' or \ short_field == 'losing_abbr' or \ short_field == 'uri' or \ short_field == 'date' or \ short_field == 'time' or \ short_field == 'arena' or \ short_field == 'attendance' or \ short_field == 'time_of_day' or \ short_field == 'duration': continue if short_field == 'away_name' or \ short_field == 'home_name': value = self._parse_name(short_field, boxscore) setattr(self, field, value) continue if short_field in fields_to_special_parse: scheme = BOXSCORE_SCHEME[short_field] value = [i.text() for i in boxscore(scheme).items()] setattr(self, field, value) continue index = 0 if short_field in BOXSCORE_ELEMENT_INDEX.keys(): index = BOXSCORE_ELEMENT_INDEX[short_field] value = utils._parse_field(BOXSCORE_SCHEME, boxscore, short_field, index) setattr(self, field, value) self._away_skaters = len(boxscore(BOXSCORE_SCHEME['away_skaters'])) num_away_goalies = boxscore(BOXSCORE_SCHEME['away_goalies']).items() # Skip the first element as it is dedicated to skaters and not goalies. next(num_away_goalies) self._away_goalies = len(next(num_away_goalies)('tbody tr')) self._parse_game_date_and_location(boxscore) self._away_players, self._home_players = self._find_players(boxscore)
[ "def", "_parse_game_data", "(", "self", ",", "uri", ")", ":", "boxscore", "=", "self", ".", "_retrieve_html_page", "(", "uri", ")", "# If the boxscore is None, the game likely hasn't been played yet and", "# no information can be gathered. As there is nothing to grab, the", "# cl...
Parses a value for every attribute.

This function looks through every attribute and retrieves the value
according to the parsing scheme and index of the attribute from the
passed HTML data. Once the value is retrieved, the attribute's value is
updated with the returned result.

Note that this method is called directly once Boxscore is invoked and
does not need to be called manually.

Parameters
----------
uri : string
    The relative link to the boxscore HTML page, such as '201802040nwe'.
[ "Parses", "a", "value", "for", "every", "attribute", "." ]
python
train
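The parser above leans on a common scraping pattern: iterate over the instance's underscore-prefixed attributes, derive a lookup key by stripping the underscore, and `setattr` the parsed value back. A stripped-down sketch of that pattern, with a plain dict standing in for the parsed HTML:

class Boxscore:
    def __init__(self, raw):
        self._home_goals = None
        self._away_goals = None
        self._parse(raw)

    def _parse(self, raw):
        for field in list(self.__dict__):   # copy: we mutate while iterating
            short_field = field[1:]         # drop the leading '_'
            if short_field in raw:
                setattr(self, field, raw[short_field])

box = Boxscore({'home_goals': 3, 'away_goals': 2})
print(box._home_goals, box._away_goals)   # 3 2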
tcalmant/ipopo
pelix/ipopo/handlers/requiresbest.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/requiresbest.py#L54-L79
def get_handlers(self, component_context, instance): """ Sets up service providers for the given component :param component_context: The ComponentContext bean :param instance: The component instance :return: The list of handlers associated to the given component """ # Extract information from the context requirements = component_context.get_handler( ipopo_constants.HANDLER_REQUIRES_BEST ) requires_filters = component_context.properties.get( ipopo_constants.IPOPO_REQUIRES_FILTERS, None ) # Prepare requirements requirements = self._prepare_requirements( requirements, requires_filters ) # Set up the runtime dependency handlers return [ BestDependency(field, requirement) for field, requirement in requirements.items() ]
[ "def", "get_handlers", "(", "self", ",", "component_context", ",", "instance", ")", ":", "# Extract information from the context", "requirements", "=", "component_context", ".", "get_handler", "(", "ipopo_constants", ".", "HANDLER_REQUIRES_BEST", ")", "requires_filters", ...
Sets up service providers for the given component

:param component_context: The ComponentContext bean
:param instance: The component instance
:return: The list of handlers associated to the given component
[ "Sets", "up", "service", "providers", "for", "the", "given", "component" ]
python
train
odlgroup/odl
odl/discr/discretization.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discretization.py#L414-L439
def sampling(self, ufunc, **kwargs):
        """Sample a continuous function and assign to this element.

        Parameters
        ----------
        ufunc : ``self.space.fspace`` element
            The continuous function that should be sampled.
        kwargs :
            Additional arguments for the sampling operator implementation

        Examples
        --------
        >>> space = odl.uniform_discr(0, 1, 5)
        >>> x = space.element()

        Assign x according to a continuous function:

        >>> x.sampling(lambda t: t)
        >>> x  # Print values at grid points (which are centered)
        uniform_discr(0.0, 1.0, 5).element([ 0.1, 0.3, 0.5, 0.7, 0.9])

        See Also
        --------
        DiscretizedSpace.sampling : For full description
        """
        self.space.sampling(ufunc, out=self.tensor, **kwargs)
[ "def", "sampling", "(", "self", ",", "ufunc", ",", "*", "*", "kwargs", ")", ":", "self", ".", "space", ".", "sampling", "(", "ufunc", ",", "out", "=", "self", ".", "tensor", ",", "*", "*", "kwargs", ")" ]
Sample a continuous function and assign to this element.

Parameters
----------
ufunc : ``self.space.fspace`` element
    The continuous function that should be sampled.
kwargs :
    Additional arguments for the sampling operator implementation

Examples
--------
>>> space = odl.uniform_discr(0, 1, 5)
>>> x = space.element()

Assign x according to a continuous function:

>>> x.sampling(lambda t: t)
>>> x  # Print values at grid points (which are centered)
uniform_discr(0.0, 1.0, 5).element([ 0.1, 0.3, 0.5, 0.7, 0.9])

See Also
--------
DiscretizedSpace.sampling : For full description
[ "Sample", "a", "continuous", "function", "and", "assign", "to", "this", "element", "." ]
python
train
jilljenn/tryalgo
tryalgo/max_interval_intersec.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/max_interval_intersec.py#L8-L23
def max_interval_intersec(S): """determine a value that is contained in a largest number of given intervals :param S: list of half open intervals :complexity: O(n log n), where n = len(S) """ B = ([(left, +1) for left, right in S] + [(right, -1) for left, right in S]) B.sort() c = 0 best = (c, None) for x, d in B: c += d if best[0] < c: best = (c, x) return best
[ "def", "max_interval_intersec", "(", "S", ")", ":", "B", "=", "(", "[", "(", "left", ",", "+", "1", ")", "for", "left", ",", "right", "in", "S", "]", "+", "[", "(", "right", ",", "-", "1", ")", "for", "left", ",", "right", "in", "S", "]", "...
determine a value that is contained in a largest number of given intervals

:param S: list of half open intervals
:complexity: O(n log n), where n = len(S)
[ "determine", "a", "value", "that", "is", "contained", "in", "a", "largest", "number", "of", "given", "intervals" ]
python
train
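The sweep works because sorting the `(x, delta)` event tuples puts an interval end `(r, -1)` before a start `(r, +1)` at the same coordinate, which is exactly the half-open `[l, r)` semantics. A small run-through of the same sweep on made-up intervals:

intervals = [(0, 4), (1, 6), (5, 8)]   # half-open [l, r)
events = sorted([(l, +1) for l, r in intervals] +
                [(r, -1) for l, r in intervals])
depth, best, best_x = 0, 0, None
for x, d in events:
    depth += d
    if depth > best:
        best, best_x = depth, x
print(best, best_x)   # 2 1  -> two intervals cover the points just after x = 1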
googleapis/google-cloud-python
storage/google/cloud/storage/_helpers.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/_helpers.py#L216-L228
def _scalar_property(fieldname): """Create a property descriptor around the :class:`_PropertyMixin` helpers. """ def _getter(self): """Scalar property getter.""" return self._properties.get(fieldname) def _setter(self, value): """Scalar property setter.""" self._patch_property(fieldname, value) return property(_getter, _setter)
[ "def", "_scalar_property", "(", "fieldname", ")", ":", "def", "_getter", "(", "self", ")", ":", "\"\"\"Scalar property getter.\"\"\"", "return", "self", ".", "_properties", ".", "get", "(", "fieldname", ")", "def", "_setter", "(", "self", ",", "value", ")", ...
Create a property descriptor around the :class:`_PropertyMixin` helpers.
[ "Create", "a", "property", "descriptor", "around", "the", ":", "class", ":", "_PropertyMixin", "helpers", "." ]
python
train
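The factory returns a `property` that closes over `fieldname`, so many JSON-backed attributes can share one getter/setter pair. A self-contained sketch with a toy `_PropertyMixin`-style class (the class and field names here are illustrative, not the real google-cloud ones):

class Bucket:
    def __init__(self):
        self._properties = {}
        self._changes = set()

    def _patch_property(self, name, value):
        self._properties[name] = value
        self._changes.add(name)      # remember what needs a PATCH request

def _scalar_property(fieldname):
    def _getter(self):
        return self._properties.get(fieldname)
    def _setter(self, value):
        self._patch_property(fieldname, value)
    return property(_getter, _setter)

Bucket.storage_class = _scalar_property('storageClass')

b = Bucket()
b.storage_class = 'NEARLINE'
print(b.storage_class, b._changes)   # NEARLINE {'storageClass'}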
wavycloud/pyboto3
pyboto3/machinelearning.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/machinelearning.py#L752-L857
def describe_batch_predictions(FilterVariable=None, EQ=None, GT=None, LT=None, GE=None, LE=None, NE=None, Prefix=None, SortOrder=None, NextToken=None, Limit=None): """ Returns a list of BatchPrediction operations that match the search criteria in the request. See also: AWS API Documentation :example: response = client.describe_batch_predictions( FilterVariable='CreatedAt'|'LastUpdatedAt'|'Status'|'Name'|'IAMUser'|'MLModelId'|'DataSourceId'|'DataURI', EQ='string', GT='string', LT='string', GE='string', LE='string', NE='string', Prefix='string', SortOrder='asc'|'dsc', NextToken='string', Limit=123 ) :type FilterVariable: string :param FilterVariable: Use one of the following variables to filter a list of BatchPrediction : CreatedAt - Sets the search criteria to the BatchPrediction creation date. Status - Sets the search criteria to the BatchPrediction status. Name - Sets the search criteria to the contents of the BatchPrediction **** Name . IAMUser - Sets the search criteria to the user account that invoked the BatchPrediction creation. MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction . DataSourceId - Sets the search criteria to the DataSource used in the BatchPrediction . DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction . The URL can identify either a file or an Amazon Simple Storage Solution (Amazon S3) bucket or directory. :type EQ: string :param EQ: The equal to operator. The BatchPrediction results will have FilterVariable values that exactly match the value specified with EQ . :type GT: string :param GT: The greater than operator. The BatchPrediction results will have FilterVariable values that are greater than the value specified with GT . :type LT: string :param LT: The less than operator. The BatchPrediction results will have FilterVariable values that are less than the value specified with LT . :type GE: string :param GE: The greater than or equal to operator. The BatchPrediction results will have FilterVariable values that are greater than or equal to the value specified with GE . :type LE: string :param LE: The less than or equal to operator. The BatchPrediction results will have FilterVariable values that are less than or equal to the value specified with LE . :type NE: string :param NE: The not equal to operator. The BatchPrediction results will have FilterVariable values not equal to the value specified with NE . :type Prefix: string :param Prefix: A string that is found at the beginning of a variable, such as Name or Id . For example, a Batch Prediction operation could have the Name 2014-09-09-HolidayGiftMailer . To search for this BatchPrediction , select Name for the FilterVariable and any of the following strings for the Prefix : 2014-09 2014-09-09 2014-09-09-Holiday :type SortOrder: string :param SortOrder: A two-value parameter that determines the sequence of the resulting list of MLModel s. asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable . :type NextToken: string :param NextToken: An ID of the page in the paginated results. :type Limit: integer :param Limit: The number of pages of information to include in the result. The range of acceptable values is 1 through 100 . The default value is 100 . 
:rtype: dict :return: { 'Results': [ { 'BatchPredictionId': 'string', 'MLModelId': 'string', 'BatchPredictionDataSourceId': 'string', 'InputDataLocationS3': 'string', 'CreatedByIamUser': 'string', 'CreatedAt': datetime(2015, 1, 1), 'LastUpdatedAt': datetime(2015, 1, 1), 'Name': 'string', 'Status': 'PENDING'|'INPROGRESS'|'FAILED'|'COMPLETED'|'DELETED', 'OutputUri': 'string', 'Message': 'string', 'ComputeTime': 123, 'FinishedAt': datetime(2015, 1, 1), 'StartedAt': datetime(2015, 1, 1), 'TotalRecordCount': 123, 'InvalidRecordCount': 123 }, ], 'NextToken': 'string' } :returns: PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations. INPROGRESS - The process is underway. FAILED - The request to perform a batch prediction did not run to completion. It is not usable. COMPLETED - The batch prediction process completed successfully. DELETED - The BatchPrediction is marked as deleted. It is not usable. """ pass
[ "def", "describe_batch_predictions", "(", "FilterVariable", "=", "None", ",", "EQ", "=", "None", ",", "GT", "=", "None", ",", "LT", "=", "None", ",", "GE", "=", "None", ",", "LE", "=", "None", ",", "NE", "=", "None", ",", "Prefix", "=", "None", ","...
Returns a list of BatchPrediction operations that match the search criteria in the request. See also: AWS API Documentation :example: response = client.describe_batch_predictions( FilterVariable='CreatedAt'|'LastUpdatedAt'|'Status'|'Name'|'IAMUser'|'MLModelId'|'DataSourceId'|'DataURI', EQ='string', GT='string', LT='string', GE='string', LE='string', NE='string', Prefix='string', SortOrder='asc'|'dsc', NextToken='string', Limit=123 ) :type FilterVariable: string :param FilterVariable: Use one of the following variables to filter a list of BatchPrediction : CreatedAt - Sets the search criteria to the BatchPrediction creation date. Status - Sets the search criteria to the BatchPrediction status. Name - Sets the search criteria to the contents of the BatchPrediction **** Name . IAMUser - Sets the search criteria to the user account that invoked the BatchPrediction creation. MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction . DataSourceId - Sets the search criteria to the DataSource used in the BatchPrediction . DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction . The URL can identify either a file or an Amazon Simple Storage Solution (Amazon S3) bucket or directory. :type EQ: string :param EQ: The equal to operator. The BatchPrediction results will have FilterVariable values that exactly match the value specified with EQ . :type GT: string :param GT: The greater than operator. The BatchPrediction results will have FilterVariable values that are greater than the value specified with GT . :type LT: string :param LT: The less than operator. The BatchPrediction results will have FilterVariable values that are less than the value specified with LT . :type GE: string :param GE: The greater than or equal to operator. The BatchPrediction results will have FilterVariable values that are greater than or equal to the value specified with GE . :type LE: string :param LE: The less than or equal to operator. The BatchPrediction results will have FilterVariable values that are less than or equal to the value specified with LE . :type NE: string :param NE: The not equal to operator. The BatchPrediction results will have FilterVariable values not equal to the value specified with NE . :type Prefix: string :param Prefix: A string that is found at the beginning of a variable, such as Name or Id . For example, a Batch Prediction operation could have the Name 2014-09-09-HolidayGiftMailer . To search for this BatchPrediction , select Name for the FilterVariable and any of the following strings for the Prefix : 2014-09 2014-09-09 2014-09-09-Holiday :type SortOrder: string :param SortOrder: A two-value parameter that determines the sequence of the resulting list of MLModel s. asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable . :type NextToken: string :param NextToken: An ID of the page in the paginated results. :type Limit: integer :param Limit: The number of pages of information to include in the result. The range of acceptable values is 1 through 100 . The default value is 100 . 
:rtype: dict :return: { 'Results': [ { 'BatchPredictionId': 'string', 'MLModelId': 'string', 'BatchPredictionDataSourceId': 'string', 'InputDataLocationS3': 'string', 'CreatedByIamUser': 'string', 'CreatedAt': datetime(2015, 1, 1), 'LastUpdatedAt': datetime(2015, 1, 1), 'Name': 'string', 'Status': 'PENDING'|'INPROGRESS'|'FAILED'|'COMPLETED'|'DELETED', 'OutputUri': 'string', 'Message': 'string', 'ComputeTime': 123, 'FinishedAt': datetime(2015, 1, 1), 'StartedAt': datetime(2015, 1, 1), 'TotalRecordCount': 123, 'InvalidRecordCount': 123 }, ], 'NextToken': 'string' } :returns: PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations. INPROGRESS - The process is underway. FAILED - The request to perform a batch prediction did not run to completion. It is not usable. COMPLETED - The batch prediction process completed successfully. DELETED - The BatchPrediction is marked as deleted. It is not usable.
[ "Returns", "a", "list", "of", "BatchPrediction", "operations", "that", "match", "the", "search", "criteria", "in", "the", "request", ".", "See", "also", ":", "AWS", "API", "Documentation", ":", "example", ":", "response", "=", "client", ".", "describe_batch_pr...
python
train
JasonKessler/scattertext
scattertext/DeployedClassifier.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/DeployedClassifier.py#L80-L88
def build(self):
        '''Builds Deployed Classifier
        '''
        if self._clf is None:
            raise NeedToTrainExceptionBeforeDeployingException()
        return DeployedClassifier(self._category,
                                  self._term_doc_matrix._category_idx_store,
                                  self._term_doc_matrix._term_idx_store,
                                  self._term_doc_matrix_factory)
[ "def", "build", "(", "self", ")", ":", "if", "self", ".", "_clf", "is", "None", ":", "raise", "NeedToTrainExceptionBeforeDeployingException", "(", ")", "return", "DeployedClassifier", "(", "self", ".", "_category", ",", "self", ".", "_term_doc_matrix", ".", "_...
Builds Deployed Classifier
[ "Builds", "Depoyed", "Classifier" ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/layout/containers.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1004-L1035
def _merge_dimensions(dimension, preferred=None, dont_extend=False): """ Take the LayoutDimension from this `Window` class and the received preferred size from the `UIControl` and return a `LayoutDimension` to report to the parent container. """ dimension = dimension or LayoutDimension() # When a preferred dimension was explicitly given to the Window, # ignore the UIControl. if dimension.preferred_specified: preferred = dimension.preferred # When a 'preferred' dimension is given by the UIControl, make sure # that it stays within the bounds of the Window. if preferred is not None: if dimension.max: preferred = min(preferred, dimension.max) if dimension.min: preferred = max(preferred, dimension.min) # When a `dont_extend` flag has been given, use the preferred dimension # also as the max dimension. if dont_extend and preferred is not None: max_ = min(dimension.max, preferred) else: max_ = dimension.max return LayoutDimension( min=dimension.min, max=max_, preferred=preferred, weight=dimension.weight)
[ "def", "_merge_dimensions", "(", "dimension", ",", "preferred", "=", "None", ",", "dont_extend", "=", "False", ")", ":", "dimension", "=", "dimension", "or", "LayoutDimension", "(", ")", "# When a preferred dimension was explicitly given to the Window,", "# ignore the UIC...
Take the LayoutDimension from this `Window` class and the received preferred size from the `UIControl` and return a `LayoutDimension` to report to the parent container.
[ "Take", "the", "LayoutDimension", "from", "this", "Window", "class", "and", "the", "received", "preferred", "size", "from", "the", "UIControl", "and", "return", "a", "LayoutDimension", "to", "report", "to", "the", "parent", "container", "." ]
python
train
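The merge logic above is just interval arithmetic: clamp the control's preferred size into the window's `[min, max]`, and with `dont_extend` also pull the max down to the preferred value. A plain-Python sketch without the prompt_toolkit types:

def merge(lo, hi, preferred=None, dont_extend=False):
    if preferred is not None:
        preferred = max(lo, min(preferred, hi))   # keep within window bounds
    if dont_extend and preferred is not None:
        hi = min(hi, preferred)                   # preferred also caps the max
    return lo, hi, preferred

print(merge(10, 80, preferred=120))                     # (10, 80, 80)
print(merge(10, 80, preferred=40, dont_extend=True))    # (10, 40, 40)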
NickMonzillo/SmartCloud
SmartCloud/__init__.py
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/__init__.py#L87-L107
def text_cloud(self,text,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000): '''Creates a word cloud using plain text.''' worddict = assign_fonts(tuplecount(text),max_text_size,min_text_size,self.exclude_words) sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x]))) for word in sorted_worddict: self.render_word(word,worddict[word],(randint(0,255),randint(0,255),randint(0,255))) if self.width < self.word_size[0]: #If the word is bigger than the surface, expand the surface. self.expand(self.word_size[0]-self.width,0) elif self.height < self.word_size[1]: self.expand(0,self.word_size[1]-self.height) position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])] loopcount = 0 while self.collides(position,self.word_size): if loopcount > max_count: #If it can't find a position for the word, expand the cloud. self.expand(expand_width,expand_height) loopcount = 0 position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])] loopcount += 1 self.plot_word(position)
[ "def", "text_cloud", "(", "self", ",", "text", ",", "max_text_size", "=", "72", ",", "min_text_size", "=", "12", ",", "expand_width", "=", "50", ",", "expand_height", "=", "50", ",", "max_count", "=", "100000", ")", ":", "worddict", "=", "assign_fonts", ...
Creates a word cloud using plain text.
[ "Creates", "a", "word", "cloud", "using", "plain", "text", "." ]
python
train
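`text_cloud` resolves crowding with a classic rejection loop: try random positions, and when too many attempts collide, enlarge the canvas and retry. The skeleton of that loop, with the collision test abstracted into a callback (names and the 50-pixel growth step mirror the record, but this is a sketch, not the library's API):

import random

def place(word_w, word_h, canvas_w, canvas_h, collides, max_tries=1000):
    """Random placement with retry; grow the canvas when space runs out."""
    while True:
        for _ in range(max_tries):
            pos = (random.randint(0, canvas_w - word_w),
                   random.randint(0, canvas_h - word_h))
            if not collides(pos):
                return pos, (canvas_w, canvas_h)
        canvas_w += 50          # mirrors the expand_width/expand_height step
        canvas_h += 50

pos, canvas = place(40, 12, 200, 100, collides=lambda p: False)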
jobovy/galpy
galpy/orbit/OrbitTop.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L495-L516
def dec(self,*args,**kwargs): """ NAME: dec PURPOSE: return the declination INPUT: t - (optional) time at which to get dec obs=[X,Y,Z] - (optional) position of observer (in kpc) (default=Object-wide default) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= distance in kpc corresponding to R=1. (default=Object-wide default) OUTPUT: dec(t) HISTORY: 2011-02-23 - Written - Bovy (NYU) """ _check_roSet(self,kwargs,'dec') radec= self._radec(*args,**kwargs) return radec[:,1]
[ "def", "dec", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_check_roSet", "(", "self", ",", "kwargs", ",", "'dec'", ")", "radec", "=", "self", ".", "_radec", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "radec",...
NAME:

   dec

PURPOSE:

   return the declination

INPUT:

   t - (optional) time at which to get dec

   obs=[X,Y,Z] - (optional) position of observer (in kpc)
      (default=Object-wide default)
      OR Orbit object that corresponds to the orbit of the observer
      Y is ignored and always assumed to be zero

   ro= distance in kpc corresponding to R=1. (default=Object-wide default)

OUTPUT:

   dec(t)

HISTORY:

   2011-02-23 - Written - Bovy (NYU)
[ "NAME", ":", "dec", "PURPOSE", ":", "return", "the", "declination", "INPUT", ":", "t", "-", "(", "optional", ")", "time", "at", "which", "to", "get", "dec", "obs", "=", "[", "X", "Y", "Z", "]", "-", "(", "optional", ")", "position", "of", "observer...
python
train
vtkiorg/vtki
vtki/common.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L864-L876
def overwrite(self, mesh): """ Overwrites this mesh inplace with the new mesh's geometries and data Parameters ---------- mesh : vtk.vtkDataSet The overwriting mesh. """ self.DeepCopy(mesh) if is_vtki_obj(mesh): self.copy_meta_from(mesh)
[ "def", "overwrite", "(", "self", ",", "mesh", ")", ":", "self", ".", "DeepCopy", "(", "mesh", ")", "if", "is_vtki_obj", "(", "mesh", ")", ":", "self", ".", "copy_meta_from", "(", "mesh", ")" ]
Overwrites this mesh inplace with the new mesh's geometries and data

Parameters
----------
mesh : vtk.vtkDataSet
    The overwriting mesh.
[ "Overwrites", "this", "mesh", "inplace", "with", "the", "new", "mesh", "s", "geometries", "and", "data" ]
python
train
Unity-Technologies/ml-agents
ml-agents/mlagents/trainers/ppo/models.py
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/ppo/models.py#L56-L114
def create_curiosity_encoders(self):
        """
        Creates state encoders for current and future observations.
        Used for implementation of Curiosity-driven Exploration by Self-supervised Prediction
        See https://arxiv.org/abs/1705.05363 for more details.
        :return: current and future state encoder tensors.
        """
        encoded_state_list = []
        encoded_next_state_list = []

        if self.vis_obs_size > 0:
            self.next_visual_in = []
            visual_encoders = []
            next_visual_encoders = []
            for i in range(self.vis_obs_size):
                # Create input ops for next (t+1) visual observations.
                next_visual_input = self.create_visual_input(self.brain.camera_resolutions[i],
                                                             name="next_visual_observation_" + str(i))
                self.next_visual_in.append(next_visual_input)

                # Create the encoder ops for current and next visual input. Note that these encoders are siamese.
                encoded_visual = self.create_visual_observation_encoder(self.visual_in[i],
                                                                        self.curiosity_enc_size,
                                                                        self.swish, 1,
                                                                        "stream_{}_visual_obs_encoder".format(i),
                                                                        False)
                encoded_next_visual = self.create_visual_observation_encoder(self.next_visual_in[i],
                                                                             self.curiosity_enc_size,
                                                                             self.swish, 1,
                                                                             "stream_{}_visual_obs_encoder".format(i),
                                                                             True)
                visual_encoders.append(encoded_visual)
                next_visual_encoders.append(encoded_next_visual)

            hidden_visual = tf.concat(visual_encoders, axis=1)
            hidden_next_visual = tf.concat(next_visual_encoders, axis=1)
            encoded_state_list.append(hidden_visual)
            encoded_next_state_list.append(hidden_next_visual)

        if self.vec_obs_size > 0:
            # Create the encoder ops for current and next vector input. Note that these encoders are siamese.
            # Create input op for next (t+1) vector observation.
            self.next_vector_in = tf.placeholder(shape=[None, self.vec_obs_size], dtype=tf.float32,
                                                 name='next_vector_observation')

            encoded_vector_obs = self.create_vector_observation_encoder(self.vector_in,
                                                                        self.curiosity_enc_size,
                                                                        self.swish, 2,
                                                                        "vector_obs_encoder",
                                                                        False)
            encoded_next_vector_obs = self.create_vector_observation_encoder(self.next_vector_in,
                                                                             self.curiosity_enc_size,
                                                                             self.swish, 2,
                                                                             "vector_obs_encoder",
                                                                             True)
            encoded_state_list.append(encoded_vector_obs)
            encoded_next_state_list.append(encoded_next_vector_obs)

        encoded_state = tf.concat(encoded_state_list, axis=1)
        encoded_next_state = tf.concat(encoded_next_state_list, axis=1)
        return encoded_state, encoded_next_state
[ "def", "create_curiosity_encoders", "(", "self", ")", ":", "encoded_state_list", "=", "[", "]", "encoded_next_state_list", "=", "[", "]", "if", "self", ".", "vis_obs_size", ">", "0", ":", "self", ".", "next_visual_in", "=", "[", "]", "visual_encoders", "=", ...
Creates state encoders for current and future observations.
Used for implementation of Curiosity-driven Exploration by Self-supervised Prediction
See https://arxiv.org/abs/1705.05363 for more details.
:return: current and future state encoder tensors.
[ "Creates", "state", "encoders", "for", "current", "and", "future", "observations", ".", "Used", "for", "implementation", "of", "Curiosity", "-", "driven", "Exploration", "by", "Self", "-", "supervised", "Prediction", "See", "https", ":", "//", "arxiv", ".", "...
python
train
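The "siamese" remark in the snippet means the current and next observations pass through encoders with shared weights (in TF1, the same variable scope with the final boolean argument presumably acting as the reuse flag). A framework-free numpy sketch of the shared-encoder idea:

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(8, 4))        # one weight matrix, shared by both branches

def encode(obs):
    return np.tanh(obs @ W)        # identical parameters for every call

obs, next_obs = rng.normal(size=(5, 8)), rng.normal(size=(5, 8))
phi_t, phi_t1 = encode(obs), encode(next_obs)   # phi(s_t), phi(s_{t+1})
print(phi_t.shape, phi_t1.shape)                # (5, 4) (5, 4)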
F5Networks/f5-common-python
f5/multi_device/trust_domain.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/multi_device/trust_domain.py#L208-L224
def _delete_iapp(self, iapp_name, deploying_device): '''Delete an iapp service and template on the root device. :param iapp_name: str -- name of iapp :param deploying_device: ManagementRoot object -- device where the iapp will be deleted ''' iapp = deploying_device.tm.sys.application iapp_serv = iapp.services.service.load( name=iapp_name, partition=self.partition ) iapp_serv.delete() iapp_tmpl = iapp.templates.template.load( name=iapp_name, partition=self.partition ) iapp_tmpl.delete()
[ "def", "_delete_iapp", "(", "self", ",", "iapp_name", ",", "deploying_device", ")", ":", "iapp", "=", "deploying_device", ".", "tm", ".", "sys", ".", "application", "iapp_serv", "=", "iapp", ".", "services", ".", "service", ".", "load", "(", "name", "=", ...
Delete an iapp service and template on the root device.

:param iapp_name: str -- name of iapp
:param deploying_device: ManagementRoot object -- device where the iapp
    will be deleted
[ "Delete", "an", "iapp", "service", "and", "template", "on", "the", "root", "device", "." ]
python
train
ewiger/mlab
src/mlab/awmstools.py
https://github.com/ewiger/mlab/blob/72a98adf6499f548848ad44c604f74d68f07fe4f/src/mlab/awmstools.py#L829-L852
def splitAt(iterable, indices):
    r"""Yield chunks of `iterable`, split at the points in `indices`:

    >>> [l for l in splitAt(range(10), [2,5])]
    [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]

    splits past the length of `iterable` are ignored:

    >>> [l for l in splitAt(range(10), [2,5,10])]
    [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]
    """
    iterable = iter(iterable)
    now = 0
    for to in indices:
        try:
            res = []
            for i in range(now, to):
                res.append(next(iterable))   # Python 3: next(it), not it.next()
        except StopIteration:
            yield res; return
        yield res
        now = to
    res = list(iterable)
    if res:
        yield res
[ "def", "splitAt", "(", "iterable", ",", "indices", ")", ":", "iterable", "=", "iter", "(", "iterable", ")", "now", "=", "0", "for", "to", "in", "indices", ":", "try", ":", "res", "=", "[", "]", "for", "i", "in", "range", "(", "now", ",", "to", ...
r"""Yield chunks of `iterable`, split at the points in `indices`: >>> [l for l in splitAt(range(10), [2,5])] [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]] splits past the length of `iterable` are ignored: >>> [l for l in splitAt(range(10), [2,5,10])] [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]
[ "r", "Yield", "chunks", "of", "iterable", "split", "at", "the", "points", "in", "indices", ":" ]
python
train
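An equivalent formulation using `itertools.islice` avoids the manual `StopIteration` handling entirely; a sketch reproducing the documented behaviour:

from itertools import islice

def split_at(iterable, indices):
    it, prev = iter(iterable), 0
    for idx in indices:
        yield list(islice(it, idx - prev))   # short reads just return less
        prev = idx
    rest = list(it)
    if rest:
        yield rest

print(list(split_at(range(10), [2, 5])))      # [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]
print(list(split_at(range(10), [2, 5, 10])))  # same: splits past the end are ignored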
gagneurlab/concise
concise/data/encode.py
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/encode.py#L34-L47
def get_pwm_list(motif_name_list, pseudocountProb=0.0001):
    """Get a list of ENCODE PWM's.

    # Arguments
        motif_name_list: List of id's from the `PWM_id` column in `get_metadata()` table
        pseudocountProb: Added pseudocount probabilities to the PWM

    # Returns
        List of `concise.utils.pwm.PWM` instances.
    """
    l = _load_motifs()
    l = {k.split()[0]: v for k, v in l.items()}
    pwm_list = [PWM(l[m] + pseudocountProb, name=m) for m in motif_name_list]
    return pwm_list
[ "def", "get_pwm_list", "(", "motif_name_list", ",", "pseudocountProb", "=", "0.0001", ")", ":", "l", "=", "_load_motifs", "(", ")", "l", "=", "{", "k", ".", "split", "(", ")", "[", "0", "]", ":", "v", "for", "k", ",", "v", "in", "l", ".", "items"...
Get a list of ENCODE PWM's.

# Arguments
    motif_name_list: List of id's from the `PWM_id` column in `get_metadata()` table
    pseudocountProb: Added pseudocount probabilities to the PWM

# Returns
    List of `concise.utils.pwm.PWM` instances.
[ "Get", "a", "list", "of", "ENCODE", "PWM", "s", "." ]
python
train
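Pseudocounts keep zero-probability PWM cells from producing -inf log scores downstream. A minimal numpy illustration (the record's code just adds the constant and lets the `PWM` class handle the rest; renormalising afterwards, as below, is one common convention):

import numpy as np

pwm = np.array([[0.98, 0.02, 0.00, 0.00],    # zero cells -> log(0) without smoothing
                [0.25, 0.25, 0.25, 0.25]])   # per-position probs over A, C, G, T
smoothed = pwm + 1e-4                        # the pseudocountProb step
smoothed /= smoothed.sum(axis=1, keepdims=True)   # rows sum to 1 again
print(np.log2(smoothed / 0.25)[0])           # finite log-odds, no -inf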
ray-project/ray
python/ray/tune/automlboard/run.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/run.py#L18-L43
def run_board(args): """ Run main entry for AutoMLBoard. Args: args: args parsed from command line """ init_config(args) # backend service, should import after django settings initialized from backend.collector import CollectorService service = CollectorService( args.logdir, args.reload_interval, standalone=False, log_level=args.log_level) service.run() # frontend service logger.info("Try to start automlboard on port %s\n" % args.port) command = [ os.path.join(root_path, "manage.py"), "runserver", "0.0.0.0:%s" % args.port, "--noreload" ] execute_from_command_line(command)
[ "def", "run_board", "(", "args", ")", ":", "init_config", "(", "args", ")", "# backend service, should import after django settings initialized", "from", "backend", ".", "collector", "import", "CollectorService", "service", "=", "CollectorService", "(", "args", ".", "lo...
Run main entry for AutoMLBoard.

Args:
    args: args parsed from command line
[ "Run", "main", "entry", "for", "AutoMLBoard", "." ]
python
train
nugget/python-insteonplm
insteonplm/utils.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/utils.py#L28-L30
def byte_to_unitcode(bytecode): """Return an X10 unitcode value from a byte value.""" return list(UC_LOOKUP.keys())[list(UC_LOOKUP.values()).index(bytecode)]
[ "def", "byte_to_unitcode", "(", "bytecode", ")", ":", "return", "list", "(", "UC_LOOKUP", ".", "keys", "(", ")", ")", "[", "list", "(", "UC_LOOKUP", ".", "values", "(", ")", ")", ".", "index", "(", "bytecode", ")", "]" ]
Return an X10 unitcode value from a byte value.
[ "Return", "an", "X10", "unitcode", "value", "from", "a", "byte", "value", "." ]
python
train
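The `list(keys())[list(values()).index(...)]` idiom above does a linear scan on every call. When the mapping is one-to-one, inverting it once gives O(1) lookups; a sketch with a hypothetical subset of the X10 code table (byte values made up for illustration):

UC_LOOKUP = {1: 0x06, 2: 0x0E, 3: 0x02}            # unitcode -> byte
BYTE_TO_UC = {v: k for k, v in UC_LOOKUP.items()}  # inverted once, reused

def byte_to_unitcode(bytecode):
    return BYTE_TO_UC[bytecode]

print(byte_to_unitcode(0x0E))   # 2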
tanghaibao/jcvi
jcvi/algorithms/lis.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/lis.py#L78-L104
def longest_increasing_subsequence(xs): '''Return a longest increasing subsequence of xs. (Note that there may be more than one such subsequence.) >>> longest_increasing_subsequence(range(3)) [0, 1, 2] >>> longest_increasing_subsequence([3, 1, 2, 0]) [1, 2] ''' # Patience sort xs, stacking (x, prev_ix) pairs on the piles. # Prev_ix indexes the element at the top of the previous pile, # which has a lower x value than the current x value. piles = [[]] # Create a dummy pile 0 for x, p in patience_sort(xs): if p + 1 == len(piles): piles.append([]) # backlink to the top of the previous pile piles[p + 1].append((x, len(piles[p]) - 1)) # Backtrack to find a longest increasing subsequence npiles = len(piles) - 1 prev = 0 lis = list() for pile in range(npiles, 0, -1): x, prev = piles[pile][prev] lis.append(x) lis.reverse() return lis
[ "def", "longest_increasing_subsequence", "(", "xs", ")", ":", "# Patience sort xs, stacking (x, prev_ix) pairs on the piles.", "# Prev_ix indexes the element at the top of the previous pile,", "# which has a lower x value than the current x value.", "piles", "=", "[", "[", "]", "]", "#...
Return a longest increasing subsequence of xs.

(Note that there may be more than one such subsequence.)

>>> longest_increasing_subsequence(range(3))
[0, 1, 2]
>>> longest_increasing_subsequence([3, 1, 2, 0])
[1, 2]
[ "Return", "a", "longest", "increasing", "subsequence", "of", "xs", "." ]
python
train
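The same patience-sorting idea fits in a few lines with `bisect`, keeping only each pile's top plus a backpointer per element; a self-contained variant of the record's algorithm:

from bisect import bisect_left

def lis(xs):
    """Longest strictly increasing subsequence in O(n log n)."""
    if not xs:
        return []
    tops, idx = [], []            # pile tops and index (into xs) of each top
    prev = [None] * len(xs)       # backpointer to the previous pile's top
    for i, x in enumerate(xs):
        p = bisect_left(tops, x)  # leftmost pile whose top is >= x
        if p == len(tops):
            tops.append(x); idx.append(i)
        else:
            tops[p] = x; idx[p] = i
        prev[i] = idx[p - 1] if p > 0 else None
    out, i = [], idx[-1]          # walk back from the last pile's top
    while i is not None:
        out.append(xs[i]); i = prev[i]
    return out[::-1]

print(lis([3, 1, 2, 0]), lis(list(range(3))))   # [1, 2] [0, 1, 2]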
pandas-dev/pandas
pandas/core/series.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1575-L1588
def _set_name(self, name, inplace=False): """ Set the Series name. Parameters ---------- name : str inplace : bool whether to modify `self` directly or return a copy """ inplace = validate_bool_kwarg(inplace, 'inplace') ser = self if inplace else self.copy() ser.name = name return ser
[ "def", "_set_name", "(", "self", ",", "name", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "ser", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "ser", ".", ...
Set the Series name.

Parameters
----------
name : str
inplace : bool
    whether to modify `self` directly or return a copy
[ "Set", "the", "Series", "name", "." ]
python
train
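`_set_name` is private, but the same copy-unless-inplace behaviour is reachable through the public API: `Series.rename` with a scalar sets the name.

import pandas as pd

s = pd.Series([1, 2, 3])
s2 = s.rename('counts')        # returns a renamed copy by default
print(s2.name, s.name)         # counts None
s.rename('counts', inplace=True)
print(s.name)                  # counts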
myint/autoflake
autoflake.py
https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L564-L574
def filter_useless_pass(source): """Yield code with useless "pass" lines removed.""" try: marked_lines = frozenset(useless_pass_line_numbers(source)) except (SyntaxError, tokenize.TokenError): marked_lines = frozenset() sio = io.StringIO(source) for line_number, line in enumerate(sio.readlines(), start=1): if line_number not in marked_lines: yield line
[ "def", "filter_useless_pass", "(", "source", ")", ":", "try", ":", "marked_lines", "=", "frozenset", "(", "useless_pass_line_numbers", "(", "source", ")", ")", "except", "(", "SyntaxError", ",", "tokenize", ".", "TokenError", ")", ":", "marked_lines", "=", "fr...
Yield code with useless "pass" lines removed.
[ "Yield", "code", "with", "useless", "pass", "lines", "removed", "." ]
python
test
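Stripping away the pyflakes-backed detector, the generator above is just "drop the lines whose 1-indexed numbers are in a set"; a standalone sketch of that filtering pattern:

import io

def drop_lines(source, unwanted):
    """Yield `source`'s lines, skipping 1-indexed line numbers in `unwanted`."""
    for n, line in enumerate(io.StringIO(source).readlines(), start=1):
        if n not in unwanted:
            yield line

src = "def f():\n    pass\n    return 1\n"
print(''.join(drop_lines(src, frozenset({2}))), end='')
# def f():
#     return 1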
theislab/scanpy
scanpy/plotting/_anndata.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_anndata.py#L1320-L1620
def dotplot(adata, var_names, groupby=None, use_raw=None, log=False, num_categories=7, expression_cutoff=0., mean_only_expressed=False, color_map='Reds', dot_max=None, dot_min=None, figsize=None, dendrogram=False, gene_symbols=None, var_group_positions=None, standard_scale=None, smallest_dot=0., var_group_labels=None, var_group_rotation=None, layer=None, show=None, save=None, **kwds): """\ Makes a *dot plot* of the expression values of `var_names`. For each var_name and each `groupby` category a dot is plotted. Each dot represents two values: mean expression within each category (visualized by color) and fraction of cells expressing the var_name in the category (visualized by the size of the dot). If groupby is not given, the dotplot assumes that all data belongs to a single category. **Note**: A gene is considered expressed if the expression value in the adata (or adata.raw) is above the specified threshold which is zero by default. An example of dotplot usage is to visualize, for multiple marker genes, the mean value and the percentage of cells expressing the gene accross multiple clusters. Parameters ---------- {common_plot_args} expression_cutoff : `float` (default: `0.`) Expression cutoff that is used for binarizing the gene expression and determining the fraction of cells expressing given genes. A gene is expressed only if the expression value is greater than this threshold. mean_only_expressed : `bool` (default: `False`) If True, gene expression is averaged only over the cells expressing the given genes. color_map : `str`, optional (default: `Reds`) String denoting matplotlib color map. dot_max : `float` optional (default: `None`) If none, the maximum dot size is set to the maximum fraction value found (e.g. 0.6). If given, the value should be a number between 0 and 1. All fractions larger than dot_max are clipped to this value. dot_min : `float` optional (default: `None`) If none, the minimum dot size is set to 0. If given, the value should be a number between 0 and 1. All fractions smaller than dot_min are clipped to this value. standard_scale : {{'var', 'group'}}, optional (default: None) Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group, subtract the minimum and divide each by its maximum. smallest_dot : `float` optional (default: 0.) If none, the smallest dot has size 0. All expression levels with `dot_min` are potted with `smallest_dot` dot size. {show_save_ax} **kwds : keyword arguments Are passed to `matplotlib.pyplot.scatter`. Returns ------- List of :class:`~matplotlib.axes.Axes` Examples ------- >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'], ... groupby='bulk_labels', dendrogram=True) """ if use_raw is None and adata.raw is not None: use_raw = True if isinstance(var_names, str): var_names = [var_names] categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories, layer=layer, gene_symbols=gene_symbols) # for if category defined by groupby (if any) compute for each var_name # 1. the fraction of cells in the category having a value > expression_cutoff # 2. the mean value over the category # 1. 
compute fraction of cells having value > expression_cutoff # transform obs_tidy into boolean matrix using the expression_cutoff obs_bool = obs_tidy > expression_cutoff # compute the sum per group which in the boolean matrix this is the number # of values > expression_cutoff, and divide the result by the total number of values # in the group (given by `count()`) fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count() # 2. compute mean value if mean_only_expressed: mean_obs = obs_tidy.mask(~obs_bool).groupby(level=0).mean().fillna(0) else: mean_obs = obs_tidy.groupby(level=0).mean() if standard_scale == 'group': mean_obs = mean_obs.sub(mean_obs.min(1), axis=0) mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0) elif standard_scale == 'var': mean_obs -= mean_obs.min(0) mean_obs = (mean_obs / mean_obs.max(0)).fillna(0) elif standard_scale is None: pass else: logg.warn('Unknown type for standard_scale, ignored') dendro_width = 0.8 if dendrogram else 0 colorbar_width = 0.2 colorbar_width_spacer = 0.5 size_legend_width = 0.25 if figsize is None: height = len(categories) * 0.3 + 1 # +1 for labels # if the number of categories is small (eg 1 or 2) use # a larger height height = max([1.5, height]) heatmap_width = len(var_names) * 0.35 width = heatmap_width + colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer else: width, height = figsize heatmap_width = width - (colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer) # colorbar ax width should not change with differences in the width of the image # otherwise can become too small if var_group_positions is not None and len(var_group_positions) > 0: # add some space in case 'brackets' want to be plotted on top of the image height_ratios = [0.5, 10] else: height_ratios = [0, 10.5] # define a layout of 2 rows x 5 columns # first row is for 'brackets' (if no brackets needed, the height of this row is zero) # second row is for main content. This second row # is divided into 4 axes: # first ax is for the main figure # second ax is for dendrogram (if present) # third ax is for the color bar legend # fourth ax is for an spacer that avoids the ticks # from the color bar to be hidden beneath the size lengend axis # fifth ax is to plot the size legend fig = pl.figure(figsize=(width, height)) axs = gridspec.GridSpec(nrows=2, ncols=5, wspace=0.02, hspace=0.04, width_ratios=[heatmap_width, dendro_width, colorbar_width, colorbar_width_spacer, size_legend_width], height_ratios=height_ratios) if len(categories) < 4: # when few categories are shown, the colorbar and size legend # need to be larger than the main plot, otherwise they would look # compressed. For this, the dotplot ax is split into two: axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 0], height_ratios=[len(categories) * 0.3, 1]) dot_ax = fig.add_subplot(axs2[0]) else: dot_ax = fig.add_subplot(axs[1, 0]) color_legend = fig.add_subplot(axs[1, 2]) if groupby is None or len(categories) <= 1: # dendrogram can only be computed between groupby categories dendrogram = False if dendrogram: dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram, var_names=var_names, var_group_labels=var_group_labels, var_group_positions=var_group_positions) var_group_labels = dendro_data['var_group_labels'] var_group_positions = dendro_data['var_group_positions'] # reorder matrix if dendro_data['var_names_idx_ordered'] is not None: # reorder columns (usually genes) if needed. 
This only happens when # var_group_positions and var_group_labels is set mean_obs = mean_obs.iloc[:,dendro_data['var_names_idx_ordered']] fraction_obs = fraction_obs.iloc[:, dendro_data['var_names_idx_ordered']] # reorder rows (categories) to match the dendrogram order mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :] fraction_obs = fraction_obs.iloc[dendro_data['categories_idx_ordered'], :] y_ticks = range(mean_obs.shape[0]) dendro_ax = fig.add_subplot(axs[1, 1], sharey=dot_ax) _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks) # to keep the size_legen of about the same height, irrespective # of the number of categories, the fourth ax is subdivided into two parts size_legend_height = min(1.3, height) # wspace is proportional to the width but a constant value is # needed such that the spacing is the same for thinner or wider images. wspace = 10.5 / width axs3 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 4], wspace=wspace, height_ratios=[size_legend_height / height, (height - size_legend_height) / height]) # make scatter plot in which # x = var_names # y = groupby category # size = fraction # color = mean expression y, x = np.indices(mean_obs.shape) y = y.flatten() x = x.flatten() frac = fraction_obs.values.flatten() mean_flat = mean_obs.values.flatten() cmap = pl.get_cmap(color_map) if dot_max is None: dot_max = np.ceil(max(frac) * 10) / 10 else: if dot_max < 0 or dot_max > 1: raise ValueError("`dot_max` value has to be between 0 and 1") if dot_min is None: dot_min = 0 else: if dot_min < 0 or dot_min > 1: raise ValueError("`dot_min` value has to be between 0 and 1") if dot_min != 0 or dot_max != 1: # clip frac between dot_min and dot_max frac = np.clip(frac, dot_min, dot_max) old_range = dot_max - dot_min # re-scale frac between 0 and 1 frac = ((frac - dot_min) / old_range) size = (frac * 10) ** 2 size += smallest_dot import matplotlib.colors normalize = matplotlib.colors.Normalize(vmin=kwds.get('vmin'), vmax=kwds.get('vmax')) colors = cmap(normalize(mean_flat)) dot_ax.scatter(x, y, color=colors, s=size, cmap=cmap, norm=None, edgecolor='none', **kwds) y_ticks = range(mean_obs.shape[0]) dot_ax.set_yticks(y_ticks) dot_ax.set_yticklabels([mean_obs.index[idx] for idx in y_ticks]) x_ticks = range(mean_obs.shape[1]) dot_ax.set_xticks(x_ticks) dot_ax.set_xticklabels([mean_obs.columns[idx] for idx in x_ticks], rotation=90) dot_ax.tick_params(axis='both', labelsize='small') dot_ax.grid(False) dot_ax.set_xlim(-0.5, len(var_names) + 0.5) dot_ax.set_ylabel(groupby) # to be consistent with the heatmap plot, is better to # invert the order of the y-axis, such that the first group is on # top ymin, ymax = dot_ax.get_ylim() dot_ax.set_ylim(ymax+0.5, ymin - 0.5) dot_ax.set_xlim(-1, len(var_names)) # plot group legends on top of dot_ax (if given) if var_group_positions is not None and len(var_group_positions) > 0: gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=dot_ax) _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions, group_labels=var_group_labels, rotation=var_group_rotation) # plot colorbar import matplotlib.colorbar matplotlib.colorbar.ColorbarBase(color_legend, cmap=cmap, norm=normalize) # for the dot size legend, use step between dot_max and dot_min # based on how different they are. diff = dot_max - dot_min if 0.3 < diff <= 0.6: step = 0.1 elif diff <= 0.3: step = 0.05 else: step = 0.2 # a descending range that is afterwards inverted is used # to guarantee that dot_max is in the legend. 
fracs_legends = np.arange(dot_max, dot_min, step * -1)[::-1] if dot_min != 0 or dot_max != 1: fracs_values = ((fracs_legends - dot_min) / old_range) else: fracs_values = fracs_legends size = (fracs_values * 10) ** 2 size += smallest_dot color = [cmap(normalize(value)) for value in np.repeat(max(mean_flat) * 0.7, len(size))] # plot size bar size_legend = fig.add_subplot(axs3[0]) size_legend.scatter(np.repeat(0, len(size)), range(len(size)), s=size, color=color) size_legend.set_yticks(range(len(size))) labels = ["{:.0%}".format(x) for x in fracs_legends] if dot_max < 1: labels[-1] = ">" + labels[-1] size_legend.set_yticklabels(labels) size_legend.set_yticklabels(["{:.0%}".format(x) for x in fracs_legends]) size_legend.tick_params(axis='y', left=False, labelleft=False, labelright=True) # remove x ticks and labels size_legend.tick_params(axis='x', bottom=False, labelbottom=False) # remove surrounding lines size_legend.spines['right'].set_visible(False) size_legend.spines['top'].set_visible(False) size_legend.spines['left'].set_visible(False) size_legend.spines['bottom'].set_visible(False) size_legend.grid(False) ymin, ymax = size_legend.get_ylim() size_legend.set_ylim(ymin, ymax+0.5) utils.savefig_or_show('dotplot', show=show, save=save) return axs
[ "def", "dotplot", "(", "adata", ",", "var_names", ",", "groupby", "=", "None", ",", "use_raw", "=", "None", ",", "log", "=", "False", ",", "num_categories", "=", "7", ",", "expression_cutoff", "=", "0.", ",", "mean_only_expressed", "=", "False", ",", "co...
\
Makes a *dot plot* of the expression values of `var_names`.

For each var_name and each `groupby` category a dot is plotted. Each dot
represents two values: mean expression within each category (visualized by
color) and fraction of cells expressing the var_name in the category
(visualized by the size of the dot). If groupby is not given, the dotplot
assumes that all data belongs to a single category.

**Note**: A gene is considered expressed if the expression value in the adata
(or adata.raw) is above the specified threshold which is zero by default.

An example of dotplot usage is to visualize, for multiple marker genes, the
mean value and the percentage of cells expressing the gene across multiple
clusters.

Parameters
----------
{common_plot_args}
expression_cutoff : `float` (default: `0.`)
    Expression cutoff that is used for binarizing the gene expression and
    determining the fraction of cells expressing given genes. A gene is
    expressed only if the expression value is greater than this threshold.
mean_only_expressed : `bool` (default: `False`)
    If True, gene expression is averaged only over the cells expressing the
    given genes.
color_map : `str`, optional (default: `Reds`)
    String denoting matplotlib color map.
dot_max : `float` optional (default: `None`)
    If none, the maximum dot size is set to the maximum fraction value found
    (e.g. 0.6). If given, the value should be a number between 0 and 1. All
    fractions larger than dot_max are clipped to this value.
dot_min : `float` optional (default: `None`)
    If none, the minimum dot size is set to 0. If given, the value should be
    a number between 0 and 1. All fractions smaller than dot_min are clipped
    to this value.
standard_scale : {{'var', 'group'}}, optional (default: None)
    Whether or not to standardize that dimension between 0 and 1, meaning for
    each variable or group, subtract the minimum and divide each by its
    maximum.
smallest_dot : `float` optional (default: 0.)
    If none, the smallest dot has size 0. All expression levels with
    `dot_min` are plotted with `smallest_dot` dot size.
{show_save_ax}
**kwds : keyword arguments
    Are passed to `matplotlib.pyplot.scatter`.

Returns
-------
List of :class:`~matplotlib.axes.Axes`

Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
...               groupby='bulk_labels', dendrogram=True)
[ "\\", "Makes", "a", "*", "dot", "plot", "*", "of", "the", "expression", "values", "of", "var_names", "." ]
python
train
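The legend code in this record maps fraction-of-cells values onto scatter dot areas. A minimal standalone sketch of that mapping, using made-up values for dot_min, dot_max, step and smallest_dot (in the real plot these come from the function arguments and the computed fraction matrix):

import numpy as np

dot_min, dot_max, smallest_dot, step = 0.1, 0.8, 0.0, 0.2  # illustrative values
old_range = dot_max - dot_min

fracs_legends = np.arange(dot_max, dot_min, step * -1)[::-1]
if dot_min != 0 or dot_max != 1:
    fracs_values = (fracs_legends - dot_min) / old_range
else:
    fracs_values = fracs_legends

# dot area grows quadratically with the rescaled fraction
size = (fracs_values * 10) ** 2 + smallest_dot
print(dict(zip(np.round(fracs_legends, 2), np.round(size, 1))))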
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/salt/saltxmi.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/salt/saltxmi.py#L264-L270
def print_token(self, token_node_index):
    """returns the string representation of a token."""
    err_msg = "The given node is not a token node."
    assert isinstance(self.nodes[token_node_index], TokenNode), err_msg
    onset = self.nodes[token_node_index].onset
    offset = self.nodes[token_node_index].offset
    return self.text[onset:offset]
[ "def", "print_token", "(", "self", ",", "token_node_index", ")", ":", "err_msg", "=", "\"The given node is not a token node.\"", "assert", "isinstance", "(", "self", ".", "nodes", "[", "token_node_index", "]", ",", "TokenNode", ")", ",", "err_msg", "onset", "=", ...
returns the string representation of a token.
[ "returns", "the", "string", "representation", "of", "a", "token", "." ]
python
train
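A self-contained sketch of the onset/offset slicing the method performs; the stand-in TokenNode below is hypothetical and not the real SaltXMI node class:

class TokenNode(object):  # hypothetical stand-in for the real token node class
    def __init__(self, onset, offset):
        self.onset = onset
        self.offset = offset

text = "Sie kam und ging."
token = TokenNode(onset=4, offset=7)   # spans the token "kam"
print(text[token.onset:token.offset])  # -> 'kam'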
adrn/gala
gala/dynamics/actionangle.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/actionangle.py#L237-L283
def check_angle_sampling(nvecs, angles):
    """
    Returns a list of the index of elements of n which do not have adequate
    toy angle coverage. The criterion is that we must have at least one sample
    in each Nyquist box when we project the toy angles along the vector n.

    Parameters
    ----------
    nvecs : array_like
        Array of integer vectors.
    angles : array_like
        Array of angles.

    Returns
    -------
    failed_nvecs : :class:`numpy.ndarray`
        Array of all integer vectors that failed checks. Has shape (N,3).
    failures : :class:`numpy.ndarray`
        Array of flags that designate whether this failed needing a longer
        integration window (0) or finer sampling (1).
    """
    failed_nvecs = []
    failures = []

    for i, vec in enumerate(nvecs):
        # N = np.linalg.norm(vec)
        # X = np.dot(angles,vec)
        X = (angles*vec[:, None]).sum(axis=0)
        diff = float(np.abs(X.max() - X.min()))

        if diff < (2.*np.pi):
            warnings.warn("Need a longer integration window for mode {0}"
                          .format(vec))
            failed_nvecs.append(vec.tolist())
            # P.append(2.*np.pi - diff)
            failures.append(0)

        elif (diff/len(X)) > np.pi:
            warnings.warn("Need a finer sampling for mode {0}"
                          .format(str(vec)))
            failed_nvecs.append(vec.tolist())
            # P.append(np.pi - diff/len(X))
            failures.append(1)

    return np.array(failed_nvecs), np.array(failures)
[ "def", "check_angle_sampling", "(", "nvecs", ",", "angles", ")", ":", "failed_nvecs", "=", "[", "]", "failures", "=", "[", "]", "for", "i", ",", "vec", "in", "enumerate", "(", "nvecs", ")", ":", "# N = np.linalg.norm(vec)", "# X = np.dot(angles,vec)", "X", "...
Returns a list of the index of elements of n which do not have adequate
toy angle coverage. The criterion is that we must have at least one sample
in each Nyquist box when we project the toy angles along the vector n.

Parameters
----------
nvecs : array_like
    Array of integer vectors.
angles : array_like
    Array of angles.

Returns
-------
failed_nvecs : :class:`numpy.ndarray`
    Array of all integer vectors that failed checks. Has shape (N,3).
failures : :class:`numpy.ndarray`
    Array of flags that designate whether this failed needing a longer
    integration window (0) or finer sampling (1).
[ "Returns", "a", "list", "of", "the", "index", "of", "elements", "of", "n", "which", "do", "not", "have", "adequate", "toy", "angle", "coverage", ".", "The", "criterion", "is", "that", "we", "must", "have", "at", "least", "one", "sample", "in", "each", ...
python
train
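A hedged usage sketch. The array shapes are an assumption read off the broadcasting in the loop (angles as a (3, n_times) array, each integer vector of length 3), and the import path simply mirrors the module path listed in this record:

import numpy as np
from gala.dynamics.actionangle import check_angle_sampling  # path as listed above

nvecs = np.array([[1, 0, 0], [0, 2, -1]])
t = np.linspace(0., 50., 512)
angles = np.vstack([0.7 * t, 1.3 * t, 2.1 * t])  # toy angles, shape (3, n_times)

failed_nvecs, failures = check_angle_sampling(nvecs, angles)
print(failed_nvecs, failures)  # empty arrays when every mode is well sampled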
jason-weirather/py-seq-tools
seqtools/format/sam/__init__.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/format/sam/__init__.py#L176-L184
def query_quality(self):
    """ Overrides align

    .. warning:: this returns the full query quality, not just the aligned portion

    """
    if not self.entries.qual: return None
    if self.entries.qual == '*': return None
    if self.check_flag(0x10): return self.entries.qual[::-1]
    return self.entries.qual
[ "def", "query_quality", "(", "self", ")", ":", "if", "not", "self", ".", "entries", ".", "qual", ":", "return", "None", "if", "self", ".", "entries", ".", "qual", "==", "'*'", ":", "return", "None", "if", "self", ".", "check_flag", "(", "0x10", ")", ...
Overrides align

.. warning:: this returns the full query quality, not just the aligned portion
[ "Overrides", "align" ]
python
train
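A standalone illustration of the flag handling; the real property lives on the SAM entry wrapper, so the helper below is hypothetical:

def oriented_quality(qual, flag):
    # mirrors the property: drop missing values, reverse on flag 0x10
    if not qual or qual == '*':
        return None
    return qual[::-1] if flag & 0x10 else qual

print(oriented_quality('IIIHHGGF', 0x10))  # 'FGGHHIII' - reverse-strand alignment
print(oriented_quality('*', 0x0))          # None - quality not stored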
wanji/bitmap
src/bitmap.py
https://github.com/wanji/bitmap/blob/beb750530045e4f7cf665675bfb28f82d6325007/src/bitmap.py#L140-L145
def fromhexstring(cls, hexstring):
    """
    Construct BitMap from hex string
    """
    bitstring = format(int(hexstring, 16), "0" + str(len(hexstring)/4) + "b")
    return cls.fromstring(bitstring)
[ "def", "fromhexstring", "(", "cls", ",", "hexstring", ")", ":", "bitstring", "=", "format", "(", "int", "(", "hexstring", ",", "16", ")", ",", "\"0\"", "+", "str", "(", "len", "(", "hexstring", ")", "/", "4", ")", "+", "\"b\"", ")", "return", "cls"...
Construct BitMap from hex string
[ "Construct", "BitMap", "from", "hex", "string" ]
python
train
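A standalone sketch of the hex-to-bitstring step at the heart of the classmethod (the zero-padding width is omitted here; the record computes it from len(hexstring), and that expression relies on Python 2 integer division):

hexstring = "ff00"
bitstring = format(int(hexstring, 16), "b")
print(bitstring)  # -> '1111111100000000', the string handed to cls.fromstring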
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L407-L425
def ip_hide_community_list_holder_community_list_extended_ip_action(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    hide_community_list_holder = ET.SubElement(ip, "hide-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    community_list = ET.SubElement(hide_community_list_holder, "community-list")
    extended = ET.SubElement(community_list, "extended")
    name_key = ET.SubElement(extended, "name")
    name_key.text = kwargs.pop('name')
    seq_keyword_key = ET.SubElement(extended, "seq-keyword")
    seq_keyword_key.text = kwargs.pop('seq_keyword')
    instance_key = ET.SubElement(extended, "instance")
    instance_key.text = kwargs.pop('instance')
    ip_action = ET.SubElement(extended, "ip-action")
    ip_action.text = kwargs.pop('ip_action')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "ip_hide_community_list_holder_community_list_extended_ip_action", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ip\"", ",", "xmlns...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
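A self-contained sketch of the payload the generated method assembles, rebuilt by hand with ElementTree; the namespaces are dropped for brevity and the leaf values are made up:

import xml.etree.ElementTree as ET

config = ET.Element("config")
ip = ET.SubElement(config, "ip")
holder = ET.SubElement(ip, "hide-community-list-holder")
extended = ET.SubElement(ET.SubElement(holder, "community-list"), "extended")
for tag, value in [("name", "CL_EXT_1"), ("seq-keyword", "seq"),
                   ("instance", "5"), ("ip-action", "permit")]:
    ET.SubElement(extended, tag).text = value

print(ET.tostring(config).decode())  # the <config> tree handed to the callback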
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8384-L8391
def read_lsm_timestamps(fh):
    """Read LSM time stamps from file and return as list."""
    size, count = struct.unpack('<ii', fh.read(8))
    if size != (8 + 8 * count):
        log.warning('read_lsm_timestamps: invalid LSM TimeStamps block')
        return []
    # return struct.unpack('<%dd' % count, fh.read(8*count))
    return fh.read_array('<f8', count=count)
[ "def", "read_lsm_timestamps", "(", "fh", ")", ":", "size", ",", "count", "=", "struct", ".", "unpack", "(", "'<ii'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "size", "!=", "(", "8", "+", "8", "*", "count", ")", ":", "log", ".", "warnin...
Read LSM time stamps from file and return as list.
[ "Read", "LSM", "time", "stamps", "from", "file", "and", "return", "as", "list", "." ]
python
train
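A self-contained illustration of the block layout the reader expects: two little-endian int32s (size, count) followed by count float64 time stamps. It builds the bytes with struct rather than going through tifffile's FileHandle:

import struct
import numpy as np

stamps = np.array([0.0, 1.5, 3.0])
count = len(stamps)
block = struct.pack('<ii', 8 + 8 * count, count) + stamps.astype('<f8').tobytes()

size, n = struct.unpack('<ii', block[:8])
assert size == 8 + 8 * n                      # the consistency check the reader performs
print(np.frombuffer(block[8:], dtype='<f8', count=n))  # -> [0.  1.5 3. ]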
dlecocq/nsq-py
nsq/client.py
https://github.com/dlecocq/nsq-py/blob/3ecacf6ab7719d38031179277113d875554a0c16/nsq/client.py#L128-L139
def connection_checker(self):
    '''Run periodic reconnection checks'''
    thread = ConnectionChecker(self)
    logger.info('Starting connection-checker thread')
    thread.start()
    try:
        yield thread
    finally:
        logger.info('Stopping connection-checker')
        thread.stop()
        logger.info('Joining connection-checker')
        thread.join()
[ "def", "connection_checker", "(", "self", ")", ":", "thread", "=", "ConnectionChecker", "(", "self", ")", "logger", ".", "info", "(", "'Starting connection-checker thread'", ")", "thread", ".", "start", "(", ")", "try", ":", "yield", "thread", "finally", ":", ...
Run periodic reconnection checks
[ "Run", "periodic", "reconnection", "checks" ]
python
train
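The yield plus try/finally indicates the method is used as a context manager (the decorator sits outside this record). A generic, self-contained illustration of that start/yield/join shape:

from contextlib import contextmanager
import threading

@contextmanager
def run_in_background(worker):
    # same shape as connection_checker: start the thread, yield it,
    # and always shut it down in the finally block
    worker.start()
    try:
        yield worker
    finally:
        worker.join()

with run_in_background(threading.Thread(target=lambda: None)) as t:
    pass  # client work would happen here while the checker runs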
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/module.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/module.py#L220-L237
def set_process(self, process = None):
    """
    Manually set the parent process. Use with care!

    @type process: L{Process}
    @param process: (Optional) Process object. Use C{None} for no process.
    """
    if process is None:
        self.__process = None
    else:
        global Process      # delayed import
        if Process is None:
            from winappdbg.process import Process
        if not isinstance(process, Process):
            msg = "Parent process must be a Process instance, "
            msg += "got %s instead" % type(process)
            raise TypeError(msg)
        self.__process = process
[ "def", "set_process", "(", "self", ",", "process", "=", "None", ")", ":", "if", "process", "is", "None", ":", "self", ".", "__process", "=", "None", "else", ":", "global", "Process", "# delayed import", "if", "Process", "is", "None", ":", "from", "winapp...
Manually set the parent process. Use with care!

@type process: L{Process}
@param process: (Optional) Process object. Use C{None} for no process.
[ "Manually", "set", "the", "parent", "process", ".", "Use", "with", "care!" ]
python
train
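A minimal illustration of the delayed-import guard, which defers binding the Process class until it is first needed; the namedtuple below is a hypothetical substitute so the sketch runs without winappdbg:

Process = None  # module-level placeholder, bound on first use

def set_process(process=None):
    global Process
    if process is None:
        return None
    if Process is None:
        # stand-in for the delayed `from winappdbg.process import Process`,
        # which avoids importing process.py at module load time
        from collections import namedtuple
        Process = namedtuple('Process', 'pid')
    if not isinstance(process, Process):
        raise TypeError("Parent process must be a Process instance, "
                        "got %s instead" % type(process))
    return process

print(set_process(None))   # clearing the parent process is always allowed
try:
    set_process("1234")    # anything that is not a Process raises
except TypeError as exc:
    print(exc)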
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/transaction.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/transaction.py#L208-L258
def batch_update(self, statements):
    """Perform a batch of DML statements via an ``ExecuteBatchDml`` request.

    :type statements:
        Sequence[Union[str, Tuple[str, Dict[str, Any], Dict[str, Union[dict, .types.Type]]]]]

    :param statements:
        List of DML statements, with optional params / param types.
        If passed, 'params' is a dict mapping names to the values
        for parameter replacement. Keys must match the names used in the
        corresponding DML statement. If 'params' is passed, 'param_types'
        must also be passed, as a dict mapping names to the type of
        value passed in 'params'.

    :rtype: Tuple(status, Sequence[int])
    :returns:
        Status code, plus counts of rows affected by each completed DML
        statement. Note that if the status code is not ``OK``, the
        statement triggering the error will not have an entry in the
        list, nor will any statements following that one.
    """
    parsed = []
    for statement in statements:
        if isinstance(statement, str):
            parsed.append({"sql": statement})
        else:
            dml, params, param_types = statement
            params_pb = self._make_params_pb(params, param_types)
            parsed.append(
                {"sql": dml, "params": params_pb, "param_types": param_types}
            )

    database = self._session._database
    metadata = _metadata_with_prefix(database.name)
    transaction = self._make_txn_selector()
    api = database.spanner_api
    response = api.execute_batch_dml(
        session=self._session.name,
        transaction=transaction,
        statements=parsed,
        seqno=self._execute_sql_count,
        metadata=metadata,
    )
    self._execute_sql_count += 1

    row_counts = [
        result_set.stats.row_count_exact for result_set in response.result_sets
    ]
    return response.status, row_counts
[ "def", "batch_update", "(", "self", ",", "statements", ")", ":", "parsed", "=", "[", "]", "for", "statement", "in", "statements", ":", "if", "isinstance", "(", "statement", ",", "str", ")", ":", "parsed", ".", "append", "(", "{", "\"sql\"", ":", "state...
Perform a batch of DML statements via an ``ExecuteBatchDml`` request.

:type statements:
    Sequence[Union[str, Tuple[str, Dict[str, Any], Dict[str, Union[dict, .types.Type]]]]]

:param statements:
    List of DML statements, with optional params / param types.
    If passed, 'params' is a dict mapping names to the values
    for parameter replacement. Keys must match the names used in the
    corresponding DML statement. If 'params' is passed, 'param_types'
    must also be passed, as a dict mapping names to the type of
    value passed in 'params'.

:rtype: Tuple(status, Sequence[int])
:returns:
    Status code, plus counts of rows affected by each completed DML
    statement. Note that if the status code is not ``OK``, the
    statement triggering the error will not have an entry in the
    list, nor will any statements following that one.
[ "Perform", "a", "batch", "of", "DML", "statements", "via", "an", "ExecuteBatchDml", "request", "." ]
python
train
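A hedged usage sketch of batching DML inside a read-write transaction; it assumes a `database` handle from google.cloud.spanner is already configured, and the table and columns are made up:

from google.cloud.spanner_v1 import param_types

def update_contacts(transaction):
    status, row_counts = transaction.batch_update([
        "UPDATE contacts SET email = 'a@example.com' WHERE contact_id = 1",
        (
            "INSERT INTO contacts (contact_id, email) VALUES (@id, @email)",
            {"id": 2, "email": "b@example.com"},
            {"id": param_types.INT64, "email": param_types.STRING},
        ),
    ])
    print(status.code, row_counts)  # rows affected per completed statement

database.run_in_transaction(update_contacts)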
carta/ldap_tools
src/ldap_tools/user.py
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/user.py#L184-L192
def create(config, name, group, type):
    """Create an LDAP user."""
    if type not in ('user', 'service'):
        raise click.BadOptionUsage("--type must be 'user' or 'service'")

    client = Client()
    client.prepare_connection()
    user_api = API(client)
    group_api = GroupApi(client)
    user_api.create(name[0], name[1], group, type, group_api)
[ "def", "create", "(", "config", ",", "name", ",", "group", ",", "type", ")", ":", "if", "type", "not", "in", "(", "'user'", ",", "'service'", ")", ":", "raise", "click", ".", "BadOptionUsage", "(", "\"--type must be 'user' or 'service'\"", ")", "client", "...
Create an LDAP user.
[ "Create", "an", "LDAP", "user", "." ]
python
train
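A hedged sketch of the underlying calls the click command wires together; the import locations are assumptions inferred from the names used in the body, not confirmed by this record:

from ldap_tools.client import Client          # assumed module layout
from ldap_tools.user import API               # user API, as in this record
from ldap_tools.group import API as GroupApi  # assumed source of GroupApi

client = Client()
client.prepare_connection()
user_api = API(client)
group_api = GroupApi(client)
user_api.create('Jane', 'Doe', 'engineering', 'user', group_api)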