repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
elifesciences/proofreader-python
proofreader/license_checker/__init__.py
run_license_checker
def run_license_checker(config_path): # type: (str) -> None """Generate table of installed packages and check for license warnings based off user defined restricted license values. :param config_path: str :return: """ whitelist_licenses = _get_whitelist_licenses(config_path) table = PrintTable(ROW_HEADERS) warnings = [] for pkg in _get_packages(): allowed = pkg.license in whitelist_licenses table.add_row((pkg.name, pkg.version, pkg.license, str(allowed))) if not allowed: warnings.append(pkg) print(table) print('{} RESTRICTED LICENSES DETECTED'.format(len(warnings)))
python
def run_license_checker(config_path): # type: (str) -> None """Generate table of installed packages and check for license warnings based off user defined restricted license values. :param config_path: str :return: """ whitelist_licenses = _get_whitelist_licenses(config_path) table = PrintTable(ROW_HEADERS) warnings = [] for pkg in _get_packages(): allowed = pkg.license in whitelist_licenses table.add_row((pkg.name, pkg.version, pkg.license, str(allowed))) if not allowed: warnings.append(pkg) print(table) print('{} RESTRICTED LICENSES DETECTED'.format(len(warnings)))
[ "def", "run_license_checker", "(", "config_path", ")", ":", "# type: (str) -> None", "whitelist_licenses", "=", "_get_whitelist_licenses", "(", "config_path", ")", "table", "=", "PrintTable", "(", "ROW_HEADERS", ")", "warnings", "=", "[", "]", "for", "pkg", "in", ...
Generate table of installed packages and check for license warnings based off user defined restricted license values. :param config_path: str :return:
[ "Generate", "table", "of", "installed", "packages", "and", "check", "for", "license", "warnings", "based", "off", "user", "defined", "restricted", "license", "values", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/license_checker/__init__.py#L42-L64
AutomatedTester/Bugsy
bugsy/search.py
Search.include_fields
def include_fields(self, *args): r""" Include fields is the fields that you want to be returned when searching. These are in addition to the fields that are always included below. :param args: items passed in will be turned into a list :returns: :class:`Search` >>> bugzilla.search_for.include_fields("flags") The following fields are always included in search: 'version', 'id', 'summary', 'status', 'op_sys', 'resolution', 'product', 'component', 'platform' """ for arg in args: self._includefields.append(arg) return self
python
def include_fields(self, *args): r""" Include fields is the fields that you want to be returned when searching. These are in addition to the fields that are always included below. :param args: items passed in will be turned into a list :returns: :class:`Search` >>> bugzilla.search_for.include_fields("flags") The following fields are always included in search: 'version', 'id', 'summary', 'status', 'op_sys', 'resolution', 'product', 'component', 'platform' """ for arg in args: self._includefields.append(arg) return self
[ "def", "include_fields", "(", "self", ",", "*", "args", ")", ":", "for", "arg", "in", "args", ":", "self", ".", "_includefields", ".", "append", "(", "arg", ")", "return", "self" ]
r""" Include fields is the fields that you want to be returned when searching. These are in addition to the fields that are always included below. :param args: items passed in will be turned into a list :returns: :class:`Search` >>> bugzilla.search_for.include_fields("flags") The following fields are always included in search: 'version', 'id', 'summary', 'status', 'op_sys', 'resolution', 'product', 'component', 'platform'
[ "r", "Include", "fields", "is", "the", "fields", "that", "you", "want", "to", "be", "returned", "when", "searching", ".", "These", "are", "in", "addition", "to", "the", "fields", "that", "are", "always", "included", "below", "." ]
train
https://github.com/AutomatedTester/Bugsy/blob/ac14df84e744a148e81aeaae20a144bc5f3cebf1/bugsy/search.py#L29-L46
AutomatedTester/Bugsy
bugsy/search.py
Search.component
def component(self, *components): r""" When search() is called it will limit results to items in a component. :param component: items passed in will be turned into a list :returns: :class:`Search` """ for component in components: self._component.append(component) return self
python
def component(self, *components): r""" When search() is called it will limit results to items in a component. :param component: items passed in will be turned into a list :returns: :class:`Search` """ for component in components: self._component.append(component) return self
[ "def", "component", "(", "self", ",", "*", "components", ")", ":", "for", "component", "in", "components", ":", "self", ".", "_component", ".", "append", "(", "component", ")", "return", "self" ]
r""" When search() is called it will limit results to items in a component. :param component: items passed in will be turned into a list :returns: :class:`Search`
[ "r", "When", "search", "()", "is", "called", "it", "will", "limit", "results", "to", "items", "in", "a", "component", "." ]
train
https://github.com/AutomatedTester/Bugsy/blob/ac14df84e744a148e81aeaae20a144bc5f3cebf1/bugsy/search.py#L48-L57
AutomatedTester/Bugsy
bugsy/search.py
Search.product
def product(self, *products): r""" When search is called, it will limit the results to items in a Product. :param product: items passed in will be turned into a list :returns: :class:`Search` """ for product in products: self._product.append(product) return self
python
def product(self, *products): r""" When search is called, it will limit the results to items in a Product. :param product: items passed in will be turned into a list :returns: :class:`Search` """ for product in products: self._product.append(product) return self
[ "def", "product", "(", "self", ",", "*", "products", ")", ":", "for", "product", "in", "products", ":", "self", ".", "_product", ".", "append", "(", "product", ")", "return", "self" ]
r""" When search is called, it will limit the results to items in a Product. :param product: items passed in will be turned into a list :returns: :class:`Search`
[ "r", "When", "search", "is", "called", "it", "will", "limit", "the", "results", "to", "items", "in", "a", "Product", "." ]
train
https://github.com/AutomatedTester/Bugsy/blob/ac14df84e744a148e81aeaae20a144bc5f3cebf1/bugsy/search.py#L59-L68
AutomatedTester/Bugsy
bugsy/search.py
Search.timeframe
def timeframe(self, start, end): r""" When you want to search bugs for a certain time frame. :param start: :param end: :returns: :class:`Search` """ if start: self._time_frame['chfieldfrom'] = start if end: self._time_frame['chfieldto'] = end return self
python
def timeframe(self, start, end): r""" When you want to search bugs for a certain time frame. :param start: :param end: :returns: :class:`Search` """ if start: self._time_frame['chfieldfrom'] = start if end: self._time_frame['chfieldto'] = end return self
[ "def", "timeframe", "(", "self", ",", "start", ",", "end", ")", ":", "if", "start", ":", "self", ".", "_time_frame", "[", "'chfieldfrom'", "]", "=", "start", "if", "end", ":", "self", ".", "_time_frame", "[", "'chfieldto'", "]", "=", "end", "return", ...
r""" When you want to search bugs for a certain time frame. :param start: :param end: :returns: :class:`Search`
[ "r", "When", "you", "want", "to", "search", "bugs", "for", "a", "certain", "time", "frame", "." ]
train
https://github.com/AutomatedTester/Bugsy/blob/ac14df84e744a148e81aeaae20a144bc5f3cebf1/bugsy/search.py#L136-L148
AutomatedTester/Bugsy
bugsy/search.py
Search.change_history_fields
def change_history_fields(self, fields, value=None): r""" """ if not isinstance(fields, list): raise Exception('fields should be a list') self._change_history['fields'] = fields if value: self._change_history['value'] = value return self
python
def change_history_fields(self, fields, value=None): r""" """ if not isinstance(fields, list): raise Exception('fields should be a list') self._change_history['fields'] = fields if value: self._change_history['value'] = value return self
[ "def", "change_history_fields", "(", "self", ",", "fields", ",", "value", "=", "None", ")", ":", "if", "not", "isinstance", "(", "fields", ",", "list", ")", ":", "raise", "Exception", "(", "'fields should be a list'", ")", "self", ".", "_change_history", "["...
r"""
[ "r" ]
train
https://github.com/AutomatedTester/Bugsy/blob/ac14df84e744a148e81aeaae20a144bc5f3cebf1/bugsy/search.py#L150-L161
AutomatedTester/Bugsy
bugsy/search.py
Search.search
def search(self): r""" Call the Bugzilla endpoint that will do the search. It will take the information used in other methods on the Search object and build up the query string. If no bugs are found then an empty list is returned. >>> bugs = bugzilla.search_for\ ... .keywords("checkin-needed")\ ... .include_fields("flags")\ ... .search() """ params = {} params.update(self._time_frame.items()) if self._includefields: params['include_fields'] = list(self._includefields) if self._bug_numbers: bugs = [] for bug in self._bug_numbers: result = self._bugsy.request('bug/%s' % bug, params=params) bugs.append(Bug(self._bugsy, **result['bugs'][0])) return bugs else: if self._component: params['component'] = list(self._component) if self._product: params['product'] = list(self._product) if self._keywords: params['keywords'] = list(self._keywords) if self._assigned: params['assigned_to'] = list(self._assigned) if self._summaries: params['short_desc_type'] = 'allwordssubstr' params['short_desc'] = list(self._summaries) if self._whiteboard: params['short_desc_type'] = 'allwordssubstr' params['whiteboard'] = list(self._whiteboard) if self._change_history['fields']: params['chfield'] = self._change_history['fields'] if self._change_history.get('value', None): params['chfieldvalue'] = self._change_history['value'] try: results = self._bugsy.request('bug', params=params) except Exception as e: raise SearchException(e.msg, e.code) return [Bug(self._bugsy, **bug) for bug in results['bugs']]
python
def search(self): r""" Call the Bugzilla endpoint that will do the search. It will take the information used in other methods on the Search object and build up the query string. If no bugs are found then an empty list is returned. >>> bugs = bugzilla.search_for\ ... .keywords("checkin-needed")\ ... .include_fields("flags")\ ... .search() """ params = {} params.update(self._time_frame.items()) if self._includefields: params['include_fields'] = list(self._includefields) if self._bug_numbers: bugs = [] for bug in self._bug_numbers: result = self._bugsy.request('bug/%s' % bug, params=params) bugs.append(Bug(self._bugsy, **result['bugs'][0])) return bugs else: if self._component: params['component'] = list(self._component) if self._product: params['product'] = list(self._product) if self._keywords: params['keywords'] = list(self._keywords) if self._assigned: params['assigned_to'] = list(self._assigned) if self._summaries: params['short_desc_type'] = 'allwordssubstr' params['short_desc'] = list(self._summaries) if self._whiteboard: params['short_desc_type'] = 'allwordssubstr' params['whiteboard'] = list(self._whiteboard) if self._change_history['fields']: params['chfield'] = self._change_history['fields'] if self._change_history.get('value', None): params['chfieldvalue'] = self._change_history['value'] try: results = self._bugsy.request('bug', params=params) except Exception as e: raise SearchException(e.msg, e.code) return [Bug(self._bugsy, **bug) for bug in results['bugs']]
[ "def", "search", "(", "self", ")", ":", "params", "=", "{", "}", "params", ".", "update", "(", "self", ".", "_time_frame", ".", "items", "(", ")", ")", "if", "self", ".", "_includefields", ":", "params", "[", "'include_fields'", "]", "=", "list", "("...
r""" Call the Bugzilla endpoint that will do the search. It will take the information used in other methods on the Search object and build up the query string. If no bugs are found then an empty list is returned. >>> bugs = bugzilla.search_for\ ... .keywords("checkin-needed")\ ... .include_fields("flags")\ ... .search()
[ "r", "Call", "the", "Bugzilla", "endpoint", "that", "will", "do", "the", "search", ".", "It", "will", "take", "the", "information", "used", "in", "other", "methods", "on", "the", "Search", "object", "and", "build", "up", "the", "query", "string", ".", "I...
train
https://github.com/AutomatedTester/Bugsy/blob/ac14df84e744a148e81aeaae20a144bc5f3cebf1/bugsy/search.py#L163-L213
darkfeline/animanager
animanager/migrations.py
_parse_date
def _parse_date(string: str) -> datetime.date: """Parse an ISO format date (YYYY-mm-dd). >>> _parse_date('1990-01-02') datetime.date(1990, 1, 2) """ return datetime.datetime.strptime(string, '%Y-%m-%d').date()
python
def _parse_date(string: str) -> datetime.date: """Parse an ISO format date (YYYY-mm-dd). >>> _parse_date('1990-01-02') datetime.date(1990, 1, 2) """ return datetime.datetime.strptime(string, '%Y-%m-%d').date()
[ "def", "_parse_date", "(", "string", ":", "str", ")", "->", "datetime", ".", "date", ":", "return", "datetime", ".", "datetime", ".", "strptime", "(", "string", ",", "'%Y-%m-%d'", ")", ".", "date", "(", ")" ]
Parse an ISO format date (YYYY-mm-dd). >>> _parse_date('1990-01-02') datetime.date(1990, 1, 2)
[ "Parse", "an", "ISO", "format", "date", "(", "YYYY", "-", "mm", "-", "dd", ")", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/migrations.py#L184-L190
Frojd/Fabrik
fabrik/ext/wpcli.py
sync_remote_to_local
def sync_remote_to_local(force="no"): """ Replace your remote db with your local Example: sync_remote_to_local:force=yes """ assert "local_wp_dir" in env, "Missing local_wp_dir in env" if force != "yes": message = "This will replace your local database with your "\ "remote, are you sure [y/n]" answer = prompt(message, "y") if answer != "y": logger.info("Sync stopped") return init_tasks() # Bootstrap fabrik remote_file = "sync_%s.sql" % int(time.time()*1000) remote_path = "/tmp/%s" % remote_file with env.cd(paths.get_current_path()): env.run("wp db export %s" % remote_path) local_wp_dir = env.local_wp_dir local_path = "/tmp/%s" % remote_file # Download sync file get(remote_path, local_path) with lcd(local_wp_dir): elocal("wp db import %s" % local_path) # Cleanup env.run("rm %s" % remote_path) elocal("rm %s" % local_path)
python
def sync_remote_to_local(force="no"): """ Replace your remote db with your local Example: sync_remote_to_local:force=yes """ assert "local_wp_dir" in env, "Missing local_wp_dir in env" if force != "yes": message = "This will replace your local database with your "\ "remote, are you sure [y/n]" answer = prompt(message, "y") if answer != "y": logger.info("Sync stopped") return init_tasks() # Bootstrap fabrik remote_file = "sync_%s.sql" % int(time.time()*1000) remote_path = "/tmp/%s" % remote_file with env.cd(paths.get_current_path()): env.run("wp db export %s" % remote_path) local_wp_dir = env.local_wp_dir local_path = "/tmp/%s" % remote_file # Download sync file get(remote_path, local_path) with lcd(local_wp_dir): elocal("wp db import %s" % local_path) # Cleanup env.run("rm %s" % remote_path) elocal("rm %s" % local_path)
[ "def", "sync_remote_to_local", "(", "force", "=", "\"no\"", ")", ":", "assert", "\"local_wp_dir\"", "in", "env", ",", "\"Missing local_wp_dir in env\"", "if", "force", "!=", "\"yes\"", ":", "message", "=", "\"This will replace your local database with your \"", "\"remote,...
Replace your remote db with your local Example: sync_remote_to_local:force=yes
[ "Replace", "your", "remote", "db", "with", "your", "local" ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/ext/wpcli.py#L29-L67
darkfeline/animanager
animanager/db/query/files.py
get_priority_rules
def get_priority_rules(db) -> Iterable[PriorityRule]: """Get file priority rules.""" cur = db.cursor() cur.execute('SELECT id, regexp, priority FROM file_priority') for row in cur: yield PriorityRule(*row)
python
def get_priority_rules(db) -> Iterable[PriorityRule]: """Get file priority rules.""" cur = db.cursor() cur.execute('SELECT id, regexp, priority FROM file_priority') for row in cur: yield PriorityRule(*row)
[ "def", "get_priority_rules", "(", "db", ")", "->", "Iterable", "[", "PriorityRule", "]", ":", "cur", "=", "db", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "'SELECT id, regexp, priority FROM file_priority'", ")", "for", "row", "in", "cur", ":", "yie...
Get file priority rules.
[ "Get", "file", "priority", "rules", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/files.py#L33-L38
darkfeline/animanager
animanager/db/query/files.py
add_priority_rule
def add_priority_rule( db, regexp: str, priority: Optional[int] = None, ) -> int: """Add a file priority rule.""" with db: cur = db.cursor() if priority is None: cur.execute('SELECT MAX(priority) FROM file_priority') highest_priority = cur.fetchone()[0] if highest_priority is None: priority = 1 else: priority = highest_priority + 1 cur.execute(""" INSERT INTO file_priority (regexp, priority) VALUES (?, ?)""", (regexp, priority)) row_id = db.last_insert_rowid() return row_id
python
def add_priority_rule( db, regexp: str, priority: Optional[int] = None, ) -> int: """Add a file priority rule.""" with db: cur = db.cursor() if priority is None: cur.execute('SELECT MAX(priority) FROM file_priority') highest_priority = cur.fetchone()[0] if highest_priority is None: priority = 1 else: priority = highest_priority + 1 cur.execute(""" INSERT INTO file_priority (regexp, priority) VALUES (?, ?)""", (regexp, priority)) row_id = db.last_insert_rowid() return row_id
[ "def", "add_priority_rule", "(", "db", ",", "regexp", ":", "str", ",", "priority", ":", "Optional", "[", "int", "]", "=", "None", ",", ")", "->", "int", ":", "with", "db", ":", "cur", "=", "db", ".", "cursor", "(", ")", "if", "priority", "is", "N...
Add a file priority rule.
[ "Add", "a", "file", "priority", "rule", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/files.py#L41-L58
darkfeline/animanager
animanager/db/query/files.py
delete_priority_rule
def delete_priority_rule(db, rule_id: int) -> None: """Delete a file priority rule.""" with db: cur = db.cursor() cur.execute('DELETE FROM file_priority WHERE id=?', (rule_id,))
python
def delete_priority_rule(db, rule_id: int) -> None: """Delete a file priority rule.""" with db: cur = db.cursor() cur.execute('DELETE FROM file_priority WHERE id=?', (rule_id,))
[ "def", "delete_priority_rule", "(", "db", ",", "rule_id", ":", "int", ")", "->", "None", ":", "with", "db", ":", "cur", "=", "db", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "'DELETE FROM file_priority WHERE id=?'", ",", "(", "rule_id", ",", ")...
Delete a file priority rule.
[ "Delete", "a", "file", "priority", "rule", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/files.py#L61-L65
darkfeline/animanager
animanager/db/query/files.py
cache_files
def cache_files(db, aid: int, anime_files: AnimeFiles) -> None: """Cache files for anime.""" with db: cache_status(db, aid) db.cursor().execute( """UPDATE cache_anime SET anime_files=? WHERE aid=?""", (anime_files.to_json(), aid))
python
def cache_files(db, aid: int, anime_files: AnimeFiles) -> None: """Cache files for anime.""" with db: cache_status(db, aid) db.cursor().execute( """UPDATE cache_anime SET anime_files=? WHERE aid=?""", (anime_files.to_json(), aid))
[ "def", "cache_files", "(", "db", ",", "aid", ":", "int", ",", "anime_files", ":", "AnimeFiles", ")", "->", "None", ":", "with", "db", ":", "cache_status", "(", "db", ",", "aid", ")", "db", ".", "cursor", "(", ")", ".", "execute", "(", "\"\"\"UPDATE c...
Cache files for anime.
[ "Cache", "files", "for", "anime", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/files.py#L68-L76
darkfeline/animanager
animanager/db/query/files.py
get_files
def get_files(conn, aid: int) -> AnimeFiles: """Get cached files for anime.""" with conn: cur = conn.cursor().execute( 'SELECT anime_files FROM cache_anime WHERE aid=?', (aid,)) row = cur.fetchone() if row is None: raise ValueError('No cached files') return AnimeFiles.from_json(row[0])
python
def get_files(conn, aid: int) -> AnimeFiles: """Get cached files for anime.""" with conn: cur = conn.cursor().execute( 'SELECT anime_files FROM cache_anime WHERE aid=?', (aid,)) row = cur.fetchone() if row is None: raise ValueError('No cached files') return AnimeFiles.from_json(row[0])
[ "def", "get_files", "(", "conn", ",", "aid", ":", "int", ")", "->", "AnimeFiles", ":", "with", "conn", ":", "cur", "=", "conn", ".", "cursor", "(", ")", ".", "execute", "(", "'SELECT anime_files FROM cache_anime WHERE aid=?'", ",", "(", "aid", ",", ")", ...
Get cached files for anime.
[ "Get", "cached", "files", "for", "anime", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/files.py#L79-L88
darkfeline/animanager
animanager/files/anime.py
AnimeFiles.add
def add(self, filename): """Try to add a file.""" basename = os.path.basename(filename) match = self.regexp.search(basename) if match: self.by_episode[int(match.group('ep'))].add(filename)
python
def add(self, filename): """Try to add a file.""" basename = os.path.basename(filename) match = self.regexp.search(basename) if match: self.by_episode[int(match.group('ep'))].add(filename)
[ "def", "add", "(", "self", ",", "filename", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "match", "=", "self", ".", "regexp", ".", "search", "(", "basename", ")", "if", "match", ":", "self", ".", "by_episode", ...
Try to add a file.
[ "Try", "to", "add", "a", "file", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/files/anime.py#L69-L74
darkfeline/animanager
animanager/files/anime.py
AnimeFiles.available_string
def available_string(self, episode): """Return a string of available episodes.""" available = [ep for ep in self if ep > episode] string = ','.join(str(ep) for ep in available[:self.EPISODES_TO_SHOW]) if len(available) > self.EPISODES_TO_SHOW: string += '...' return string
python
def available_string(self, episode): """Return a string of available episodes.""" available = [ep for ep in self if ep > episode] string = ','.join(str(ep) for ep in available[:self.EPISODES_TO_SHOW]) if len(available) > self.EPISODES_TO_SHOW: string += '...' return string
[ "def", "available_string", "(", "self", ",", "episode", ")", ":", "available", "=", "[", "ep", "for", "ep", "in", "self", "if", "ep", ">", "episode", "]", "string", "=", "','", ".", "join", "(", "str", "(", "ep", ")", "for", "ep", "in", "available"...
Return a string of available episodes.
[ "Return", "a", "string", "of", "available", "episodes", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/files/anime.py#L81-L87
darkfeline/animanager
animanager/files/anime.py
AnimeFiles.from_json
def from_json(cls, string): """Create AnimeFiles from JSON string.""" obj = json.loads(string) return cls(obj['regexp'], obj['files'])
python
def from_json(cls, string): """Create AnimeFiles from JSON string.""" obj = json.loads(string) return cls(obj['regexp'], obj['files'])
[ "def", "from_json", "(", "cls", ",", "string", ")", ":", "obj", "=", "json", ".", "loads", "(", "string", ")", "return", "cls", "(", "obj", "[", "'regexp'", "]", ",", "obj", "[", "'files'", "]", ")" ]
Create AnimeFiles from JSON string.
[ "Create", "AnimeFiles", "from", "JSON", "string", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/files/anime.py#L102-L105
gtaylor/petfinder-api
petfinder/exceptions.py
_get_exception_class_from_status_code
def _get_exception_class_from_status_code(status_code): """ Utility function that accepts a status code, and spits out a reference to the correct exception class to raise. :param str status_code: The status code to return an exception class for. :rtype: PetfinderAPIError or None :returns: The appropriate PetfinderAPIError subclass. If the status code is not an error, return ``None``. """ if status_code == '100': return None exc_class = STATUS_CODE_MAPPING.get(status_code) if not exc_class: # No status code match, return the "I don't know wtf this is" # exception class. return STATUS_CODE_MAPPING['UNKNOWN'] else: # Match found, yay. return exc_class
python
def _get_exception_class_from_status_code(status_code): """ Utility function that accepts a status code, and spits out a reference to the correct exception class to raise. :param str status_code: The status code to return an exception class for. :rtype: PetfinderAPIError or None :returns: The appropriate PetfinderAPIError subclass. If the status code is not an error, return ``None``. """ if status_code == '100': return None exc_class = STATUS_CODE_MAPPING.get(status_code) if not exc_class: # No status code match, return the "I don't know wtf this is" # exception class. return STATUS_CODE_MAPPING['UNKNOWN'] else: # Match found, yay. return exc_class
[ "def", "_get_exception_class_from_status_code", "(", "status_code", ")", ":", "if", "status_code", "==", "'100'", ":", "return", "None", "exc_class", "=", "STATUS_CODE_MAPPING", ".", "get", "(", "status_code", ")", "if", "not", "exc_class", ":", "# No status code ma...
Utility function that accepts a status code, and spits out a reference to the correct exception class to raise. :param str status_code: The status code to return an exception class for. :rtype: PetfinderAPIError or None :returns: The appropriate PetfinderAPIError subclass. If the status code is not an error, return ``None``.
[ "Utility", "function", "that", "accepts", "a", "status", "code", "and", "spits", "out", "a", "reference", "to", "the", "correct", "exception", "class", "to", "raise", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/exceptions.py#L93-L113
SylvanasSun/FishFishJump
fish_core/scrapy/pipelines.py
MongodbPipeline.open_spider
def open_spider(self, spider): """ Initialize Mongodb client. """ if self.url == "": self.client = pymongo.MongoClient(self.host, self.port) else: self.client = pymongo.MongoClient(self.url) self.db_name, self.collection_name = self._replace_placeholder(spider) self.db = self.client[self.db_name]
python
def open_spider(self, spider): """ Initialize Mongodb client. """ if self.url == "": self.client = pymongo.MongoClient(self.host, self.port) else: self.client = pymongo.MongoClient(self.url) self.db_name, self.collection_name = self._replace_placeholder(spider) self.db = self.client[self.db_name]
[ "def", "open_spider", "(", "self", ",", "spider", ")", ":", "if", "self", ".", "url", "==", "\"\"", ":", "self", ".", "client", "=", "pymongo", ".", "MongoClient", "(", "self", ".", "host", ",", "self", ".", "port", ")", "else", ":", "self", ".", ...
Initialize Mongodb client.
[ "Initialize", "Mongodb", "client", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/scrapy/pipelines.py#L44-L54
SylvanasSun/FishFishJump
fish_core/scrapy/pipelines.py
MongodbPipeline._replace_placeholder
def _replace_placeholder(self, spider): """ Returns replaced db_name and collection_name(base on spider's name). if your db_name or collection_name does not have a placeholder or your db_name or collection_name that not base on spider's name you must override this function. """ return self.db_name % {'spider': spider.name}, self.collection_name % {'spider': spider.name}
python
def _replace_placeholder(self, spider): """ Returns replaced db_name and collection_name(base on spider's name). if your db_name or collection_name does not have a placeholder or your db_name or collection_name that not base on spider's name you must override this function. """ return self.db_name % {'spider': spider.name}, self.collection_name % {'spider': spider.name}
[ "def", "_replace_placeholder", "(", "self", ",", "spider", ")", ":", "return", "self", ".", "db_name", "%", "{", "'spider'", ":", "spider", ".", "name", "}", ",", "self", ".", "collection_name", "%", "{", "'spider'", ":", "spider", ".", "name", "}" ]
Returns replaced db_name and collection_name(base on spider's name). if your db_name or collection_name does not have a placeholder or your db_name or collection_name that not base on spider's name you must override this function.
[ "Returns", "replaced", "db_name", "and", "collection_name", "(", "base", "on", "spider", "s", "name", ")", ".", "if", "your", "db_name", "or", "collection_name", "does", "not", "have", "a", "placeholder", "or", "your", "db_name", "or", "collection_name", "that...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/scrapy/pipelines.py#L59-L66
fuzeman/plex.py
plex/request.py
PlexRequest.construct_url
def construct_url(self): """Construct a full plex request URI, with `params`.""" path = [self.path] path.extend([str(x) for x in self.params]) url = self.client.base_url + '/'.join(x for x in path if x) query = self.kwargs.get('query') if query: # Dict -> List if type(query) is dict: query = query.items() # Remove items with `None` value query = [ (k, v) for (k, v) in query if v is not None ] # Encode query, append to URL url += '?' + urlencode(query) return url
python
def construct_url(self): """Construct a full plex request URI, with `params`.""" path = [self.path] path.extend([str(x) for x in self.params]) url = self.client.base_url + '/'.join(x for x in path if x) query = self.kwargs.get('query') if query: # Dict -> List if type(query) is dict: query = query.items() # Remove items with `None` value query = [ (k, v) for (k, v) in query if v is not None ] # Encode query, append to URL url += '?' + urlencode(query) return url
[ "def", "construct_url", "(", "self", ")", ":", "path", "=", "[", "self", ".", "path", "]", "path", ".", "extend", "(", "[", "str", "(", "x", ")", "for", "x", "in", "self", ".", "params", "]", ")", "url", "=", "self", ".", "client", ".", "base_u...
Construct a full plex request URI, with `params`.
[ "Construct", "a", "full", "plex", "request", "URI", "with", "params", "." ]
train
https://github.com/fuzeman/plex.py/blob/f1155f94bce1a695a569b56ac8ba5bb6da00a07f/plex/request.py#L34-L56
apetrynet/pyfilemail
pyfilemail/__init__.py
login_required
def login_required(f): """Decorator function to check if user is loged in. :raises: :class:`FMBaseError` if not logged in """ @wraps(f) def check_login(cls, *args, **kwargs): if not cls.logged_in: raise FMBaseError('Please login to use this method') return f(cls, *args, **kwargs) return check_login
python
def login_required(f): """Decorator function to check if user is loged in. :raises: :class:`FMBaseError` if not logged in """ @wraps(f) def check_login(cls, *args, **kwargs): if not cls.logged_in: raise FMBaseError('Please login to use this method') return f(cls, *args, **kwargs) return check_login
[ "def", "login_required", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "check_login", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "cls", ".", "logged_in", ":", "raise", "FMBaseError", "(", "'Please login to...
Decorator function to check if user is loged in. :raises: :class:`FMBaseError` if not logged in
[ "Decorator", "function", "to", "check", "if", "user", "is", "loged", "in", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/__init__.py#L69-L82
apetrynet/pyfilemail
pyfilemail/__init__.py
load_config
def load_config(): """Load configuration file containing API KEY and other settings. :rtype: str """ configfile = get_configfile() if not os.path.exists(configfile): data = { 'apikey': 'GET KEY AT: https://www.filemail.com/apidoc/ApiKey.aspx' } save_config(data) with open(configfile, 'rb') as f: return json.load(f)
python
def load_config(): """Load configuration file containing API KEY and other settings. :rtype: str """ configfile = get_configfile() if not os.path.exists(configfile): data = { 'apikey': 'GET KEY AT: https://www.filemail.com/apidoc/ApiKey.aspx' } save_config(data) with open(configfile, 'rb') as f: return json.load(f)
[ "def", "load_config", "(", ")", ":", "configfile", "=", "get_configfile", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "configfile", ")", ":", "data", "=", "{", "'apikey'", ":", "'GET KEY AT: https://www.filemail.com/apidoc/ApiKey.aspx'", "}", ...
Load configuration file containing API KEY and other settings. :rtype: str
[ "Load", "configuration", "file", "containing", "API", "KEY", "and", "other", "settings", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/__init__.py#L85-L101
apetrynet/pyfilemail
pyfilemail/__init__.py
save_config
def save_config(config): """Save configuration file to users data location. - Linux: ~/.local/share/pyfilemail - OSX: ~/Library/Application Support/pyfilemail - Windows: C:\\\Users\\\{username}\\\AppData\\\Local\\\pyfilemail :rtype: str """ configfile = get_configfile() if not os.path.exists(configfile): configdir = os.path.dirname(configfile) if not os.path.exists(configdir): os.makedirs(configdir) data = config with open(configfile, 'wb') as f: json.dump(data, f, indent=2)
python
def save_config(config): """Save configuration file to users data location. - Linux: ~/.local/share/pyfilemail - OSX: ~/Library/Application Support/pyfilemail - Windows: C:\\\Users\\\{username}\\\AppData\\\Local\\\pyfilemail :rtype: str """ configfile = get_configfile() if not os.path.exists(configfile): configdir = os.path.dirname(configfile) if not os.path.exists(configdir): os.makedirs(configdir) data = config with open(configfile, 'wb') as f: json.dump(data, f, indent=2)
[ "def", "save_config", "(", "config", ")", ":", "configfile", "=", "get_configfile", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "configfile", ")", ":", "configdir", "=", "os", ".", "path", ".", "dirname", "(", "configfile", ")", "if",...
Save configuration file to users data location. - Linux: ~/.local/share/pyfilemail - OSX: ~/Library/Application Support/pyfilemail - Windows: C:\\\Users\\\{username}\\\AppData\\\Local\\\pyfilemail :rtype: str
[ "Save", "configuration", "file", "to", "users", "data", "location", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/__init__.py#L104-L125
apetrynet/pyfilemail
pyfilemail/__init__.py
get_configfile
def get_configfile(): """Return full path to configuration file. - Linux: ~/.local/share/pyfilemail - OSX: ~/Library/Application Support/pyfilemail - Windows: C:\\\Users\\\{username}\\\AppData\\\Local\\\pyfilemail :rtype: str """ ad = appdirs.AppDirs('pyfilemail') configdir = ad.user_data_dir configfile = os.path.join(configdir, 'pyfilemail.cfg') return configfile
python
def get_configfile(): """Return full path to configuration file. - Linux: ~/.local/share/pyfilemail - OSX: ~/Library/Application Support/pyfilemail - Windows: C:\\\Users\\\{username}\\\AppData\\\Local\\\pyfilemail :rtype: str """ ad = appdirs.AppDirs('pyfilemail') configdir = ad.user_data_dir configfile = os.path.join(configdir, 'pyfilemail.cfg') return configfile
[ "def", "get_configfile", "(", ")", ":", "ad", "=", "appdirs", ".", "AppDirs", "(", "'pyfilemail'", ")", "configdir", "=", "ad", ".", "user_data_dir", "configfile", "=", "os", ".", "path", ".", "join", "(", "configdir", ",", "'pyfilemail.cfg'", ")", "return...
Return full path to configuration file. - Linux: ~/.local/share/pyfilemail - OSX: ~/Library/Application Support/pyfilemail - Windows: C:\\\Users\\\{username}\\\AppData\\\Local\\\pyfilemail :rtype: str
[ "Return", "full", "path", "to", "configuration", "file", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/__init__.py#L128-L142
wglass/lighthouse
lighthouse/log/config.py
Logging.from_config
def from_config(cls, name, config): """ Override of the base `from_config()` method that returns `None` if the name of the config file isn't "logging". We do this in case this `Configurable` subclass winds up sharing the root of the config directory with other subclasses. """ if name != cls.name: return return super(Logging, cls).from_config(name, config)
python
def from_config(cls, name, config): """ Override of the base `from_config()` method that returns `None` if the name of the config file isn't "logging". We do this in case this `Configurable` subclass winds up sharing the root of the config directory with other subclasses. """ if name != cls.name: return return super(Logging, cls).from_config(name, config)
[ "def", "from_config", "(", "cls", ",", "name", ",", "config", ")", ":", "if", "name", "!=", "cls", ".", "name", ":", "return", "return", "super", "(", "Logging", ",", "cls", ")", ".", "from_config", "(", "name", ",", "config", ")" ]
Override of the base `from_config()` method that returns `None` if the name of the config file isn't "logging". We do this in case this `Configurable` subclass winds up sharing the root of the config directory with other subclasses.
[ "Override", "of", "the", "base", "from_config", "()", "method", "that", "returns", "None", "if", "the", "name", "of", "the", "config", "file", "isn", "t", "logging", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/log/config.py#L22-L33
elifesciences/proofreader-python
proofreader/config/utils.py
get_flake8_options
def get_flake8_options(config_dir='.'): # type: (str) -> List[str] """Checks for local config overrides for `flake8` and add them in the correct `flake8` `options` format. :param config_dir: :return: List[str] """ if FLAKE8_CONFIG_NAME in os.listdir(config_dir): flake8_config_path = FLAKE8_CONFIG_NAME else: flake8_config_path = DEFAULT_FLAKE8_CONFIG_PATH return ['--config={}'.format(flake8_config_path)]
python
def get_flake8_options(config_dir='.'): # type: (str) -> List[str] """Checks for local config overrides for `flake8` and add them in the correct `flake8` `options` format. :param config_dir: :return: List[str] """ if FLAKE8_CONFIG_NAME in os.listdir(config_dir): flake8_config_path = FLAKE8_CONFIG_NAME else: flake8_config_path = DEFAULT_FLAKE8_CONFIG_PATH return ['--config={}'.format(flake8_config_path)]
[ "def", "get_flake8_options", "(", "config_dir", "=", "'.'", ")", ":", "# type: (str) -> List[str]", "if", "FLAKE8_CONFIG_NAME", "in", "os", ".", "listdir", "(", "config_dir", ")", ":", "flake8_config_path", "=", "FLAKE8_CONFIG_NAME", "else", ":", "flake8_config_path",...
Checks for local config overrides for `flake8` and add them in the correct `flake8` `options` format. :param config_dir: :return: List[str]
[ "Checks", "for", "local", "config", "overrides", "for", "flake8", "and", "add", "them", "in", "the", "correct", "flake8", "options", "format", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/config/utils.py#L13-L26
elifesciences/proofreader-python
proofreader/config/utils.py
get_license_checker_config_path
def get_license_checker_config_path(config_dir='.'): # type: (str) -> List[str] """Checks for local config overrides for license checker, if not found it returns the package default. :param config_dir: :return: str """ if LICENSE_CHECKER_CONFIG_NAME in os.listdir(config_dir): license_checker_config_path = LICENSE_CHECKER_CONFIG_NAME else: license_checker_config_path = DEFAULT_LICENSE_CHECKER_CONFIG_PATH return license_checker_config_path
python
def get_license_checker_config_path(config_dir='.'): # type: (str) -> List[str] """Checks for local config overrides for license checker, if not found it returns the package default. :param config_dir: :return: str """ if LICENSE_CHECKER_CONFIG_NAME in os.listdir(config_dir): license_checker_config_path = LICENSE_CHECKER_CONFIG_NAME else: license_checker_config_path = DEFAULT_LICENSE_CHECKER_CONFIG_PATH return license_checker_config_path
[ "def", "get_license_checker_config_path", "(", "config_dir", "=", "'.'", ")", ":", "# type: (str) -> List[str]", "if", "LICENSE_CHECKER_CONFIG_NAME", "in", "os", ".", "listdir", "(", "config_dir", ")", ":", "license_checker_config_path", "=", "LICENSE_CHECKER_CONFIG_NAME", ...
Checks for local config overrides for license checker, if not found it returns the package default. :param config_dir: :return: str
[ "Checks", "for", "local", "config", "overrides", "for", "license", "checker", "if", "not", "found", "it", "returns", "the", "package", "default", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/config/utils.py#L29-L42
elifesciences/proofreader-python
proofreader/config/utils.py
get_pylint_options
def get_pylint_options(config_dir='.'): # type: (str) -> List[str] """Checks for local config overrides for `pylint` and add them in the correct `pylint` `options` format. :param config_dir: :return: List [str] """ if PYLINT_CONFIG_NAME in os.listdir(config_dir): pylint_config_path = PYLINT_CONFIG_NAME else: pylint_config_path = DEFAULT_PYLINT_CONFIG_PATH return ['--rcfile={}'.format(pylint_config_path)]
python
def get_pylint_options(config_dir='.'): # type: (str) -> List[str] """Checks for local config overrides for `pylint` and add them in the correct `pylint` `options` format. :param config_dir: :return: List [str] """ if PYLINT_CONFIG_NAME in os.listdir(config_dir): pylint_config_path = PYLINT_CONFIG_NAME else: pylint_config_path = DEFAULT_PYLINT_CONFIG_PATH return ['--rcfile={}'.format(pylint_config_path)]
[ "def", "get_pylint_options", "(", "config_dir", "=", "'.'", ")", ":", "# type: (str) -> List[str]", "if", "PYLINT_CONFIG_NAME", "in", "os", ".", "listdir", "(", "config_dir", ")", ":", "pylint_config_path", "=", "PYLINT_CONFIG_NAME", "else", ":", "pylint_config_path",...
Checks for local config overrides for `pylint` and add them in the correct `pylint` `options` format. :param config_dir: :return: List [str]
[ "Checks", "for", "local", "config", "overrides", "for", "pylint", "and", "add", "them", "in", "the", "correct", "pylint", "options", "format", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/config/utils.py#L45-L58
wglass/lighthouse
lighthouse/events.py
wait_on_any
def wait_on_any(*events, **kwargs): """ Helper method for waiting for any of the given threading events to be set. The standard threading lib doesn't include any mechanism for waiting on more than one event at a time so we have to monkey patch the events so that their `set()` and `clear()` methods fire a callback we can use to determine how a composite event should react. """ timeout = kwargs.get("timeout") composite_event = threading.Event() if any([event.is_set() for event in events]): return def on_change(): if any([event.is_set() for event in events]): composite_event.set() else: composite_event.clear() def patch(original): def patched(): original() on_change() return patched for event in events: event.set = patch(event.set) event.clear = patch(event.clear) wait_on_event(composite_event, timeout=timeout)
python
def wait_on_any(*events, **kwargs): """ Helper method for waiting for any of the given threading events to be set. The standard threading lib doesn't include any mechanism for waiting on more than one event at a time so we have to monkey patch the events so that their `set()` and `clear()` methods fire a callback we can use to determine how a composite event should react. """ timeout = kwargs.get("timeout") composite_event = threading.Event() if any([event.is_set() for event in events]): return def on_change(): if any([event.is_set() for event in events]): composite_event.set() else: composite_event.clear() def patch(original): def patched(): original() on_change() return patched for event in events: event.set = patch(event.set) event.clear = patch(event.clear) wait_on_event(composite_event, timeout=timeout)
[ "def", "wait_on_any", "(", "*", "events", ",", "*", "*", "kwargs", ")", ":", "timeout", "=", "kwargs", ".", "get", "(", "\"timeout\"", ")", "composite_event", "=", "threading", ".", "Event", "(", ")", "if", "any", "(", "[", "event", ".", "is_set", "(...
Helper method for waiting for any of the given threading events to be set. The standard threading lib doesn't include any mechanism for waiting on more than one event at a time so we have to monkey patch the events so that their `set()` and `clear()` methods fire a callback we can use to determine how a composite event should react.
[ "Helper", "method", "for", "waiting", "for", "any", "of", "the", "given", "threading", "events", "to", "be", "set", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/events.py#L11-L45
wglass/lighthouse
lighthouse/events.py
wait_on_event
def wait_on_event(event, timeout=None): """ Waits on a single threading Event, with an optional timeout. This is here for compatibility reasons as python 2 can't reliably wait on an event without a timeout and python 3 doesn't define a `maxint`. """ if timeout is not None: event.wait(timeout) return if six.PY2: # Thanks to a bug in python 2's threading lib, we can't simply call # .wait() with no timeout since it would wind up ignoring signals. while not event.is_set(): event.wait(sys.maxint) else: event.wait()
python
def wait_on_event(event, timeout=None): """ Waits on a single threading Event, with an optional timeout. This is here for compatibility reasons as python 2 can't reliably wait on an event without a timeout and python 3 doesn't define a `maxint`. """ if timeout is not None: event.wait(timeout) return if six.PY2: # Thanks to a bug in python 2's threading lib, we can't simply call # .wait() with no timeout since it would wind up ignoring signals. while not event.is_set(): event.wait(sys.maxint) else: event.wait()
[ "def", "wait_on_event", "(", "event", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "not", "None", ":", "event", ".", "wait", "(", "timeout", ")", "return", "if", "six", ".", "PY2", ":", "# Thanks to a bug in python 2's threading lib, we can't ...
Waits on a single threading Event, with an optional timeout. This is here for compatibility reasons as python 2 can't reliably wait on an event without a timeout and python 3 doesn't define a `maxint`.
[ "Waits", "on", "a", "single", "threading", "Event", "with", "an", "optional", "timeout", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/events.py#L48-L65
wglass/lighthouse
lighthouse/haproxy/balancer.py
HAProxy.validate_config
def validate_config(cls, config): """ Validates that a config file path and a control socket file path and pid file path are all present in the HAProxy config. """ if "config_file" not in config: raise ValueError("No config file path given") if "socket_file" not in config: raise ValueError("No control socket path given") if "pid_file" not in config: raise ValueError("No PID file path given") if "stats" in config and "port" not in config["stats"]: raise ValueError("Stats interface defined, but no port given") if "proxies" in config: cls.validate_proxies_config(config["proxies"]) return config
python
def validate_config(cls, config): """ Validates that a config file path and a control socket file path and pid file path are all present in the HAProxy config. """ if "config_file" not in config: raise ValueError("No config file path given") if "socket_file" not in config: raise ValueError("No control socket path given") if "pid_file" not in config: raise ValueError("No PID file path given") if "stats" in config and "port" not in config["stats"]: raise ValueError("Stats interface defined, but no port given") if "proxies" in config: cls.validate_proxies_config(config["proxies"]) return config
[ "def", "validate_config", "(", "cls", ",", "config", ")", ":", "if", "\"config_file\"", "not", "in", "config", ":", "raise", "ValueError", "(", "\"No config file path given\"", ")", "if", "\"socket_file\"", "not", "in", "config", ":", "raise", "ValueError", "(",...
Validates that a config file path and a control socket file path and pid file path are all present in the HAProxy config.
[ "Validates", "that", "a", "config", "file", "path", "and", "a", "control", "socket", "file", "path", "and", "pid", "file", "path", "are", "all", "present", "in", "the", "HAProxy", "config", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/balancer.py#L54-L70
wglass/lighthouse
lighthouse/haproxy/balancer.py
HAProxy.validate_proxies_config
def validate_proxies_config(cls, proxies): """ Specific config validation method for the "proxies" portion of a config. Checks that each proxy defines a port and a list of `upstreams`, and that each upstream entry has a host and port defined. """ for name, proxy in six.iteritems(proxies): if "port" not in proxy: raise ValueError("No port defined for proxy %s" % name) if "upstreams" not in proxy: raise ValueError( "No upstreams defined for proxy %s" % name ) for upstream in proxy["upstreams"]: if "host" not in upstream: raise ValueError( "No host defined for upstream in proxy %s" % name ) if "port" not in upstream: raise ValueError( "No port defined for upstream in proxy %s" % name )
python
def validate_proxies_config(cls, proxies): """ Specific config validation method for the "proxies" portion of a config. Checks that each proxy defines a port and a list of `upstreams`, and that each upstream entry has a host and port defined. """ for name, proxy in six.iteritems(proxies): if "port" not in proxy: raise ValueError("No port defined for proxy %s" % name) if "upstreams" not in proxy: raise ValueError( "No upstreams defined for proxy %s" % name ) for upstream in proxy["upstreams"]: if "host" not in upstream: raise ValueError( "No host defined for upstream in proxy %s" % name ) if "port" not in upstream: raise ValueError( "No port defined for upstream in proxy %s" % name )
[ "def", "validate_proxies_config", "(", "cls", ",", "proxies", ")", ":", "for", "name", ",", "proxy", "in", "six", ".", "iteritems", "(", "proxies", ")", ":", "if", "\"port\"", "not", "in", "proxy", ":", "raise", "ValueError", "(", "\"No port defined for prox...
Specific config validation method for the "proxies" portion of a config. Checks that each proxy defines a port and a list of `upstreams`, and that each upstream entry has a host and port defined.
[ "Specific", "config", "validation", "method", "for", "the", "proxies", "portion", "of", "a", "config", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/balancer.py#L73-L96
wglass/lighthouse
lighthouse/haproxy/balancer.py
HAProxy.apply_config
def apply_config(self, config): """ Constructs HAProxyConfig and HAProxyControl instances based on the contents of the config. This is mostly a matter of constructing the configuration stanzas. """ self.haproxy_config_path = config["config_file"] global_stanza = Stanza("global") global_stanza.add_lines(config.get("global", [])) global_stanza.add_lines([ "stats socket %s mode 600 level admin" % config["socket_file"], "stats timeout 2m" ]) defaults_stanza = Stanza("defaults") defaults_stanza.add_lines(config.get("defaults", [])) proxy_stanzas = [ ProxyStanza( name, proxy["port"], proxy["upstreams"], proxy.get("options", []), proxy.get("bind_address") ) for name, proxy in six.iteritems(config.get("proxies", {})) ] stats_stanza = None if "stats" in config: stats_stanza = StatsStanza( config["stats"]["port"], config["stats"].get("uri", "/") ) for timeout in ("client", "connect", "server"): if timeout in config["stats"].get("timeouts", {}): stats_stanza.add_line( "timeout %s %d" % ( timeout, config["stats"]["timeouts"][timeout] ) ) self.config_file = HAProxyConfig( global_stanza, defaults_stanza, proxy_stanzas=proxy_stanzas, stats_stanza=stats_stanza, meta_clusters=config.get("meta_clusters", {}), bind_address=config.get("bind_address") ) self.control = HAProxyControl( config["config_file"], config["socket_file"], config["pid_file"], )
python
def apply_config(self, config): """ Constructs HAProxyConfig and HAProxyControl instances based on the contents of the config. This is mostly a matter of constructing the configuration stanzas. """ self.haproxy_config_path = config["config_file"] global_stanza = Stanza("global") global_stanza.add_lines(config.get("global", [])) global_stanza.add_lines([ "stats socket %s mode 600 level admin" % config["socket_file"], "stats timeout 2m" ]) defaults_stanza = Stanza("defaults") defaults_stanza.add_lines(config.get("defaults", [])) proxy_stanzas = [ ProxyStanza( name, proxy["port"], proxy["upstreams"], proxy.get("options", []), proxy.get("bind_address") ) for name, proxy in six.iteritems(config.get("proxies", {})) ] stats_stanza = None if "stats" in config: stats_stanza = StatsStanza( config["stats"]["port"], config["stats"].get("uri", "/") ) for timeout in ("client", "connect", "server"): if timeout in config["stats"].get("timeouts", {}): stats_stanza.add_line( "timeout %s %d" % ( timeout, config["stats"]["timeouts"][timeout] ) ) self.config_file = HAProxyConfig( global_stanza, defaults_stanza, proxy_stanzas=proxy_stanzas, stats_stanza=stats_stanza, meta_clusters=config.get("meta_clusters", {}), bind_address=config.get("bind_address") ) self.control = HAProxyControl( config["config_file"], config["socket_file"], config["pid_file"], )
[ "def", "apply_config", "(", "self", ",", "config", ")", ":", "self", ".", "haproxy_config_path", "=", "config", "[", "\"config_file\"", "]", "global_stanza", "=", "Stanza", "(", "\"global\"", ")", "global_stanza", ".", "add_lines", "(", "config", ".", "get", ...
Constructs HAProxyConfig and HAProxyControl instances based on the contents of the config. This is mostly a matter of constructing the configuration stanzas.
[ "Constructs", "HAProxyConfig", "and", "HAProxyControl", "instances", "based", "on", "the", "contents", "of", "the", "config", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/balancer.py#L98-L149
wglass/lighthouse
lighthouse/haproxy/balancer.py
HAProxy.sync_file
def sync_file(self, clusters): """ Generates new HAProxy config file content and writes it to the file at `haproxy_config_path`. If a restart is not necessary the nodes configured in HAProxy will be synced on the fly. If a restart *is* necessary, one will be triggered. """ logger.info("Updating HAProxy config file.") if not self.restart_required: self.sync_nodes(clusters) version = self.control.get_version() with open(self.haproxy_config_path, "w") as f: f.write(self.config_file.generate(clusters, version=version)) if self.restart_required: with self.restart_lock: self.restart()
python
def sync_file(self, clusters): """ Generates new HAProxy config file content and writes it to the file at `haproxy_config_path`. If a restart is not necessary the nodes configured in HAProxy will be synced on the fly. If a restart *is* necessary, one will be triggered. """ logger.info("Updating HAProxy config file.") if not self.restart_required: self.sync_nodes(clusters) version = self.control.get_version() with open(self.haproxy_config_path, "w") as f: f.write(self.config_file.generate(clusters, version=version)) if self.restart_required: with self.restart_lock: self.restart()
[ "def", "sync_file", "(", "self", ",", "clusters", ")", ":", "logger", ".", "info", "(", "\"Updating HAProxy config file.\"", ")", "if", "not", "self", ".", "restart_required", ":", "self", ".", "sync_nodes", "(", "clusters", ")", "version", "=", "self", ".",...
Generates new HAProxy config file content and writes it to the file at `haproxy_config_path`. If a restart is not necessary the nodes configured in HAProxy will be synced on the fly. If a restart *is* necessary, one will be triggered.
[ "Generates", "new", "HAProxy", "config", "file", "content", "and", "writes", "it", "to", "the", "file", "at", "haproxy_config_path", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/balancer.py#L151-L171
wglass/lighthouse
lighthouse/haproxy/balancer.py
HAProxy.restart
def restart(self): """ Tells the HAProxy control object to restart the process. If it's been fewer than `restart_interval` seconds since the previous restart, it will wait until the interval has passed. This staves off situations where the process is constantly restarting, as it is possible to drop packets for a short interval while doing so. """ delay = (self.last_restart - time.time()) + self.restart_interval if delay > 0: time.sleep(delay) self.control.restart() self.last_restart = time.time() self.restart_required = False
python
def restart(self): """ Tells the HAProxy control object to restart the process. If it's been fewer than `restart_interval` seconds since the previous restart, it will wait until the interval has passed. This staves off situations where the process is constantly restarting, as it is possible to drop packets for a short interval while doing so. """ delay = (self.last_restart - time.time()) + self.restart_interval if delay > 0: time.sleep(delay) self.control.restart() self.last_restart = time.time() self.restart_required = False
[ "def", "restart", "(", "self", ")", ":", "delay", "=", "(", "self", ".", "last_restart", "-", "time", ".", "time", "(", ")", ")", "+", "self", ".", "restart_interval", "if", "delay", ">", "0", ":", "time", ".", "sleep", "(", "delay", ")", "self", ...
Tells the HAProxy control object to restart the process. If it's been fewer than `restart_interval` seconds since the previous restart, it will wait until the interval has passed. This staves off situations where the process is constantly restarting, as it is possible to drop packets for a short interval while doing so.
[ "Tells", "the", "HAProxy", "control", "object", "to", "restart", "the", "process", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/balancer.py#L173-L190
wglass/lighthouse
lighthouse/haproxy/balancer.py
HAProxy.sync_nodes
def sync_nodes(self, clusters): """ Syncs the enabled/disabled status of nodes existing in HAProxy based on the given clusters. This is used to inform HAProxy of up/down nodes without necessarily doing a restart of the process. """ logger.info("Syncing HAProxy backends.") current_nodes, enabled_nodes = self.get_current_nodes(clusters) for cluster_name, nodes in six.iteritems(current_nodes): for node in nodes: if node["svname"] in enabled_nodes[cluster_name]: command = self.control.enable_node else: command = self.control.disable_node try: response = command(cluster_name, node["svname"]) except Exception: logger.exception("Error when enabling/disabling node") self.restart_required = True else: if response: logger.error( "Socket command for %s node %s failed: %s", cluster_name, node["svname"], response ) self.restart_required = True return logger.info("HAProxy nodes/servers synced.")
python
def sync_nodes(self, clusters): """ Syncs the enabled/disabled status of nodes existing in HAProxy based on the given clusters. This is used to inform HAProxy of up/down nodes without necessarily doing a restart of the process. """ logger.info("Syncing HAProxy backends.") current_nodes, enabled_nodes = self.get_current_nodes(clusters) for cluster_name, nodes in six.iteritems(current_nodes): for node in nodes: if node["svname"] in enabled_nodes[cluster_name]: command = self.control.enable_node else: command = self.control.disable_node try: response = command(cluster_name, node["svname"]) except Exception: logger.exception("Error when enabling/disabling node") self.restart_required = True else: if response: logger.error( "Socket command for %s node %s failed: %s", cluster_name, node["svname"], response ) self.restart_required = True return logger.info("HAProxy nodes/servers synced.")
[ "def", "sync_nodes", "(", "self", ",", "clusters", ")", ":", "logger", ".", "info", "(", "\"Syncing HAProxy backends.\"", ")", "current_nodes", ",", "enabled_nodes", "=", "self", ".", "get_current_nodes", "(", "clusters", ")", "for", "cluster_name", ",", "nodes"...
Syncs the enabled/disabled status of nodes existing in HAProxy based on the given clusters. This is used to inform HAProxy of up/down nodes without necessarily doing a restart of the process.
[ "Syncs", "the", "enabled", "/", "disabled", "status", "of", "nodes", "existing", "in", "HAProxy", "based", "on", "the", "given", "clusters", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/balancer.py#L192-L225
wglass/lighthouse
lighthouse/haproxy/balancer.py
HAProxy.get_current_nodes
def get_current_nodes(self, clusters): """ Returns two dictionaries, the current nodes and the enabled nodes. The current_nodes dictionary is keyed off of the cluster name and values are a list of nodes known to HAProxy. The enabled_nodes dictionary is also keyed off of the cluster name and values are list of *enabled* nodes, i.e. the same values as current_nodes but limited to servers currently taking traffic. """ current_nodes = self.control.get_active_nodes() enabled_nodes = collections.defaultdict(list) for cluster in clusters: if not cluster.nodes: continue if cluster.name not in current_nodes: logger.debug( "New cluster '%s' added, restart required.", cluster.name ) self.restart_required = True for node in cluster.nodes: if node.name not in [ current_node["svname"] for current_node in current_nodes.get(cluster.name, []) ]: logger.debug( "New node added to cluster '%s', restart required.", cluster.name ) self.restart_required = True enabled_nodes[cluster.name].append(node.name) return current_nodes, enabled_nodes
python
def get_current_nodes(self, clusters): """ Returns two dictionaries, the current nodes and the enabled nodes. The current_nodes dictionary is keyed off of the cluster name and values are a list of nodes known to HAProxy. The enabled_nodes dictionary is also keyed off of the cluster name and values are list of *enabled* nodes, i.e. the same values as current_nodes but limited to servers currently taking traffic. """ current_nodes = self.control.get_active_nodes() enabled_nodes = collections.defaultdict(list) for cluster in clusters: if not cluster.nodes: continue if cluster.name not in current_nodes: logger.debug( "New cluster '%s' added, restart required.", cluster.name ) self.restart_required = True for node in cluster.nodes: if node.name not in [ current_node["svname"] for current_node in current_nodes.get(cluster.name, []) ]: logger.debug( "New node added to cluster '%s', restart required.", cluster.name ) self.restart_required = True enabled_nodes[cluster.name].append(node.name) return current_nodes, enabled_nodes
[ "def", "get_current_nodes", "(", "self", ",", "clusters", ")", ":", "current_nodes", "=", "self", ".", "control", ".", "get_active_nodes", "(", ")", "enabled_nodes", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "cluster", "in", "clusters", ...
Returns two dictionaries, the current nodes and the enabled nodes. The current_nodes dictionary is keyed off of the cluster name and values are a list of nodes known to HAProxy. The enabled_nodes dictionary is also keyed off of the cluster name and values are list of *enabled* nodes, i.e. the same values as current_nodes but limited to servers currently taking traffic.
[ "Returns", "two", "dictionaries", "the", "current", "nodes", "and", "the", "enabled", "nodes", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/balancer.py#L227-L265
wglass/lighthouse
lighthouse/service.py
Service.validate_config
def validate_config(cls, config): """ Runs a check on the given config to make sure that `port`/`ports` and `discovery` is defined. """ if "discovery" not in config: raise ValueError("No discovery method defined.") if not any([item in config for item in ["port", "ports"]]): raise ValueError("No port(s) defined.") cls.validate_check_configs(config)
python
def validate_config(cls, config): """ Runs a check on the given config to make sure that `port`/`ports` and `discovery` is defined. """ if "discovery" not in config: raise ValueError("No discovery method defined.") if not any([item in config for item in ["port", "ports"]]): raise ValueError("No port(s) defined.") cls.validate_check_configs(config)
[ "def", "validate_config", "(", "cls", ",", "config", ")", ":", "if", "\"discovery\"", "not", "in", "config", ":", "raise", "ValueError", "(", "\"No discovery method defined.\"", ")", "if", "not", "any", "(", "[", "item", "in", "config", "for", "item", "in", ...
Runs a check on the given config to make sure that `port`/`ports` and `discovery` is defined.
[ "Runs", "a", "check", "on", "the", "given", "config", "to", "make", "sure", "that", "port", "/", "ports", "and", "discovery", "is", "defined", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/service.py#L39-L50
wglass/lighthouse
lighthouse/service.py
Service.validate_check_configs
def validate_check_configs(cls, config): """ Config validation specific to the health check options. Verifies that checks are defined along with an interval, and calls out to the `Check` class to make sure each individual check's config is valid. """ if "checks" not in config: raise ValueError("No checks defined.") if "interval" not in config["checks"]: raise ValueError("No check interval defined.") for check_name, check_config in six.iteritems(config["checks"]): if check_name == "interval": continue Check.from_config(check_name, check_config)
python
def validate_check_configs(cls, config): """ Config validation specific to the health check options. Verifies that checks are defined along with an interval, and calls out to the `Check` class to make sure each individual check's config is valid. """ if "checks" not in config: raise ValueError("No checks defined.") if "interval" not in config["checks"]: raise ValueError("No check interval defined.") for check_name, check_config in six.iteritems(config["checks"]): if check_name == "interval": continue Check.from_config(check_name, check_config)
[ "def", "validate_check_configs", "(", "cls", ",", "config", ")", ":", "if", "\"checks\"", "not", "in", "config", ":", "raise", "ValueError", "(", "\"No checks defined.\"", ")", "if", "\"interval\"", "not", "in", "config", "[", "\"checks\"", "]", ":", "raise", ...
Config validation specific to the health check options. Verifies that checks are defined along with an interval, and calls out to the `Check` class to make sure each individual check's config is valid.
[ "Config", "validation", "specific", "to", "the", "health", "check", "options", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/service.py#L53-L70
wglass/lighthouse
lighthouse/service.py
Service.apply_config
def apply_config(self, config): """ Takes a given validated config dictionary and sets an instance attribute for each one. For check definitions, a Check instance is is created and a `checks` attribute set to a dictionary keyed off of the checks' names. If the Check instance has some sort of error while being created an error is logged and the check skipped. """ self.host = config.get("host", "127.0.0.1") self.configured_ports = config.get("ports", [config.get("port")]) self.discovery = config["discovery"] self.metadata = config.get("metadata", {}) self.update_ports() self.check_interval = config["checks"]["interval"] self.update_checks(config["checks"])
python
def apply_config(self, config): """ Takes a given validated config dictionary and sets an instance attribute for each one. For check definitions, a Check instance is is created and a `checks` attribute set to a dictionary keyed off of the checks' names. If the Check instance has some sort of error while being created an error is logged and the check skipped. """ self.host = config.get("host", "127.0.0.1") self.configured_ports = config.get("ports", [config.get("port")]) self.discovery = config["discovery"] self.metadata = config.get("metadata", {}) self.update_ports() self.check_interval = config["checks"]["interval"] self.update_checks(config["checks"])
[ "def", "apply_config", "(", "self", ",", "config", ")", ":", "self", ".", "host", "=", "config", ".", "get", "(", "\"host\"", ",", "\"127.0.0.1\"", ")", "self", ".", "configured_ports", "=", "config", ".", "get", "(", "\"ports\"", ",", "[", "config", "...
Takes a given validated config dictionary and sets an instance attribute for each one. For check definitions, a Check instance is is created and a `checks` attribute set to a dictionary keyed off of the checks' names. If the Check instance has some sort of error while being created an error is logged and the check skipped.
[ "Takes", "a", "given", "validated", "config", "dictionary", "and", "sets", "an", "instance", "attribute", "for", "each", "one", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/service.py#L72-L94
wglass/lighthouse
lighthouse/service.py
Service.update_ports
def update_ports(self): """ Sets the `ports` attribute to the set of valid port values set in the configuration. """ ports = set() for port in self.configured_ports: try: ports.add(int(port)) except ValueError: logger.error("Invalid port value: %s", port) continue self.ports = ports
python
def update_ports(self): """ Sets the `ports` attribute to the set of valid port values set in the configuration. """ ports = set() for port in self.configured_ports: try: ports.add(int(port)) except ValueError: logger.error("Invalid port value: %s", port) continue self.ports = ports
[ "def", "update_ports", "(", "self", ")", ":", "ports", "=", "set", "(", ")", "for", "port", "in", "self", ".", "configured_ports", ":", "try", ":", "ports", ".", "add", "(", "int", "(", "port", ")", ")", "except", "ValueError", ":", "logger", ".", ...
Sets the `ports` attribute to the set of valid port values set in the configuration.
[ "Sets", "the", "ports", "attribute", "to", "the", "set", "of", "valid", "port", "values", "set", "in", "the", "configuration", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/service.py#L105-L119
wglass/lighthouse
lighthouse/service.py
Service.update_checks
def update_checks(self, check_configs): """ Maintains the values in the `checks` attribute's dictionary. Each key in the dictionary is a port, and each value is a nested dictionary mapping each check's name to the Check instance. This method makes sure the attribute reflects all of the properly configured checks and ports. Removing no-longer-configured ports is left to the `run_checks` method. """ for check_name, check_config in six.iteritems(check_configs): if check_name == "interval": continue for port in self.ports: try: check = Check.from_config(check_name, check_config) check.host = self.host check.port = port self.checks[port][check_name] = check except ValueError as e: logger.error( "Error when configuring check '%s' for service %s: %s", check_name, self.name, str(e) ) continue
python
def update_checks(self, check_configs): """ Maintains the values in the `checks` attribute's dictionary. Each key in the dictionary is a port, and each value is a nested dictionary mapping each check's name to the Check instance. This method makes sure the attribute reflects all of the properly configured checks and ports. Removing no-longer-configured ports is left to the `run_checks` method. """ for check_name, check_config in six.iteritems(check_configs): if check_name == "interval": continue for port in self.ports: try: check = Check.from_config(check_name, check_config) check.host = self.host check.port = port self.checks[port][check_name] = check except ValueError as e: logger.error( "Error when configuring check '%s' for service %s: %s", check_name, self.name, str(e) ) continue
[ "def", "update_checks", "(", "self", ",", "check_configs", ")", ":", "for", "check_name", ",", "check_config", "in", "six", ".", "iteritems", "(", "check_configs", ")", ":", "if", "check_name", "==", "\"interval\"", ":", "continue", "for", "port", "in", "sel...
Maintains the values in the `checks` attribute's dictionary. Each key in the dictionary is a port, and each value is a nested dictionary mapping each check's name to the Check instance. This method makes sure the attribute reflects all of the properly configured checks and ports. Removing no-longer-configured ports is left to the `run_checks` method.
[ "Maintains", "the", "values", "in", "the", "checks", "attribute", "s", "dictionary", ".", "Each", "key", "in", "the", "dictionary", "is", "a", "port", "and", "each", "value", "is", "a", "nested", "dictionary", "mapping", "each", "check", "s", "name", "to",...
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/service.py#L121-L146
wglass/lighthouse
lighthouse/service.py
Service.run_checks
def run_checks(self): """ Iterates over the configured ports and runs the checks on each one. Returns a two-element tuple: the first is the set of ports that transitioned from down to up, the second is the set of ports that transitioned from up to down. Also handles the case where a check for a since-removed port is run, marking the port as down regardless of the check's result and removing the check(s) for the port. """ came_up = set() went_down = set() for port in self.ports: checks = self.checks[port].values() if not checks: logger.warn("No checks defined for self: %s", self.name) for check in checks: check.run() checks_pass = all([check.passing for check in checks]) if self.is_up[port] in (False, None) and checks_pass: came_up.add(port) self.is_up[port] = True elif self.is_up[port] in (True, None) and not checks_pass: went_down.add(port) self.is_up[port] = False for unused_port in set(self.checks.keys()) - self.ports: went_down.add(unused_port) del self.checks[unused_port] return came_up, went_down
python
def run_checks(self): """ Iterates over the configured ports and runs the checks on each one. Returns a two-element tuple: the first is the set of ports that transitioned from down to up, the second is the set of ports that transitioned from up to down. Also handles the case where a check for a since-removed port is run, marking the port as down regardless of the check's result and removing the check(s) for the port. """ came_up = set() went_down = set() for port in self.ports: checks = self.checks[port].values() if not checks: logger.warn("No checks defined for self: %s", self.name) for check in checks: check.run() checks_pass = all([check.passing for check in checks]) if self.is_up[port] in (False, None) and checks_pass: came_up.add(port) self.is_up[port] = True elif self.is_up[port] in (True, None) and not checks_pass: went_down.add(port) self.is_up[port] = False for unused_port in set(self.checks.keys()) - self.ports: went_down.add(unused_port) del self.checks[unused_port] return came_up, went_down
[ "def", "run_checks", "(", "self", ")", ":", "came_up", "=", "set", "(", ")", "went_down", "=", "set", "(", ")", "for", "port", "in", "self", ".", "ports", ":", "checks", "=", "self", ".", "checks", "[", "port", "]", ".", "values", "(", ")", "if",...
Iterates over the configured ports and runs the checks on each one. Returns a two-element tuple: the first is the set of ports that transitioned from down to up, the second is the set of ports that transitioned from up to down. Also handles the case where a check for a since-removed port is run, marking the port as down regardless of the check's result and removing the check(s) for the port.
[ "Iterates", "over", "the", "configured", "ports", "and", "runs", "the", "checks", "on", "each", "one", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/service.py#L148-L185
ltalirz/aiida-gudhi
aiida_gudhi/parsers/rips.py
RipsParser.parse_with_retrieved
def parse_with_retrieved(self, retrieved): """ Parse output data folder, store results in database. :param retrieved: a dictionary of retrieved nodes, where the key is the link name :returns: a tuple with two values ``(bool, node_list)``, where: * ``bool``: variable to tell if the parsing succeeded * ``node_list``: list of new nodes to be stored in the db (as a list of tuples ``(link_name, node)``) """ from aiida.orm.data.singlefile import SinglefileData success = False node_list = [] # Check that the retrieved folder is there try: out_folder = retrieved['retrieved'] except KeyError: self.logger.error("No retrieved folder found") return success, node_list # Check the folder content is as expected list_of_files = out_folder.get_folder_list() output_files = self._calc.inp.parameters.output_files # Note: set(A) <= set(B) checks whether A is a subset if set(output_files) <= set(list_of_files): pass else: self.logger.error( "Not all expected output files {} were found".format( output_files)) return success, node_list output_links = self._calc.inp.parameters.output_links for fname, link in list(zip(output_files, output_links)): parsed = SinglefileData(file=out_folder.get_abs_path(fname)) node_list.append((link, parsed)) success = True return success, node_list
python
def parse_with_retrieved(self, retrieved): """ Parse output data folder, store results in database. :param retrieved: a dictionary of retrieved nodes, where the key is the link name :returns: a tuple with two values ``(bool, node_list)``, where: * ``bool``: variable to tell if the parsing succeeded * ``node_list``: list of new nodes to be stored in the db (as a list of tuples ``(link_name, node)``) """ from aiida.orm.data.singlefile import SinglefileData success = False node_list = [] # Check that the retrieved folder is there try: out_folder = retrieved['retrieved'] except KeyError: self.logger.error("No retrieved folder found") return success, node_list # Check the folder content is as expected list_of_files = out_folder.get_folder_list() output_files = self._calc.inp.parameters.output_files # Note: set(A) <= set(B) checks whether A is a subset if set(output_files) <= set(list_of_files): pass else: self.logger.error( "Not all expected output files {} were found".format( output_files)) return success, node_list output_links = self._calc.inp.parameters.output_links for fname, link in list(zip(output_files, output_links)): parsed = SinglefileData(file=out_folder.get_abs_path(fname)) node_list.append((link, parsed)) success = True return success, node_list
[ "def", "parse_with_retrieved", "(", "self", ",", "retrieved", ")", ":", "from", "aiida", ".", "orm", ".", "data", ".", "singlefile", "import", "SinglefileData", "success", "=", "False", "node_list", "=", "[", "]", "# Check that the retrieved folder is there", "try...
Parse output data folder, store results in database. :param retrieved: a dictionary of retrieved nodes, where the key is the link name :returns: a tuple with two values ``(bool, node_list)``, where: * ``bool``: variable to tell if the parsing succeeded * ``node_list``: list of new nodes to be stored in the db (as a list of tuples ``(link_name, node)``)
[ "Parse", "output", "data", "folder", "store", "results", "in", "database", "." ]
train
https://github.com/ltalirz/aiida-gudhi/blob/81ebec782ddff3ab97a3e3242b809fec989fa4b9/aiida_gudhi/parsers/rips.py#L26-L69
davebridges/mousedb
mousedb/animal/admin.py
AnimalAdmin.mark_sacrificed
def mark_sacrificed(self,request,queryset): """An admin action for marking several animals as sacrificed. This action sets the selected animals as Alive=False, Death=today and Cause_of_Death as sacrificed. To use other paramters, mice muse be individually marked as sacrificed. This admin action also shows as the output the number of mice sacrificed.""" rows_updated = queryset.update(Alive=False, Death=datetime.date.today(), Cause_of_Death='Sacrificed') if rows_updated == 1: message_bit = "1 animal was" else: message_bit = "%s animals were" % rows_updated self.message_user(request, "%s successfully marked as sacrificed." % message_bit)
python
def mark_sacrificed(self,request,queryset): """An admin action for marking several animals as sacrificed. This action sets the selected animals as Alive=False, Death=today and Cause_of_Death as sacrificed. To use other paramters, mice muse be individually marked as sacrificed. This admin action also shows as the output the number of mice sacrificed.""" rows_updated = queryset.update(Alive=False, Death=datetime.date.today(), Cause_of_Death='Sacrificed') if rows_updated == 1: message_bit = "1 animal was" else: message_bit = "%s animals were" % rows_updated self.message_user(request, "%s successfully marked as sacrificed." % message_bit)
[ "def", "mark_sacrificed", "(", "self", ",", "request", ",", "queryset", ")", ":", "rows_updated", "=", "queryset", ".", "update", "(", "Alive", "=", "False", ",", "Death", "=", "datetime", ".", "date", ".", "today", "(", ")", ",", "Cause_of_Death", "=", ...
An admin action for marking several animals as sacrificed. This action sets the selected animals as Alive=False, Death=today and Cause_of_Death as sacrificed. To use other paramters, mice muse be individually marked as sacrificed. This admin action also shows as the output the number of mice sacrificed.
[ "An", "admin", "action", "for", "marking", "several", "animals", "as", "sacrificed", ".", "This", "action", "sets", "the", "selected", "animals", "as", "Alive", "=", "False", "Death", "=", "today", "and", "Cause_of_Death", "as", "sacrificed", ".", "To", "use...
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/animal/admin.py#L33-L43
davebridges/mousedb
mousedb/animal/admin.py
BreedingAdmin.mark_deactivated
def mark_deactivated(self,request,queryset): """An admin action for marking several cages as inactive. This action sets the selected cages as Active=False and Death=today. This admin action also shows as the output the number of mice sacrificed.""" rows_updated = queryset.update(Active=False, End=datetime.date.today() ) if rows_updated == 1: message_bit = "1 cage was" else: message_bit = "%s cages were" % rows_updated self.message_user(request, "%s successfully marked as deactivated." % message_bit)
python
def mark_deactivated(self,request,queryset): """An admin action for marking several cages as inactive. This action sets the selected cages as Active=False and Death=today. This admin action also shows as the output the number of mice sacrificed.""" rows_updated = queryset.update(Active=False, End=datetime.date.today() ) if rows_updated == 1: message_bit = "1 cage was" else: message_bit = "%s cages were" % rows_updated self.message_user(request, "%s successfully marked as deactivated." % message_bit)
[ "def", "mark_deactivated", "(", "self", ",", "request", ",", "queryset", ")", ":", "rows_updated", "=", "queryset", ".", "update", "(", "Active", "=", "False", ",", "End", "=", "datetime", ".", "date", ".", "today", "(", ")", ")", "if", "rows_updated", ...
An admin action for marking several cages as inactive. This action sets the selected cages as Active=False and Death=today. This admin action also shows as the output the number of mice sacrificed.
[ "An", "admin", "action", "for", "marking", "several", "cages", "as", "inactive", ".", "This", "action", "sets", "the", "selected", "cages", "as", "Active", "=", "False", "and", "Death", "=", "today", ".", "This", "admin", "action", "also", "shows", "as", ...
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/animal/admin.py#L78-L88
uta-smile/smile-python
smile/app.py
run
def run(main=None, argv=None): """Runs the program with an optional 'main' function and 'argv' list.""" flags_obj = flags.FLAGS absl_flags_obj = absl_flags.FLAGS # Extract the args from the optional `argv` list. args = argv[1:] if argv else None # Parse the known flags from that list, or from the command # line otherwise. # pylint: disable=protected-access flags_passthrough = flags_obj._parse_flags(args=args) # pylint: enable=protected-access # Immediately after flags are parsed, bump verbosity to INFO if the flag has # not been set. if absl_flags_obj["verbosity"].using_default_value: absl_flags_obj.verbosity = 0 main = main or sys.modules['__main__'].main # Call the main function, passing through any arguments # to the final program. sys.exit(main(sys.argv[:1] + flags_passthrough))
python
def run(main=None, argv=None): """Runs the program with an optional 'main' function and 'argv' list.""" flags_obj = flags.FLAGS absl_flags_obj = absl_flags.FLAGS # Extract the args from the optional `argv` list. args = argv[1:] if argv else None # Parse the known flags from that list, or from the command # line otherwise. # pylint: disable=protected-access flags_passthrough = flags_obj._parse_flags(args=args) # pylint: enable=protected-access # Immediately after flags are parsed, bump verbosity to INFO if the flag has # not been set. if absl_flags_obj["verbosity"].using_default_value: absl_flags_obj.verbosity = 0 main = main or sys.modules['__main__'].main # Call the main function, passing through any arguments # to the final program. sys.exit(main(sys.argv[:1] + flags_passthrough))
[ "def", "run", "(", "main", "=", "None", ",", "argv", "=", "None", ")", ":", "flags_obj", "=", "flags", ".", "FLAGS", "absl_flags_obj", "=", "absl_flags", ".", "FLAGS", "# Extract the args from the optional `argv` list.", "args", "=", "argv", "[", "1", ":", "...
Runs the program with an optional 'main' function and 'argv' list.
[ "Runs", "the", "program", "with", "an", "optional", "main", "function", "and", "argv", "list", "." ]
train
https://github.com/uta-smile/smile-python/blob/8918d1bb9ef76922ef976f6e3a10578b3a0af1c8/smile/app.py#L12-L35
peterldowns/python-mustache
mustache/utils.py
make_unicode
def make_unicode(s, encoding='utf-8', encoding_errors='strict'): """ Return the unicode version of an input. """ if not isinstance(s, unicode): if not isinstance(s, basestring): return unicode(str(s), encoding, encoding_errors) return unicode(s, encoding, encoding_errors) return s
python
def make_unicode(s, encoding='utf-8', encoding_errors='strict'): """ Return the unicode version of an input. """ if not isinstance(s, unicode): if not isinstance(s, basestring): return unicode(str(s), encoding, encoding_errors) return unicode(s, encoding, encoding_errors) return s
[ "def", "make_unicode", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "encoding_errors", "=", "'strict'", ")", ":", "if", "not", "isinstance", "(", "s", ",", "unicode", ")", ":", "if", "not", "isinstance", "(", "s", ",", "basestring", ")", ":", "retur...
Return the unicode version of an input.
[ "Return", "the", "unicode", "version", "of", "an", "input", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/utils.py#L4-L10
peterldowns/python-mustache
mustache/utils.py
html_escape
def html_escape(s, encoding='utf-8', encoding_errors='strict'): """ Return the HTML-escaped version of an input. """ return escape(make_unicode(s, encoding, encoding_errors), quote=True)
python
def html_escape(s, encoding='utf-8', encoding_errors='strict'): """ Return the HTML-escaped version of an input. """ return escape(make_unicode(s, encoding, encoding_errors), quote=True)
[ "def", "html_escape", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "encoding_errors", "=", "'strict'", ")", ":", "return", "escape", "(", "make_unicode", "(", "s", ",", "encoding", ",", "encoding_errors", ")", ",", "quote", "=", "True", ")" ]
Return the HTML-escaped version of an input.
[ "Return", "the", "HTML", "-", "escaped", "version", "of", "an", "input", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/utils.py#L12-L14
ucbvislab/radiotool
radiotool/algorithms/retarget.py
retarget_to_length
def retarget_to_length(song, duration, start=True, end=True, slack=5, beats_per_measure=None): """Create a composition of a song that changes its length to a given duration. :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param duration: Duration of retargeted song (in seconds) :type duration: float :param start: Start the retargeted song at the beginning of the original song :type start: boolean :param end: End the retargeted song at the end of the original song :type end: boolean :param slack: Track will be within slack seconds of the target duration (more slack allows for better-sounding music) :type slack: float :returns: Composition of retargeted song :rtype: :py:class:`radiotool.composer.Composition` """ duration = float(duration) constraints = [ rt_constraints.TimbrePitchConstraint( context=0, timbre_weight=1.0, chroma_weight=1.0), rt_constraints.EnergyConstraint(penalty=.5), rt_constraints.MinimumLoopConstraint(8), ] if beats_per_measure is not None: constraints.append( rt_constraints.RhythmConstraint(beats_per_measure, .125)) if start: constraints.append( rt_constraints.StartAtStartConstraint(padding=0)) if end: constraints.append( rt_constraints.EndAtEndConstraint(padding=slack)) comp, info = retarget( [song], duration, constraints=[constraints], fade_in_len=None, fade_out_len=None) # force the new track to extend to the end of the song if end: last_seg = sorted( comp.segments, key=lambda seg: seg.comp_location_in_seconds + seg.duration_in_seconds )[-1] last_seg.duration_in_seconds = ( song.duration_in_seconds - last_seg.start_in_seconds) path_cost = info["path_cost"] total_nonzero_cost = [] total_nonzero_points = [] for node in path_cost: if float(node.name) > 0.0: total_nonzero_cost.append(float(node.name)) total_nonzero_points.append(float(node.time)) transitions = zip(total_nonzero_points, total_nonzero_cost) for transition in transitions: comp.add_label(Label("crossfade", transition[0])) return comp
python
def retarget_to_length(song, duration, start=True, end=True, slack=5, beats_per_measure=None): """Create a composition of a song that changes its length to a given duration. :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param duration: Duration of retargeted song (in seconds) :type duration: float :param start: Start the retargeted song at the beginning of the original song :type start: boolean :param end: End the retargeted song at the end of the original song :type end: boolean :param slack: Track will be within slack seconds of the target duration (more slack allows for better-sounding music) :type slack: float :returns: Composition of retargeted song :rtype: :py:class:`radiotool.composer.Composition` """ duration = float(duration) constraints = [ rt_constraints.TimbrePitchConstraint( context=0, timbre_weight=1.0, chroma_weight=1.0), rt_constraints.EnergyConstraint(penalty=.5), rt_constraints.MinimumLoopConstraint(8), ] if beats_per_measure is not None: constraints.append( rt_constraints.RhythmConstraint(beats_per_measure, .125)) if start: constraints.append( rt_constraints.StartAtStartConstraint(padding=0)) if end: constraints.append( rt_constraints.EndAtEndConstraint(padding=slack)) comp, info = retarget( [song], duration, constraints=[constraints], fade_in_len=None, fade_out_len=None) # force the new track to extend to the end of the song if end: last_seg = sorted( comp.segments, key=lambda seg: seg.comp_location_in_seconds + seg.duration_in_seconds )[-1] last_seg.duration_in_seconds = ( song.duration_in_seconds - last_seg.start_in_seconds) path_cost = info["path_cost"] total_nonzero_cost = [] total_nonzero_points = [] for node in path_cost: if float(node.name) > 0.0: total_nonzero_cost.append(float(node.name)) total_nonzero_points.append(float(node.time)) transitions = zip(total_nonzero_points, total_nonzero_cost) for transition in transitions: comp.add_label(Label("crossfade", transition[0])) return comp
[ "def", "retarget_to_length", "(", "song", ",", "duration", ",", "start", "=", "True", ",", "end", "=", "True", ",", "slack", "=", "5", ",", "beats_per_measure", "=", "None", ")", ":", "duration", "=", "float", "(", "duration", ")", "constraints", "=", ...
Create a composition of a song that changes its length to a given duration. :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param duration: Duration of retargeted song (in seconds) :type duration: float :param start: Start the retargeted song at the beginning of the original song :type start: boolean :param end: End the retargeted song at the end of the original song :type end: boolean :param slack: Track will be within slack seconds of the target duration (more slack allows for better-sounding music) :type slack: float :returns: Composition of retargeted song :rtype: :py:class:`radiotool.composer.Composition`
[ "Create", "a", "composition", "of", "a", "song", "that", "changes", "its", "length", "to", "a", "given", "duration", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/algorithms/retarget.py#L23-L92
ucbvislab/radiotool
radiotool/algorithms/retarget.py
retarget_with_change_points
def retarget_with_change_points(song, cp_times, duration): """Create a composition of a song of a given duration that reaches music change points at specified times. This is still under construction. It might not work as well with more than 2 ``cp_times`` at the moment. Here's an example of retargeting music to be 40 seconds long and hit a change point at the 10 and 30 second marks:: song = Song("instrumental_music.wav") composition, change_points =\ retarget.retarget_with_change_points(song, [10, 30], 40) composition.export(filename="retargeted_instrumental_music.") :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param cp_times: Times to reach change points (in seconds) :type cp_times: list of floats :param duration: Target length of retargeted music (in seconds) :type duration: float :returns: Composition of retargeted song and list of locations of change points in the retargeted composition :rtype: (:py:class:`radiotool.composer.Composition`, list) """ analysis = song.analysis beat_length = analysis[BEAT_DUR_KEY] beats = np.array(analysis["beats"]) # find change points cps = np.array(novelty(song, nchangepoints=4)) cp_times = np.array(cp_times) # mark change points in original music def music_labels(t): # find beat closest to t closest_beat_idx = np.argmin(np.abs(beats - t)) closest_beat = beats[closest_beat_idx] closest_cp = cps[np.argmin(np.abs(cps - closest_beat))] if np.argmin(np.abs(beats - closest_cp)) == closest_beat_idx: return "cp" else: return "noncp" # mark where we want change points in the output music # (a few beats of slack to improve the quality of the end result) def out_labels(t): if np.min(np.abs(cp_times - t)) < 1.5 * beat_length: return "cp" return "noncp" m_labels = [music_labels(i) for i in np.arange(0, song.duration_in_seconds, beat_length)] o_labels = [out_labels(i) for i in np.arange(0, duration, beat_length)] constraints = [ rt_constraints.TimbrePitchConstraint( context=0, timbre_weight=1.0, chroma_weight=1.0), rt_constraints.EnergyConstraint(penalty=.5), rt_constraints.MinimumLoopConstraint(8), rt_constraints.NoveltyConstraint(m_labels, o_labels, 1.0) ] comp, info = retarget( [song], duration, constraints=[constraints], fade_in_len=None, fade_out_len=None) final_cp_locations = [beat_length * i for i, label in enumerate(info['result_labels']) if label == 'cp'] return comp, final_cp_locations
python
def retarget_with_change_points(song, cp_times, duration): """Create a composition of a song of a given duration that reaches music change points at specified times. This is still under construction. It might not work as well with more than 2 ``cp_times`` at the moment. Here's an example of retargeting music to be 40 seconds long and hit a change point at the 10 and 30 second marks:: song = Song("instrumental_music.wav") composition, change_points =\ retarget.retarget_with_change_points(song, [10, 30], 40) composition.export(filename="retargeted_instrumental_music.") :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param cp_times: Times to reach change points (in seconds) :type cp_times: list of floats :param duration: Target length of retargeted music (in seconds) :type duration: float :returns: Composition of retargeted song and list of locations of change points in the retargeted composition :rtype: (:py:class:`radiotool.composer.Composition`, list) """ analysis = song.analysis beat_length = analysis[BEAT_DUR_KEY] beats = np.array(analysis["beats"]) # find change points cps = np.array(novelty(song, nchangepoints=4)) cp_times = np.array(cp_times) # mark change points in original music def music_labels(t): # find beat closest to t closest_beat_idx = np.argmin(np.abs(beats - t)) closest_beat = beats[closest_beat_idx] closest_cp = cps[np.argmin(np.abs(cps - closest_beat))] if np.argmin(np.abs(beats - closest_cp)) == closest_beat_idx: return "cp" else: return "noncp" # mark where we want change points in the output music # (a few beats of slack to improve the quality of the end result) def out_labels(t): if np.min(np.abs(cp_times - t)) < 1.5 * beat_length: return "cp" return "noncp" m_labels = [music_labels(i) for i in np.arange(0, song.duration_in_seconds, beat_length)] o_labels = [out_labels(i) for i in np.arange(0, duration, beat_length)] constraints = [ rt_constraints.TimbrePitchConstraint( context=0, timbre_weight=1.0, chroma_weight=1.0), rt_constraints.EnergyConstraint(penalty=.5), rt_constraints.MinimumLoopConstraint(8), rt_constraints.NoveltyConstraint(m_labels, o_labels, 1.0) ] comp, info = retarget( [song], duration, constraints=[constraints], fade_in_len=None, fade_out_len=None) final_cp_locations = [beat_length * i for i, label in enumerate(info['result_labels']) if label == 'cp'] return comp, final_cp_locations
[ "def", "retarget_with_change_points", "(", "song", ",", "cp_times", ",", "duration", ")", ":", "analysis", "=", "song", ".", "analysis", "beat_length", "=", "analysis", "[", "BEAT_DUR_KEY", "]", "beats", "=", "np", ".", "array", "(", "analysis", "[", "\"beat...
Create a composition of a song of a given duration that reaches music change points at specified times. This is still under construction. It might not work as well with more than 2 ``cp_times`` at the moment. Here's an example of retargeting music to be 40 seconds long and hit a change point at the 10 and 30 second marks:: song = Song("instrumental_music.wav") composition, change_points =\ retarget.retarget_with_change_points(song, [10, 30], 40) composition.export(filename="retargeted_instrumental_music.") :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param cp_times: Times to reach change points (in seconds) :type cp_times: list of floats :param duration: Target length of retargeted music (in seconds) :type duration: float :returns: Composition of retargeted song and list of locations of change points in the retargeted composition :rtype: (:py:class:`radiotool.composer.Composition`, list)
[ "Create", "a", "composition", "of", "a", "song", "of", "a", "given", "duration", "that", "reaches", "music", "change", "points", "at", "specified", "times", ".", "This", "is", "still", "under", "construction", ".", "It", "might", "not", "work", "as", "well...
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/algorithms/retarget.py#L95-L167
ucbvislab/radiotool
radiotool/algorithms/retarget.py
retarget
def retarget(songs, duration, music_labels=None, out_labels=None, out_penalty=None, volume=None, volume_breakpoints=None, springs=None, constraints=None, min_beats=None, max_beats=None, fade_in_len=3.0, fade_out_len=5.0, **kwargs): """Retarget a song to a duration given input and output labels on the music. Suppose you like one section of a song, say, the guitar solo, and you want to create a three minute long version of the solo. Suppose the guitar solo occurs from the 150 second mark to the 200 second mark in the original song. You can set the label the guitar solo with 'solo' and the rest of the song with 'other' by crafting the ``music_labels`` input function. And you can set the ``out_labels`` function to give you nothing but solo:: def labels(t): if 150 < t < 200: return 'solo' return 'other' def target(t): return 'solo' song = Song("sweet-rock-song.wav") composition, info = retarget(song, 180, music_labels=labels, out_labels=target) composition.export(filename="super-long-solo") You can achieve much more complicated retargetings by adjusting the ``music_labels``, `out_labels` and ``out_penalty`` functions, but this should give you a basic sense of how to use the ``retarget`` function. :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param duration: Duration of retargeted song (in seconds) :type duration: float :param music_labels: A function that takes a time (in seconds) and returns the label (str) of the input music at that time :type music_labels: function :param out_labels: A function that takes a time (in seconds) and returns the desired label (str) of the output music at that time :type out_labels: function :param out_penalty: A function that takes a time (in seconds) and returns the penalty for not matching the correct output label at that time (default is 1.0) :type out_penalty: function :returns: Composition of retargeted song, and dictionary of information about the retargeting :rtype: (:py:class:`radiotool.composer.Composition`, dict) """ # get song analysis if isinstance(songs, Track): songs = [songs] multi_songs = len(songs) > 1 analyses = [s.analysis for s in songs] # generate labels for every beat in the input and output beat_lengths = [a[BEAT_DUR_KEY] for a in analyses] beats = [a["beats"] for a in analyses] beat_length = np.mean(beat_lengths) logging.info("Beat lengths of songs: {} (mean: {})". format(beat_lengths, beat_length)) if out_labels is not None: target = [out_labels(i) for i in np.arange(0, duration, beat_length)] else: target = ["" for i in np.arange(0, duration, beat_length)] if music_labels is not None: if not multi_songs: music_labels = [music_labels] music_labels = [item for sublist in music_labels for item in sublist] if len(music_labels) != len(songs): raise ArgumentException("Did not specify {} sets of music labels". format(len(songs))) start = [[music_labels[i](j) for j in b] for i, b in enumerate(beats)] else: start = [["" for i in b] for b in beats] if out_penalty is not None: pen = np.array([out_penalty(i) for i in np.arange( 0, duration, beat_length)]) else: pen = np.array([1 for i in np.arange(0, duration, beat_length)]) # we're using a valence/arousal constraint, so we need these in_vas = kwargs.pop('music_va', None) if in_vas is not None: if not multi_songs: in_vas = [in_vas] in_vas = [item for sublist in in_vas for item in sublist] if len(in_vas) != len(songs): raise ArgumentException("Did not specify {} sets of v/a labels". format(len(songs))) for i, in_va in enumerate(in_vas): if callable(in_va): in_va = np.array([in_va(j) for j in beats[i]]) in_vas[i] = in_va target_va = kwargs.pop('out_va', None) if callable(target_va): target_va = np.array( [target_va(i) for i in np.arange(0, duration, beat_length)]) # set constraints if constraints is None: min_pause_len = 20. max_pause_len = 35. min_pause_beats = int(np.ceil(min_pause_len / beat_length)) max_pause_beats = int(np.floor(max_pause_len / beat_length)) constraints = [( rt_constraints.PauseConstraint( min_pause_beats, max_pause_beats, to_penalty=1.4, between_penalty=.05, unit="beats"), rt_constraints.PauseEntryVAChangeConstraint(target_va, .005), rt_constraints.PauseExitVAChangeConstraint(target_va, .005), rt_constraints.TimbrePitchConstraint( context=0, timbre_weight=1.5, chroma_weight=1.5), rt_constraints.EnergyConstraint(penalty=0.5), rt_constraints.MinimumLoopConstraint(8), rt_constraints.ValenceArousalConstraint( in_va, target_va, pen * .125), rt_constraints.NoveltyVAConstraint(in_va, target_va, pen), ) for in_va in in_vas] else: max_pause_beats = 0 if len(constraints) > 0: if isinstance(constraints[0], rt_constraints.Constraint): constraints = [constraints] pipelines = [rt_constraints.ConstraintPipeline(constraints=c_set) for c_set in constraints] trans_costs = [] penalties = [] all_beat_names = [] for i, song in enumerate(songs): (trans_cost, penalty, bn) = pipelines[i].apply(song, len(target)) trans_costs.append(trans_cost) penalties.append(penalty) all_beat_names.append(bn) logging.info("Combining tables") total_music_beats = int(np.sum([len(b) for b in beats])) total_beats = total_music_beats + max_pause_beats # combine transition cost tables trans_cost = np.ones((total_beats, total_beats)) * np.inf sizes = [len(b) for b in beats] idx = 0 for i, size in enumerate(sizes): trans_cost[idx:idx + size, idx:idx + size] =\ trans_costs[i][:size, :size] idx += size trans_cost[:total_music_beats, total_music_beats:] =\ np.vstack([tc[:len(beats[i]), len(beats[i]):] for i, tc in enumerate(trans_costs)]) trans_cost[total_music_beats:, :total_music_beats] =\ np.hstack([tc[len(beats[i]):, :len(beats[i])] for i, tc in enumerate(trans_costs)]) trans_cost[total_music_beats:, total_music_beats:] =\ trans_costs[0][len(beats[0]):, len(beats[0]):] # combine penalty tables penalty = np.empty((total_beats, penalties[0].shape[1])) penalty[:total_music_beats, :] =\ np.vstack([p[:len(beats[i]), :] for i, p in enumerate(penalties)]) penalty[total_music_beats:, :] = penalties[0][len(beats[0]):, :] logging.info("Building cost table") # compute the dynamic programming table (prev python method) # cost, prev_node = _build_table(analysis, duration, start, target, pen) # first_pause = 0 # if max_pause_beats > 0: first_pause = total_music_beats if min_beats is None: min_beats = 0 elif min_beats is 'default': min_beats = int(20. / beat_length) if max_beats is None: max_beats = -1 elif max_beats is 'default': max_beats = int(90. / beat_length) max_beats = min(max_beats, penalty.shape[1]) tc2 = np.nan_to_num(trans_cost) pen2 = np.nan_to_num(penalty) beat_names = [] for i, bn in enumerate(all_beat_names): for b in bn: if not str(b).startswith('p'): beat_names.append((i, float(b))) beat_names.extend([('p', i) for i in xrange(max_pause_beats)]) result_labels = [] logging.info("Running optimization (full backtrace, memory efficient)") logging.info("\twith min_beats(%d) and max_beats(%d) and first_pause(%d)" % (min_beats, max_beats, first_pause)) song_starts = [0] for song in songs: song_starts.append(song_starts[-1] + len(song.analysis["beats"])) song_ends = np.array(song_starts[1:], dtype=np.int32) song_starts = np.array(song_starts[:-1], dtype=np.int32) t1 = time.clock() path_i, path_cost = build_table_full_backtrace( tc2, pen2, song_starts, song_ends, first_pause=first_pause, max_beats=max_beats, min_beats=min_beats) t2 = time.clock() logging.info("Built table (full backtrace) in {} seconds" .format(t2 - t1)) path = [] if max_beats == -1: max_beats = min_beats + 1 first_pause_full = max_beats * first_pause n_beats = first_pause for i in path_i: if i >= first_pause_full: path.append(('p', i - first_pause_full)) result_labels.append(None) # path.append('p' + str(i - first_pause_full)) else: path.append(beat_names[i % n_beats]) song_i = path[-1][0] beat_name = path[-1][1] result_labels.append( start[song_i][np.where(np.array(beats[song_i]) == beat_name)[0][0]]) # path.append(float(beat_names[i % n_beats])) # else: # print("Running optimization (fast, full table)") # # this won't work right now- needs to be updated # # with the multi-song approach # # fortran method # t1 = time.clock() # cost, prev_node = build_table(tc2, pen2) # t2 = time.clock() # print("Built table (fortran) in {} seconds".format(t2 - t1)) # res = cost[:, -1] # best_idx = N.argmin(res) # if N.isfinite(res[best_idx]): # path, path_cost, path_i = _reconstruct_path( # prev_node, cost, beat_names, best_idx, N.shape(cost)[1] - 1) # # path_i = [beat_names.index(x) for x in path] # else: # # throw an exception here? # return None # path = [] # result_labels = [] # if max_pause_beats == 0: # n_beats = total_music_beats # first_pause = n_beats # else: # n_beats = first_pause # for i in path_i: # if i >= first_pause: # path.append(('p', i - first_pause)) # result_labels.append(None) # else: # path.append(beat_names[i % n_beats]) # song_i = path[-1][0] # beat_name = path[-1][1] # result_labels.append( # start[song_i][N.where(N.array(beats[song_i]) == # beat_name)[0][0]]) # return a radiotool Composition logging.info("Generating audio") (comp, cf_locations, result_full_labels, cost_labels, contracted, result_volume) =\ _generate_audio( songs, beats, path, path_cost, start, volume=volume, volume_breakpoints=volume_breakpoints, springs=springs, fade_in_len=fade_in_len, fade_out_len=fade_out_len) info = { "beat_length": beat_length, "contracted": contracted, "cost": np.sum(path_cost) / len(path), "path": path, "path_i": path_i, "target_labels": target, "result_labels": result_labels, "result_full_labels": result_full_labels, "result_volume": result_volume, "transitions": [Label("crossfade", loc) for loc in cf_locations], "path_cost": cost_labels } return comp, info
python
def retarget(songs, duration, music_labels=None, out_labels=None, out_penalty=None, volume=None, volume_breakpoints=None, springs=None, constraints=None, min_beats=None, max_beats=None, fade_in_len=3.0, fade_out_len=5.0, **kwargs): """Retarget a song to a duration given input and output labels on the music. Suppose you like one section of a song, say, the guitar solo, and you want to create a three minute long version of the solo. Suppose the guitar solo occurs from the 150 second mark to the 200 second mark in the original song. You can set the label the guitar solo with 'solo' and the rest of the song with 'other' by crafting the ``music_labels`` input function. And you can set the ``out_labels`` function to give you nothing but solo:: def labels(t): if 150 < t < 200: return 'solo' return 'other' def target(t): return 'solo' song = Song("sweet-rock-song.wav") composition, info = retarget(song, 180, music_labels=labels, out_labels=target) composition.export(filename="super-long-solo") You can achieve much more complicated retargetings by adjusting the ``music_labels``, `out_labels` and ``out_penalty`` functions, but this should give you a basic sense of how to use the ``retarget`` function. :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param duration: Duration of retargeted song (in seconds) :type duration: float :param music_labels: A function that takes a time (in seconds) and returns the label (str) of the input music at that time :type music_labels: function :param out_labels: A function that takes a time (in seconds) and returns the desired label (str) of the output music at that time :type out_labels: function :param out_penalty: A function that takes a time (in seconds) and returns the penalty for not matching the correct output label at that time (default is 1.0) :type out_penalty: function :returns: Composition of retargeted song, and dictionary of information about the retargeting :rtype: (:py:class:`radiotool.composer.Composition`, dict) """ # get song analysis if isinstance(songs, Track): songs = [songs] multi_songs = len(songs) > 1 analyses = [s.analysis for s in songs] # generate labels for every beat in the input and output beat_lengths = [a[BEAT_DUR_KEY] for a in analyses] beats = [a["beats"] for a in analyses] beat_length = np.mean(beat_lengths) logging.info("Beat lengths of songs: {} (mean: {})". format(beat_lengths, beat_length)) if out_labels is not None: target = [out_labels(i) for i in np.arange(0, duration, beat_length)] else: target = ["" for i in np.arange(0, duration, beat_length)] if music_labels is not None: if not multi_songs: music_labels = [music_labels] music_labels = [item for sublist in music_labels for item in sublist] if len(music_labels) != len(songs): raise ArgumentException("Did not specify {} sets of music labels". format(len(songs))) start = [[music_labels[i](j) for j in b] for i, b in enumerate(beats)] else: start = [["" for i in b] for b in beats] if out_penalty is not None: pen = np.array([out_penalty(i) for i in np.arange( 0, duration, beat_length)]) else: pen = np.array([1 for i in np.arange(0, duration, beat_length)]) # we're using a valence/arousal constraint, so we need these in_vas = kwargs.pop('music_va', None) if in_vas is not None: if not multi_songs: in_vas = [in_vas] in_vas = [item for sublist in in_vas for item in sublist] if len(in_vas) != len(songs): raise ArgumentException("Did not specify {} sets of v/a labels". format(len(songs))) for i, in_va in enumerate(in_vas): if callable(in_va): in_va = np.array([in_va(j) for j in beats[i]]) in_vas[i] = in_va target_va = kwargs.pop('out_va', None) if callable(target_va): target_va = np.array( [target_va(i) for i in np.arange(0, duration, beat_length)]) # set constraints if constraints is None: min_pause_len = 20. max_pause_len = 35. min_pause_beats = int(np.ceil(min_pause_len / beat_length)) max_pause_beats = int(np.floor(max_pause_len / beat_length)) constraints = [( rt_constraints.PauseConstraint( min_pause_beats, max_pause_beats, to_penalty=1.4, between_penalty=.05, unit="beats"), rt_constraints.PauseEntryVAChangeConstraint(target_va, .005), rt_constraints.PauseExitVAChangeConstraint(target_va, .005), rt_constraints.TimbrePitchConstraint( context=0, timbre_weight=1.5, chroma_weight=1.5), rt_constraints.EnergyConstraint(penalty=0.5), rt_constraints.MinimumLoopConstraint(8), rt_constraints.ValenceArousalConstraint( in_va, target_va, pen * .125), rt_constraints.NoveltyVAConstraint(in_va, target_va, pen), ) for in_va in in_vas] else: max_pause_beats = 0 if len(constraints) > 0: if isinstance(constraints[0], rt_constraints.Constraint): constraints = [constraints] pipelines = [rt_constraints.ConstraintPipeline(constraints=c_set) for c_set in constraints] trans_costs = [] penalties = [] all_beat_names = [] for i, song in enumerate(songs): (trans_cost, penalty, bn) = pipelines[i].apply(song, len(target)) trans_costs.append(trans_cost) penalties.append(penalty) all_beat_names.append(bn) logging.info("Combining tables") total_music_beats = int(np.sum([len(b) for b in beats])) total_beats = total_music_beats + max_pause_beats # combine transition cost tables trans_cost = np.ones((total_beats, total_beats)) * np.inf sizes = [len(b) for b in beats] idx = 0 for i, size in enumerate(sizes): trans_cost[idx:idx + size, idx:idx + size] =\ trans_costs[i][:size, :size] idx += size trans_cost[:total_music_beats, total_music_beats:] =\ np.vstack([tc[:len(beats[i]), len(beats[i]):] for i, tc in enumerate(trans_costs)]) trans_cost[total_music_beats:, :total_music_beats] =\ np.hstack([tc[len(beats[i]):, :len(beats[i])] for i, tc in enumerate(trans_costs)]) trans_cost[total_music_beats:, total_music_beats:] =\ trans_costs[0][len(beats[0]):, len(beats[0]):] # combine penalty tables penalty = np.empty((total_beats, penalties[0].shape[1])) penalty[:total_music_beats, :] =\ np.vstack([p[:len(beats[i]), :] for i, p in enumerate(penalties)]) penalty[total_music_beats:, :] = penalties[0][len(beats[0]):, :] logging.info("Building cost table") # compute the dynamic programming table (prev python method) # cost, prev_node = _build_table(analysis, duration, start, target, pen) # first_pause = 0 # if max_pause_beats > 0: first_pause = total_music_beats if min_beats is None: min_beats = 0 elif min_beats is 'default': min_beats = int(20. / beat_length) if max_beats is None: max_beats = -1 elif max_beats is 'default': max_beats = int(90. / beat_length) max_beats = min(max_beats, penalty.shape[1]) tc2 = np.nan_to_num(trans_cost) pen2 = np.nan_to_num(penalty) beat_names = [] for i, bn in enumerate(all_beat_names): for b in bn: if not str(b).startswith('p'): beat_names.append((i, float(b))) beat_names.extend([('p', i) for i in xrange(max_pause_beats)]) result_labels = [] logging.info("Running optimization (full backtrace, memory efficient)") logging.info("\twith min_beats(%d) and max_beats(%d) and first_pause(%d)" % (min_beats, max_beats, first_pause)) song_starts = [0] for song in songs: song_starts.append(song_starts[-1] + len(song.analysis["beats"])) song_ends = np.array(song_starts[1:], dtype=np.int32) song_starts = np.array(song_starts[:-1], dtype=np.int32) t1 = time.clock() path_i, path_cost = build_table_full_backtrace( tc2, pen2, song_starts, song_ends, first_pause=first_pause, max_beats=max_beats, min_beats=min_beats) t2 = time.clock() logging.info("Built table (full backtrace) in {} seconds" .format(t2 - t1)) path = [] if max_beats == -1: max_beats = min_beats + 1 first_pause_full = max_beats * first_pause n_beats = first_pause for i in path_i: if i >= first_pause_full: path.append(('p', i - first_pause_full)) result_labels.append(None) # path.append('p' + str(i - first_pause_full)) else: path.append(beat_names[i % n_beats]) song_i = path[-1][0] beat_name = path[-1][1] result_labels.append( start[song_i][np.where(np.array(beats[song_i]) == beat_name)[0][0]]) # path.append(float(beat_names[i % n_beats])) # else: # print("Running optimization (fast, full table)") # # this won't work right now- needs to be updated # # with the multi-song approach # # fortran method # t1 = time.clock() # cost, prev_node = build_table(tc2, pen2) # t2 = time.clock() # print("Built table (fortran) in {} seconds".format(t2 - t1)) # res = cost[:, -1] # best_idx = N.argmin(res) # if N.isfinite(res[best_idx]): # path, path_cost, path_i = _reconstruct_path( # prev_node, cost, beat_names, best_idx, N.shape(cost)[1] - 1) # # path_i = [beat_names.index(x) for x in path] # else: # # throw an exception here? # return None # path = [] # result_labels = [] # if max_pause_beats == 0: # n_beats = total_music_beats # first_pause = n_beats # else: # n_beats = first_pause # for i in path_i: # if i >= first_pause: # path.append(('p', i - first_pause)) # result_labels.append(None) # else: # path.append(beat_names[i % n_beats]) # song_i = path[-1][0] # beat_name = path[-1][1] # result_labels.append( # start[song_i][N.where(N.array(beats[song_i]) == # beat_name)[0][0]]) # return a radiotool Composition logging.info("Generating audio") (comp, cf_locations, result_full_labels, cost_labels, contracted, result_volume) =\ _generate_audio( songs, beats, path, path_cost, start, volume=volume, volume_breakpoints=volume_breakpoints, springs=springs, fade_in_len=fade_in_len, fade_out_len=fade_out_len) info = { "beat_length": beat_length, "contracted": contracted, "cost": np.sum(path_cost) / len(path), "path": path, "path_i": path_i, "target_labels": target, "result_labels": result_labels, "result_full_labels": result_full_labels, "result_volume": result_volume, "transitions": [Label("crossfade", loc) for loc in cf_locations], "path_cost": cost_labels } return comp, info
[ "def", "retarget", "(", "songs", ",", "duration", ",", "music_labels", "=", "None", ",", "out_labels", "=", "None", ",", "out_penalty", "=", "None", ",", "volume", "=", "None", ",", "volume_breakpoints", "=", "None", ",", "springs", "=", "None", ",", "co...
Retarget a song to a duration given input and output labels on the music. Suppose you like one section of a song, say, the guitar solo, and you want to create a three minute long version of the solo. Suppose the guitar solo occurs from the 150 second mark to the 200 second mark in the original song. You can set the label the guitar solo with 'solo' and the rest of the song with 'other' by crafting the ``music_labels`` input function. And you can set the ``out_labels`` function to give you nothing but solo:: def labels(t): if 150 < t < 200: return 'solo' return 'other' def target(t): return 'solo' song = Song("sweet-rock-song.wav") composition, info = retarget(song, 180, music_labels=labels, out_labels=target) composition.export(filename="super-long-solo") You can achieve much more complicated retargetings by adjusting the ``music_labels``, `out_labels` and ``out_penalty`` functions, but this should give you a basic sense of how to use the ``retarget`` function. :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param duration: Duration of retargeted song (in seconds) :type duration: float :param music_labels: A function that takes a time (in seconds) and returns the label (str) of the input music at that time :type music_labels: function :param out_labels: A function that takes a time (in seconds) and returns the desired label (str) of the output music at that time :type out_labels: function :param out_penalty: A function that takes a time (in seconds) and returns the penalty for not matching the correct output label at that time (default is 1.0) :type out_penalty: function :returns: Composition of retargeted song, and dictionary of information about the retargeting :rtype: (:py:class:`radiotool.composer.Composition`, dict)
[ "Retarget", "a", "song", "to", "a", "duration", "given", "input", "and", "output", "labels", "on", "the", "music", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/algorithms/retarget.py#L170-L492
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
ScrapydCommandSet.init_command_set
def init_command_set(self, scrapyd_url): """ Initialize command set by scrapyd_url,each element is a list such as ['command','supported http method type'] """ if scrapyd_url[-1:] != '/': scrapyd_url = scrapyd_url + '/' self['daemonstatus'] = [scrapyd_url + 'daemonstatus.json', http_utils.METHOD_GET] self['addversion'] = [scrapyd_url + 'addversion.json', http_utils.METHOD_POST] self['schedule'] = [scrapyd_url + 'schedule.json', http_utils.METHOD_POST] self['cancel'] = [scrapyd_url + 'cancel.json', http_utils.METHOD_POST] self['listprojects'] = [scrapyd_url + 'listprojects.json', http_utils.METHOD_GET] self['listversions'] = [scrapyd_url + 'listversions.json', http_utils.METHOD_GET] self['listspiders'] = [scrapyd_url + 'listspiders.json', http_utils.METHOD_GET] self['listjobs'] = [scrapyd_url + 'listjobs.json', http_utils.METHOD_GET] self['delversion'] = [scrapyd_url + 'delversion.json', http_utils.METHOD_POST] self['delproject'] = [scrapyd_url + 'delproject.json', http_utils.METHOD_POST] self['logs'] = [scrapyd_url + 'logs/', http_utils.METHOD_GET]
python
def init_command_set(self, scrapyd_url): """ Initialize command set by scrapyd_url,each element is a list such as ['command','supported http method type'] """ if scrapyd_url[-1:] != '/': scrapyd_url = scrapyd_url + '/' self['daemonstatus'] = [scrapyd_url + 'daemonstatus.json', http_utils.METHOD_GET] self['addversion'] = [scrapyd_url + 'addversion.json', http_utils.METHOD_POST] self['schedule'] = [scrapyd_url + 'schedule.json', http_utils.METHOD_POST] self['cancel'] = [scrapyd_url + 'cancel.json', http_utils.METHOD_POST] self['listprojects'] = [scrapyd_url + 'listprojects.json', http_utils.METHOD_GET] self['listversions'] = [scrapyd_url + 'listversions.json', http_utils.METHOD_GET] self['listspiders'] = [scrapyd_url + 'listspiders.json', http_utils.METHOD_GET] self['listjobs'] = [scrapyd_url + 'listjobs.json', http_utils.METHOD_GET] self['delversion'] = [scrapyd_url + 'delversion.json', http_utils.METHOD_POST] self['delproject'] = [scrapyd_url + 'delproject.json', http_utils.METHOD_POST] self['logs'] = [scrapyd_url + 'logs/', http_utils.METHOD_GET]
[ "def", "init_command_set", "(", "self", ",", "scrapyd_url", ")", ":", "if", "scrapyd_url", "[", "-", "1", ":", "]", "!=", "'/'", ":", "scrapyd_url", "=", "scrapyd_url", "+", "'/'", "self", "[", "'daemonstatus'", "]", "=", "[", "scrapyd_url", "+", "'daemo...
Initialize command set by scrapyd_url,each element is a list such as ['command','supported http method type']
[ "Initialize", "command", "set", "by", "scrapyd_url", "each", "element", "is", "a", "list", "such", "as", "[", "command", "supported", "http", "method", "type", "]" ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L50-L67
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
ScrapydAgent.get_load_status
def get_load_status(self): """ To check the load status of a service. :return: a dictionary that include json data. example: { "status": "ok", "running": "0", "pending": "0", "finished": "0", "node_name": "node-name" } """ url, method = self.command_set['daemonstatus'][0], self.command_set['daemonstatus'][1] response = http_utils.request(url, method_type=method, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = DaemonStatus().__dict__ return response
python
def get_load_status(self): """ To check the load status of a service. :return: a dictionary that include json data. example: { "status": "ok", "running": "0", "pending": "0", "finished": "0", "node_name": "node-name" } """ url, method = self.command_set['daemonstatus'][0], self.command_set['daemonstatus'][1] response = http_utils.request(url, method_type=method, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = DaemonStatus().__dict__ return response
[ "def", "get_load_status", "(", "self", ")", ":", "url", ",", "method", "=", "self", ".", "command_set", "[", "'daemonstatus'", "]", "[", "0", "]", ",", "self", ".", "command_set", "[", "'daemonstatus'", "]", "[", "1", "]", "response", "=", "http_utils", ...
To check the load status of a service. :return: a dictionary that include json data. example: { "status": "ok", "running": "0", "pending": "0", "finished": "0", "node_name": "node-name" }
[ "To", "check", "the", "load", "status", "of", "a", "service", ".", ":", "return", ":", "a", "dictionary", "that", "include", "json", "data", ".", "example", ":", "{", "status", ":", "ok", "running", ":", "0", "pending", ":", "0", "finished", ":", "0"...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L76-L87
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
ScrapydAgent.schedule
def schedule(self, project_name, spider_name, priority=0, setting=None, job_id=None, version=None, args={}): """ Schedule a spider run (also known as a job), returning the job id. :param project_name: the project name :param spider_name: the spider name :param priority: the run priority :param setting: a Scrapy setting to use when running the spider :param job_id: a job id used to identify the job, overrides the default generated UUID :param version: the version of the project to use :param args: passed as spider argument :return: a dictionary that status message example: {"status": "ok", "jobid": "6487ec79947edab326d6db28a2d86511e8247444"} """ url, method = self.command_set['schedule'][0], self.command_set['schedule'][1] data = {} data['project'] = project_name data['spider'] = spider_name data['priority'] = priority if setting is not None: data['setting'] = setting if job_id is not None: data['jobid'] = job_id if version is not None: data['_version'] = version for k, v in args.items(): data[k] = v response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = ScheduleResultSet().__dict__ return response
python
def schedule(self, project_name, spider_name, priority=0, setting=None, job_id=None, version=None, args={}): """ Schedule a spider run (also known as a job), returning the job id. :param project_name: the project name :param spider_name: the spider name :param priority: the run priority :param setting: a Scrapy setting to use when running the spider :param job_id: a job id used to identify the job, overrides the default generated UUID :param version: the version of the project to use :param args: passed as spider argument :return: a dictionary that status message example: {"status": "ok", "jobid": "6487ec79947edab326d6db28a2d86511e8247444"} """ url, method = self.command_set['schedule'][0], self.command_set['schedule'][1] data = {} data['project'] = project_name data['spider'] = spider_name data['priority'] = priority if setting is not None: data['setting'] = setting if job_id is not None: data['jobid'] = job_id if version is not None: data['_version'] = version for k, v in args.items(): data[k] = v response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = ScheduleResultSet().__dict__ return response
[ "def", "schedule", "(", "self", ",", "project_name", ",", "spider_name", ",", "priority", "=", "0", ",", "setting", "=", "None", ",", "job_id", "=", "None", ",", "version", "=", "None", ",", "args", "=", "{", "}", ")", ":", "url", ",", "method", "=...
Schedule a spider run (also known as a job), returning the job id. :param project_name: the project name :param spider_name: the spider name :param priority: the run priority :param setting: a Scrapy setting to use when running the spider :param job_id: a job id used to identify the job, overrides the default generated UUID :param version: the version of the project to use :param args: passed as spider argument :return: a dictionary that status message example: {"status": "ok", "jobid": "6487ec79947edab326d6db28a2d86511e8247444"}
[ "Schedule", "a", "spider", "run", "(", "also", "known", "as", "a", "job", ")", "returning", "the", "job", "id", ".", ":", "param", "project_name", ":", "the", "project", "name", ":", "param", "spider_name", ":", "the", "spider", "name", ":", "param", "...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L109-L146
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
ScrapydAgent.cancel
def cancel(self, project_name, job_id): """ Cancel a spider run (aka. job). If the job is pending, it will be removed. If the job is running, it will be terminated. :param project_name: the project name :param job_id: the job id :return: a dictionary that status message example: {"status": "ok", "prevstate": "running"} """ url, method = self.command_set['cancel'][0], self.command_set['cancel'][1] data = {} data['project'] = project_name data['job'] = job_id response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = CancelResultSet().__dict__ return response
python
def cancel(self, project_name, job_id): """ Cancel a spider run (aka. job). If the job is pending, it will be removed. If the job is running, it will be terminated. :param project_name: the project name :param job_id: the job id :return: a dictionary that status message example: {"status": "ok", "prevstate": "running"} """ url, method = self.command_set['cancel'][0], self.command_set['cancel'][1] data = {} data['project'] = project_name data['job'] = job_id response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = CancelResultSet().__dict__ return response
[ "def", "cancel", "(", "self", ",", "project_name", ",", "job_id", ")", ":", "url", ",", "method", "=", "self", ".", "command_set", "[", "'cancel'", "]", "[", "0", "]", ",", "self", ".", "command_set", "[", "'cancel'", "]", "[", "1", "]", "data", "=...
Cancel a spider run (aka. job). If the job is pending, it will be removed. If the job is running, it will be terminated. :param project_name: the project name :param job_id: the job id :return: a dictionary that status message example: {"status": "ok", "prevstate": "running"}
[ "Cancel", "a", "spider", "run", "(", "aka", ".", "job", ")", ".", "If", "the", "job", "is", "pending", "it", "will", "be", "removed", ".", "If", "the", "job", "is", "running", "it", "will", "be", "terminated", ".", ":", "param", "project_name", ":", ...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L148-L164
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
ScrapydAgent.get_spider_list
def get_spider_list(self, project_name, version=None): """ Get the list of spiders available in the last (unless overridden) version of some project. :param project_name: the project name :param version: the version of the project to examine :return: a dictionary that spider name list example: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]} """ url, method = self.command_set['listspiders'][0], self.command_set['listspiders'][1] data = {} data['project'] = project_name if version is not None: data['_version'] = version response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = SpiderList().__dict__ return response
python
def get_spider_list(self, project_name, version=None): """ Get the list of spiders available in the last (unless overridden) version of some project. :param project_name: the project name :param version: the version of the project to examine :return: a dictionary that spider name list example: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]} """ url, method = self.command_set['listspiders'][0], self.command_set['listspiders'][1] data = {} data['project'] = project_name if version is not None: data['_version'] = version response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = SpiderList().__dict__ return response
[ "def", "get_spider_list", "(", "self", ",", "project_name", ",", "version", "=", "None", ")", ":", "url", ",", "method", "=", "self", ".", "command_set", "[", "'listspiders'", "]", "[", "0", "]", ",", "self", ".", "command_set", "[", "'listspiders'", "]"...
Get the list of spiders available in the last (unless overridden) version of some project. :param project_name: the project name :param version: the version of the project to examine :return: a dictionary that spider name list example: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]}
[ "Get", "the", "list", "of", "spiders", "available", "in", "the", "last", "(", "unless", "overridden", ")", "version", "of", "some", "project", ".", ":", "param", "project_name", ":", "the", "project", "name", ":", "param", "version", ":", "the", "version",...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L195-L212
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
ScrapydAgent.get_job_list
def get_job_list(self, project_name): """ Get the list of pending, running and finished jobs of some project. :param project_name: the project name :return: a dictionary that list inculde job name and status example: {"status": "ok", "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}], "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2", "start_time": "2012-09-12 10:14:03.594664"}], "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3", "start_time": "2012-09-12 10:14:03.594664", "end_time": "2012-09-12 10:24:03.594664"}]} """ url, method = self.command_set['listjobs'][0], self.command_set['listjobs'][1] data = {'project': project_name} response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = JobList().__dict__ return response
python
def get_job_list(self, project_name): """ Get the list of pending, running and finished jobs of some project. :param project_name: the project name :return: a dictionary that list inculde job name and status example: {"status": "ok", "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}], "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2", "start_time": "2012-09-12 10:14:03.594664"}], "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3", "start_time": "2012-09-12 10:14:03.594664", "end_time": "2012-09-12 10:24:03.594664"}]} """ url, method = self.command_set['listjobs'][0], self.command_set['listjobs'][1] data = {'project': project_name} response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = JobList().__dict__ return response
[ "def", "get_job_list", "(", "self", ",", "project_name", ")", ":", "url", ",", "method", "=", "self", ".", "command_set", "[", "'listjobs'", "]", "[", "0", "]", ",", "self", ".", "command_set", "[", "'listjobs'", "]", "[", "1", "]", "data", "=", "{",...
Get the list of pending, running and finished jobs of some project. :param project_name: the project name :return: a dictionary that list inculde job name and status example: {"status": "ok", "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}], "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2", "start_time": "2012-09-12 10:14:03.594664"}], "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3", "start_time": "2012-09-12 10:14:03.594664", "end_time": "2012-09-12 10:24:03.594664"}]}
[ "Get", "the", "list", "of", "pending", "running", "and", "finished", "jobs", "of", "some", "project", ".", ":", "param", "project_name", ":", "the", "project", "name", ":", "return", ":", "a", "dictionary", "that", "list", "inculde", "job", "name", "and", ...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L214-L233
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
ScrapydAgent.get_logs
def get_logs(self, project_name, spider_name): """ Get urls that scrapyd logs file by project name and spider name :param project_name: the project name :param spider_name: the spider name :return: two list of the logs file name and logs file url """ url, method = self.command_set['logs'][0] + project_name + '/' + spider_name + '/', self.command_set['logs'][1] response = http_utils.request(url, method_type=method) html_parser = ScrapydLogsPageHTMLParser() html_parser.feed(response) html_parser.clean_enter_sign() return html_parser.result, [url + x for x in html_parser.result]
python
def get_logs(self, project_name, spider_name): """ Get urls that scrapyd logs file by project name and spider name :param project_name: the project name :param spider_name: the spider name :return: two list of the logs file name and logs file url """ url, method = self.command_set['logs'][0] + project_name + '/' + spider_name + '/', self.command_set['logs'][1] response = http_utils.request(url, method_type=method) html_parser = ScrapydLogsPageHTMLParser() html_parser.feed(response) html_parser.clean_enter_sign() return html_parser.result, [url + x for x in html_parser.result]
[ "def", "get_logs", "(", "self", ",", "project_name", ",", "spider_name", ")", ":", "url", ",", "method", "=", "self", ".", "command_set", "[", "'logs'", "]", "[", "0", "]", "+", "project_name", "+", "'/'", "+", "spider_name", "+", "'/'", ",", "self", ...
Get urls that scrapyd logs file by project name and spider name :param project_name: the project name :param spider_name: the spider name :return: two list of the logs file name and logs file url
[ "Get", "urls", "that", "scrapyd", "logs", "file", "by", "project", "name", "and", "spider", "name", ":", "param", "project_name", ":", "the", "project", "name", ":", "param", "spider_name", ":", "the", "spider", "name", ":", "return", ":", "two", "list", ...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L267-L279
ars096/pypci
pypci/pypci.py
lspci
def lspci(vendor=None, device=None): """Collect PCI information and return its list. Parameters ---------- vendor : int Return only devices with specified vendor ID. device : int Return only devices with specified device ID. Returns ------- list List of PCI device information. Information are stored in PCIConfigHeader namedtuple object. Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> b[0].vendor_id 4423 >>> b[0].bar [BaseAddressRegister(type='mem', addr=2421170176, size=64), BaseAddressRegister(type='mem', addr=2421166080, size=64), BaseAddressRegister(type='mem', addr=2421174272, size=32)] """ lspci_cmd = ['lspci', '-xxxx', '-v'] lspci_results = subprocess.run(lspci_cmd, stdout=subprocess.PIPE) lspci_stdout = lspci_results.stdout.decode('utf-8') config = [parse_lspci_output(stdout) for stdout in lspci_stdout.split('\n\n') if stdout != ''] if vendor is not None: config = [c for c in config if c.vendor_id==vendor] pass if device is not None: config = [c for c in config if c.device_id==device] pass return config
python
def lspci(vendor=None, device=None): """Collect PCI information and return its list. Parameters ---------- vendor : int Return only devices with specified vendor ID. device : int Return only devices with specified device ID. Returns ------- list List of PCI device information. Information are stored in PCIConfigHeader namedtuple object. Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> b[0].vendor_id 4423 >>> b[0].bar [BaseAddressRegister(type='mem', addr=2421170176, size=64), BaseAddressRegister(type='mem', addr=2421166080, size=64), BaseAddressRegister(type='mem', addr=2421174272, size=32)] """ lspci_cmd = ['lspci', '-xxxx', '-v'] lspci_results = subprocess.run(lspci_cmd, stdout=subprocess.PIPE) lspci_stdout = lspci_results.stdout.decode('utf-8') config = [parse_lspci_output(stdout) for stdout in lspci_stdout.split('\n\n') if stdout != ''] if vendor is not None: config = [c for c in config if c.vendor_id==vendor] pass if device is not None: config = [c for c in config if c.device_id==device] pass return config
[ "def", "lspci", "(", "vendor", "=", "None", ",", "device", "=", "None", ")", ":", "lspci_cmd", "=", "[", "'lspci'", ",", "'-xxxx'", ",", "'-v'", "]", "lspci_results", "=", "subprocess", ".", "run", "(", "lspci_cmd", ",", "stdout", "=", "subprocess", "....
Collect PCI information and return its list. Parameters ---------- vendor : int Return only devices with specified vendor ID. device : int Return only devices with specified device ID. Returns ------- list List of PCI device information. Information are stored in PCIConfigHeader namedtuple object. Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> b[0].vendor_id 4423 >>> b[0].bar [BaseAddressRegister(type='mem', addr=2421170176, size=64), BaseAddressRegister(type='mem', addr=2421166080, size=64), BaseAddressRegister(type='mem', addr=2421174272, size=32)]
[ "Collect", "PCI", "information", "and", "return", "its", "list", ".", "Parameters", "----------", "vendor", ":", "int", "Return", "only", "devices", "with", "specified", "vendor", "ID", ".", "device", ":", "int", "Return", "only", "devices", "with", "specified...
train
https://github.com/ars096/pypci/blob/9469fa012e1f88fc6efc3aa6c17cd9732bbf73f6/pypci/pypci.py#L172-L215
ars096/pypci
pypci/pypci.py
read
def read(bar, offset, size): """Read PCI data register. Parameters ---------- bar : BaseAddressRegister BAR to read. offset : int Address offset in BAR to read. size : int Data size to read. Returns ------- bytes Data read from BAR Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> pypci.read(b[0].bar[2], 0x0c, 4) b'\x00\x00\x00\x0c' """ verify_access_range(bar, offset, size) if bar.type == 'io': return io_read(bar, offset, size) if bar.type == 'mem': return mem_read(bar, offset, size) return b''
python
def read(bar, offset, size): """Read PCI data register. Parameters ---------- bar : BaseAddressRegister BAR to read. offset : int Address offset in BAR to read. size : int Data size to read. Returns ------- bytes Data read from BAR Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> pypci.read(b[0].bar[2], 0x0c, 4) b'\x00\x00\x00\x0c' """ verify_access_range(bar, offset, size) if bar.type == 'io': return io_read(bar, offset, size) if bar.type == 'mem': return mem_read(bar, offset, size) return b''
[ "def", "read", "(", "bar", ",", "offset", ",", "size", ")", ":", "verify_access_range", "(", "bar", ",", "offset", ",", "size", ")", "if", "bar", ".", "type", "==", "'io'", ":", "return", "io_read", "(", "bar", ",", "offset", ",", "size", ")", "if"...
Read PCI data register. Parameters ---------- bar : BaseAddressRegister BAR to read. offset : int Address offset in BAR to read. size : int Data size to read. Returns ------- bytes Data read from BAR Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> pypci.read(b[0].bar[2], 0x0c, 4) b'\x00\x00\x00\x0c'
[ "Read", "PCI", "data", "register", ".", "Parameters", "----------", "bar", ":", "BaseAddressRegister", "BAR", "to", "read", ".", "offset", ":", "int", "Address", "offset", "in", "BAR", "to", "read", ".", "size", ":", "int", "Data", "size", "to", "read", ...
train
https://github.com/ars096/pypci/blob/9469fa012e1f88fc6efc3aa6c17cd9732bbf73f6/pypci/pypci.py#L217-L245
ars096/pypci
pypci/pypci.py
write
def write(bar, offset, data): """Write data to PCI board. Parameters ---------- bar : BaseAddressRegister BAR to write. offset : int Address offset in BAR to write. data : bytes Data to write. Returns ------- None Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> pypci.write(b[0].bar[2], 0x04, b'\x01') >>> data = struct.pack('<I', 1234567) >>> pypci.write(b[0].bar[2], 0x00, data) """ if type(data) not in [bytes, bytearray]: msg = 'data should be bytes or bytearray type' raise TypeError(msg) size = len(data) verify_access_range(bar, offset, size) if bar.type == 'io': return io_write(bar, offset, data) if bar.type == 'mem': return mem_write(bar, offset, data) return
python
def write(bar, offset, data): """Write data to PCI board. Parameters ---------- bar : BaseAddressRegister BAR to write. offset : int Address offset in BAR to write. data : bytes Data to write. Returns ------- None Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> pypci.write(b[0].bar[2], 0x04, b'\x01') >>> data = struct.pack('<I', 1234567) >>> pypci.write(b[0].bar[2], 0x00, data) """ if type(data) not in [bytes, bytearray]: msg = 'data should be bytes or bytearray type' raise TypeError(msg) size = len(data) verify_access_range(bar, offset, size) if bar.type == 'io': return io_write(bar, offset, data) if bar.type == 'mem': return mem_write(bar, offset, data) return
[ "def", "write", "(", "bar", ",", "offset", ",", "data", ")", ":", "if", "type", "(", "data", ")", "not", "in", "[", "bytes", ",", "bytearray", "]", ":", "msg", "=", "'data should be bytes or bytearray type'", "raise", "TypeError", "(", "msg", ")", "size"...
Write data to PCI board. Parameters ---------- bar : BaseAddressRegister BAR to write. offset : int Address offset in BAR to write. data : bytes Data to write. Returns ------- None Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> pypci.write(b[0].bar[2], 0x04, b'\x01') >>> data = struct.pack('<I', 1234567) >>> pypci.write(b[0].bar[2], 0x00, data)
[ "Write", "data", "to", "PCI", "board", ".", "Parameters", "----------", "bar", ":", "BaseAddressRegister", "BAR", "to", "write", ".", "offset", ":", "int", "Address", "offset", "in", "BAR", "to", "write", ".", "data", ":", "bytes", "Data", "to", "write", ...
train
https://github.com/ars096/pypci/blob/9469fa012e1f88fc6efc3aa6c17cd9732bbf73f6/pypci/pypci.py#L247-L282
nickhand/classylss
classylss/__init__.py
get_data_files
def get_data_files(): """ Returns the path of data files, which are installed to the package directory. """ import os path = os.path.dirname(__file__) path = os.path.join(path, 'data') r = dict( Alpha_inf_hyrec_file = os.path.join(path, 'hyrec', 'Alpha_inf.dat'), R_inf_hyrec_file = os.path.join(path, 'hyrec', 'R_inf.dat'), two_photon_tables_hyrec_file = os.path.join(path, 'hyrec', 'two_photon_tables.dat'), sBBN_file = os.path.join(path, 'bbn', 'sBBN.dat'), ) return r
python
def get_data_files(): """ Returns the path of data files, which are installed to the package directory. """ import os path = os.path.dirname(__file__) path = os.path.join(path, 'data') r = dict( Alpha_inf_hyrec_file = os.path.join(path, 'hyrec', 'Alpha_inf.dat'), R_inf_hyrec_file = os.path.join(path, 'hyrec', 'R_inf.dat'), two_photon_tables_hyrec_file = os.path.join(path, 'hyrec', 'two_photon_tables.dat'), sBBN_file = os.path.join(path, 'bbn', 'sBBN.dat'), ) return r
[ "def", "get_data_files", "(", ")", ":", "import", "os", "path", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'data'", ")", "r", "=", "dict", "(", "Alpha_inf_hyrec_file",...
Returns the path of data files, which are installed to the package directory.
[ "Returns", "the", "path", "of", "data", "files", "which", "are", "installed", "to", "the", "package", "directory", "." ]
train
https://github.com/nickhand/classylss/blob/b297cb25bc47ffed845470fe1c052346ea96cddd/classylss/__init__.py#L3-L17
nickhand/classylss
classylss/__init__.py
_find_file
def _find_file(filename): """ Find the file path, first checking if it exists and then looking in the data directory """ import os if os.path.exists(filename): path = filename else: path = os.path.dirname(__file__) path = os.path.join(path, 'data', filename) if not os.path.exists(path): raise ValueError("cannot locate file '%s'" %filename) return path
python
def _find_file(filename): """ Find the file path, first checking if it exists and then looking in the data directory """ import os if os.path.exists(filename): path = filename else: path = os.path.dirname(__file__) path = os.path.join(path, 'data', filename) if not os.path.exists(path): raise ValueError("cannot locate file '%s'" %filename) return path
[ "def", "_find_file", "(", "filename", ")", ":", "import", "os", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "path", "=", "filename", "else", ":", "path", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "path", "...
Find the file path, first checking if it exists and then looking in the data directory
[ "Find", "the", "file", "path", "first", "checking", "if", "it", "exists", "and", "then", "looking", "in", "the", "data", "directory" ]
train
https://github.com/nickhand/classylss/blob/b297cb25bc47ffed845470fe1c052346ea96cddd/classylss/__init__.py#L19-L34
nickhand/classylss
classylss/__init__.py
load_precision
def load_precision(filename): """ Load a CLASS precision file into a dictionary. Parameters ---------- filename : str the name of an existing file to load, or one in the files included as part of the CLASS source Returns ------- dict : the precision parameters loaded from file """ # also look in data dir path = _find_file(filename) r = dict() with open(path, 'r') as f: exec(f.read(), {}, r) return r
python
def load_precision(filename): """ Load a CLASS precision file into a dictionary. Parameters ---------- filename : str the name of an existing file to load, or one in the files included as part of the CLASS source Returns ------- dict : the precision parameters loaded from file """ # also look in data dir path = _find_file(filename) r = dict() with open(path, 'r') as f: exec(f.read(), {}, r) return r
[ "def", "load_precision", "(", "filename", ")", ":", "# also look in data dir", "path", "=", "_find_file", "(", "filename", ")", "r", "=", "dict", "(", ")", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "exec", "(", "f", ".", "read", "...
Load a CLASS precision file into a dictionary. Parameters ---------- filename : str the name of an existing file to load, or one in the files included as part of the CLASS source Returns ------- dict : the precision parameters loaded from file
[ "Load", "a", "CLASS", "precision", "file", "into", "a", "dictionary", "." ]
train
https://github.com/nickhand/classylss/blob/b297cb25bc47ffed845470fe1c052346ea96cddd/classylss/__init__.py#L36-L58
nickhand/classylss
classylss/__init__.py
load_ini
def load_ini(filename): """ Read a CLASS ``.ini`` file, returning a dictionary of parameters Parameters ---------- filename : str the name of an existing parameter file to load, or one included as part of the CLASS source Returns ------- dict : the input parameters loaded from file """ # also look in data dir path = _find_file(filename) pars = {} with open(path, 'r') as ff: # loop over lines for lineno, line in enumerate(ff): if not line: continue # skip any commented lines with # if '#' in line: line = line[line.index('#')+1:] # must have an equals sign to be valid if "=" not in line: continue # extract key and value pairs fields = line.split("=") if len(fields) != 2: import warnings warnings.warn("skipping line number %d: '%s'" %(lineno,line)) continue pars[fields[0].strip()] = fields[1].strip() return pars
python
def load_ini(filename): """ Read a CLASS ``.ini`` file, returning a dictionary of parameters Parameters ---------- filename : str the name of an existing parameter file to load, or one included as part of the CLASS source Returns ------- dict : the input parameters loaded from file """ # also look in data dir path = _find_file(filename) pars = {} with open(path, 'r') as ff: # loop over lines for lineno, line in enumerate(ff): if not line: continue # skip any commented lines with # if '#' in line: line = line[line.index('#')+1:] # must have an equals sign to be valid if "=" not in line: continue # extract key and value pairs fields = line.split("=") if len(fields) != 2: import warnings warnings.warn("skipping line number %d: '%s'" %(lineno,line)) continue pars[fields[0].strip()] = fields[1].strip() return pars
[ "def", "load_ini", "(", "filename", ")", ":", "# also look in data dir", "path", "=", "_find_file", "(", "filename", ")", "pars", "=", "{", "}", "with", "open", "(", "path", ",", "'r'", ")", "as", "ff", ":", "# loop over lines", "for", "lineno", ",", "li...
Read a CLASS ``.ini`` file, returning a dictionary of parameters Parameters ---------- filename : str the name of an existing parameter file to load, or one included as part of the CLASS source Returns ------- dict : the input parameters loaded from file
[ "Read", "a", "CLASS", ".", "ini", "file", "returning", "a", "dictionary", "of", "parameters" ]
train
https://github.com/nickhand/classylss/blob/b297cb25bc47ffed845470fe1c052346ea96cddd/classylss/__init__.py#L60-L99
frnsys/broca
broca/tokenize/keyword/pos.py
extract_noun_phrases
def extract_noun_phrases(tagged_doc): """ (From textblob) """ tags = _normalize_tags(tagged_doc) merge = True while merge: merge = False for x in range(0, len(tags) - 1): t1 = tags[x] t2 = tags[x + 1] key = t1[1], t2[1] value = CFG.get(key, '') if value: merge = True tags.pop(x) tags.pop(x) match = '%s %s' % (t1[0], t2[0]) pos = value tags.insert(x, (match, pos)) break matches = [t[0] for t in tags if t[1] in ['NNP', 'NNI']] return matches
python
def extract_noun_phrases(tagged_doc): """ (From textblob) """ tags = _normalize_tags(tagged_doc) merge = True while merge: merge = False for x in range(0, len(tags) - 1): t1 = tags[x] t2 = tags[x + 1] key = t1[1], t2[1] value = CFG.get(key, '') if value: merge = True tags.pop(x) tags.pop(x) match = '%s %s' % (t1[0], t2[0]) pos = value tags.insert(x, (match, pos)) break matches = [t[0] for t in tags if t[1] in ['NNP', 'NNI']] return matches
[ "def", "extract_noun_phrases", "(", "tagged_doc", ")", ":", "tags", "=", "_normalize_tags", "(", "tagged_doc", ")", "merge", "=", "True", "while", "merge", ":", "merge", "=", "False", "for", "x", "in", "range", "(", "0", ",", "len", "(", "tags", ")", "...
(From textblob)
[ "(", "From", "textblob", ")" ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/tokenize/keyword/pos.py#L37-L60
frnsys/broca
broca/tokenize/keyword/pos.py
_normalize_tags
def _normalize_tags(chunk): """ (From textblob) Normalize the corpus tags. ("NN", "NN-PL", "NNS") -> "NN" """ ret = [] for word, tag in chunk: if tag == 'NP-TL' or tag == 'NP': ret.append((word, 'NNP')) continue if tag.endswith('-TL'): ret.append((word, tag[:-3])) continue if tag.endswith('S'): ret.append((word, tag[:-1])) continue ret.append((word, tag)) return ret
python
def _normalize_tags(chunk): """ (From textblob) Normalize the corpus tags. ("NN", "NN-PL", "NNS") -> "NN" """ ret = [] for word, tag in chunk: if tag == 'NP-TL' or tag == 'NP': ret.append((word, 'NNP')) continue if tag.endswith('-TL'): ret.append((word, tag[:-3])) continue if tag.endswith('S'): ret.append((word, tag[:-1])) continue ret.append((word, tag)) return ret
[ "def", "_normalize_tags", "(", "chunk", ")", ":", "ret", "=", "[", "]", "for", "word", ",", "tag", "in", "chunk", ":", "if", "tag", "==", "'NP-TL'", "or", "tag", "==", "'NP'", ":", "ret", ".", "append", "(", "(", "word", ",", "'NNP'", ")", ")", ...
(From textblob) Normalize the corpus tags. ("NN", "NN-PL", "NNS") -> "NN"
[ "(", "From", "textblob", ")" ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/tokenize/keyword/pos.py#L63-L82
rdireen/spherepy
spherepy/file.py
save_patt
def save_patt(patt, filename): """Saves ScalarPatternUniform object 'patt' to file. The first line of the file has the number of rows and the number of columns in the pattern separated by a comma (single sphere data). The remaining lines have the form: 0, 1, 3.14, 2.718 The first two numbers are index for the row and index for the column respectively. The last two numbers are the real and imaginary part of the number associated with the row and column. """ nrows = patt.nrows ncols = patt.ncols frmstr = "{0},{1},{2:.16e},{3:.16e}\n" ar = patt.array with open(filename, 'w') as f: f.write("{0},{1}\n".format(nrows, ncols)) for nr in xrange(0, nrows): for nc in xrange(0, ncols): f.write(frmstr.format(nr, nc, ar[nr, nc].real, ar[nr, nc].imag))
python
def save_patt(patt, filename): """Saves ScalarPatternUniform object 'patt' to file. The first line of the file has the number of rows and the number of columns in the pattern separated by a comma (single sphere data). The remaining lines have the form: 0, 1, 3.14, 2.718 The first two numbers are index for the row and index for the column respectively. The last two numbers are the real and imaginary part of the number associated with the row and column. """ nrows = patt.nrows ncols = patt.ncols frmstr = "{0},{1},{2:.16e},{3:.16e}\n" ar = patt.array with open(filename, 'w') as f: f.write("{0},{1}\n".format(nrows, ncols)) for nr in xrange(0, nrows): for nc in xrange(0, ncols): f.write(frmstr.format(nr, nc, ar[nr, nc].real, ar[nr, nc].imag))
[ "def", "save_patt", "(", "patt", ",", "filename", ")", ":", "nrows", "=", "patt", ".", "nrows", "ncols", "=", "patt", ".", "ncols", "frmstr", "=", "\"{0},{1},{2:.16e},{3:.16e}\\n\"", "ar", "=", "patt", ".", "array", "with", "open", "(", "filename", ",", ...
Saves ScalarPatternUniform object 'patt' to file. The first line of the file has the number of rows and the number of columns in the pattern separated by a comma (single sphere data). The remaining lines have the form: 0, 1, 3.14, 2.718 The first two numbers are index for the row and index for the column respectively. The last two numbers are the real and imaginary part of the number associated with the row and column.
[ "Saves", "ScalarPatternUniform", "object", "patt", "to", "file", ".", "The", "first", "line", "of", "the", "file", "has", "the", "number", "of", "rows", "and", "the", "number", "of", "columns", "in", "the", "pattern", "separated", "by", "a", "comma", "(", ...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/file.py#L44-L68
rdireen/spherepy
spherepy/file.py
save_coef
def save_coef(scoef, filename): """Saves ScalarCoeffs object 'scoef' to file. The first line of the file has the max number N and the max number M of the scoef structure separated by a comma. The remaining lines have the form 3.14, 2.718 The first number is the real part of the mode and the second is the imaginary. """ nmax = scoef.nmax mmax = scoef.mmax frmstr = "{0:.16e},{1:.16e}\n" L = (nmax + 1) + mmax * (2 * nmax - mmax + 1); with open(filename, 'w') as f: f.write("{0},{1}\n".format(nmax, mmax)) for n in xrange(0, L): f.write(frmstr.format(scoef._vec[n].real, scoef._vec[n].imag))
python
def save_coef(scoef, filename): """Saves ScalarCoeffs object 'scoef' to file. The first line of the file has the max number N and the max number M of the scoef structure separated by a comma. The remaining lines have the form 3.14, 2.718 The first number is the real part of the mode and the second is the imaginary. """ nmax = scoef.nmax mmax = scoef.mmax frmstr = "{0:.16e},{1:.16e}\n" L = (nmax + 1) + mmax * (2 * nmax - mmax + 1); with open(filename, 'w') as f: f.write("{0},{1}\n".format(nmax, mmax)) for n in xrange(0, L): f.write(frmstr.format(scoef._vec[n].real, scoef._vec[n].imag))
[ "def", "save_coef", "(", "scoef", ",", "filename", ")", ":", "nmax", "=", "scoef", ".", "nmax", "mmax", "=", "scoef", ".", "mmax", "frmstr", "=", "\"{0:.16e},{1:.16e}\\n\"", "L", "=", "(", "nmax", "+", "1", ")", "+", "mmax", "*", "(", "2", "*", "nm...
Saves ScalarCoeffs object 'scoef' to file. The first line of the file has the max number N and the max number M of the scoef structure separated by a comma. The remaining lines have the form 3.14, 2.718 The first number is the real part of the mode and the second is the imaginary.
[ "Saves", "ScalarCoeffs", "object", "scoef", "to", "file", ".", "The", "first", "line", "of", "the", "file", "has", "the", "max", "number", "N", "and", "the", "max", "number", "M", "of", "the", "scoef", "structure", "separated", "by", "a", "comma", ".", ...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/file.py#L71-L93
rdireen/spherepy
spherepy/file.py
load_patt
def load_patt(filename): """Loads a file that was saved with the save_patt routine.""" with open(filename) as f: lines = f.readlines() lst = lines[0].split(',') patt = np.zeros([int(lst[0]), int(lst[1])], dtype=np.complex128) lines.pop(0) for line in lines: lst = line.split(',') n = int(lst[0]) m = int(lst[1]) re = float(lst[2]) im = float(lst[3]) patt[n, m] = re + 1j * im return sp.ScalarPatternUniform(patt, doublesphere=False)
python
def load_patt(filename): """Loads a file that was saved with the save_patt routine.""" with open(filename) as f: lines = f.readlines() lst = lines[0].split(',') patt = np.zeros([int(lst[0]), int(lst[1])], dtype=np.complex128) lines.pop(0) for line in lines: lst = line.split(',') n = int(lst[0]) m = int(lst[1]) re = float(lst[2]) im = float(lst[3]) patt[n, m] = re + 1j * im return sp.ScalarPatternUniform(patt, doublesphere=False)
[ "def", "load_patt", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "lst", "=", "lines", "[", "0", "]", ".", "split", "(", "','", ")", "patt", "=", "np", ".", "zero...
Loads a file that was saved with the save_patt routine.
[ "Loads", "a", "file", "that", "was", "saved", "with", "the", "save_patt", "routine", "." ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/file.py#L96-L117
rdireen/spherepy
spherepy/file.py
load_vpatt
def load_vpatt(filename1, filename2): """Loads a VectorPatternUniform pattern that is saved between two files. """ with open(filename1) as f: lines = f.readlines() lst = lines[0].split(',') patt1 = np.zeros([int(lst[0]), int(lst[1])], dtype=np.complex128) lines.pop(0) for line in lines: lst = line.split(',') n = int(lst[0]) m = int(lst[1]) re = float(lst[2]) im = float(lst[3]) patt1[n, m] = re + 1j * im with open(filename2) as f: lines2 = f.readlines() lst = lines2[0].split(',') patt2 = np.zeros([int(lst[0]), int(lst[1])], dtype=np.complex128) lines2.pop(0) for line in lines2: lst = line.split(',') n = int(lst[0]) m = int(lst[1]) re = float(lst[2]) im = float(lst[3]) patt2[n, m] = re + 1j * im return sp.VectorPatternUniform(patt1, patt2)
python
def load_vpatt(filename1, filename2): """Loads a VectorPatternUniform pattern that is saved between two files. """ with open(filename1) as f: lines = f.readlines() lst = lines[0].split(',') patt1 = np.zeros([int(lst[0]), int(lst[1])], dtype=np.complex128) lines.pop(0) for line in lines: lst = line.split(',') n = int(lst[0]) m = int(lst[1]) re = float(lst[2]) im = float(lst[3]) patt1[n, m] = re + 1j * im with open(filename2) as f: lines2 = f.readlines() lst = lines2[0].split(',') patt2 = np.zeros([int(lst[0]), int(lst[1])], dtype=np.complex128) lines2.pop(0) for line in lines2: lst = line.split(',') n = int(lst[0]) m = int(lst[1]) re = float(lst[2]) im = float(lst[3]) patt2[n, m] = re + 1j * im return sp.VectorPatternUniform(patt1, patt2)
[ "def", "load_vpatt", "(", "filename1", ",", "filename2", ")", ":", "with", "open", "(", "filename1", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "lst", "=", "lines", "[", "0", "]", ".", "split", "(", "','", ")", "patt1", "...
Loads a VectorPatternUniform pattern that is saved between two files.
[ "Loads", "a", "VectorPatternUniform", "pattern", "that", "is", "saved", "between", "two", "files", "." ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/file.py#L119-L159
rdireen/spherepy
spherepy/file.py
load_coef
def load_coef(filename): """Loads a file that was saved with save_coef.""" with open(filename) as f: lines = f.readlines() lst = lines[0].split(',') nmax = int(lst[0]) mmax = int(lst[1]) L = (nmax + 1) + mmax * (2 * nmax - mmax + 1); vec = np.zeros(L, dtype=np.complex128) lines.pop(0) for n, line in enumerate(lines): lst = line.split(',') re = float(lst[0]) im = float(lst[1]) vec[n] = re + 1j * im return sp.ScalarCoefs(vec, nmax, mmax)
python
def load_coef(filename): """Loads a file that was saved with save_coef.""" with open(filename) as f: lines = f.readlines() lst = lines[0].split(',') nmax = int(lst[0]) mmax = int(lst[1]) L = (nmax + 1) + mmax * (2 * nmax - mmax + 1); vec = np.zeros(L, dtype=np.complex128) lines.pop(0) for n, line in enumerate(lines): lst = line.split(',') re = float(lst[0]) im = float(lst[1]) vec[n] = re + 1j * im return sp.ScalarCoefs(vec, nmax, mmax)
[ "def", "load_coef", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "lst", "=", "lines", "[", "0", "]", ".", "split", "(", "','", ")", "nmax", "=", "int", "(", "lst...
Loads a file that was saved with save_coef.
[ "Loads", "a", "file", "that", "was", "saved", "with", "save_coef", "." ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/file.py#L183-L206
rdireen/spherepy
spherepy/file.py
load_vcoef
def load_vcoef(filename): """Loads a set of vector coefficients that were saved in MATLAB. The third number on the first line is the directivity calculated within the MATLAB code.""" with open(filename) as f: lines = f.readlines() lst = lines[0].split(',') nmax = int(lst[0]) mmax = int(lst[1]) directivity = float(lst[2]) L = (nmax + 1) + mmax * (2 * nmax - mmax + 1); vec1 = np.zeros(L, dtype=np.complex128) vec2 = np.zeros(L, dtype=np.complex128) lines.pop(0) n = 0 in_vec2 = False for line in lines: if line.strip() == 'break': n = 0 in_vec2 = True; else: lst = line.split(',') re = float(lst[0]) im = float(lst[1]) if in_vec2: vec2[n] = re + 1j * im else: vec1[n] = re + 1j * im n += 1 return (sp.VectorCoefs(vec1, vec2, nmax, mmax), directivity)
python
def load_vcoef(filename): """Loads a set of vector coefficients that were saved in MATLAB. The third number on the first line is the directivity calculated within the MATLAB code.""" with open(filename) as f: lines = f.readlines() lst = lines[0].split(',') nmax = int(lst[0]) mmax = int(lst[1]) directivity = float(lst[2]) L = (nmax + 1) + mmax * (2 * nmax - mmax + 1); vec1 = np.zeros(L, dtype=np.complex128) vec2 = np.zeros(L, dtype=np.complex128) lines.pop(0) n = 0 in_vec2 = False for line in lines: if line.strip() == 'break': n = 0 in_vec2 = True; else: lst = line.split(',') re = float(lst[0]) im = float(lst[1]) if in_vec2: vec2[n] = re + 1j * im else: vec1[n] = re + 1j * im n += 1 return (sp.VectorCoefs(vec1, vec2, nmax, mmax), directivity)
[ "def", "load_vcoef", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "lst", "=", "lines", "[", "0", "]", ".", "split", "(", "','", ")", "nmax", "=", "int", "(", "ls...
Loads a set of vector coefficients that were saved in MATLAB. The third number on the first line is the directivity calculated within the MATLAB code.
[ "Loads", "a", "set", "of", "vector", "coefficients", "that", "were", "saved", "in", "MATLAB", ".", "The", "third", "number", "on", "the", "first", "line", "is", "the", "directivity", "calculated", "within", "the", "MATLAB", "code", "." ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/file.py#L208-L248
ltalirz/aiida-gudhi
aiida_gudhi/parsers/barcode.py
BarcodeParser.parse
def parse(cls, filename, max_life=None): """ Parse barcode from gudhi output. """ data = np.genfromtxt(filename) #data = np.genfromtxt(filename, dtype= (int, int, float, float)) if max_life is not None: data[np.isinf(data)] = max_life return data
python
def parse(cls, filename, max_life=None): """ Parse barcode from gudhi output. """ data = np.genfromtxt(filename) #data = np.genfromtxt(filename, dtype= (int, int, float, float)) if max_life is not None: data[np.isinf(data)] = max_life return data
[ "def", "parse", "(", "cls", ",", "filename", ",", "max_life", "=", "None", ")", ":", "data", "=", "np", ".", "genfromtxt", "(", "filename", ")", "#data = np.genfromtxt(filename, dtype= (int, int, float, float))", "if", "max_life", "is", "not", "None", ":", "data...
Parse barcode from gudhi output.
[ "Parse", "barcode", "from", "gudhi", "output", "." ]
train
https://github.com/ltalirz/aiida-gudhi/blob/81ebec782ddff3ab97a3e3242b809fec989fa4b9/aiida_gudhi/parsers/barcode.py#L16-L24
ltalirz/aiida-gudhi
aiida_gudhi/parsers/barcode.py
BarcodeParser.plot
def plot(self, dimension): """ Plot barcode using matplotlib. """ import matplotlib.pyplot as plt life_lines = self.get_life_lines(dimension) x, y = zip(*life_lines) plt.scatter(x, y) plt.xlabel("Birth") plt.ylabel("Death") if self.max_life is not None: plt.xlim([0, self.max_life]) plt.title("Persistence Homology Dimension {}".format(dimension)) #TODO: remove this plt.show()
python
def plot(self, dimension): """ Plot barcode using matplotlib. """ import matplotlib.pyplot as plt life_lines = self.get_life_lines(dimension) x, y = zip(*life_lines) plt.scatter(x, y) plt.xlabel("Birth") plt.ylabel("Death") if self.max_life is not None: plt.xlim([0, self.max_life]) plt.title("Persistence Homology Dimension {}".format(dimension)) #TODO: remove this plt.show()
[ "def", "plot", "(", "self", ",", "dimension", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "life_lines", "=", "self", ".", "get_life_lines", "(", "dimension", ")", "x", ",", "y", "=", "zip", "(", "*", "life_lines", ")", "plt", ".", "...
Plot barcode using matplotlib.
[ "Plot", "barcode", "using", "matplotlib", "." ]
train
https://github.com/ltalirz/aiida-gudhi/blob/81ebec782ddff3ab97a3e3242b809fec989fa4b9/aiida_gudhi/parsers/barcode.py#L33-L49
wglass/lighthouse
lighthouse/configurable.py
Configurable.from_config
def from_config(cls, name, config): """ Returns a Configurable instance with the given name and config. By default this is a simple matter of calling the constructor, but subclasses that are also `Pluggable` instances override this in order to check that the plugin is installed correctly first. """ cls.validate_config(config) instance = cls() if not instance.name: instance.name = config.get("name", name) instance.apply_config(config) return instance
python
def from_config(cls, name, config): """ Returns a Configurable instance with the given name and config. By default this is a simple matter of calling the constructor, but subclasses that are also `Pluggable` instances override this in order to check that the plugin is installed correctly first. """ cls.validate_config(config) instance = cls() if not instance.name: instance.name = config.get("name", name) instance.apply_config(config) return instance
[ "def", "from_config", "(", "cls", ",", "name", ",", "config", ")", ":", "cls", ".", "validate_config", "(", "config", ")", "instance", "=", "cls", "(", ")", "if", "not", "instance", ".", "name", ":", "instance", ".", "name", "=", "config", ".", "get"...
Returns a Configurable instance with the given name and config. By default this is a simple matter of calling the constructor, but subclasses that are also `Pluggable` instances override this in order to check that the plugin is installed correctly first.
[ "Returns", "a", "Configurable", "instance", "with", "the", "given", "name", "and", "config", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configurable.py#L38-L54
mistio/mist.client
examples/multi-provision.py
get_args
def get_args(): """ Supports the command-line arguments listed below. """ parser = argparse.ArgumentParser( description="Provision multiple VM's through mist.io. You can get " "information returned with the name of the virtual machine created " "and its main mac and ip address in IPv4 format. A post-script can be " "specified for post-processing.") parser.add_argument('-b', '--basename', nargs=1, required=True, help='Basename of the newly deployed VMs', dest='basename', type=str) parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true') parser.add_argument('-i', '--print-ips', required=False, help='Enable IP output', dest='ips', action='store_true') parser.add_argument('-m', '--print-macs', required=False, help='Enable MAC output', dest='macs', action='store_true') parser.add_argument('-l', '--log-file', nargs=1, required=False, help='File to log to (default = stdout)', dest='logfile', type=str) parser.add_argument('-n', '--number', nargs=1, required=False, help='Amount of VMs to deploy (default = 1)', dest='quantity', type=int, default=[1]) parser.add_argument('-M', '--monitoring', required=False, help='Enable monitoring on the virtual machines', dest='monitoring', action='store_true') parser.add_argument('-B', '--backend-name', required=False, help='The name of the backend to use for provisioning.' ' Defaults to the first available backend', dest='backend_name', type=str) parser.add_argument('-I', '--image-id', required=True, help='The image to deploy', dest='image_id') parser.add_argument('-S', '--size-id', required=True, help='The id of the size/flavor to use', dest='size_id') parser.add_argument('-N', '--networks', required=False, nargs='+', help='The ids of the networks to assign to the VMs', dest='networks') parser.add_argument('-s', '--post-script', nargs=1, required=False, help='Script to be called after each VM is created and' ' booted.', dest='post_script', type=str) parser.add_argument('-P', '--script-params', nargs=1, required=False, help='Script to be called after each VM is created and' ' booted.', dest='script_params', type=str) parser.add_argument('-H', '--host', required=False, help='mist.io instance to connect to', dest='host', type=str, default='https://mist.io') parser.add_argument('-u', '--user', nargs=1, required=False, help='email registered to mist.io', dest='username', type=str) parser.add_argument('-p', '--password', nargs=1, required=False, help='The password with which to connect to the host. ' 'If not specified, the user is prompted at runtime for' ' a password', dest='password', type=str) parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true') parser.add_argument('-w', '--wait-max', nargs=1, required=False, help='Maximum amount of seconds to wait when gathering' ' information (default = 600)', dest='maxwait', type=int, default=[600]) parser.add_argument('-f', '--associate-floating-ip', required=False, action='store_true', help='Auto-associates floating ips to vms in Openstack backens', dest='associate_floating_ip',) args = parser.parse_args() return args
python
def get_args(): """ Supports the command-line arguments listed below. """ parser = argparse.ArgumentParser( description="Provision multiple VM's through mist.io. You can get " "information returned with the name of the virtual machine created " "and its main mac and ip address in IPv4 format. A post-script can be " "specified for post-processing.") parser.add_argument('-b', '--basename', nargs=1, required=True, help='Basename of the newly deployed VMs', dest='basename', type=str) parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true') parser.add_argument('-i', '--print-ips', required=False, help='Enable IP output', dest='ips', action='store_true') parser.add_argument('-m', '--print-macs', required=False, help='Enable MAC output', dest='macs', action='store_true') parser.add_argument('-l', '--log-file', nargs=1, required=False, help='File to log to (default = stdout)', dest='logfile', type=str) parser.add_argument('-n', '--number', nargs=1, required=False, help='Amount of VMs to deploy (default = 1)', dest='quantity', type=int, default=[1]) parser.add_argument('-M', '--monitoring', required=False, help='Enable monitoring on the virtual machines', dest='monitoring', action='store_true') parser.add_argument('-B', '--backend-name', required=False, help='The name of the backend to use for provisioning.' ' Defaults to the first available backend', dest='backend_name', type=str) parser.add_argument('-I', '--image-id', required=True, help='The image to deploy', dest='image_id') parser.add_argument('-S', '--size-id', required=True, help='The id of the size/flavor to use', dest='size_id') parser.add_argument('-N', '--networks', required=False, nargs='+', help='The ids of the networks to assign to the VMs', dest='networks') parser.add_argument('-s', '--post-script', nargs=1, required=False, help='Script to be called after each VM is created and' ' booted.', dest='post_script', type=str) parser.add_argument('-P', '--script-params', nargs=1, required=False, help='Script to be called after each VM is created and' ' booted.', dest='script_params', type=str) parser.add_argument('-H', '--host', required=False, help='mist.io instance to connect to', dest='host', type=str, default='https://mist.io') parser.add_argument('-u', '--user', nargs=1, required=False, help='email registered to mist.io', dest='username', type=str) parser.add_argument('-p', '--password', nargs=1, required=False, help='The password with which to connect to the host. ' 'If not specified, the user is prompted at runtime for' ' a password', dest='password', type=str) parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true') parser.add_argument('-w', '--wait-max', nargs=1, required=False, help='Maximum amount of seconds to wait when gathering' ' information (default = 600)', dest='maxwait', type=int, default=[600]) parser.add_argument('-f', '--associate-floating-ip', required=False, action='store_true', help='Auto-associates floating ips to vms in Openstack backens', dest='associate_floating_ip',) args = parser.parse_args() return args
[ "def", "get_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Provision multiple VM's through mist.io. You can get \"", "\"information returned with the name of the virtual machine created \"", "\"and its main mac and ip address in IPv4...
Supports the command-line arguments listed below.
[ "Supports", "the", "command", "-", "line", "arguments", "listed", "below", "." ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/examples/multi-provision.py#L34-L104
darkfeline/animanager
animanager/commands/rules.py
command
def command(state, args): """List file priority rules.""" rules = query.files.get_priority_rules(state.db) print(tabulate(rules, headers=['ID', 'Regexp', 'Priority']))
python
def command(state, args): """List file priority rules.""" rules = query.files.get_priority_rules(state.db) print(tabulate(rules, headers=['ID', 'Regexp', 'Priority']))
[ "def", "command", "(", "state", ",", "args", ")", ":", "rules", "=", "query", ".", "files", ".", "get_priority_rules", "(", "state", ".", "db", ")", "print", "(", "tabulate", "(", "rules", ",", "headers", "=", "[", "'ID'", ",", "'Regexp'", ",", "'Pri...
List file priority rules.
[ "List", "file", "priority", "rules", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/rules.py#L23-L26
frnsys/broca
broca/tokenize/keyword/apriori.py
filter_support
def filter_support(candidates, transactions, min_sup): """ Filter candidates to a frequent set by some minimum support. """ counts = defaultdict(lambda: 0) for transaction in transactions: for c in (c for c in candidates if set(c).issubset(transaction)): counts[c] += 1 return {i for i in candidates if counts[i]/len(transactions) >= min_sup}
python
def filter_support(candidates, transactions, min_sup): """ Filter candidates to a frequent set by some minimum support. """ counts = defaultdict(lambda: 0) for transaction in transactions: for c in (c for c in candidates if set(c).issubset(transaction)): counts[c] += 1 return {i for i in candidates if counts[i]/len(transactions) >= min_sup}
[ "def", "filter_support", "(", "candidates", ",", "transactions", ",", "min_sup", ")", ":", "counts", "=", "defaultdict", "(", "lambda", ":", "0", ")", "for", "transaction", "in", "transactions", ":", "for", "c", "in", "(", "c", "for", "c", "in", "candida...
Filter candidates to a frequent set by some minimum support.
[ "Filter", "candidates", "to", "a", "frequent", "set", "by", "some", "minimum", "support", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/tokenize/keyword/apriori.py#L72-L80
frnsys/broca
broca/tokenize/keyword/apriori.py
generate_candidates
def generate_candidates(freq_set, k): """ Generate candidates for an iteration. Use this only for k >= 2. """ single_set = {(i,) for i in set(flatten(freq_set))} # TO DO generating all combinations gets very slow for large documents. # Is there a way of doing this without exhaustively searching all combinations? cands = [flatten(f) for f in combinations(single_set, k)] return [cand for cand in cands if validate_candidate(cand, freq_set, k)]
python
def generate_candidates(freq_set, k): """ Generate candidates for an iteration. Use this only for k >= 2. """ single_set = {(i,) for i in set(flatten(freq_set))} # TO DO generating all combinations gets very slow for large documents. # Is there a way of doing this without exhaustively searching all combinations? cands = [flatten(f) for f in combinations(single_set, k)] return [cand for cand in cands if validate_candidate(cand, freq_set, k)]
[ "def", "generate_candidates", "(", "freq_set", ",", "k", ")", ":", "single_set", "=", "{", "(", "i", ",", ")", "for", "i", "in", "set", "(", "flatten", "(", "freq_set", ")", ")", "}", "# TO DO generating all combinations gets very slow for large documents.", "# ...
Generate candidates for an iteration. Use this only for k >= 2.
[ "Generate", "candidates", "for", "an", "iteration", ".", "Use", "this", "only", "for", "k", ">", "=", "2", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/tokenize/keyword/apriori.py#L83-L93
frnsys/broca
broca/tokenize/keyword/apriori.py
validate_candidate
def validate_candidate(candidate, freq_set, k): """ Checks if we should keep a candidate. We keep a candidate if all its k-1-sized subsets are present in the frequent sets. """ for subcand in combinations(candidate, k-1): if subcand not in freq_set: return False return True
python
def validate_candidate(candidate, freq_set, k): """ Checks if we should keep a candidate. We keep a candidate if all its k-1-sized subsets are present in the frequent sets. """ for subcand in combinations(candidate, k-1): if subcand not in freq_set: return False return True
[ "def", "validate_candidate", "(", "candidate", ",", "freq_set", ",", "k", ")", ":", "for", "subcand", "in", "combinations", "(", "candidate", ",", "k", "-", "1", ")", ":", "if", "subcand", "not", "in", "freq_set", ":", "return", "False", "return", "True"...
Checks if we should keep a candidate. We keep a candidate if all its k-1-sized subsets are present in the frequent sets.
[ "Checks", "if", "we", "should", "keep", "a", "candidate", ".", "We", "keep", "a", "candidate", "if", "all", "its", "k", "-", "1", "-", "sized", "subsets", "are", "present", "in", "the", "frequent", "sets", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/tokenize/keyword/apriori.py#L96-L105
frnsys/broca
broca/tokenize/keyword/apriori.py
AprioriTokenizer.tokenize
def tokenize(self, docs): """ The first pass consists of converting documents into "transactions" (sets of their tokens) and the initial frequency/support filtering. Then iterate until we close in on a final set. `docs` can be any iterator or generator so long as it yields lists. Each list represents a document (i.e. is a list of tokens). For example, it can be a list of lists of nouns and noun phrases if trying to identify aspects, where each list represents a sentence or document. `min_sup` defines the minimum frequency (as a ratio over the total) necessary to keep a candidate. """ if self.min_sup < 1/len(docs): raise Exception('`min_sup` must be greater than or equal to `1/len(docs)`.') # First pass candidates = set() transactions = [] # Use nouns and noun phrases. for doc in POSTokenizer().tokenize(docs): transaction = set(doc) candidates = candidates.union({(t,) for t in transaction}) transactions.append(transaction) freq_set = filter_support(candidates, transactions, self.min_sup) # Iterate k = 2 last_set = set() while freq_set != set(): last_set = freq_set cands = generate_candidates(freq_set, k) freq_set = filter_support(cands, transactions, self.min_sup) k += 1 # Map documents to their keywords. keywords = flatten(last_set) return prune([[kw for kw in keywords if kw in doc] for doc in docs])
python
def tokenize(self, docs): """ The first pass consists of converting documents into "transactions" (sets of their tokens) and the initial frequency/support filtering. Then iterate until we close in on a final set. `docs` can be any iterator or generator so long as it yields lists. Each list represents a document (i.e. is a list of tokens). For example, it can be a list of lists of nouns and noun phrases if trying to identify aspects, where each list represents a sentence or document. `min_sup` defines the minimum frequency (as a ratio over the total) necessary to keep a candidate. """ if self.min_sup < 1/len(docs): raise Exception('`min_sup` must be greater than or equal to `1/len(docs)`.') # First pass candidates = set() transactions = [] # Use nouns and noun phrases. for doc in POSTokenizer().tokenize(docs): transaction = set(doc) candidates = candidates.union({(t,) for t in transaction}) transactions.append(transaction) freq_set = filter_support(candidates, transactions, self.min_sup) # Iterate k = 2 last_set = set() while freq_set != set(): last_set = freq_set cands = generate_candidates(freq_set, k) freq_set = filter_support(cands, transactions, self.min_sup) k += 1 # Map documents to their keywords. keywords = flatten(last_set) return prune([[kw for kw in keywords if kw in doc] for doc in docs])
[ "def", "tokenize", "(", "self", ",", "docs", ")", ":", "if", "self", ".", "min_sup", "<", "1", "/", "len", "(", "docs", ")", ":", "raise", "Exception", "(", "'`min_sup` must be greater than or equal to `1/len(docs)`.'", ")", "# First pass", "candidates", "=", ...
The first pass consists of converting documents into "transactions" (sets of their tokens) and the initial frequency/support filtering. Then iterate until we close in on a final set. `docs` can be any iterator or generator so long as it yields lists. Each list represents a document (i.e. is a list of tokens). For example, it can be a list of lists of nouns and noun phrases if trying to identify aspects, where each list represents a sentence or document. `min_sup` defines the minimum frequency (as a ratio over the total) necessary to keep a candidate.
[ "The", "first", "pass", "consists", "of", "converting", "documents", "into", "transactions", "(", "sets", "of", "their", "tokens", ")", "and", "the", "initial", "frequency", "/", "support", "filtering", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/tokenize/keyword/apriori.py#L21-L62
peterldowns/python-mustache
mustache/state.py
State.compile_tag_re
def compile_tag_re(self, tags): """ Return the regex used to look for Mustache tags compiled to work with specific opening tags, close tags, and tag types. """ return re.compile(self.raw_tag_re % tags, self.re_flags)
python
def compile_tag_re(self, tags): """ Return the regex used to look for Mustache tags compiled to work with specific opening tags, close tags, and tag types. """ return re.compile(self.raw_tag_re % tags, self.re_flags)
[ "def", "compile_tag_re", "(", "self", ",", "tags", ")", ":", "return", "re", ".", "compile", "(", "self", ".", "raw_tag_re", "%", "tags", ",", "self", ".", "re_flags", ")" ]
Return the regex used to look for Mustache tags compiled to work with specific opening tags, close tags, and tag types.
[ "Return", "the", "regex", "used", "to", "look", "for", "Mustache", "tags", "compiled", "to", "work", "with", "specific", "opening", "tags", "close", "tags", "and", "tag", "types", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/state.py#L57-L62
gkmngrgn/radpress
radpress/south_migrations/0004_radpress_article_authors.py
Migration.forwards
def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." User = orm[user_orm_label] try: user = User.objects.all()[0] for article in orm.Article.objects.all(): article.author = user article.save() except IndexError: pass
python
def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." User = orm[user_orm_label] try: user = User.objects.all()[0] for article in orm.Article.objects.all(): article.author = user article.save() except IndexError: pass
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "# Note: Remember to use orm['appname.ModelName'] rather than \"from appname.models...\"", "User", "=", "orm", "[", "user_orm_label", "]", "try", ":", "user", "=", "User", ".", "objects", ".", "all", "(", ")", ...
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
train
https://github.com/gkmngrgn/radpress/blob/2ed3b97f94e722479601832ffc40ea2135cda916/radpress/south_migrations/0004_radpress_article_authors.py#L26-L40
rfk/tnetstring
tnetstring/__init__.py
dumps
def dumps(value,encoding=None): """dumps(object,encoding=None) -> string This function dumps a python object as a tnetstring. """ # This uses a deque to collect output fragments in reverse order, # then joins them together at the end. It's measurably faster # than creating all the intermediate strings. # If you're reading this to get a handle on the tnetstring format, # consider the _gdumps() function instead; it's a standard top-down # generator that's simpler to understand but much less efficient. q = deque() _rdumpq(q,0,value,encoding) return "".join(q)
python
def dumps(value,encoding=None): """dumps(object,encoding=None) -> string This function dumps a python object as a tnetstring. """ # This uses a deque to collect output fragments in reverse order, # then joins them together at the end. It's measurably faster # than creating all the intermediate strings. # If you're reading this to get a handle on the tnetstring format, # consider the _gdumps() function instead; it's a standard top-down # generator that's simpler to understand but much less efficient. q = deque() _rdumpq(q,0,value,encoding) return "".join(q)
[ "def", "dumps", "(", "value", ",", "encoding", "=", "None", ")", ":", "# This uses a deque to collect output fragments in reverse order,", "# then joins them together at the end. It's measurably faster", "# than creating all the intermediate strings.", "# If you're reading this to get...
dumps(object,encoding=None) -> string This function dumps a python object as a tnetstring.
[ "dumps", "(", "object", "encoding", "=", "None", ")", "-", ">", "string" ]
train
https://github.com/rfk/tnetstring/blob/146381498a07d6053e044375562be08ef16017c2/tnetstring/__init__.py#L61-L74
rfk/tnetstring
tnetstring/__init__.py
_rdumpq
def _rdumpq(q,size,value,encoding=None): """Dump value as a tnetstring, to a deque instance, last chunks first. This function generates the tnetstring representation of the given value, pushing chunks of the output onto the given deque instance. It pushes the last chunk first, then recursively generates more chunks. When passed in the current size of the string in the queue, it will return the new size of the string in the queue. Operating last-chunk-first makes it easy to calculate the size written for recursive structures without having to build their representation as a string. This is measurably faster than generating the intermediate strings, especially on deeply nested structures. """ write = q.appendleft if value is None: write("0:~") return size + 3 if value is True: write("4:true!") return size + 7 if value is False: write("5:false!") return size + 8 if isinstance(value,(int,long)): data = str(value) ldata = len(data) span = str(ldata) write("#") write(data) write(":") write(span) return size + 2 + len(span) + ldata if isinstance(value,(float,)): # Use repr() for float rather than str(). # It round-trips more accurately. # Probably unnecessary in later python versions that # use David Gay's ftoa routines. data = repr(value) ldata = len(data) span = str(ldata) write("^") write(data) write(":") write(span) return size + 2 + len(span) + ldata if isinstance(value,str): lvalue = len(value) span = str(lvalue) write(",") write(value) write(":") write(span) return size + 2 + len(span) + lvalue if isinstance(value,(list,tuple,)): write("]") init_size = size = size + 1 for item in reversed(value): size = _rdumpq(q,size,item,encoding) span = str(size - init_size) write(":") write(span) return size + 1 + len(span) if isinstance(value,dict): write("}") init_size = size = size + 1 for (k,v) in value.iteritems(): size = _rdumpq(q,size,v,encoding) size = _rdumpq(q,size,k,encoding) span = str(size - init_size) write(":") write(span) return size + 1 + len(span) if isinstance(value,unicode): if encoding is None: raise ValueError("must specify encoding to dump unicode strings") value = value.encode(encoding) lvalue = len(value) span = str(lvalue) write(",") write(value) write(":") write(span) return size + 2 + len(span) + lvalue raise ValueError("unserializable object")
python
def _rdumpq(q,size,value,encoding=None): """Dump value as a tnetstring, to a deque instance, last chunks first. This function generates the tnetstring representation of the given value, pushing chunks of the output onto the given deque instance. It pushes the last chunk first, then recursively generates more chunks. When passed in the current size of the string in the queue, it will return the new size of the string in the queue. Operating last-chunk-first makes it easy to calculate the size written for recursive structures without having to build their representation as a string. This is measurably faster than generating the intermediate strings, especially on deeply nested structures. """ write = q.appendleft if value is None: write("0:~") return size + 3 if value is True: write("4:true!") return size + 7 if value is False: write("5:false!") return size + 8 if isinstance(value,(int,long)): data = str(value) ldata = len(data) span = str(ldata) write("#") write(data) write(":") write(span) return size + 2 + len(span) + ldata if isinstance(value,(float,)): # Use repr() for float rather than str(). # It round-trips more accurately. # Probably unnecessary in later python versions that # use David Gay's ftoa routines. data = repr(value) ldata = len(data) span = str(ldata) write("^") write(data) write(":") write(span) return size + 2 + len(span) + ldata if isinstance(value,str): lvalue = len(value) span = str(lvalue) write(",") write(value) write(":") write(span) return size + 2 + len(span) + lvalue if isinstance(value,(list,tuple,)): write("]") init_size = size = size + 1 for item in reversed(value): size = _rdumpq(q,size,item,encoding) span = str(size - init_size) write(":") write(span) return size + 1 + len(span) if isinstance(value,dict): write("}") init_size = size = size + 1 for (k,v) in value.iteritems(): size = _rdumpq(q,size,v,encoding) size = _rdumpq(q,size,k,encoding) span = str(size - init_size) write(":") write(span) return size + 1 + len(span) if isinstance(value,unicode): if encoding is None: raise ValueError("must specify encoding to dump unicode strings") value = value.encode(encoding) lvalue = len(value) span = str(lvalue) write(",") write(value) write(":") write(span) return size + 2 + len(span) + lvalue raise ValueError("unserializable object")
[ "def", "_rdumpq", "(", "q", ",", "size", ",", "value", ",", "encoding", "=", "None", ")", ":", "write", "=", "q", ".", "appendleft", "if", "value", "is", "None", ":", "write", "(", "\"0:~\"", ")", "return", "size", "+", "3", "if", "value", "is", ...
Dump value as a tnetstring, to a deque instance, last chunks first. This function generates the tnetstring representation of the given value, pushing chunks of the output onto the given deque instance. It pushes the last chunk first, then recursively generates more chunks. When passed in the current size of the string in the queue, it will return the new size of the string in the queue. Operating last-chunk-first makes it easy to calculate the size written for recursive structures without having to build their representation as a string. This is measurably faster than generating the intermediate strings, especially on deeply nested structures.
[ "Dump", "value", "as", "a", "tnetstring", "to", "a", "deque", "instance", "last", "chunks", "first", "." ]
train
https://github.com/rfk/tnetstring/blob/146381498a07d6053e044375562be08ef16017c2/tnetstring/__init__.py#L86-L171
rfk/tnetstring
tnetstring/__init__.py
_gdumps
def _gdumps(value,encoding): """Generate fragments of value dumped as a tnetstring. This is the naive dumping algorithm, implemented as a generator so that it's easy to pass to "".join() without building a new list. This is mainly here for comparison purposes; the _rdumpq version is measurably faster as it doesn't have to build intermediate strins. """ if value is None: yield "0:~" elif value is True: yield "4:true!" elif value is False: yield "5:false!" elif isinstance(value,(int,long)): data = str(value) yield str(len(data)) yield ":" yield data yield "#" elif isinstance(value,(float,)): data = repr(value) yield str(len(data)) yield ":" yield data yield "^" elif isinstance(value,(str,)): yield str(len(value)) yield ":" yield value yield "," elif isinstance(value,(list,tuple,)): sub = [] for item in value: sub.extend(_gdumps(item)) sub = "".join(sub) yield str(len(sub)) yield ":" yield sub yield "]" elif isinstance(value,(dict,)): sub = [] for (k,v) in value.iteritems(): sub.extend(_gdumps(k)) sub.extend(_gdumps(v)) sub = "".join(sub) yield str(len(sub)) yield ":" yield sub yield "}" elif isinstance(value,(unicode,)): if encoding is None: raise ValueError("must specify encoding to dump unicode strings") value = value.encode(encoding) yield str(len(value)) yield ":" yield value yield "," else: raise ValueError("unserializable object")
python
def _gdumps(value,encoding): """Generate fragments of value dumped as a tnetstring. This is the naive dumping algorithm, implemented as a generator so that it's easy to pass to "".join() without building a new list. This is mainly here for comparison purposes; the _rdumpq version is measurably faster as it doesn't have to build intermediate strins. """ if value is None: yield "0:~" elif value is True: yield "4:true!" elif value is False: yield "5:false!" elif isinstance(value,(int,long)): data = str(value) yield str(len(data)) yield ":" yield data yield "#" elif isinstance(value,(float,)): data = repr(value) yield str(len(data)) yield ":" yield data yield "^" elif isinstance(value,(str,)): yield str(len(value)) yield ":" yield value yield "," elif isinstance(value,(list,tuple,)): sub = [] for item in value: sub.extend(_gdumps(item)) sub = "".join(sub) yield str(len(sub)) yield ":" yield sub yield "]" elif isinstance(value,(dict,)): sub = [] for (k,v) in value.iteritems(): sub.extend(_gdumps(k)) sub.extend(_gdumps(v)) sub = "".join(sub) yield str(len(sub)) yield ":" yield sub yield "}" elif isinstance(value,(unicode,)): if encoding is None: raise ValueError("must specify encoding to dump unicode strings") value = value.encode(encoding) yield str(len(value)) yield ":" yield value yield "," else: raise ValueError("unserializable object")
[ "def", "_gdumps", "(", "value", ",", "encoding", ")", ":", "if", "value", "is", "None", ":", "yield", "\"0:~\"", "elif", "value", "is", "True", ":", "yield", "\"4:true!\"", "elif", "value", "is", "False", ":", "yield", "\"5:false!\"", "elif", "isinstance",...
Generate fragments of value dumped as a tnetstring. This is the naive dumping algorithm, implemented as a generator so that it's easy to pass to "".join() without building a new list. This is mainly here for comparison purposes; the _rdumpq version is measurably faster as it doesn't have to build intermediate strins.
[ "Generate", "fragments", "of", "value", "dumped", "as", "a", "tnetstring", "." ]
train
https://github.com/rfk/tnetstring/blob/146381498a07d6053e044375562be08ef16017c2/tnetstring/__init__.py#L174-L234
rfk/tnetstring
tnetstring/__init__.py
load
def load(file,encoding=None): """load(file,encoding=None) -> object This function reads a tnetstring from a file and parses it into a python object. The file must support the read() method, and this function promises not to read more data than necessary. """ # Read the length prefix one char at a time. # Note that the netstring spec explicitly forbids padding zeros. c = file.read(1) if not c.isdigit(): raise ValueError("not a tnetstring: missing or invalid length prefix") datalen = ord(c) - ord("0") c = file.read(1) if datalen != 0: while c.isdigit(): datalen = (10 * datalen) + (ord(c) - ord("0")) if datalen > 999999999: errmsg = "not a tnetstring: absurdly large length prefix" raise ValueError(errmsg) c = file.read(1) if c != ":": raise ValueError("not a tnetstring: missing or invalid length prefix") # Now we can read and parse the payload. # This repeats the dispatch logic of pop() so we can avoid # re-constructing the outermost tnetstring. data = file.read(datalen) if len(data) != datalen: raise ValueError("not a tnetstring: length prefix too big") type = file.read(1) if type == ",": if encoding is not None: return data.decode(encoding) return data if type == "#": try: return int(data) except ValueError: raise ValueError("not a tnetstring: invalid integer literal") if type == "^": try: return float(data) except ValueError: raise ValueError("not a tnetstring: invalid float literal") if type == "!": if data == "true": return True elif data == "false": return False else: raise ValueError("not a tnetstring: invalid boolean literal") if type == "~": if data: raise ValueError("not a tnetstring: invalid null literal") return None if type == "]": l = [] while data: (item,data) = pop(data,encoding) l.append(item) return l if type == "}": d = {} while data: (key,data) = pop(data,encoding) (val,data) = pop(data,encoding) d[key] = val return d raise ValueError("unknown type tag")
python
def load(file,encoding=None): """load(file,encoding=None) -> object This function reads a tnetstring from a file and parses it into a python object. The file must support the read() method, and this function promises not to read more data than necessary. """ # Read the length prefix one char at a time. # Note that the netstring spec explicitly forbids padding zeros. c = file.read(1) if not c.isdigit(): raise ValueError("not a tnetstring: missing or invalid length prefix") datalen = ord(c) - ord("0") c = file.read(1) if datalen != 0: while c.isdigit(): datalen = (10 * datalen) + (ord(c) - ord("0")) if datalen > 999999999: errmsg = "not a tnetstring: absurdly large length prefix" raise ValueError(errmsg) c = file.read(1) if c != ":": raise ValueError("not a tnetstring: missing or invalid length prefix") # Now we can read and parse the payload. # This repeats the dispatch logic of pop() so we can avoid # re-constructing the outermost tnetstring. data = file.read(datalen) if len(data) != datalen: raise ValueError("not a tnetstring: length prefix too big") type = file.read(1) if type == ",": if encoding is not None: return data.decode(encoding) return data if type == "#": try: return int(data) except ValueError: raise ValueError("not a tnetstring: invalid integer literal") if type == "^": try: return float(data) except ValueError: raise ValueError("not a tnetstring: invalid float literal") if type == "!": if data == "true": return True elif data == "false": return False else: raise ValueError("not a tnetstring: invalid boolean literal") if type == "~": if data: raise ValueError("not a tnetstring: invalid null literal") return None if type == "]": l = [] while data: (item,data) = pop(data,encoding) l.append(item) return l if type == "}": d = {} while data: (key,data) = pop(data,encoding) (val,data) = pop(data,encoding) d[key] = val return d raise ValueError("unknown type tag")
[ "def", "load", "(", "file", ",", "encoding", "=", "None", ")", ":", "# Read the length prefix one char at a time.", "# Note that the netstring spec explicitly forbids padding zeros.", "c", "=", "file", ".", "read", "(", "1", ")", "if", "not", "c", ".", "isdigit", ...
load(file,encoding=None) -> object This function reads a tnetstring from a file and parses it into a python object. The file must support the read() method, and this function promises not to read more data than necessary.
[ "load", "(", "file", "encoding", "=", "None", ")", "-", ">", "object" ]
train
https://github.com/rfk/tnetstring/blob/146381498a07d6053e044375562be08ef16017c2/tnetstring/__init__.py#L248-L316
rfk/tnetstring
tnetstring/__init__.py
pop
def pop(string,encoding=None): """pop(string,encoding=None) -> (object, remain) This function parses a tnetstring into a python object. It returns a tuple giving the parsed object and a string containing any unparsed data from the end of the string. """ # Parse out data length, type and remaining string. try: (dlen,rest) = string.split(":",1) dlen = int(dlen) except ValueError: raise ValueError("not a tnetstring: missing or invalid length prefix") try: (data,type,remain) = (rest[:dlen],rest[dlen],rest[dlen+1:]) except IndexError: # This fires if len(rest) < dlen, meaning we don't need # to further validate that data is the right length. raise ValueError("not a tnetstring: invalid length prefix") # Parse the data based on the type tag. if type == ",": if encoding is not None: return (data.decode(encoding),remain) return (data,remain) if type == "#": try: return (int(data),remain) except ValueError: raise ValueError("not a tnetstring: invalid integer literal") if type == "^": try: return (float(data),remain) except ValueError: raise ValueError("not a tnetstring: invalid float literal") if type == "!": if data == "true": return (True,remain) elif data == "false": return (False,remain) else: raise ValueError("not a tnetstring: invalid boolean literal") if type == "~": if data: raise ValueError("not a tnetstring: invalid null literal") return (None,remain) if type == "]": l = [] while data: (item,data) = pop(data,encoding) l.append(item) return (l,remain) if type == "}": d = {} while data: (key,data) = pop(data,encoding) (val,data) = pop(data,encoding) d[key] = val return (d,remain) raise ValueError("unknown type tag")
python
def pop(string,encoding=None): """pop(string,encoding=None) -> (object, remain) This function parses a tnetstring into a python object. It returns a tuple giving the parsed object and a string containing any unparsed data from the end of the string. """ # Parse out data length, type and remaining string. try: (dlen,rest) = string.split(":",1) dlen = int(dlen) except ValueError: raise ValueError("not a tnetstring: missing or invalid length prefix") try: (data,type,remain) = (rest[:dlen],rest[dlen],rest[dlen+1:]) except IndexError: # This fires if len(rest) < dlen, meaning we don't need # to further validate that data is the right length. raise ValueError("not a tnetstring: invalid length prefix") # Parse the data based on the type tag. if type == ",": if encoding is not None: return (data.decode(encoding),remain) return (data,remain) if type == "#": try: return (int(data),remain) except ValueError: raise ValueError("not a tnetstring: invalid integer literal") if type == "^": try: return (float(data),remain) except ValueError: raise ValueError("not a tnetstring: invalid float literal") if type == "!": if data == "true": return (True,remain) elif data == "false": return (False,remain) else: raise ValueError("not a tnetstring: invalid boolean literal") if type == "~": if data: raise ValueError("not a tnetstring: invalid null literal") return (None,remain) if type == "]": l = [] while data: (item,data) = pop(data,encoding) l.append(item) return (l,remain) if type == "}": d = {} while data: (key,data) = pop(data,encoding) (val,data) = pop(data,encoding) d[key] = val return (d,remain) raise ValueError("unknown type tag")
[ "def", "pop", "(", "string", ",", "encoding", "=", "None", ")", ":", "# Parse out data length, type and remaining string.", "try", ":", "(", "dlen", ",", "rest", ")", "=", "string", ".", "split", "(", "\":\"", ",", "1", ")", "dlen", "=", "int", "(", "dl...
pop(string,encoding=None) -> (object, remain) This function parses a tnetstring into a python object. It returns a tuple giving the parsed object and a string containing any unparsed data from the end of the string.
[ "pop", "(", "string", "encoding", "=", "None", ")", "-", ">", "(", "object", "remain", ")" ]
train
https://github.com/rfk/tnetstring/blob/146381498a07d6053e044375562be08ef16017c2/tnetstring/__init__.py#L320-L378
ucbvislab/radiotool
radiotool/utils.py
log_magnitude_spectrum
def log_magnitude_spectrum(frames): """Compute the log of the magnitude spectrum of frames""" return N.log(N.abs(N.fft.rfft(frames)).clip(1e-5, N.inf))
python
def log_magnitude_spectrum(frames): """Compute the log of the magnitude spectrum of frames""" return N.log(N.abs(N.fft.rfft(frames)).clip(1e-5, N.inf))
[ "def", "log_magnitude_spectrum", "(", "frames", ")", ":", "return", "N", ".", "log", "(", "N", ".", "abs", "(", "N", ".", "fft", ".", "rfft", "(", "frames", ")", ")", ".", "clip", "(", "1e-5", ",", "N", ".", "inf", ")", ")" ]
Compute the log of the magnitude spectrum of frames
[ "Compute", "the", "log", "of", "the", "magnitude", "spectrum", "of", "frames" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/utils.py#L14-L16
ucbvislab/radiotool
radiotool/utils.py
RMS_energy
def RMS_energy(frames): """Computes the RMS energy of frames""" f = frames.flatten() return N.sqrt(N.mean(f * f))
python
def RMS_energy(frames): """Computes the RMS energy of frames""" f = frames.flatten() return N.sqrt(N.mean(f * f))
[ "def", "RMS_energy", "(", "frames", ")", ":", "f", "=", "frames", ".", "flatten", "(", ")", "return", "N", ".", "sqrt", "(", "N", ".", "mean", "(", "f", "*", "f", ")", ")" ]
Computes the RMS energy of frames
[ "Computes", "the", "RMS", "energy", "of", "frames" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/utils.py#L24-L27
ucbvislab/radiotool
radiotool/utils.py
normalize_features
def normalize_features(features): """Standardizes features array to fall between 0 and 1""" return (features - N.min(features)) / (N.max(features) - N.min(features))
python
def normalize_features(features): """Standardizes features array to fall between 0 and 1""" return (features - N.min(features)) / (N.max(features) - N.min(features))
[ "def", "normalize_features", "(", "features", ")", ":", "return", "(", "features", "-", "N", ".", "min", "(", "features", ")", ")", "/", "(", "N", ".", "max", "(", "features", ")", "-", "N", ".", "min", "(", "features", ")", ")" ]
Standardizes features array to fall between 0 and 1
[ "Standardizes", "features", "array", "to", "fall", "between", "0", "and", "1" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/utils.py#L30-L32
ucbvislab/radiotool
radiotool/utils.py
zero_crossing_last
def zero_crossing_last(frames): """Finds the last zero crossing in frames""" frames = N.array(frames) crossings = N.where(N.diff(N.sign(frames))) # crossings = N.where(frames[:n] * frames[1:n + 1] < 0) if len(crossings[0]) == 0: print "No zero crossing" return len(frames) - 1 return crossings[0][-1]
python
def zero_crossing_last(frames): """Finds the last zero crossing in frames""" frames = N.array(frames) crossings = N.where(N.diff(N.sign(frames))) # crossings = N.where(frames[:n] * frames[1:n + 1] < 0) if len(crossings[0]) == 0: print "No zero crossing" return len(frames) - 1 return crossings[0][-1]
[ "def", "zero_crossing_last", "(", "frames", ")", ":", "frames", "=", "N", ".", "array", "(", "frames", ")", "crossings", "=", "N", ".", "where", "(", "N", ".", "diff", "(", "N", ".", "sign", "(", "frames", ")", ")", ")", "# crossings = N.where(frames[:...
Finds the last zero crossing in frames
[ "Finds", "the", "last", "zero", "crossing", "in", "frames" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/utils.py#L35-L45
ucbvislab/radiotool
radiotool/utils.py
limiter
def limiter(arr): """ Restrict the maximum and minimum values of arr """ dyn_range = 32767.0 / 32767.0 lim_thresh = 30000.0 / 32767.0 lim_range = dyn_range - lim_thresh new_arr = arr.copy() inds = N.where(arr > lim_thresh)[0] new_arr[inds] = (new_arr[inds] - lim_thresh) / lim_range new_arr[inds] = (N.arctan(new_arr[inds]) * 2.0 / N.pi) *\ lim_range + lim_thresh inds = N.where(arr < -lim_thresh)[0] new_arr[inds] = -(new_arr[inds] + lim_thresh) / lim_range new_arr[inds] = -( N.arctan(new_arr[inds]) * 2.0 / N.pi * lim_range + lim_thresh) return new_arr
python
def limiter(arr): """ Restrict the maximum and minimum values of arr """ dyn_range = 32767.0 / 32767.0 lim_thresh = 30000.0 / 32767.0 lim_range = dyn_range - lim_thresh new_arr = arr.copy() inds = N.where(arr > lim_thresh)[0] new_arr[inds] = (new_arr[inds] - lim_thresh) / lim_range new_arr[inds] = (N.arctan(new_arr[inds]) * 2.0 / N.pi) *\ lim_range + lim_thresh inds = N.where(arr < -lim_thresh)[0] new_arr[inds] = -(new_arr[inds] + lim_thresh) / lim_range new_arr[inds] = -( N.arctan(new_arr[inds]) * 2.0 / N.pi * lim_range + lim_thresh) return new_arr
[ "def", "limiter", "(", "arr", ")", ":", "dyn_range", "=", "32767.0", "/", "32767.0", "lim_thresh", "=", "30000.0", "/", "32767.0", "lim_range", "=", "dyn_range", "-", "lim_thresh", "new_arr", "=", "arr", ".", "copy", "(", ")", "inds", "=", "N", ".", "w...
Restrict the maximum and minimum values of arr
[ "Restrict", "the", "maximum", "and", "minimum", "values", "of", "arr" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/utils.py#L66-L88
ucbvislab/radiotool
radiotool/utils.py
linear
def linear(arr1, arr2): """ Create a linear blend of arr1 (fading out) and arr2 (fading in) """ n = N.shape(arr1)[0] try: channels = N.shape(arr1)[1] except: channels = 1 f_in = N.linspace(0, 1, num=n) f_out = N.linspace(1, 0, num=n) # f_in = N.arange(n) / float(n - 1) # f_out = N.arange(n - 1, -1, -1) / float(n) if channels > 1: f_in = N.tile(f_in, (channels, 1)).T f_out = N.tile(f_out, (channels, 1)).T vals = f_out * arr1 + f_in * arr2 return vals
python
def linear(arr1, arr2): """ Create a linear blend of arr1 (fading out) and arr2 (fading in) """ n = N.shape(arr1)[0] try: channels = N.shape(arr1)[1] except: channels = 1 f_in = N.linspace(0, 1, num=n) f_out = N.linspace(1, 0, num=n) # f_in = N.arange(n) / float(n - 1) # f_out = N.arange(n - 1, -1, -1) / float(n) if channels > 1: f_in = N.tile(f_in, (channels, 1)).T f_out = N.tile(f_out, (channels, 1)).T vals = f_out * arr1 + f_in * arr2 return vals
[ "def", "linear", "(", "arr1", ",", "arr2", ")", ":", "n", "=", "N", ".", "shape", "(", "arr1", ")", "[", "0", "]", "try", ":", "channels", "=", "N", ".", "shape", "(", "arr1", ")", "[", "1", "]", "except", ":", "channels", "=", "1", "f_in", ...
Create a linear blend of arr1 (fading out) and arr2 (fading in)
[ "Create", "a", "linear", "blend", "of", "arr1", "(", "fading", "out", ")", "and", "arr2", "(", "fading", "in", ")" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/utils.py#L91-L112
ucbvislab/radiotool
radiotool/utils.py
equal_power
def equal_power(arr1, arr2): """ Create an equal power blend of arr1 (fading out) and arr2 (fading in) """ n = N.shape(arr1)[0] try: channels = N.shape(arr1)[1] except: channels = 1 f_in = N.arange(n) / float(n - 1) f_out = N.arange(n - 1, -1, -1) / float(n) if channels > 1: f_in = N.tile(f_in, (channels, 1)).T f_out = N.tile(f_out, (channels, 1)).T vals = log_factor(f_out) * arr1 + log_factor(f_in) * arr2 return limiter(vals)
python
def equal_power(arr1, arr2): """ Create an equal power blend of arr1 (fading out) and arr2 (fading in) """ n = N.shape(arr1)[0] try: channels = N.shape(arr1)[1] except: channels = 1 f_in = N.arange(n) / float(n - 1) f_out = N.arange(n - 1, -1, -1) / float(n) if channels > 1: f_in = N.tile(f_in, (channels, 1)).T f_out = N.tile(f_out, (channels, 1)).T vals = log_factor(f_out) * arr1 + log_factor(f_in) * arr2 return limiter(vals)
[ "def", "equal_power", "(", "arr1", ",", "arr2", ")", ":", "n", "=", "N", ".", "shape", "(", "arr1", ")", "[", "0", "]", "try", ":", "channels", "=", "N", ".", "shape", "(", "arr1", ")", "[", "1", "]", "except", ":", "channels", "=", "1", "f_i...
Create an equal power blend of arr1 (fading out) and arr2 (fading in)
[ "Create", "an", "equal", "power", "blend", "of", "arr1", "(", "fading", "out", ")", "and", "arr2", "(", "fading", "in", ")" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/utils.py#L115-L134
ucbvislab/radiotool
radiotool/utils.py
segment_array
def segment_array(arr, length, overlap=.5): """ Segment array into chunks of a specified length, with a specified proportion overlap. Operates on axis 0. :param integer length: Length of each segment :param float overlap: Proportion overlap of each frame """ arr = N.array(arr) offset = float(overlap) * length total_segments = int((N.shape(arr)[0] - length) / offset) + 1 # print "total segments", total_segments other_shape = N.shape(arr)[1:] out_shape = [total_segments, length] out_shape.extend(other_shape) out = N.empty(out_shape) for i in xrange(total_segments): out[i][:] = arr[i * offset:i * offset + length] return out
python
def segment_array(arr, length, overlap=.5): """ Segment array into chunks of a specified length, with a specified proportion overlap. Operates on axis 0. :param integer length: Length of each segment :param float overlap: Proportion overlap of each frame """ arr = N.array(arr) offset = float(overlap) * length total_segments = int((N.shape(arr)[0] - length) / offset) + 1 # print "total segments", total_segments other_shape = N.shape(arr)[1:] out_shape = [total_segments, length] out_shape.extend(other_shape) out = N.empty(out_shape) for i in xrange(total_segments): out[i][:] = arr[i * offset:i * offset + length] return out
[ "def", "segment_array", "(", "arr", ",", "length", ",", "overlap", "=", ".5", ")", ":", "arr", "=", "N", ".", "array", "(", "arr", ")", "offset", "=", "float", "(", "overlap", ")", "*", "length", "total_segments", "=", "int", "(", "(", "N", ".", ...
Segment array into chunks of a specified length, with a specified proportion overlap. Operates on axis 0. :param integer length: Length of each segment :param float overlap: Proportion overlap of each frame
[ "Segment", "array", "into", "chunks", "of", "a", "specified", "length", "with", "a", "specified", "proportion", "overlap", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/utils.py#L137-L163
ihgazni2/edict
edict/edict.py
_reorder_via_klist
def _reorder_via_klist(d,nkl,**kwargs): ''' d = {'scheme': 'http', 'path': '/index.php', 'params': 'params', 'query': 'username=query', 'fragment': 'frag', 'username': '', 'password': '', 'hostname': 'www.baidu.com', 'port': ''} pobj(d) nkl = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment'] pobj(_reorder_via_klist(d,nkl)) ''' if('deepcopy' in kwargs): deepcopy = kwargs['deepcopy'] else: deepcopy = True if(deepcopy): d = copy.deepcopy(d) else: pass nd = {} lngth = nkl.__len__() for i in range(0,lngth): k = nkl[i] nd[k] = d[k] return(nd)
python
def _reorder_via_klist(d,nkl,**kwargs): ''' d = {'scheme': 'http', 'path': '/index.php', 'params': 'params', 'query': 'username=query', 'fragment': 'frag', 'username': '', 'password': '', 'hostname': 'www.baidu.com', 'port': ''} pobj(d) nkl = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment'] pobj(_reorder_via_klist(d,nkl)) ''' if('deepcopy' in kwargs): deepcopy = kwargs['deepcopy'] else: deepcopy = True if(deepcopy): d = copy.deepcopy(d) else: pass nd = {} lngth = nkl.__len__() for i in range(0,lngth): k = nkl[i] nd[k] = d[k] return(nd)
[ "def", "_reorder_via_klist", "(", "d", ",", "nkl", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'deepcopy'", "in", "kwargs", ")", ":", "deepcopy", "=", "kwargs", "[", "'deepcopy'", "]", "else", ":", "deepcopy", "=", "True", "if", "(", "deepcopy", "...
d = {'scheme': 'http', 'path': '/index.php', 'params': 'params', 'query': 'username=query', 'fragment': 'frag', 'username': '', 'password': '', 'hostname': 'www.baidu.com', 'port': ''} pobj(d) nkl = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment'] pobj(_reorder_via_klist(d,nkl))
[ "d", "=", "{", "scheme", ":", "http", "path", ":", "/", "index", ".", "php", "params", ":", "params", "query", ":", "username", "=", "query", "fragment", ":", "frag", "username", ":", "password", ":", "hostname", ":", "www", ".", "baidu", ".", "com",...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L109-L129