repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
brunobord/md2ebook
md2ebook/commander.py
Commander.check
def check(self): "Checks EPUB integrity" config = self.load_config() if not check_dependency_epubcheck(): sys.exit(error('Unavailable command.')) epub_file = u"%s.epub" % config['fileroot'] epub_path = join(CWD, 'build', epub_file) print success("Starting to check %s..." % epub_file) epubcheck = u'epubcheck %s' % epub_path epubcheck = shell(epubcheck.encode()) for line in epubcheck.errors(): print error(line) for line in epubcheck.output(): print line
python
def check(self): "Checks EPUB integrity" config = self.load_config() if not check_dependency_epubcheck(): sys.exit(error('Unavailable command.')) epub_file = u"%s.epub" % config['fileroot'] epub_path = join(CWD, 'build', epub_file) print success("Starting to check %s..." % epub_file) epubcheck = u'epubcheck %s' % epub_path epubcheck = shell(epubcheck.encode()) for line in epubcheck.errors(): print error(line) for line in epubcheck.output(): print line
[ "def", "check", "(", "self", ")", ":", "config", "=", "self", ".", "load_config", "(", ")", "if", "not", "check_dependency_epubcheck", "(", ")", ":", "sys", ".", "exit", "(", "error", "(", "'Unavailable command.'", ")", ")", "epub_file", "=", "u\"%s.epub\"...
Checks EPUB integrity
[ "Checks", "EPUB", "integrity" ]
train
https://github.com/brunobord/md2ebook/blob/31e0d06b77f2d986e6af1115c9e613dfec0591a9/md2ebook/commander.py#L144-L157
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresDatabase.create
def create(self, sql=None): """CREATE this DATABASE. @param sql: (Optional) A string of psql (such as might be generated by pg_dump); it will be executed by psql(1) after creating the database. @type sql: str @rtype: None """ create_sql = 'CREATE DATABASE {self.db_name} WITH OWNER {self.user}' create_sql = create_sql.format(**vars()) self.super_psql(['-c', create_sql]) if sql: self.psql_string(sql)
python
def create(self, sql=None): """CREATE this DATABASE. @param sql: (Optional) A string of psql (such as might be generated by pg_dump); it will be executed by psql(1) after creating the database. @type sql: str @rtype: None """ create_sql = 'CREATE DATABASE {self.db_name} WITH OWNER {self.user}' create_sql = create_sql.format(**vars()) self.super_psql(['-c', create_sql]) if sql: self.psql_string(sql)
[ "def", "create", "(", "self", ",", "sql", "=", "None", ")", ":", "create_sql", "=", "'CREATE DATABASE {self.db_name} WITH OWNER {self.user}'", "create_sql", "=", "create_sql", ".", "format", "(", "*", "*", "vars", "(", ")", ")", "self", ".", "super_psql", "(",...
CREATE this DATABASE. @param sql: (Optional) A string of psql (such as might be generated by pg_dump); it will be executed by psql(1) after creating the database. @type sql: str @rtype: None
[ "CREATE", "this", "DATABASE", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L113-L127
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresDatabase.psql_string
def psql_string(self, sql): """ Evaluate the sql file (possibly multiple statements) using psql. """ argv = [ PostgresFinder.find_root() / 'psql', '--quiet', '-U', self.user, '-h', self.host, '-p', self.port, '-f', '-', self.db_name, ] popen = subprocess.Popen(argv, stdin=subprocess.PIPE) popen.communicate(input=sql.encode('utf-8')) if popen.returncode != 0: raise subprocess.CalledProcessError(popen.returncode, argv)
python
def psql_string(self, sql): """ Evaluate the sql file (possibly multiple statements) using psql. """ argv = [ PostgresFinder.find_root() / 'psql', '--quiet', '-U', self.user, '-h', self.host, '-p', self.port, '-f', '-', self.db_name, ] popen = subprocess.Popen(argv, stdin=subprocess.PIPE) popen.communicate(input=sql.encode('utf-8')) if popen.returncode != 0: raise subprocess.CalledProcessError(popen.returncode, argv)
[ "def", "psql_string", "(", "self", ",", "sql", ")", ":", "argv", "=", "[", "PostgresFinder", ".", "find_root", "(", ")", "/", "'psql'", ",", "'--quiet'", ",", "'-U'", ",", "self", ".", "user", ",", "'-h'", ",", "self", ".", "host", ",", "'-p'", ","...
Evaluate the sql file (possibly multiple statements) using psql.
[ "Evaluate", "the", "sql", "file", "(", "possibly", "multiple", "statements", ")", "using", "psql", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L129-L145
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresDatabase.psql
def psql(self, args): r"""Invoke psql, passing the given command-line arguments. Typical <args> values: ['-c', <sql_string>] or ['-f', <pathname>]. Connection parameters are taken from self. STDIN, STDOUT, and STDERR are inherited from the parent. WARNING: This method uses the psql(1) program, which ignores SQL errors by default. That hides many real errors, making our software less reliable. To overcome this flaw, add this line to the head of your SQL: "\set ON_ERROR_STOP TRUE" @return: None. Raises an exception upon error, but *ignores SQL errors* unless "\set ON_ERROR_STOP TRUE" is used. """ argv = [ PostgresFinder.find_root() / 'psql', '--quiet', '-U', self.user, '-h', self.host, '-p', self.port, ] + args + [self.db_name] subprocess.check_call(argv)
python
def psql(self, args): r"""Invoke psql, passing the given command-line arguments. Typical <args> values: ['-c', <sql_string>] or ['-f', <pathname>]. Connection parameters are taken from self. STDIN, STDOUT, and STDERR are inherited from the parent. WARNING: This method uses the psql(1) program, which ignores SQL errors by default. That hides many real errors, making our software less reliable. To overcome this flaw, add this line to the head of your SQL: "\set ON_ERROR_STOP TRUE" @return: None. Raises an exception upon error, but *ignores SQL errors* unless "\set ON_ERROR_STOP TRUE" is used. """ argv = [ PostgresFinder.find_root() / 'psql', '--quiet', '-U', self.user, '-h', self.host, '-p', self.port, ] + args + [self.db_name] subprocess.check_call(argv)
[ "def", "psql", "(", "self", ",", "args", ")", ":", "argv", "=", "[", "PostgresFinder", ".", "find_root", "(", ")", "/", "'psql'", ",", "'--quiet'", ",", "'-U'", ",", "self", ".", "user", ",", "'-h'", ",", "self", ".", "host", ",", "'-p'", ",", "s...
r"""Invoke psql, passing the given command-line arguments. Typical <args> values: ['-c', <sql_string>] or ['-f', <pathname>]. Connection parameters are taken from self. STDIN, STDOUT, and STDERR are inherited from the parent. WARNING: This method uses the psql(1) program, which ignores SQL errors by default. That hides many real errors, making our software less reliable. To overcome this flaw, add this line to the head of your SQL: "\set ON_ERROR_STOP TRUE" @return: None. Raises an exception upon error, but *ignores SQL errors* unless "\set ON_ERROR_STOP TRUE" is used.
[ "r", "Invoke", "psql", "passing", "the", "given", "command", "-", "line", "arguments", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L186-L211
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresDatabase.sql
def sql(self, input_string, *args): """Execute a SQL command using the Python DBI directly. Connection parameters are taken from self. Autocommit is in effect. Example: .sql('SELECT %s FROM %s WHERE age > %s', 'name', 'table1', '45') @param input_string: A string of SQL. May contain %s or %(name)s format specifiers; they are replaced with corresponding values taken from args. @param args: zero or more parameters to interpolate into the string. Note that they're passed individually, not as a single tuple. @return: Whatever .fetchall() returns. """ """ # I advise against using sqlalchemy here (it's more complicated than # what we need), but here's an implementation Just In Case. -jps import psycopg2, sqlalchemy engine = sqlalchemy.create_engine( 'postgres://%s@%s:%s/%s' % (self.user, self.host, self.port, self.db_name), echo=False, poolclass=sqlalchemy.pool.NullPool) connection = engine.connect() result = connection.execute(input_string, *args) try: # sqlalchemy 0.6.7 offers a result.returns_rows attribute, but # no prior version offers anything comparable. A tacky # workaround... try: return result.fetchall() except psycopg2.ProgrammingError: return None finally: result.close() connection.close() """ psycopg2 = importlib.import_module('psycopg2') importlib.import_module('psycopg2.extensions') connection = psycopg2.connect( user=self.user, host=self.host, port=self.port, database=self.db_name) connection.set_isolation_level( psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) try: cursor = connection.cursor() cursor.execute(input_string, args) # No way to ask whether any rows were returned, so just try it... try: return cursor.fetchall() except psycopg2.ProgrammingError: return None finally: connection.close()
python
def sql(self, input_string, *args): """Execute a SQL command using the Python DBI directly. Connection parameters are taken from self. Autocommit is in effect. Example: .sql('SELECT %s FROM %s WHERE age > %s', 'name', 'table1', '45') @param input_string: A string of SQL. May contain %s or %(name)s format specifiers; they are replaced with corresponding values taken from args. @param args: zero or more parameters to interpolate into the string. Note that they're passed individually, not as a single tuple. @return: Whatever .fetchall() returns. """ """ # I advise against using sqlalchemy here (it's more complicated than # what we need), but here's an implementation Just In Case. -jps import psycopg2, sqlalchemy engine = sqlalchemy.create_engine( 'postgres://%s@%s:%s/%s' % (self.user, self.host, self.port, self.db_name), echo=False, poolclass=sqlalchemy.pool.NullPool) connection = engine.connect() result = connection.execute(input_string, *args) try: # sqlalchemy 0.6.7 offers a result.returns_rows attribute, but # no prior version offers anything comparable. A tacky # workaround... try: return result.fetchall() except psycopg2.ProgrammingError: return None finally: result.close() connection.close() """ psycopg2 = importlib.import_module('psycopg2') importlib.import_module('psycopg2.extensions') connection = psycopg2.connect( user=self.user, host=self.host, port=self.port, database=self.db_name) connection.set_isolation_level( psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) try: cursor = connection.cursor() cursor.execute(input_string, args) # No way to ask whether any rows were returned, so just try it... try: return cursor.fetchall() except psycopg2.ProgrammingError: return None finally: connection.close()
[ "def", "sql", "(", "self", ",", "input_string", ",", "*", "args", ")", ":", "\"\"\"\n # I advise against using sqlalchemy here (it's more complicated than\n # what we need), but here's an implementation Just In Case. -jps\n import psycopg2, sqlalchemy\n engine = sq...
Execute a SQL command using the Python DBI directly. Connection parameters are taken from self. Autocommit is in effect. Example: .sql('SELECT %s FROM %s WHERE age > %s', 'name', 'table1', '45') @param input_string: A string of SQL. May contain %s or %(name)s format specifiers; they are replaced with corresponding values taken from args. @param args: zero or more parameters to interpolate into the string. Note that they're passed individually, not as a single tuple. @return: Whatever .fetchall() returns.
[ "Execute", "a", "SQL", "command", "using", "the", "Python", "DBI", "directly", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L213-L268
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresDatabase.super_psql
def super_psql(self, args): """Just like .psql(), except that we connect as the database superuser (and we connect to the superuser's database, not the user's database). """ argv = [ PostgresFinder.find_root() / 'psql', '--quiet', '-U', self.superuser, '-h', self.host, '-p', self.port, ] + args subprocess.check_call(argv)
python
def super_psql(self, args): """Just like .psql(), except that we connect as the database superuser (and we connect to the superuser's database, not the user's database). """ argv = [ PostgresFinder.find_root() / 'psql', '--quiet', '-U', self.superuser, '-h', self.host, '-p', self.port, ] + args subprocess.check_call(argv)
[ "def", "super_psql", "(", "self", ",", "args", ")", ":", "argv", "=", "[", "PostgresFinder", ".", "find_root", "(", ")", "/", "'psql'", ",", "'--quiet'", ",", "'-U'", ",", "self", ".", "superuser", ",", "'-h'", ",", "self", ".", "host", ",", "'-p'", ...
Just like .psql(), except that we connect as the database superuser (and we connect to the superuser's database, not the user's database).
[ "Just", "like", ".", "psql", "()", "except", "that", "we", "connect", "as", "the", "database", "superuser", "(", "and", "we", "connect", "to", "the", "superuser", "s", "database", "not", "the", "user", "s", "database", ")", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L270-L281
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer.destroy
def destroy(self): """Undo the effects of initdb. Destroy all evidence of this DBMS, including its backing files. """ self.stop() if self.base_pathname is not None: self._robust_remove(self.base_pathname)
python
def destroy(self): """Undo the effects of initdb. Destroy all evidence of this DBMS, including its backing files. """ self.stop() if self.base_pathname is not None: self._robust_remove(self.base_pathname)
[ "def", "destroy", "(", "self", ")", ":", "self", ".", "stop", "(", ")", "if", "self", ".", "base_pathname", "is", "not", "None", ":", "self", ".", "_robust_remove", "(", "self", ".", "base_pathname", ")" ]
Undo the effects of initdb. Destroy all evidence of this DBMS, including its backing files.
[ "Undo", "the", "effects", "of", "initdb", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L326-L333
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer._robust_remove
def _robust_remove(path): """ Remove the directory specified by `path`. Because we can't determine directly if the path is in use, and on Windows, it's not possible to remove a path if it is in use, retry a few times until the call succeeds. """ tries = itertools.count() max_tries = 50 while os.path.isdir(path): try: shutil.rmtree(path) except WindowsError: if next(tries) >= max_tries: raise time.sleep(0.2)
python
def _robust_remove(path): """ Remove the directory specified by `path`. Because we can't determine directly if the path is in use, and on Windows, it's not possible to remove a path if it is in use, retry a few times until the call succeeds. """ tries = itertools.count() max_tries = 50 while os.path.isdir(path): try: shutil.rmtree(path) except WindowsError: if next(tries) >= max_tries: raise time.sleep(0.2)
[ "def", "_robust_remove", "(", "path", ")", ":", "tries", "=", "itertools", ".", "count", "(", ")", "max_tries", "=", "50", "while", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "try", ":", "shutil", ".", "rmtree", "(", "path", ")", "exce...
Remove the directory specified by `path`. Because we can't determine directly if the path is in use, and on Windows, it's not possible to remove a path if it is in use, retry a few times until the call succeeds.
[ "Remove", "the", "directory", "specified", "by", "path", ".", "Because", "we", "can", "t", "determine", "directly", "if", "the", "path", "is", "in", "use", "and", "on", "Windows", "it", "s", "not", "possible", "to", "remove", "a", "path", "if", "it", "...
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L336-L351
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer.initdb
def initdb(self, quiet=True, locale='en_US.UTF-8'): """Bootstrap this DBMS from nothing. If you're running in an environment where the DBMS is provided as part of the basic infrastructure, you probably don't want to call this method! @param quiet: Should we operate quietly, emitting nothing if things go well? """ # Defining base_pathname is deferred until this point because we don't # want to create a temp directory unless it's needed. And now it is! if self.base_pathname in [None, '']: self.base_pathname = tempfile.mkdtemp() if not os.path.isdir(self.base_pathname): os.mkdir(self.base_pathname) stdout = DEV_NULL if quiet else None # The database superuser needs no password at this point(!). arguments = [ '--auth=trust', '--username', self.superuser, ] if locale is not None: arguments.extend(('--locale', locale)) cmd = [ PostgresFinder.find_root() / 'initdb', ] + arguments + ['--pgdata', self.base_pathname] log.info('Initializing PostgreSQL with command: {}'.format( ' '.join(cmd) )) subprocess.check_call(cmd, stdout=stdout)
python
def initdb(self, quiet=True, locale='en_US.UTF-8'): """Bootstrap this DBMS from nothing. If you're running in an environment where the DBMS is provided as part of the basic infrastructure, you probably don't want to call this method! @param quiet: Should we operate quietly, emitting nothing if things go well? """ # Defining base_pathname is deferred until this point because we don't # want to create a temp directory unless it's needed. And now it is! if self.base_pathname in [None, '']: self.base_pathname = tempfile.mkdtemp() if not os.path.isdir(self.base_pathname): os.mkdir(self.base_pathname) stdout = DEV_NULL if quiet else None # The database superuser needs no password at this point(!). arguments = [ '--auth=trust', '--username', self.superuser, ] if locale is not None: arguments.extend(('--locale', locale)) cmd = [ PostgresFinder.find_root() / 'initdb', ] + arguments + ['--pgdata', self.base_pathname] log.info('Initializing PostgreSQL with command: {}'.format( ' '.join(cmd) )) subprocess.check_call(cmd, stdout=stdout)
[ "def", "initdb", "(", "self", ",", "quiet", "=", "True", ",", "locale", "=", "'en_US.UTF-8'", ")", ":", "# Defining base_pathname is deferred until this point because we don't", "# want to create a temp directory unless it's needed. And now it is!", "if", "self", ".", "base_pa...
Bootstrap this DBMS from nothing. If you're running in an environment where the DBMS is provided as part of the basic infrastructure, you probably don't want to call this method! @param quiet: Should we operate quietly, emitting nothing if things go well?
[ "Bootstrap", "this", "DBMS", "from", "nothing", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L353-L383
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer._is_running
def _is_running(self, tries=10): """ Return if the server is running according to pg_ctl. """ # We can't possibly be running if our base_pathname isn't defined. if not self.base_pathname: return False if tries < 1: raise ValueError('tries must be > 0') cmd = [ PostgresFinder.find_root() / 'pg_ctl', 'status', '-D', self.base_pathname, ] votes = 0 while abs(votes) < tries: time.sleep(0.1) running = (subprocess.call(cmd, stdout=DEV_NULL) == 0) if running and votes >= 0: votes += 1 elif not running and votes <= 0: votes -= 1 else: votes = 0 return votes > 0
python
def _is_running(self, tries=10): """ Return if the server is running according to pg_ctl. """ # We can't possibly be running if our base_pathname isn't defined. if not self.base_pathname: return False if tries < 1: raise ValueError('tries must be > 0') cmd = [ PostgresFinder.find_root() / 'pg_ctl', 'status', '-D', self.base_pathname, ] votes = 0 while abs(votes) < tries: time.sleep(0.1) running = (subprocess.call(cmd, stdout=DEV_NULL) == 0) if running and votes >= 0: votes += 1 elif not running and votes <= 0: votes -= 1 else: votes = 0 return votes > 0
[ "def", "_is_running", "(", "self", ",", "tries", "=", "10", ")", ":", "# We can't possibly be running if our base_pathname isn't defined.", "if", "not", "self", ".", "base_pathname", ":", "return", "False", "if", "tries", "<", "1", ":", "raise", "ValueError", "(",...
Return if the server is running according to pg_ctl.
[ "Return", "if", "the", "server", "is", "running", "according", "to", "pg_ctl", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L422-L450
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer.ready
def ready(self): """ Assumes postgres now talks to pg_ctl, but might not yet be listening or connections from psql. Test that psql is able to connect, as it occasionally takes 5-10 seconds for postgresql to start listening. """ cmd = self._psql_cmd() for i in range(50, -1, -1): res = subprocess.call( cmd, stdin=DEV_NULL, stdout=DEV_NULL, stderr=DEV_NULL) if res == 0: break time.sleep(0.2) return i != 0
python
def ready(self): """ Assumes postgres now talks to pg_ctl, but might not yet be listening or connections from psql. Test that psql is able to connect, as it occasionally takes 5-10 seconds for postgresql to start listening. """ cmd = self._psql_cmd() for i in range(50, -1, -1): res = subprocess.call( cmd, stdin=DEV_NULL, stdout=DEV_NULL, stderr=DEV_NULL) if res == 0: break time.sleep(0.2) return i != 0
[ "def", "ready", "(", "self", ")", ":", "cmd", "=", "self", ".", "_psql_cmd", "(", ")", "for", "i", "in", "range", "(", "50", ",", "-", "1", ",", "-", "1", ")", ":", "res", "=", "subprocess", ".", "call", "(", "cmd", ",", "stdin", "=", "DEV_NU...
Assumes postgres now talks to pg_ctl, but might not yet be listening or connections from psql. Test that psql is able to connect, as it occasionally takes 5-10 seconds for postgresql to start listening.
[ "Assumes", "postgres", "now", "talks", "to", "pg_ctl", "but", "might", "not", "yet", "be", "listening", "or", "connections", "from", "psql", ".", "Test", "that", "psql", "is", "able", "to", "connect", "as", "it", "occasionally", "takes", "5", "-", "10", ...
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L467-L481
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer.pid
def pid(self): """The server's PID (None if not running). """ # We can't possibly be running if our base_pathname isn't defined. if not self.base_pathname: return None try: pidfile = os.path.join(self.base_pathname, 'postmaster.pid') return int(open(pidfile).readline()) except (IOError, OSError): return None
python
def pid(self): """The server's PID (None if not running). """ # We can't possibly be running if our base_pathname isn't defined. if not self.base_pathname: return None try: pidfile = os.path.join(self.base_pathname, 'postmaster.pid') return int(open(pidfile).readline()) except (IOError, OSError): return None
[ "def", "pid", "(", "self", ")", ":", "# We can't possibly be running if our base_pathname isn't defined.", "if", "not", "self", ".", "base_pathname", ":", "return", "None", "try", ":", "pidfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "base_pathn...
The server's PID (None if not running).
[ "The", "server", "s", "PID", "(", "None", "if", "not", "running", ")", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L484-L494
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer.get_version
def get_version(): """Returns the Postgres version in tuple form, e.g: (9, 1)""" cmd = [PostgresFinder.find_root() / 'pg_ctl', '--version'] results = subprocess.check_output(cmd).decode('utf-8') match = re.search(r'(\d+\.\d+(\.\d+)?)', results) if match: ver_string = match.group(0) return tuple(int(x) for x in ver_string.split('.'))
python
def get_version(): """Returns the Postgres version in tuple form, e.g: (9, 1)""" cmd = [PostgresFinder.find_root() / 'pg_ctl', '--version'] results = subprocess.check_output(cmd).decode('utf-8') match = re.search(r'(\d+\.\d+(\.\d+)?)', results) if match: ver_string = match.group(0) return tuple(int(x) for x in ver_string.split('.'))
[ "def", "get_version", "(", ")", ":", "cmd", "=", "[", "PostgresFinder", ".", "find_root", "(", ")", "/", "'pg_ctl'", ",", "'--version'", "]", "results", "=", "subprocess", ".", "check_output", "(", "cmd", ")", ".", "decode", "(", "'utf-8'", ")", "match",...
Returns the Postgres version in tuple form, e.g: (9, 1)
[ "Returns", "the", "Postgres", "version", "in", "tuple", "form", "e", ".", "g", ":", "(", "9", "1", ")" ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L497-L504
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer.start
def start(self): """Launch this postgres server. If it's already running, do nothing. If the backing storage directory isn't configured, raise NotInitializedError. This method is optional. If you're running in an environment where the DBMS is provided as part of the basic infrastructure, you probably want to skip this step! """ log.info('Starting PostgreSQL at %s:%s', self.host, self.port) if not self.base_pathname: tmpl = ('Invalid base_pathname: %r. Did you forget to call ' '.initdb()?') raise NotInitializedError(tmpl % self.base_pathname) conf_file = os.path.join(self.base_pathname, 'postgresql.conf') if not os.path.exists(conf_file): tmpl = 'No config file at: %r. Did you forget to call .initdb()?' raise NotInitializedError(tmpl % self.base_pathname) if not self.is_running(): version = self.get_version() if version and version >= (9, 3): socketop = 'unix_socket_directories' else: socketop = 'unix_socket_directory' postgres_options = [ # When running not as root, postgres might try to put files # where they're not writable (see # https://paste.yougov.net/YKdgi). So set the socket_dir. '-c', '{}={}'.format(socketop, self.base_pathname), '-h', self.host, '-i', # enable TCP/IP connections '-p', self.port, ] subprocess.check_call([ PostgresFinder.find_root() / 'pg_ctl', 'start', '-D', self.base_pathname, '-l', os.path.join(self.base_pathname, 'postgresql.log'), '-o', subprocess.list2cmdline(postgres_options), ]) # Postgres may launch, then abort if it's unhappy with some parameter. # This post-launch test helps us decide. if not self.is_running(): tmpl = ('%s aborted immediately after launch, check ' 'postgresql.log in storage dir') raise RuntimeError(tmpl % self)
python
def start(self): """Launch this postgres server. If it's already running, do nothing. If the backing storage directory isn't configured, raise NotInitializedError. This method is optional. If you're running in an environment where the DBMS is provided as part of the basic infrastructure, you probably want to skip this step! """ log.info('Starting PostgreSQL at %s:%s', self.host, self.port) if not self.base_pathname: tmpl = ('Invalid base_pathname: %r. Did you forget to call ' '.initdb()?') raise NotInitializedError(tmpl % self.base_pathname) conf_file = os.path.join(self.base_pathname, 'postgresql.conf') if not os.path.exists(conf_file): tmpl = 'No config file at: %r. Did you forget to call .initdb()?' raise NotInitializedError(tmpl % self.base_pathname) if not self.is_running(): version = self.get_version() if version and version >= (9, 3): socketop = 'unix_socket_directories' else: socketop = 'unix_socket_directory' postgres_options = [ # When running not as root, postgres might try to put files # where they're not writable (see # https://paste.yougov.net/YKdgi). So set the socket_dir. '-c', '{}={}'.format(socketop, self.base_pathname), '-h', self.host, '-i', # enable TCP/IP connections '-p', self.port, ] subprocess.check_call([ PostgresFinder.find_root() / 'pg_ctl', 'start', '-D', self.base_pathname, '-l', os.path.join(self.base_pathname, 'postgresql.log'), '-o', subprocess.list2cmdline(postgres_options), ]) # Postgres may launch, then abort if it's unhappy with some parameter. # This post-launch test helps us decide. if not self.is_running(): tmpl = ('%s aborted immediately after launch, check ' 'postgresql.log in storage dir') raise RuntimeError(tmpl % self)
[ "def", "start", "(", "self", ")", ":", "log", ".", "info", "(", "'Starting PostgreSQL at %s:%s'", ",", "self", ".", "host", ",", "self", ".", "port", ")", "if", "not", "self", ".", "base_pathname", ":", "tmpl", "=", "(", "'Invalid base_pathname: %r. Did you...
Launch this postgres server. If it's already running, do nothing. If the backing storage directory isn't configured, raise NotInitializedError. This method is optional. If you're running in an environment where the DBMS is provided as part of the basic infrastructure, you probably want to skip this step!
[ "Launch", "this", "postgres", "server", ".", "If", "it", "s", "already", "running", "do", "nothing", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L506-L555
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer.stop
def stop(self): """Stop this DMBS daemon. If it's not currently running, do nothing. Don't return until it's terminated. """ log.info('Stopping PostgreSQL at %s:%s', self.host, self.port) if self._is_running(): cmd = [ PostgresFinder.find_root() / 'pg_ctl', 'stop', '-D', self.base_pathname, '-m', 'fast', ] subprocess.check_call(cmd) # pg_ctl isn't reliable if it's called at certain critical times if self.pid: os.kill(self.pid, signal.SIGTERM) # Can't use wait() because the server might not be our child while self._is_running(): time.sleep(0.1)
python
def stop(self): """Stop this DMBS daemon. If it's not currently running, do nothing. Don't return until it's terminated. """ log.info('Stopping PostgreSQL at %s:%s', self.host, self.port) if self._is_running(): cmd = [ PostgresFinder.find_root() / 'pg_ctl', 'stop', '-D', self.base_pathname, '-m', 'fast', ] subprocess.check_call(cmd) # pg_ctl isn't reliable if it's called at certain critical times if self.pid: os.kill(self.pid, signal.SIGTERM) # Can't use wait() because the server might not be our child while self._is_running(): time.sleep(0.1)
[ "def", "stop", "(", "self", ")", ":", "log", ".", "info", "(", "'Stopping PostgreSQL at %s:%s'", ",", "self", ".", "host", ",", "self", ".", "port", ")", "if", "self", ".", "_is_running", "(", ")", ":", "cmd", "=", "[", "PostgresFinder", ".", "find_roo...
Stop this DMBS daemon. If it's not currently running, do nothing. Don't return until it's terminated.
[ "Stop", "this", "DMBS", "daemon", ".", "If", "it", "s", "not", "currently", "running", "do", "nothing", "." ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L557-L576
jaraco/jaraco.postgres
jaraco/postgres/__init__.py
PostgresServer.create
def create(self, db_name, **kwargs): """ Construct a PostgresDatabase and create it on self """ db = PostgresDatabase( db_name, host=self.host, port=self.port, superuser=self.superuser, **kwargs) db.ensure_user() db.create() return db
python
def create(self, db_name, **kwargs): """ Construct a PostgresDatabase and create it on self """ db = PostgresDatabase( db_name, host=self.host, port=self.port, superuser=self.superuser, **kwargs) db.ensure_user() db.create() return db
[ "def", "create", "(", "self", ",", "db_name", ",", "*", "*", "kwargs", ")", ":", "db", "=", "PostgresDatabase", "(", "db_name", ",", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ",", "superuser", "=", "self", ".", "super...
Construct a PostgresDatabase and create it on self
[ "Construct", "a", "PostgresDatabase", "and", "create", "it", "on", "self" ]
train
https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L578-L587
ClimateImpactLab/DataFS
datafs/managers/manager_dynamo.py
DynamoDBManager._search
def _search(self, search_terms, begins_with=None): """ Returns a list of Archive id's in the table on Dynamo """ kwargs = dict( ProjectionExpression='#id', ExpressionAttributeNames={"#id": "_id"}) if len(search_terms) > 0: kwargs['FilterExpression'] = reduce( lambda x, y: x & y, [Attr('tags').contains(arg) for arg in search_terms]) if begins_with: if 'FilterExpression' in kwargs: kwargs['FilterExpression'] = kwargs[ 'FilterExpression'] & Key('_id').begins_with(begins_with) else: kwargs['FilterExpression'] = Key( '_id').begins_with(begins_with) while True: res = self._table.scan(**kwargs) for r in res['Items']: yield r['_id'] if 'LastEvaluatedKey' in res: kwargs['ExclusiveStartKey'] = res['LastEvaluatedKey'] else: break
python
def _search(self, search_terms, begins_with=None): """ Returns a list of Archive id's in the table on Dynamo """ kwargs = dict( ProjectionExpression='#id', ExpressionAttributeNames={"#id": "_id"}) if len(search_terms) > 0: kwargs['FilterExpression'] = reduce( lambda x, y: x & y, [Attr('tags').contains(arg) for arg in search_terms]) if begins_with: if 'FilterExpression' in kwargs: kwargs['FilterExpression'] = kwargs[ 'FilterExpression'] & Key('_id').begins_with(begins_with) else: kwargs['FilterExpression'] = Key( '_id').begins_with(begins_with) while True: res = self._table.scan(**kwargs) for r in res['Items']: yield r['_id'] if 'LastEvaluatedKey' in res: kwargs['ExclusiveStartKey'] = res['LastEvaluatedKey'] else: break
[ "def", "_search", "(", "self", ",", "search_terms", ",", "begins_with", "=", "None", ")", ":", "kwargs", "=", "dict", "(", "ProjectionExpression", "=", "'#id'", ",", "ExpressionAttributeNames", "=", "{", "\"#id\"", ":", "\"_id\"", "}", ")", "if", "len", "(...
Returns a list of Archive id's in the table on Dynamo
[ "Returns", "a", "list", "of", "Archive", "id", "s", "in", "the", "table", "on", "Dynamo" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager_dynamo.py#L54-L85
ClimateImpactLab/DataFS
datafs/managers/manager_dynamo.py
DynamoDBManager._update
def _update(self, archive_name, version_metadata): ''' Updates the version specific metadata attribute in DynamoDB In DynamoDB this is simply a list append on this attribute value Parameters ---------- archive_name: str unique '_id' primary key version_metadata: dict dictionary of version metadata values Returns ------- dict list of dictionaries of version_history ''' command = "SET version_history = list_append(version_history, :v)" self._table.update_item( Key={'_id': archive_name}, UpdateExpression=command, ExpressionAttributeValues={':v': [version_metadata]}, ReturnValues='ALL_NEW')
python
def _update(self, archive_name, version_metadata): ''' Updates the version specific metadata attribute in DynamoDB In DynamoDB this is simply a list append on this attribute value Parameters ---------- archive_name: str unique '_id' primary key version_metadata: dict dictionary of version metadata values Returns ------- dict list of dictionaries of version_history ''' command = "SET version_history = list_append(version_history, :v)" self._table.update_item( Key={'_id': archive_name}, UpdateExpression=command, ExpressionAttributeValues={':v': [version_metadata]}, ReturnValues='ALL_NEW')
[ "def", "_update", "(", "self", ",", "archive_name", ",", "version_metadata", ")", ":", "command", "=", "\"SET version_history = list_append(version_history, :v)\"", "self", ".", "_table", ".", "update_item", "(", "Key", "=", "{", "'_id'", ":", "archive_name", "}", ...
Updates the version specific metadata attribute in DynamoDB In DynamoDB this is simply a list append on this attribute value Parameters ---------- archive_name: str unique '_id' primary key version_metadata: dict dictionary of version metadata values Returns ------- dict list of dictionaries of version_history
[ "Updates", "the", "version", "specific", "metadata", "attribute", "in", "DynamoDB", "In", "DynamoDB", "this", "is", "simply", "a", "list", "append", "on", "this", "attribute", "value" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager_dynamo.py#L87-L112
ClimateImpactLab/DataFS
datafs/managers/manager_dynamo.py
DynamoDBManager._create_archive_table
def _create_archive_table(self, table_name): ''' Dynamo implementation of BaseDataManager create_archive_table waiter object is implemented to ensure table creation before moving on this will slow down table creation. However, since we are only creating table once this should not impact users. Parameters ---------- table_name: str Returns ------- None ''' if table_name in self._get_table_names(): raise KeyError('Table "{}" already exists'.format(table_name)) try: table = self._resource.create_table( TableName=table_name, KeySchema=[{'AttributeName': '_id', 'KeyType': 'HASH'}], AttributeDefinitions=[ {'AttributeName': '_id', 'AttributeType': 'S'}], ProvisionedThroughput={ 'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}) table.meta.client.get_waiter('table_exists').wait( TableName=table_name) except ValueError: # Error handling for windows incompatibility issue msg = 'Table creation failed' assert table_name in self._get_table_names(), msg
python
def _create_archive_table(self, table_name): ''' Dynamo implementation of BaseDataManager create_archive_table waiter object is implemented to ensure table creation before moving on this will slow down table creation. However, since we are only creating table once this should not impact users. Parameters ---------- table_name: str Returns ------- None ''' if table_name in self._get_table_names(): raise KeyError('Table "{}" already exists'.format(table_name)) try: table = self._resource.create_table( TableName=table_name, KeySchema=[{'AttributeName': '_id', 'KeyType': 'HASH'}], AttributeDefinitions=[ {'AttributeName': '_id', 'AttributeType': 'S'}], ProvisionedThroughput={ 'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}) table.meta.client.get_waiter('table_exists').wait( TableName=table_name) except ValueError: # Error handling for windows incompatibility issue msg = 'Table creation failed' assert table_name in self._get_table_names(), msg
[ "def", "_create_archive_table", "(", "self", ",", "table_name", ")", ":", "if", "table_name", "in", "self", ".", "_get_table_names", "(", ")", ":", "raise", "KeyError", "(", "'Table \"{}\" already exists'", ".", "format", "(", "table_name", ")", ")", "try", ":...
Dynamo implementation of BaseDataManager create_archive_table waiter object is implemented to ensure table creation before moving on this will slow down table creation. However, since we are only creating table once this should not impact users. Parameters ---------- table_name: str Returns ------- None
[ "Dynamo", "implementation", "of", "BaseDataManager", "create_archive_table" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager_dynamo.py#L117-L153
ClimateImpactLab/DataFS
datafs/managers/manager_dynamo.py
DynamoDBManager._create_spec_config
def _create_spec_config(self, table_name, spec_documents): ''' Dynamo implementation of spec config creation Called by `create_archive_table()` in :py:class:`manager.BaseDataManager` Simply adds two rows to the spec table Parameters ---------- table_name : base table name (not including .spec suffix) spec_documents : list list of dictionary documents defining the manager spec ''' _spec_table = self._resource.Table(table_name + '.spec') for doc in spec_documents: _spec_table.put_item(Item=doc)
python
def _create_spec_config(self, table_name, spec_documents): ''' Dynamo implementation of spec config creation Called by `create_archive_table()` in :py:class:`manager.BaseDataManager` Simply adds two rows to the spec table Parameters ---------- table_name : base table name (not including .spec suffix) spec_documents : list list of dictionary documents defining the manager spec ''' _spec_table = self._resource.Table(table_name + '.spec') for doc in spec_documents: _spec_table.put_item(Item=doc)
[ "def", "_create_spec_config", "(", "self", ",", "table_name", ",", "spec_documents", ")", ":", "_spec_table", "=", "self", ".", "_resource", ".", "Table", "(", "table_name", "+", "'.spec'", ")", "for", "doc", "in", "spec_documents", ":", "_spec_table", ".", ...
Dynamo implementation of spec config creation Called by `create_archive_table()` in :py:class:`manager.BaseDataManager` Simply adds two rows to the spec table Parameters ---------- table_name : base table name (not including .spec suffix) spec_documents : list list of dictionary documents defining the manager spec
[ "Dynamo", "implementation", "of", "spec", "config", "creation" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager_dynamo.py#L155-L180
ClimateImpactLab/DataFS
datafs/managers/manager_dynamo.py
DynamoDBManager._update_spec_config
def _update_spec_config(self, document_name, spec): ''' Dynamo implementation of project specific metadata spec ''' # add the updated archive_metadata object to Dynamo self._spec_table.update_item( Key={'_id': '{}'.format(document_name)}, UpdateExpression="SET config = :v", ExpressionAttributeValues={':v': spec}, ReturnValues='ALL_NEW')
python
def _update_spec_config(self, document_name, spec): ''' Dynamo implementation of project specific metadata spec ''' # add the updated archive_metadata object to Dynamo self._spec_table.update_item( Key={'_id': '{}'.format(document_name)}, UpdateExpression="SET config = :v", ExpressionAttributeValues={':v': spec}, ReturnValues='ALL_NEW')
[ "def", "_update_spec_config", "(", "self", ",", "document_name", ",", "spec", ")", ":", "# add the updated archive_metadata object to Dynamo", "self", ".", "_spec_table", ".", "update_item", "(", "Key", "=", "{", "'_id'", ":", "'{}'", ".", "format", "(", "document...
Dynamo implementation of project specific metadata spec
[ "Dynamo", "implementation", "of", "project", "specific", "metadata", "spec" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager_dynamo.py#L182-L192
ClimateImpactLab/DataFS
datafs/managers/manager_dynamo.py
DynamoDBManager._update_metadata
def _update_metadata(self, archive_name, archive_metadata): """ Appends the updated_metadata dict to the Metadata Attribute list Parameters ---------- archive_name: str ID of archive to update updated_metadata: dict dictionary of metadata keys and values to update. If the value for a particular key is `None`, the key is removed. """ archive_metadata_current = self._get_archive_metadata(archive_name) archive_metadata_current.update(archive_metadata) for k, v in archive_metadata_current.items(): if v is None: del archive_metadata_current[k] # add the updated archive_metadata object to Dynamo self._table.update_item( Key={'_id': archive_name}, UpdateExpression="SET archive_metadata = :v", ExpressionAttributeValues={':v': archive_metadata_current}, ReturnValues='ALL_NEW')
python
def _update_metadata(self, archive_name, archive_metadata): """ Appends the updated_metadata dict to the Metadata Attribute list Parameters ---------- archive_name: str ID of archive to update updated_metadata: dict dictionary of metadata keys and values to update. If the value for a particular key is `None`, the key is removed. """ archive_metadata_current = self._get_archive_metadata(archive_name) archive_metadata_current.update(archive_metadata) for k, v in archive_metadata_current.items(): if v is None: del archive_metadata_current[k] # add the updated archive_metadata object to Dynamo self._table.update_item( Key={'_id': archive_name}, UpdateExpression="SET archive_metadata = :v", ExpressionAttributeValues={':v': archive_metadata_current}, ReturnValues='ALL_NEW')
[ "def", "_update_metadata", "(", "self", ",", "archive_name", ",", "archive_metadata", ")", ":", "archive_metadata_current", "=", "self", ".", "_get_archive_metadata", "(", "archive_name", ")", "archive_metadata_current", ".", "update", "(", "archive_metadata", ")", "f...
Appends the updated_metadata dict to the Metadata Attribute list Parameters ---------- archive_name: str ID of archive to update updated_metadata: dict dictionary of metadata keys and values to update. If the value for a particular key is `None`, the key is removed.
[ "Appends", "the", "updated_metadata", "dict", "to", "the", "Metadata", "Attribute", "list" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager_dynamo.py#L204-L232
ClimateImpactLab/DataFS
datafs/managers/manager_dynamo.py
DynamoDBManager._create_archive
def _create_archive( self, archive_name, metadata): ''' This adds an item in a DynamoDB table corresponding to a S3 object Args ---- archive_name: str corresponds to the name of the Archive (e.g. ) Returns ------- Dictionary with confirmation of upload ''' archive_exists = False try: self.get_archive(archive_name) archive_exists = True except KeyError: pass if archive_exists: raise KeyError( "{} already exists. Use get_archive() to view".format( archive_name)) self._table.put_item(Item=metadata)
python
def _create_archive( self, archive_name, metadata): ''' This adds an item in a DynamoDB table corresponding to a S3 object Args ---- archive_name: str corresponds to the name of the Archive (e.g. ) Returns ------- Dictionary with confirmation of upload ''' archive_exists = False try: self.get_archive(archive_name) archive_exists = True except KeyError: pass if archive_exists: raise KeyError( "{} already exists. Use get_archive() to view".format( archive_name)) self._table.put_item(Item=metadata)
[ "def", "_create_archive", "(", "self", ",", "archive_name", ",", "metadata", ")", ":", "archive_exists", "=", "False", "try", ":", "self", ".", "get_archive", "(", "archive_name", ")", "archive_exists", "=", "True", "except", "KeyError", ":", "pass", "if", "...
This adds an item in a DynamoDB table corresponding to a S3 object Args ---- archive_name: str corresponds to the name of the Archive (e.g. ) Returns ------- Dictionary with confirmation of upload
[ "This", "adds", "an", "item", "in", "a", "DynamoDB", "table", "corresponding", "to", "a", "S3", "object" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager_dynamo.py#L234-L266
ClimateImpactLab/DataFS
datafs/managers/manager_dynamo.py
DynamoDBManager._batch_get_archive_listing
def _batch_get_archive_listing(self, archive_names): ''' Batched version of :py:meth:`~DynamoDBManager._get_archive_listing` Returns a list of full archive listings from an iterable of archive names .. note :: Invalid archive names will simply not be returned, so the response may not be the same length as the supplied `archive_names`. Parameters ---------- archive_names : list List of archive names Returns ------- archive_listings : list List of archive listings ''' archive_names = list(archive_names) MAX_QUERY_LENGTH = 100 archives = [] for query_index in range(0, len(archive_names), MAX_QUERY_LENGTH): current_query = { 'Keys': [{'_id': i} for i in archive_names[ query_index: query_index+MAX_QUERY_LENGTH]]} attempts = 0 res = {} while True: if attempts > 0 and len(res.get('UnprocessedKeys', {})) > 0: current_query = res['UnprocessedKeys'][self._table_name] elif attempts > 0 and len(res.get('UnprocessedKeys', {})) == 0: break res = self._resource.batch_get_item( RequestItems={self._table_name: current_query}) archives.extend(res['Responses'][self._table_name]) attempts += 1 return archives
python
def _batch_get_archive_listing(self, archive_names): ''' Batched version of :py:meth:`~DynamoDBManager._get_archive_listing` Returns a list of full archive listings from an iterable of archive names .. note :: Invalid archive names will simply not be returned, so the response may not be the same length as the supplied `archive_names`. Parameters ---------- archive_names : list List of archive names Returns ------- archive_listings : list List of archive listings ''' archive_names = list(archive_names) MAX_QUERY_LENGTH = 100 archives = [] for query_index in range(0, len(archive_names), MAX_QUERY_LENGTH): current_query = { 'Keys': [{'_id': i} for i in archive_names[ query_index: query_index+MAX_QUERY_LENGTH]]} attempts = 0 res = {} while True: if attempts > 0 and len(res.get('UnprocessedKeys', {})) > 0: current_query = res['UnprocessedKeys'][self._table_name] elif attempts > 0 and len(res.get('UnprocessedKeys', {})) == 0: break res = self._resource.batch_get_item( RequestItems={self._table_name: current_query}) archives.extend(res['Responses'][self._table_name]) attempts += 1 return archives
[ "def", "_batch_get_archive_listing", "(", "self", ",", "archive_names", ")", ":", "archive_names", "=", "list", "(", "archive_names", ")", "MAX_QUERY_LENGTH", "=", "100", "archives", "=", "[", "]", "for", "query_index", "in", "range", "(", "0", ",", "len", "...
Batched version of :py:meth:`~DynamoDBManager._get_archive_listing` Returns a list of full archive listings from an iterable of archive names .. note :: Invalid archive names will simply not be returned, so the response may not be the same length as the supplied `archive_names`. Parameters ---------- archive_names : list List of archive names Returns ------- archive_listings : list List of archive listings
[ "Batched", "version", "of", ":", "py", ":", "meth", ":", "~DynamoDBManager", ".", "_get_archive_listing" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager_dynamo.py#L278-L335
akissa/spamc
spamc/utils.py
load_backend
def load_backend(backend_name): """ load pool backend.""" try: if len(backend_name.split(".")) > 1: mod = import_module(backend_name) else: mod = import_module("spamc.backend_%s" % backend_name) return mod except ImportError: error_msg = "%s isn't a spamc backend" % backend_name raise ImportError(error_msg)
python
def load_backend(backend_name): """ load pool backend.""" try: if len(backend_name.split(".")) > 1: mod = import_module(backend_name) else: mod = import_module("spamc.backend_%s" % backend_name) return mod except ImportError: error_msg = "%s isn't a spamc backend" % backend_name raise ImportError(error_msg)
[ "def", "load_backend", "(", "backend_name", ")", ":", "try", ":", "if", "len", "(", "backend_name", ".", "split", "(", "\".\"", ")", ")", ">", "1", ":", "mod", "=", "import_module", "(", "backend_name", ")", "else", ":", "mod", "=", "import_module", "(...
load pool backend.
[ "load", "pool", "backend", "." ]
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/utils.py#L8-L18
timstaley/voevent-parse
src/voeventparse/voevent.py
Voevent
def Voevent(stream, stream_id, role): """Create a new VOEvent element tree, with specified IVORN and role. Args: stream (str): used to construct the IVORN like so:: ivorn = 'ivo://' + stream + '#' + stream_id (N.B. ``stream_id`` is converted to string if required.) So, e.g. we might set:: stream='voevent.soton.ac.uk/super_exciting_events' stream_id=77 stream_id (str): See above. role (str): role as defined in VOEvent spec. (See also :py:class:`.definitions.roles`) Returns: Root-node of the VOEvent, as represented by an lxml.objectify element tree ('etree'). See also http://lxml.de/objectify.html#the-lxml-objectify-api """ parser = objectify.makeparser(remove_blank_text=True) v = objectify.fromstring(voeventparse.definitions.v2_0_skeleton_str, parser=parser) _remove_root_tag_prefix(v) if not isinstance(stream_id, string_types): stream_id = repr(stream_id) v.attrib['ivorn'] = ''.join(('ivo://', stream, '#', stream_id)) v.attrib['role'] = role # Presumably we'll always want the following children: # (NB, valid to then leave them empty) etree.SubElement(v, 'Who') etree.SubElement(v, 'What') etree.SubElement(v, 'WhereWhen') v.Who.Description = ( 'VOEvent created with voevent-parse, version {}. ' 'See https://github.com/timstaley/voevent-parse for details.').format( __version__ ) return v
python
def Voevent(stream, stream_id, role): """Create a new VOEvent element tree, with specified IVORN and role. Args: stream (str): used to construct the IVORN like so:: ivorn = 'ivo://' + stream + '#' + stream_id (N.B. ``stream_id`` is converted to string if required.) So, e.g. we might set:: stream='voevent.soton.ac.uk/super_exciting_events' stream_id=77 stream_id (str): See above. role (str): role as defined in VOEvent spec. (See also :py:class:`.definitions.roles`) Returns: Root-node of the VOEvent, as represented by an lxml.objectify element tree ('etree'). See also http://lxml.de/objectify.html#the-lxml-objectify-api """ parser = objectify.makeparser(remove_blank_text=True) v = objectify.fromstring(voeventparse.definitions.v2_0_skeleton_str, parser=parser) _remove_root_tag_prefix(v) if not isinstance(stream_id, string_types): stream_id = repr(stream_id) v.attrib['ivorn'] = ''.join(('ivo://', stream, '#', stream_id)) v.attrib['role'] = role # Presumably we'll always want the following children: # (NB, valid to then leave them empty) etree.SubElement(v, 'Who') etree.SubElement(v, 'What') etree.SubElement(v, 'WhereWhen') v.Who.Description = ( 'VOEvent created with voevent-parse, version {}. ' 'See https://github.com/timstaley/voevent-parse for details.').format( __version__ ) return v
[ "def", "Voevent", "(", "stream", ",", "stream_id", ",", "role", ")", ":", "parser", "=", "objectify", ".", "makeparser", "(", "remove_blank_text", "=", "True", ")", "v", "=", "objectify", ".", "fromstring", "(", "voeventparse", ".", "definitions", ".", "v2...
Create a new VOEvent element tree, with specified IVORN and role. Args: stream (str): used to construct the IVORN like so:: ivorn = 'ivo://' + stream + '#' + stream_id (N.B. ``stream_id`` is converted to string if required.) So, e.g. we might set:: stream='voevent.soton.ac.uk/super_exciting_events' stream_id=77 stream_id (str): See above. role (str): role as defined in VOEvent spec. (See also :py:class:`.definitions.roles`) Returns: Root-node of the VOEvent, as represented by an lxml.objectify element tree ('etree'). See also http://lxml.de/objectify.html#the-lxml-objectify-api
[ "Create", "a", "new", "VOEvent", "element", "tree", "with", "specified", "IVORN", "and", "role", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L22-L65
timstaley/voevent-parse
src/voeventparse/voevent.py
loads
def loads(s, check_version=True): """ Load VOEvent from bytes. This parses a VOEvent XML packet string, taking care of some subtleties. For Python 3 users, ``s`` should be a bytes object - see also http://lxml.de/FAQ.html, "Why can't lxml parse my XML from unicode strings?" (Python 2 users can stick with old-school ``str`` type if preferred) By default, will raise an exception if the VOEvent is not of version 2.0. This can be disabled but voevent-parse routines are untested with other versions. Args: s (bytes): Bytes containing raw XML. check_version (bool): (Default=True) Checks that the VOEvent is of a supported schema version - currently only v2.0 is supported. Returns: :py:class:`Voevent`: Root-node of the etree. Raises: ValueError: If passed a VOEvent of wrong schema version (i.e. schema 1.1) """ # .. note:: # # The namespace is removed from the root element tag to make # objectify access work as expected, # (see :py:func:`._remove_root_tag_prefix`) # so we must re-insert it when we want to conform to schema. v = objectify.fromstring(s) _remove_root_tag_prefix(v) if check_version: version = v.attrib['version'] if not version == '2.0': raise ValueError('Unsupported VOEvent schema version:' + version) return v
python
def loads(s, check_version=True): """ Load VOEvent from bytes. This parses a VOEvent XML packet string, taking care of some subtleties. For Python 3 users, ``s`` should be a bytes object - see also http://lxml.de/FAQ.html, "Why can't lxml parse my XML from unicode strings?" (Python 2 users can stick with old-school ``str`` type if preferred) By default, will raise an exception if the VOEvent is not of version 2.0. This can be disabled but voevent-parse routines are untested with other versions. Args: s (bytes): Bytes containing raw XML. check_version (bool): (Default=True) Checks that the VOEvent is of a supported schema version - currently only v2.0 is supported. Returns: :py:class:`Voevent`: Root-node of the etree. Raises: ValueError: If passed a VOEvent of wrong schema version (i.e. schema 1.1) """ # .. note:: # # The namespace is removed from the root element tag to make # objectify access work as expected, # (see :py:func:`._remove_root_tag_prefix`) # so we must re-insert it when we want to conform to schema. v = objectify.fromstring(s) _remove_root_tag_prefix(v) if check_version: version = v.attrib['version'] if not version == '2.0': raise ValueError('Unsupported VOEvent schema version:' + version) return v
[ "def", "loads", "(", "s", ",", "check_version", "=", "True", ")", ":", "# .. note::", "#", "# The namespace is removed from the root element tag to make", "# objectify access work as expected,", "# (see :py:func:`._remove_root_tag_prefix`)", "# so we must re-inser...
Load VOEvent from bytes. This parses a VOEvent XML packet string, taking care of some subtleties. For Python 3 users, ``s`` should be a bytes object - see also http://lxml.de/FAQ.html, "Why can't lxml parse my XML from unicode strings?" (Python 2 users can stick with old-school ``str`` type if preferred) By default, will raise an exception if the VOEvent is not of version 2.0. This can be disabled but voevent-parse routines are untested with other versions. Args: s (bytes): Bytes containing raw XML. check_version (bool): (Default=True) Checks that the VOEvent is of a supported schema version - currently only v2.0 is supported. Returns: :py:class:`Voevent`: Root-node of the etree. Raises: ValueError: If passed a VOEvent of wrong schema version (i.e. schema 1.1)
[ "Load", "VOEvent", "from", "bytes", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L68-L107
timstaley/voevent-parse
src/voeventparse/voevent.py
load
def load(file, check_version=True): """Load VOEvent from file object. A simple wrapper to read a file before passing the contents to :py:func:`.loads`. Use with an open file object, e.g.:: with open('/path/to/voevent.xml', 'rb') as f: v = vp.load(f) Args: file (io.IOBase): An open file object (binary mode preferred), see also http://lxml.de/FAQ.html : "Can lxml parse from file objects opened in unicode/text mode?" check_version (bool): (Default=True) Checks that the VOEvent is of a supported schema version - currently only v2.0 is supported. Returns: :py:class:`Voevent`: Root-node of the etree. """ s = file.read() return loads(s, check_version)
python
def load(file, check_version=True): """Load VOEvent from file object. A simple wrapper to read a file before passing the contents to :py:func:`.loads`. Use with an open file object, e.g.:: with open('/path/to/voevent.xml', 'rb') as f: v = vp.load(f) Args: file (io.IOBase): An open file object (binary mode preferred), see also http://lxml.de/FAQ.html : "Can lxml parse from file objects opened in unicode/text mode?" check_version (bool): (Default=True) Checks that the VOEvent is of a supported schema version - currently only v2.0 is supported. Returns: :py:class:`Voevent`: Root-node of the etree. """ s = file.read() return loads(s, check_version)
[ "def", "load", "(", "file", ",", "check_version", "=", "True", ")", ":", "s", "=", "file", ".", "read", "(", ")", "return", "loads", "(", "s", ",", "check_version", ")" ]
Load VOEvent from file object. A simple wrapper to read a file before passing the contents to :py:func:`.loads`. Use with an open file object, e.g.:: with open('/path/to/voevent.xml', 'rb') as f: v = vp.load(f) Args: file (io.IOBase): An open file object (binary mode preferred), see also http://lxml.de/FAQ.html : "Can lxml parse from file objects opened in unicode/text mode?" check_version (bool): (Default=True) Checks that the VOEvent is of a supported schema version - currently only v2.0 is supported. Returns: :py:class:`Voevent`: Root-node of the etree.
[ "Load", "VOEvent", "from", "file", "object", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L110-L130
timstaley/voevent-parse
src/voeventparse/voevent.py
dumps
def dumps(voevent, pretty_print=False, xml_declaration=True, encoding='UTF-8'): """Converts voevent to string. .. note:: Default encoding is UTF-8, in line with VOE2.0 schema. Declaring the encoding can cause diffs with the original loaded VOEvent, but I think it's probably the right thing to do (and lxml doesn't really give you a choice anyway). Args: voevent (:class:`Voevent`): Root node of the VOevent etree. pretty_print (bool): indent the output for improved human-legibility when possible. See also: http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output xml_declaration (bool): Prepends a doctype tag to the string output, i.e. something like ``<?xml version='1.0' encoding='UTF-8'?>`` Returns: bytes: Bytestring containing raw XML representation of VOEvent. """ vcopy = copy.deepcopy(voevent) _return_to_standard_xml(vcopy) s = etree.tostring(vcopy, pretty_print=pretty_print, xml_declaration=xml_declaration, encoding=encoding) return s
python
def dumps(voevent, pretty_print=False, xml_declaration=True, encoding='UTF-8'): """Converts voevent to string. .. note:: Default encoding is UTF-8, in line with VOE2.0 schema. Declaring the encoding can cause diffs with the original loaded VOEvent, but I think it's probably the right thing to do (and lxml doesn't really give you a choice anyway). Args: voevent (:class:`Voevent`): Root node of the VOevent etree. pretty_print (bool): indent the output for improved human-legibility when possible. See also: http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output xml_declaration (bool): Prepends a doctype tag to the string output, i.e. something like ``<?xml version='1.0' encoding='UTF-8'?>`` Returns: bytes: Bytestring containing raw XML representation of VOEvent. """ vcopy = copy.deepcopy(voevent) _return_to_standard_xml(vcopy) s = etree.tostring(vcopy, pretty_print=pretty_print, xml_declaration=xml_declaration, encoding=encoding) return s
[ "def", "dumps", "(", "voevent", ",", "pretty_print", "=", "False", ",", "xml_declaration", "=", "True", ",", "encoding", "=", "'UTF-8'", ")", ":", "vcopy", "=", "copy", ".", "deepcopy", "(", "voevent", ")", "_return_to_standard_xml", "(", "vcopy", ")", "s"...
Converts voevent to string. .. note:: Default encoding is UTF-8, in line with VOE2.0 schema. Declaring the encoding can cause diffs with the original loaded VOEvent, but I think it's probably the right thing to do (and lxml doesn't really give you a choice anyway). Args: voevent (:class:`Voevent`): Root node of the VOevent etree. pretty_print (bool): indent the output for improved human-legibility when possible. See also: http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output xml_declaration (bool): Prepends a doctype tag to the string output, i.e. something like ``<?xml version='1.0' encoding='UTF-8'?>`` Returns: bytes: Bytestring containing raw XML representation of VOEvent.
[ "Converts", "voevent", "to", "string", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L133-L157
timstaley/voevent-parse
src/voeventparse/voevent.py
dump
def dump(voevent, file, pretty_print=True, xml_declaration=True): """Writes the voevent to the file object. e.g.:: with open('/tmp/myvoevent.xml','wb') as f: voeventparse.dump(v, f) Args: voevent(:class:`Voevent`): Root node of the VOevent etree. file (io.IOBase): An open (binary mode) file object for writing. pretty_print pretty_print(bool): See :func:`dumps` xml_declaration(bool): See :func:`dumps` """ file.write(dumps(voevent, pretty_print, xml_declaration))
python
def dump(voevent, file, pretty_print=True, xml_declaration=True): """Writes the voevent to the file object. e.g.:: with open('/tmp/myvoevent.xml','wb') as f: voeventparse.dump(v, f) Args: voevent(:class:`Voevent`): Root node of the VOevent etree. file (io.IOBase): An open (binary mode) file object for writing. pretty_print pretty_print(bool): See :func:`dumps` xml_declaration(bool): See :func:`dumps` """ file.write(dumps(voevent, pretty_print, xml_declaration))
[ "def", "dump", "(", "voevent", ",", "file", ",", "pretty_print", "=", "True", ",", "xml_declaration", "=", "True", ")", ":", "file", ".", "write", "(", "dumps", "(", "voevent", ",", "pretty_print", ",", "xml_declaration", ")", ")" ]
Writes the voevent to the file object. e.g.:: with open('/tmp/myvoevent.xml','wb') as f: voeventparse.dump(v, f) Args: voevent(:class:`Voevent`): Root node of the VOevent etree. file (io.IOBase): An open (binary mode) file object for writing. pretty_print pretty_print(bool): See :func:`dumps` xml_declaration(bool): See :func:`dumps`
[ "Writes", "the", "voevent", "to", "the", "file", "object", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L160-L175
timstaley/voevent-parse
src/voeventparse/voevent.py
valid_as_v2_0
def valid_as_v2_0(voevent): """Tests if a voevent conforms to the schema. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. Returns: bool: Whether VOEvent is valid """ _return_to_standard_xml(voevent) valid_bool = voevent_v2_0_schema.validate(voevent) _remove_root_tag_prefix(voevent) return valid_bool
python
def valid_as_v2_0(voevent): """Tests if a voevent conforms to the schema. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. Returns: bool: Whether VOEvent is valid """ _return_to_standard_xml(voevent) valid_bool = voevent_v2_0_schema.validate(voevent) _remove_root_tag_prefix(voevent) return valid_bool
[ "def", "valid_as_v2_0", "(", "voevent", ")", ":", "_return_to_standard_xml", "(", "voevent", ")", "valid_bool", "=", "voevent_v2_0_schema", ".", "validate", "(", "voevent", ")", "_remove_root_tag_prefix", "(", "voevent", ")", "return", "valid_bool" ]
Tests if a voevent conforms to the schema. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. Returns: bool: Whether VOEvent is valid
[ "Tests", "if", "a", "voevent", "conforms", "to", "the", "schema", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L178-L189
timstaley/voevent-parse
src/voeventparse/voevent.py
set_who
def set_who(voevent, date=None, author_ivorn=None): """Sets the minimal 'Who' attributes: date of authoring, AuthorIVORN. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. date(datetime.datetime): Date of authoring. NB Microseconds are ignored, as per the VOEvent spec. author_ivorn(str): Short author identifier, e.g. ``voevent.4pisky.org/ALARRM``. Note that the prefix ``ivo://`` will be prepended internally. """ if author_ivorn is not None: voevent.Who.AuthorIVORN = ''.join(('ivo://', author_ivorn)) if date is not None: voevent.Who.Date = date.replace(microsecond=0).isoformat()
python
def set_who(voevent, date=None, author_ivorn=None): """Sets the minimal 'Who' attributes: date of authoring, AuthorIVORN. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. date(datetime.datetime): Date of authoring. NB Microseconds are ignored, as per the VOEvent spec. author_ivorn(str): Short author identifier, e.g. ``voevent.4pisky.org/ALARRM``. Note that the prefix ``ivo://`` will be prepended internally. """ if author_ivorn is not None: voevent.Who.AuthorIVORN = ''.join(('ivo://', author_ivorn)) if date is not None: voevent.Who.Date = date.replace(microsecond=0).isoformat()
[ "def", "set_who", "(", "voevent", ",", "date", "=", "None", ",", "author_ivorn", "=", "None", ")", ":", "if", "author_ivorn", "is", "not", "None", ":", "voevent", ".", "Who", ".", "AuthorIVORN", "=", "''", ".", "join", "(", "(", "'ivo://'", ",", "aut...
Sets the minimal 'Who' attributes: date of authoring, AuthorIVORN. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. date(datetime.datetime): Date of authoring. NB Microseconds are ignored, as per the VOEvent spec. author_ivorn(str): Short author identifier, e.g. ``voevent.4pisky.org/ALARRM``. Note that the prefix ``ivo://`` will be prepended internally.
[ "Sets", "the", "minimal", "Who", "attributes", ":", "date", "of", "authoring", "AuthorIVORN", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L210-L225
timstaley/voevent-parse
src/voeventparse/voevent.py
set_author
def set_author(voevent, title=None, shortName=None, logoURL=None, contactName=None, contactEmail=None, contactPhone=None, contributor=None): """For setting fields in the detailed author description. This can optionally be neglected if a well defined AuthorIVORN is supplied. .. note:: Unusually for this library, the args here use CamelCase naming convention, since there's a direct mapping to the ``Author.*`` attributes to which they will be assigned. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. The rest of the arguments are strings corresponding to child elements. """ # We inspect all local variables except the voevent packet, # Cycling through and assigning them on the Who.Author element. AuthChildren = locals() AuthChildren.pop('voevent') if not voevent.xpath('Who/Author'): etree.SubElement(voevent.Who, 'Author') for k, v in AuthChildren.items(): if v is not None: voevent.Who.Author[k] = v
python
def set_author(voevent, title=None, shortName=None, logoURL=None, contactName=None, contactEmail=None, contactPhone=None, contributor=None): """For setting fields in the detailed author description. This can optionally be neglected if a well defined AuthorIVORN is supplied. .. note:: Unusually for this library, the args here use CamelCase naming convention, since there's a direct mapping to the ``Author.*`` attributes to which they will be assigned. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. The rest of the arguments are strings corresponding to child elements. """ # We inspect all local variables except the voevent packet, # Cycling through and assigning them on the Who.Author element. AuthChildren = locals() AuthChildren.pop('voevent') if not voevent.xpath('Who/Author'): etree.SubElement(voevent.Who, 'Author') for k, v in AuthChildren.items(): if v is not None: voevent.Who.Author[k] = v
[ "def", "set_author", "(", "voevent", ",", "title", "=", "None", ",", "shortName", "=", "None", ",", "logoURL", "=", "None", ",", "contactName", "=", "None", ",", "contactEmail", "=", "None", ",", "contactPhone", "=", "None", ",", "contributor", "=", "Non...
For setting fields in the detailed author description. This can optionally be neglected if a well defined AuthorIVORN is supplied. .. note:: Unusually for this library, the args here use CamelCase naming convention, since there's a direct mapping to the ``Author.*`` attributes to which they will be assigned. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. The rest of the arguments are strings corresponding to child elements.
[ "For", "setting", "fields", "in", "the", "detailed", "author", "description", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L228-L252
timstaley/voevent-parse
src/voeventparse/voevent.py
add_where_when
def add_where_when(voevent, coords, obs_time, observatory_location, allow_tz_naive_datetime=False): """ Add details of an observation to the WhereWhen section. We Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. coords(:class:`.Position2D`): Sky co-ordinates of event. obs_time(datetime.datetime): Nominal DateTime of the observation. Must either be timezone-aware, or should be carefully verified as representing UTC and then set parameter ``allow_tz_naive_datetime=True``. observatory_location(str): Telescope locale, e.g. 'La Palma'. May be a generic location as listed under :class:`voeventparse.definitions.observatory_location`. allow_tz_naive_datetime (bool): (Default False). Accept timezone-naive datetime-timestamps. See comments for ``obs_time``. """ # .. todo:: Implement TimeError using datetime.timedelta if obs_time.tzinfo is not None: utc_naive_obs_time = obs_time.astimezone(pytz.utc).replace(tzinfo=None) elif not allow_tz_naive_datetime: raise ValueError( "Datetime passed without tzinfo, cannot be sure if it is really a " "UTC timestamp. 
Please verify function call and either add tzinfo " "or pass parameter 'allow_tz_naive_obstime=True', as appropriate", ) else: utc_naive_obs_time = obs_time obs_data = etree.SubElement(voevent.WhereWhen, 'ObsDataLocation') etree.SubElement(obs_data, 'ObservatoryLocation', id=observatory_location) ol = etree.SubElement(obs_data, 'ObservationLocation') etree.SubElement(ol, 'AstroCoordSystem', id=coords.system) ac = etree.SubElement(ol, 'AstroCoords', coord_system_id=coords.system) time = etree.SubElement(ac, 'Time', unit='s') instant = etree.SubElement(time, 'TimeInstant') instant.ISOTime = utc_naive_obs_time.isoformat() # iso_time = etree.SubElement(instant, 'ISOTime') = obs_time.isoformat() pos2d = etree.SubElement(ac, 'Position2D', unit=coords.units) pos2d.Name1 = 'RA' pos2d.Name2 = 'Dec' pos2d_val = etree.SubElement(pos2d, 'Value2') pos2d_val.C1 = coords.ra pos2d_val.C2 = coords.dec pos2d.Error2Radius = coords.err
python
def add_where_when(voevent, coords, obs_time, observatory_location, allow_tz_naive_datetime=False): """ Add details of an observation to the WhereWhen section. We Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. coords(:class:`.Position2D`): Sky co-ordinates of event. obs_time(datetime.datetime): Nominal DateTime of the observation. Must either be timezone-aware, or should be carefully verified as representing UTC and then set parameter ``allow_tz_naive_datetime=True``. observatory_location(str): Telescope locale, e.g. 'La Palma'. May be a generic location as listed under :class:`voeventparse.definitions.observatory_location`. allow_tz_naive_datetime (bool): (Default False). Accept timezone-naive datetime-timestamps. See comments for ``obs_time``. """ # .. todo:: Implement TimeError using datetime.timedelta if obs_time.tzinfo is not None: utc_naive_obs_time = obs_time.astimezone(pytz.utc).replace(tzinfo=None) elif not allow_tz_naive_datetime: raise ValueError( "Datetime passed without tzinfo, cannot be sure if it is really a " "UTC timestamp. 
Please verify function call and either add tzinfo " "or pass parameter 'allow_tz_naive_obstime=True', as appropriate", ) else: utc_naive_obs_time = obs_time obs_data = etree.SubElement(voevent.WhereWhen, 'ObsDataLocation') etree.SubElement(obs_data, 'ObservatoryLocation', id=observatory_location) ol = etree.SubElement(obs_data, 'ObservationLocation') etree.SubElement(ol, 'AstroCoordSystem', id=coords.system) ac = etree.SubElement(ol, 'AstroCoords', coord_system_id=coords.system) time = etree.SubElement(ac, 'Time', unit='s') instant = etree.SubElement(time, 'TimeInstant') instant.ISOTime = utc_naive_obs_time.isoformat() # iso_time = etree.SubElement(instant, 'ISOTime') = obs_time.isoformat() pos2d = etree.SubElement(ac, 'Position2D', unit=coords.units) pos2d.Name1 = 'RA' pos2d.Name2 = 'Dec' pos2d_val = etree.SubElement(pos2d, 'Value2') pos2d_val.C1 = coords.ra pos2d_val.C2 = coords.dec pos2d.Error2Radius = coords.err
[ "def", "add_where_when", "(", "voevent", ",", "coords", ",", "obs_time", ",", "observatory_location", ",", "allow_tz_naive_datetime", "=", "False", ")", ":", "# .. todo:: Implement TimeError using datetime.timedelta", "if", "obs_time", ".", "tzinfo", "is", "not", "None"...
Add details of an observation to the WhereWhen section. We Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. coords(:class:`.Position2D`): Sky co-ordinates of event. obs_time(datetime.datetime): Nominal DateTime of the observation. Must either be timezone-aware, or should be carefully verified as representing UTC and then set parameter ``allow_tz_naive_datetime=True``. observatory_location(str): Telescope locale, e.g. 'La Palma'. May be a generic location as listed under :class:`voeventparse.definitions.observatory_location`. allow_tz_naive_datetime (bool): (Default False). Accept timezone-naive datetime-timestamps. See comments for ``obs_time``.
[ "Add", "details", "of", "an", "observation", "to", "the", "WhereWhen", "section", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L255-L306
timstaley/voevent-parse
src/voeventparse/voevent.py
add_how
def add_how(voevent, descriptions=None, references=None): """Add descriptions or references to the How section. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. descriptions(str): Description string, or list of description strings. references(:py:class:`voeventparse.misc.Reference`): A reference element (or list thereof). """ if not voevent.xpath('How'): etree.SubElement(voevent, 'How') if descriptions is not None: for desc in _listify(descriptions): # d = etree.SubElement(voevent.How, 'Description') # voevent.How.Description[voevent.How.index(d)] = desc ##Simpler: etree.SubElement(voevent.How, 'Description') voevent.How.Description[-1] = desc if references is not None: voevent.How.extend(_listify(references))
python
def add_how(voevent, descriptions=None, references=None): """Add descriptions or references to the How section. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. descriptions(str): Description string, or list of description strings. references(:py:class:`voeventparse.misc.Reference`): A reference element (or list thereof). """ if not voevent.xpath('How'): etree.SubElement(voevent, 'How') if descriptions is not None: for desc in _listify(descriptions): # d = etree.SubElement(voevent.How, 'Description') # voevent.How.Description[voevent.How.index(d)] = desc ##Simpler: etree.SubElement(voevent.How, 'Description') voevent.How.Description[-1] = desc if references is not None: voevent.How.extend(_listify(references))
[ "def", "add_how", "(", "voevent", ",", "descriptions", "=", "None", ",", "references", "=", "None", ")", ":", "if", "not", "voevent", ".", "xpath", "(", "'How'", ")", ":", "etree", ".", "SubElement", "(", "voevent", ",", "'How'", ")", "if", "descriptio...
Add descriptions or references to the How section. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. descriptions(str): Description string, or list of description strings. references(:py:class:`voeventparse.misc.Reference`): A reference element (or list thereof).
[ "Add", "descriptions", "or", "references", "to", "the", "How", "section", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L309-L329
timstaley/voevent-parse
src/voeventparse/voevent.py
add_why
def add_why(voevent, importance=None, expires=None, inferences=None): """Add Inferences, or set importance / expires attributes of the Why section. .. note:: ``importance`` / ``expires`` are 'Why' attributes, therefore setting them will overwrite previous values. ``inferences``, on the other hand, are appended to the list. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. importance(float): Value from 0.0 to 1.0 expires(datetime.datetime): Expiration date given inferred reason (See voevent spec). inferences(:class:`voeventparse.misc.Inference`): Inference or list of inferences, denoting probable identifications or associations, etc. """ if not voevent.xpath('Why'): etree.SubElement(voevent, 'Why') if importance is not None: voevent.Why.attrib['importance'] = str(importance) if expires is not None: voevent.Why.attrib['expires'] = expires.replace( microsecond=0).isoformat() if inferences is not None: voevent.Why.extend(_listify(inferences))
python
def add_why(voevent, importance=None, expires=None, inferences=None): """Add Inferences, or set importance / expires attributes of the Why section. .. note:: ``importance`` / ``expires`` are 'Why' attributes, therefore setting them will overwrite previous values. ``inferences``, on the other hand, are appended to the list. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. importance(float): Value from 0.0 to 1.0 expires(datetime.datetime): Expiration date given inferred reason (See voevent spec). inferences(:class:`voeventparse.misc.Inference`): Inference or list of inferences, denoting probable identifications or associations, etc. """ if not voevent.xpath('Why'): etree.SubElement(voevent, 'Why') if importance is not None: voevent.Why.attrib['importance'] = str(importance) if expires is not None: voevent.Why.attrib['expires'] = expires.replace( microsecond=0).isoformat() if inferences is not None: voevent.Why.extend(_listify(inferences))
[ "def", "add_why", "(", "voevent", ",", "importance", "=", "None", ",", "expires", "=", "None", ",", "inferences", "=", "None", ")", ":", "if", "not", "voevent", ".", "xpath", "(", "'Why'", ")", ":", "etree", ".", "SubElement", "(", "voevent", ",", "'...
Add Inferences, or set importance / expires attributes of the Why section. .. note:: ``importance`` / ``expires`` are 'Why' attributes, therefore setting them will overwrite previous values. ``inferences``, on the other hand, are appended to the list. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. importance(float): Value from 0.0 to 1.0 expires(datetime.datetime): Expiration date given inferred reason (See voevent spec). inferences(:class:`voeventparse.misc.Inference`): Inference or list of inferences, denoting probable identifications or associations, etc.
[ "Add", "Inferences", "or", "set", "importance", "/", "expires", "attributes", "of", "the", "Why", "section", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L332-L357
timstaley/voevent-parse
src/voeventparse/voevent.py
add_citations
def add_citations(voevent, event_ivorns): """Add citations to other voevents. The schema mandates that the 'Citations' section must either be entirely absent, or non-empty - hence we require this wrapper function for its creation prior to listing the first citation. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. event_ivorns (:class:`voeventparse.misc.EventIvorn`): List of EventIvorn elements to add to citation list. """ if not voevent.xpath('Citations'): etree.SubElement(voevent, 'Citations') voevent.Citations.extend(_listify(event_ivorns))
python
def add_citations(voevent, event_ivorns): """Add citations to other voevents. The schema mandates that the 'Citations' section must either be entirely absent, or non-empty - hence we require this wrapper function for its creation prior to listing the first citation. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. event_ivorns (:class:`voeventparse.misc.EventIvorn`): List of EventIvorn elements to add to citation list. """ if not voevent.xpath('Citations'): etree.SubElement(voevent, 'Citations') voevent.Citations.extend(_listify(event_ivorns))
[ "def", "add_citations", "(", "voevent", ",", "event_ivorns", ")", ":", "if", "not", "voevent", ".", "xpath", "(", "'Citations'", ")", ":", "etree", ".", "SubElement", "(", "voevent", ",", "'Citations'", ")", "voevent", ".", "Citations", ".", "extend", "(",...
Add citations to other voevents. The schema mandates that the 'Citations' section must either be entirely absent, or non-empty - hence we require this wrapper function for its creation prior to listing the first citation. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. event_ivorns (:class:`voeventparse.misc.EventIvorn`): List of EventIvorn elements to add to citation list.
[ "Add", "citations", "to", "other", "voevents", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L360-L375
timstaley/voevent-parse
src/voeventparse/voevent.py
_remove_root_tag_prefix
def _remove_root_tag_prefix(v): """ Removes 'voe' namespace prefix from root tag. When we load in a VOEvent, the root element has a tag prefixed by the VOE namespace, e.g. {http://www.ivoa.net/xml/VOEvent/v2.0}VOEvent Because objectify expects child elements to have the same namespace as their parent, this breaks the python-attribute style access mechanism. We can get around it without altering root, via e.g who = v['{}Who'] Alternatively, we can temporarily ditch the namespace altogether. This makes access to elements easier, but requires care to reinsert the namespace upon output. I've gone for the latter option. """ if v.prefix: # Create subelement without a prefix via etree.SubElement etree.SubElement(v, 'original_prefix') # Now carefully access said named subelement (without prefix cascade) # and alter the first value in the list of children with this name... # LXML syntax is a minefield! v['{}original_prefix'][0] = v.prefix v.tag = v.tag.replace(''.join(('{', v.nsmap[v.prefix], '}')), '') # Now v.tag = '{}VOEvent', v.prefix = None return
python
def _remove_root_tag_prefix(v): """ Removes 'voe' namespace prefix from root tag. When we load in a VOEvent, the root element has a tag prefixed by the VOE namespace, e.g. {http://www.ivoa.net/xml/VOEvent/v2.0}VOEvent Because objectify expects child elements to have the same namespace as their parent, this breaks the python-attribute style access mechanism. We can get around it without altering root, via e.g who = v['{}Who'] Alternatively, we can temporarily ditch the namespace altogether. This makes access to elements easier, but requires care to reinsert the namespace upon output. I've gone for the latter option. """ if v.prefix: # Create subelement without a prefix via etree.SubElement etree.SubElement(v, 'original_prefix') # Now carefully access said named subelement (without prefix cascade) # and alter the first value in the list of children with this name... # LXML syntax is a minefield! v['{}original_prefix'][0] = v.prefix v.tag = v.tag.replace(''.join(('{', v.nsmap[v.prefix], '}')), '') # Now v.tag = '{}VOEvent', v.prefix = None return
[ "def", "_remove_root_tag_prefix", "(", "v", ")", ":", "if", "v", ".", "prefix", ":", "# Create subelement without a prefix via etree.SubElement", "etree", ".", "SubElement", "(", "v", ",", "'original_prefix'", ")", "# Now carefully access said named subelement (without prefix...
Removes 'voe' namespace prefix from root tag. When we load in a VOEvent, the root element has a tag prefixed by the VOE namespace, e.g. {http://www.ivoa.net/xml/VOEvent/v2.0}VOEvent Because objectify expects child elements to have the same namespace as their parent, this breaks the python-attribute style access mechanism. We can get around it without altering root, via e.g who = v['{}Who'] Alternatively, we can temporarily ditch the namespace altogether. This makes access to elements easier, but requires care to reinsert the namespace upon output. I've gone for the latter option.
[ "Removes", "voe", "namespace", "prefix", "from", "root", "tag", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L381-L407
timstaley/voevent-parse
src/voeventparse/voevent.py
_reinsert_root_tag_prefix
def _reinsert_root_tag_prefix(v): """ Returns namespace prefix to root tag, if it had one. """ if hasattr(v, 'original_prefix'): original_prefix = v.original_prefix del v.original_prefix v.tag = ''.join(('{', v.nsmap[original_prefix], '}VOEvent')) return
python
def _reinsert_root_tag_prefix(v): """ Returns namespace prefix to root tag, if it had one. """ if hasattr(v, 'original_prefix'): original_prefix = v.original_prefix del v.original_prefix v.tag = ''.join(('{', v.nsmap[original_prefix], '}VOEvent')) return
[ "def", "_reinsert_root_tag_prefix", "(", "v", ")", ":", "if", "hasattr", "(", "v", ",", "'original_prefix'", ")", ":", "original_prefix", "=", "v", ".", "original_prefix", "del", "v", ".", "original_prefix", "v", ".", "tag", "=", "''", ".", "join", "(", ...
Returns namespace prefix to root tag, if it had one.
[ "Returns", "namespace", "prefix", "to", "root", "tag", "if", "it", "had", "one", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L410-L418
timstaley/voevent-parse
src/voeventparse/voevent.py
_listify
def _listify(x): """Ensure x is iterable; if not then enclose it in a list and return it.""" if isinstance(x, string_types): return [x] elif isinstance(x, collections.Iterable): return x else: return [x]
python
def _listify(x): """Ensure x is iterable; if not then enclose it in a list and return it.""" if isinstance(x, string_types): return [x] elif isinstance(x, collections.Iterable): return x else: return [x]
[ "def", "_listify", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "string_types", ")", ":", "return", "[", "x", "]", "elif", "isinstance", "(", "x", ",", "collections", ".", "Iterable", ")", ":", "return", "x", "else", ":", "return", "[", "...
Ensure x is iterable; if not then enclose it in a list and return it.
[ "Ensure", "x", "is", "iterable", ";", "if", "not", "then", "enclose", "it", "in", "a", "list", "and", "return", "it", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L430-L437
datasift/datasift-python
datasift/identity.py
Identity.list
def list(self, label=None, per_page=20, page=1): """ Get a list of identities that have been created :param per_page: The number of results per page returned :type per_page: int :param page: The page number of the results :type page: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'per_page': per_page, 'page': page} if label: params['label'] = label return self.request.get('', params)
python
def list(self, label=None, per_page=20, page=1): """ Get a list of identities that have been created :param per_page: The number of results per page returned :type per_page: int :param page: The page number of the results :type page: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'per_page': per_page, 'page': page} if label: params['label'] = label return self.request.get('', params)
[ "def", "list", "(", "self", ",", "label", "=", "None", ",", "per_page", "=", "20", ",", "page", "=", "1", ")", ":", "params", "=", "{", "'per_page'", ":", "per_page", ",", "'page'", ":", "page", "}", "if", "label", ":", "params", "[", "'label'", ...
Get a list of identities that have been created :param per_page: The number of results per page returned :type per_page: int :param page: The page number of the results :type page: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "a", "list", "of", "identities", "that", "have", "been", "created" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/identity.py#L9-L27
datasift/datasift-python
datasift/identity.py
Identity.create
def create(self, label, status=None, master=None): """ Create an Identity :param label: The label to give this new identity :param status: The status of this identity. Default: 'active' :param master: Represents whether this identity is a master. Default: False :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'label': label} if status: params['status'] = status if master: params['master'] = master return self.request.post('', params)
python
def create(self, label, status=None, master=None): """ Create an Identity :param label: The label to give this new identity :param status: The status of this identity. Default: 'active' :param master: Represents whether this identity is a master. Default: False :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'label': label} if status: params['status'] = status if master: params['master'] = master return self.request.post('', params)
[ "def", "create", "(", "self", ",", "label", ",", "status", "=", "None", ",", "master", "=", "None", ")", ":", "params", "=", "{", "'label'", ":", "label", "}", "if", "status", ":", "params", "[", "'status'", "]", "=", "status", "if", "master", ":",...
Create an Identity :param label: The label to give this new identity :param status: The status of this identity. Default: 'active' :param master: Represents whether this identity is a master. Default: False :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Create", "an", "Identity" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/identity.py#L41-L61
datasift/datasift-python
datasift/identity.py
Identity.update
def update(self, id, label=None, status=None, master=None): """ Update an Identity :param label: The label to give this new identity :param status: The status of this identity. Default: 'active' :param master: Represents whether this identity is a master. Default: False :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if label: params['label'] = label if status: params['status'] = status if master: params['master'] = master return self.request.put(str(id), params)
python
def update(self, id, label=None, status=None, master=None): """ Update an Identity :param label: The label to give this new identity :param status: The status of this identity. Default: 'active' :param master: Represents whether this identity is a master. Default: False :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if label: params['label'] = label if status: params['status'] = status if master: params['master'] = master return self.request.put(str(id), params)
[ "def", "update", "(", "self", ",", "id", ",", "label", "=", "None", ",", "status", "=", "None", ",", "master", "=", "None", ")", ":", "params", "=", "{", "}", "if", "label", ":", "params", "[", "'label'", "]", "=", "label", "if", "status", ":", ...
Update an Identity :param label: The label to give this new identity :param status: The status of this identity. Default: 'active' :param master: Represents whether this identity is a master. Default: False :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Update", "an", "Identity" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/identity.py#L63-L85
datasift/datasift-python
datasift/pylon_task.py
PylonTask.get
def get(self, id, service='facebook', type='analysis'): """ Get a given Pylon task :param id: The ID of the task :type id: str :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get(service + '/task/' + type + '/' + id)
python
def get(self, id, service='facebook', type='analysis'): """ Get a given Pylon task :param id: The ID of the task :type id: str :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get(service + '/task/' + type + '/' + id)
[ "def", "get", "(", "self", ",", "id", ",", "service", "=", "'facebook'", ",", "type", "=", "'analysis'", ")", ":", "return", "self", ".", "request", ".", "get", "(", "service", "+", "'/task/'", "+", "type", "+", "'/'", "+", "id", ")" ]
Get a given Pylon task :param id: The ID of the task :type id: str :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "a", "given", "Pylon", "task" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon_task.py#L8-L20
datasift/datasift-python
datasift/pylon_task.py
PylonTask.list
def list(self, per_page=None, page=None, status=None, service='facebook'): """ Get a list of Pylon tasks :param per_page: How many tasks to display per page :type per_page: int :param page: Which page of tasks to display :type page: int :param status: The status of the tasks to list :type page: string :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if per_page is not None: params['per_page'] = per_page if page is not None: params['page'] = page if status: params['status'] = status return self.request.get(service + '/task', params)
python
def list(self, per_page=None, page=None, status=None, service='facebook'): """ Get a list of Pylon tasks :param per_page: How many tasks to display per page :type per_page: int :param page: Which page of tasks to display :type page: int :param status: The status of the tasks to list :type page: string :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if per_page is not None: params['per_page'] = per_page if page is not None: params['page'] = page if status: params['status'] = status return self.request.get(service + '/task', params)
[ "def", "list", "(", "self", ",", "per_page", "=", "None", ",", "page", "=", "None", ",", "status", "=", "None", ",", "service", "=", "'facebook'", ")", ":", "params", "=", "{", "}", "if", "per_page", "is", "not", "None", ":", "params", "[", "'per_p...
Get a list of Pylon tasks :param per_page: How many tasks to display per page :type per_page: int :param page: Which page of tasks to display :type page: int :param status: The status of the tasks to list :type page: string :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "a", "list", "of", "Pylon", "tasks" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon_task.py#L22-L48
datasift/datasift-python
datasift/pylon_task.py
PylonTask.create
def create(self, subscription_id, name, parameters, type='analysis', service='facebook'): """ Create a PYLON task :param subscription_id: The ID of the recording to create the task for :type subscription_id: str :param name: The name of the new task :type name: str :param parameters: The parameters for this task :type parameters: dict :param type: The type of analysis to create, currently only 'analysis' is accepted :type type: str :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = { 'subscription_id': subscription_id, 'name': name, 'parameters': parameters, 'type': type } return self.request.post(service + '/task/', params)
python
def create(self, subscription_id, name, parameters, type='analysis', service='facebook'): """ Create a PYLON task :param subscription_id: The ID of the recording to create the task for :type subscription_id: str :param name: The name of the new task :type name: str :param parameters: The parameters for this task :type parameters: dict :param type: The type of analysis to create, currently only 'analysis' is accepted :type type: str :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = { 'subscription_id': subscription_id, 'name': name, 'parameters': parameters, 'type': type } return self.request.post(service + '/task/', params)
[ "def", "create", "(", "self", ",", "subscription_id", ",", "name", ",", "parameters", ",", "type", "=", "'analysis'", ",", "service", "=", "'facebook'", ")", ":", "params", "=", "{", "'subscription_id'", ":", "subscription_id", ",", "'name'", ":", "name", ...
Create a PYLON task :param subscription_id: The ID of the recording to create the task for :type subscription_id: str :param name: The name of the new task :type name: str :param parameters: The parameters for this task :type parameters: dict :param type: The type of analysis to create, currently only 'analysis' is accepted :type type: str :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Create", "a", "PYLON", "task" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon_task.py#L50-L76
caktus/django-sticky-uploads
stickyuploads/widgets.py
StickyUploadWidget.value_from_datadict
def value_from_datadict(self, data, files, name): """Returns uploaded file from serialized value.""" upload = super(StickyUploadWidget, self).value_from_datadict(data, files, name) if upload is not None: # File was posted or cleared as normal return upload else: # Try the hidden input hidden_name = self.get_hidden_name(name) value = data.get(hidden_name, None) if value is not None: upload = open_stored_file(value, self.url) if upload is not None: setattr(upload, '_seralized_location', value) return upload
python
def value_from_datadict(self, data, files, name): """Returns uploaded file from serialized value.""" upload = super(StickyUploadWidget, self).value_from_datadict(data, files, name) if upload is not None: # File was posted or cleared as normal return upload else: # Try the hidden input hidden_name = self.get_hidden_name(name) value = data.get(hidden_name, None) if value is not None: upload = open_stored_file(value, self.url) if upload is not None: setattr(upload, '_seralized_location', value) return upload
[ "def", "value_from_datadict", "(", "self", ",", "data", ",", "files", ",", "name", ")", ":", "upload", "=", "super", "(", "StickyUploadWidget", ",", "self", ")", ".", "value_from_datadict", "(", "data", ",", "files", ",", "name", ")", "if", "upload", "is...
Returns uploaded file from serialized value.
[ "Returns", "uploaded", "file", "from", "serialized", "value", "." ]
train
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/widgets.py#L28-L42
caktus/django-sticky-uploads
stickyuploads/widgets.py
StickyUploadWidget.render
def render(self, name, value, attrs=None, renderer=None): """Include a hidden input to store the serialized upload value.""" location = getattr(value, '_seralized_location', '') if location and not hasattr(value, 'url'): value.url = '#' if hasattr(self, 'get_template_substitution_values'): # Django 1.8-1.10 self.template_with_initial = ( '%(initial_text)s: %(initial)s %(clear_template)s' '<br />%(input_text)s: %(input)s') attrs = attrs or {} attrs.update({'data-upload-url': self.url}) hidden_name = self.get_hidden_name(name) kwargs = {} if django_version >= (1, 11): kwargs['renderer'] = renderer parent = super(StickyUploadWidget, self).render(name, value, attrs=attrs, **kwargs) hidden = forms.HiddenInput().render(hidden_name, location, **kwargs) return mark_safe(parent + '\n' + hidden)
python
def render(self, name, value, attrs=None, renderer=None): """Include a hidden input to store the serialized upload value.""" location = getattr(value, '_seralized_location', '') if location and not hasattr(value, 'url'): value.url = '#' if hasattr(self, 'get_template_substitution_values'): # Django 1.8-1.10 self.template_with_initial = ( '%(initial_text)s: %(initial)s %(clear_template)s' '<br />%(input_text)s: %(input)s') attrs = attrs or {} attrs.update({'data-upload-url': self.url}) hidden_name = self.get_hidden_name(name) kwargs = {} if django_version >= (1, 11): kwargs['renderer'] = renderer parent = super(StickyUploadWidget, self).render(name, value, attrs=attrs, **kwargs) hidden = forms.HiddenInput().render(hidden_name, location, **kwargs) return mark_safe(parent + '\n' + hidden)
[ "def", "render", "(", "self", ",", "name", ",", "value", ",", "attrs", "=", "None", ",", "renderer", "=", "None", ")", ":", "location", "=", "getattr", "(", "value", ",", "'_seralized_location'", ",", "''", ")", "if", "location", "and", "not", "hasattr...
Include a hidden input to store the serialized upload value.
[ "Include", "a", "hidden", "input", "to", "store", "the", "serialized", "upload", "value", "." ]
train
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/widgets.py#L44-L64
switchboardpy/switchboard
switchboard/manager.py
configure
def configure(config={}, datastore=None, nested=False): """ Useful for when you need to control Switchboard's setup """ if nested: config = nested_config(config) # Re-read settings to make sure we have everything. # XXX It would be really nice if we didn't need to do this. Settings.init(**config) if datastore: Switch.ds = datastore # Register the builtins __import__('switchboard.builtins')
python
def configure(config={}, datastore=None, nested=False): """ Useful for when you need to control Switchboard's setup """ if nested: config = nested_config(config) # Re-read settings to make sure we have everything. # XXX It would be really nice if we didn't need to do this. Settings.init(**config) if datastore: Switch.ds = datastore # Register the builtins __import__('switchboard.builtins')
[ "def", "configure", "(", "config", "=", "{", "}", ",", "datastore", "=", "None", ",", "nested", "=", "False", ")", ":", "if", "nested", ":", "config", "=", "nested_config", "(", "config", ")", "# Re-read settings to make sure we have everything.", "# XXX It woul...
Useful for when you need to control Switchboard's setup
[ "Useful", "for", "when", "you", "need", "to", "control", "Switchboard", "s", "setup" ]
train
https://github.com/switchboardpy/switchboard/blob/074b4838dbe140cb8f89d3c25ae25e70a29f9553/switchboard/manager.py#L37-L51
switchboardpy/switchboard
switchboard/manager.py
SwitchManager.with_result_cache
def with_result_cache(func): """ Decorator specifically for is_active. If self.result_cache is set to a {} the is_active results will be cached for each set of params. """ def inner(self, *args, **kwargs): dic = self.result_cache cache_key = None if dic is not None: cache_key = (args, tuple(kwargs.items())) try: result = dic.get(cache_key) except TypeError as e: # not hashable log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s', args[0], e, repr(cache_key)[:200]) cache_key = None else: if result is not None: return result result = func(self, *args, **kwargs) if cache_key is not None: dic[cache_key] = result return result return inner
python
def with_result_cache(func): """ Decorator specifically for is_active. If self.result_cache is set to a {} the is_active results will be cached for each set of params. """ def inner(self, *args, **kwargs): dic = self.result_cache cache_key = None if dic is not None: cache_key = (args, tuple(kwargs.items())) try: result = dic.get(cache_key) except TypeError as e: # not hashable log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s', args[0], e, repr(cache_key)[:200]) cache_key = None else: if result is not None: return result result = func(self, *args, **kwargs) if cache_key is not None: dic[cache_key] = result return result return inner
[ "def", "with_result_cache", "(", "func", ")", ":", "def", "inner", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "dic", "=", "self", ".", "result_cache", "cache_key", "=", "None", "if", "dic", "is", "not", "None", ":", "cache_key"...
Decorator specifically for is_active. If self.result_cache is set to a {} the is_active results will be cached for each set of params.
[ "Decorator", "specifically", "for", "is_active", ".", "If", "self", ".", "result_cache", "is", "set", "to", "a", "{}", "the", "is_active", "results", "will", "be", "cached", "for", "each", "set", "of", "params", "." ]
train
https://github.com/switchboardpy/switchboard/blob/074b4838dbe140cb8f89d3c25ae25e70a29f9553/switchboard/manager.py#L88-L111
switchboardpy/switchboard
switchboard/manager.py
SwitchManager.is_active
def is_active(self, key, *instances, **kwargs): """ Returns ``True`` if any of ``instances`` match an active switch. Otherwise returns ``False``. >>> operator.is_active('my_feature', request) #doctest: +SKIP """ try: default = kwargs.pop('default', False) # Check all parents for a disabled state parts = key.split(':') if len(parts) > 1: child_kwargs = kwargs.copy() child_kwargs['default'] = None result = self.is_active(':'.join(parts[:-1]), *instances, **child_kwargs) if result is False: return result elif result is True: default = result try: switch = self[key] except KeyError: # switch is not defined, defer to parent return default if switch.status == GLOBAL: return True elif switch.status == DISABLED: return False elif switch.status == INHERIT: return default conditions = switch.value # If no conditions are set, we inherit from parents if not conditions: return default instances = list(instances) if instances else [] instances.extend(self.context.values()) # check each switch to see if it can execute return_value = False for namespace, condition in conditions.iteritems(): condition_set = registry_by_namespace.get(namespace) if not condition_set: continue result = condition_set.has_active_condition(condition, instances) if result is False: return False elif result is True: return_value = True except: log.exception('Error checking if switch "%s" is active', key) return_value = False # there were no matching conditions, so it must not be enabled return return_value
python
def is_active(self, key, *instances, **kwargs): """ Returns ``True`` if any of ``instances`` match an active switch. Otherwise returns ``False``. >>> operator.is_active('my_feature', request) #doctest: +SKIP """ try: default = kwargs.pop('default', False) # Check all parents for a disabled state parts = key.split(':') if len(parts) > 1: child_kwargs = kwargs.copy() child_kwargs['default'] = None result = self.is_active(':'.join(parts[:-1]), *instances, **child_kwargs) if result is False: return result elif result is True: default = result try: switch = self[key] except KeyError: # switch is not defined, defer to parent return default if switch.status == GLOBAL: return True elif switch.status == DISABLED: return False elif switch.status == INHERIT: return default conditions = switch.value # If no conditions are set, we inherit from parents if not conditions: return default instances = list(instances) if instances else [] instances.extend(self.context.values()) # check each switch to see if it can execute return_value = False for namespace, condition in conditions.iteritems(): condition_set = registry_by_namespace.get(namespace) if not condition_set: continue result = condition_set.has_active_condition(condition, instances) if result is False: return False elif result is True: return_value = True except: log.exception('Error checking if switch "%s" is active', key) return_value = False # there were no matching conditions, so it must not be enabled return return_value
[ "def", "is_active", "(", "self", ",", "key", ",", "*", "instances", ",", "*", "*", "kwargs", ")", ":", "try", ":", "default", "=", "kwargs", ".", "pop", "(", "'default'", ",", "False", ")", "# Check all parents for a disabled state", "parts", "=", "key", ...
Returns ``True`` if any of ``instances`` match an active switch. Otherwise returns ``False``. >>> operator.is_active('my_feature', request) #doctest: +SKIP
[ "Returns", "True", "if", "any", "of", "instances", "match", "an", "active", "switch", ".", "Otherwise", "returns", "False", "." ]
train
https://github.com/switchboardpy/switchboard/blob/074b4838dbe140cb8f89d3c25ae25e70a29f9553/switchboard/manager.py#L114-L176
switchboardpy/switchboard
switchboard/manager.py
SwitchManager.register
def register(self, condition_set): """ Registers a condition set with the manager. >>> condition_set = MyConditionSet() #doctest: +SKIP >>> operator.register(condition_set) #doctest: +SKIP """ if callable(condition_set): condition_set = condition_set() registry[condition_set.get_id()] = condition_set registry_by_namespace[condition_set.get_namespace()] = condition_set
python
def register(self, condition_set): """ Registers a condition set with the manager. >>> condition_set = MyConditionSet() #doctest: +SKIP >>> operator.register(condition_set) #doctest: +SKIP """ if callable(condition_set): condition_set = condition_set() registry[condition_set.get_id()] = condition_set registry_by_namespace[condition_set.get_namespace()] = condition_set
[ "def", "register", "(", "self", ",", "condition_set", ")", ":", "if", "callable", "(", "condition_set", ")", ":", "condition_set", "=", "condition_set", "(", ")", "registry", "[", "condition_set", ".", "get_id", "(", ")", "]", "=", "condition_set", "registry...
Registers a condition set with the manager. >>> condition_set = MyConditionSet() #doctest: +SKIP >>> operator.register(condition_set) #doctest: +SKIP
[ "Registers", "a", "condition", "set", "with", "the", "manager", "." ]
train
https://github.com/switchboardpy/switchboard/blob/074b4838dbe140cb8f89d3c25ae25e70a29f9553/switchboard/manager.py#L178-L189
switchboardpy/switchboard
switchboard/manager.py
SwitchManager.unregister
def unregister(self, condition_set): """ Unregisters a condition set with the manager. >>> operator.unregister(condition_set) #doctest: +SKIP """ if callable(condition_set): condition_set = condition_set() registry.pop(condition_set.get_id(), None) registry_by_namespace.pop(condition_set.get_namespace(), None)
python
def unregister(self, condition_set): """ Unregisters a condition set with the manager. >>> operator.unregister(condition_set) #doctest: +SKIP """ if callable(condition_set): condition_set = condition_set() registry.pop(condition_set.get_id(), None) registry_by_namespace.pop(condition_set.get_namespace(), None)
[ "def", "unregister", "(", "self", ",", "condition_set", ")", ":", "if", "callable", "(", "condition_set", ")", ":", "condition_set", "=", "condition_set", "(", ")", "registry", ".", "pop", "(", "condition_set", ".", "get_id", "(", ")", ",", "None", ")", ...
Unregisters a condition set with the manager. >>> operator.unregister(condition_set) #doctest: +SKIP
[ "Unregisters", "a", "condition", "set", "with", "the", "manager", "." ]
train
https://github.com/switchboardpy/switchboard/blob/074b4838dbe140cb8f89d3c25ae25e70a29f9553/switchboard/manager.py#L191-L200
TeamHG-Memex/MaybeDont
maybedont/predict.py
DupePredictor.get_dupe_prob
def get_dupe_prob(self, url): """ A probability of given url being a duplicate of some content that has already been seem. """ path, query = _parse_url(url) dupestats = [] extend_ds = lambda x: dupestats.extend(filter(None, ( ds_dict.get(key) for ds_dict, key in x))) if self.urls_by_path.get(path): extend_ds([(self.path_dupstats, path)]) # If param is in the query for param, value in query.items(): qwp_key = _q_key(_without_key(query, param)) # Have we seen the query with param changed or removed? has_changed = self.urls_by_path_qwp.get((path, param, qwp_key)) has_removed = self.urls_by_path_q.get((path, qwp_key)) if has_changed or has_removed: extend_ds(self._param_dupstats(path, param, qwp_key)) if has_removed: extend_ds(self._param_value_dupstats(path, param, value)) # If param is not in the query, but we've crawled a page when it is q_key = _q_key(query) for param in (self.params_by_path.get(path, set()) - set(query)): if self.urls_by_path_qwp.get((path, param, q_key)): extend_ds(self._param_dupstats(path, param, q_key)) # FIXME - this could be a long list of param values, # it's better to somehow store only high-probability values? for value in self.param_values.get((path, param), set()): extend_ds(self._param_value_dupstats(path, param, value)) return max(ds.get_prob() for ds in dupestats) if dupestats else 0.
python
def get_dupe_prob(self, url): """ A probability of given url being a duplicate of some content that has already been seem. """ path, query = _parse_url(url) dupestats = [] extend_ds = lambda x: dupestats.extend(filter(None, ( ds_dict.get(key) for ds_dict, key in x))) if self.urls_by_path.get(path): extend_ds([(self.path_dupstats, path)]) # If param is in the query for param, value in query.items(): qwp_key = _q_key(_without_key(query, param)) # Have we seen the query with param changed or removed? has_changed = self.urls_by_path_qwp.get((path, param, qwp_key)) has_removed = self.urls_by_path_q.get((path, qwp_key)) if has_changed or has_removed: extend_ds(self._param_dupstats(path, param, qwp_key)) if has_removed: extend_ds(self._param_value_dupstats(path, param, value)) # If param is not in the query, but we've crawled a page when it is q_key = _q_key(query) for param in (self.params_by_path.get(path, set()) - set(query)): if self.urls_by_path_qwp.get((path, param, q_key)): extend_ds(self._param_dupstats(path, param, q_key)) # FIXME - this could be a long list of param values, # it's better to somehow store only high-probability values? for value in self.param_values.get((path, param), set()): extend_ds(self._param_value_dupstats(path, param, value)) return max(ds.get_prob() for ds in dupestats) if dupestats else 0.
[ "def", "get_dupe_prob", "(", "self", ",", "url", ")", ":", "path", ",", "query", "=", "_parse_url", "(", "url", ")", "dupestats", "=", "[", "]", "extend_ds", "=", "lambda", "x", ":", "dupestats", ".", "extend", "(", "filter", "(", "None", ",", "(", ...
A probability of given url being a duplicate of some content that has already been seem.
[ "A", "probability", "of", "given", "url", "being", "a", "duplicate", "of", "some", "content", "that", "has", "already", "been", "seem", "." ]
train
https://github.com/TeamHG-Memex/MaybeDont/blob/34721f67b69d426adda324a0ed905d3860828af9/maybedont/predict.py#L73-L102
TeamHG-Memex/MaybeDont
maybedont/predict.py
DupePredictor.update_model
def update_model(self, url, text): """ Update prediction model with a page by given url and text content. Return a list of item duplicates (for testing purposes). """ min_hash = get_min_hash(text, self.too_common_shingles, self.num_perm) item_url = canonicalize_url(url) item_path, item_query = _parse_url(item_url) all_duplicates = [ (url, self.seen_urls[url]) for url in self.lsh.query(min_hash)] duplicates = [(url, m.query) for url, m in all_duplicates if m.path == item_path] # Hypothesis (1) - just paths n_path_nodup = self._nodup_filter(min_hash, ( self.urls_by_path.get(item_path, set()) .difference(url for url, _ in duplicates))) self.path_dupstats[item_path].update(len(duplicates), n_path_nodup) # Other hypotheses, if param is in the query for param, value in item_query.items(): self._update_with_param( duplicates, min_hash, item_path, item_query, param, [value]) # Other hypotheses, if param is not in the query for param in ( self.params_by_path.get(item_path, set()) - set(item_query)): self._update_with_param( duplicates, min_hash, item_path, item_query, param, self.param_values.get((item_path, param), set())) # Update indexes for param, value in item_query.items(): self.urls_by_path_q[item_path, _q_key(item_query)].add(item_url) item_qwp_key = _q_key(_without_key(item_query, param)) self.urls_by_path_qwp[item_path, param, item_qwp_key].add(item_url) self.params_by_path[item_path].add(param) self.param_values[item_path, param].add(value) if not item_query: self.urls_by_path_q[item_path, ()].add(item_url) self.urls_by_path[item_path].add(item_url) if item_url in self.lsh: self.lsh.remove(item_url) self.lsh.insert(item_url, min_hash) self.seen_urls[item_url] = URLMeta(item_path, item_query, min_hash) if len(self.seen_urls) % 100 == 0: self.log_dupstats() return all_duplicates
python
def update_model(self, url, text): """ Update prediction model with a page by given url and text content. Return a list of item duplicates (for testing purposes). """ min_hash = get_min_hash(text, self.too_common_shingles, self.num_perm) item_url = canonicalize_url(url) item_path, item_query = _parse_url(item_url) all_duplicates = [ (url, self.seen_urls[url]) for url in self.lsh.query(min_hash)] duplicates = [(url, m.query) for url, m in all_duplicates if m.path == item_path] # Hypothesis (1) - just paths n_path_nodup = self._nodup_filter(min_hash, ( self.urls_by_path.get(item_path, set()) .difference(url for url, _ in duplicates))) self.path_dupstats[item_path].update(len(duplicates), n_path_nodup) # Other hypotheses, if param is in the query for param, value in item_query.items(): self._update_with_param( duplicates, min_hash, item_path, item_query, param, [value]) # Other hypotheses, if param is not in the query for param in ( self.params_by_path.get(item_path, set()) - set(item_query)): self._update_with_param( duplicates, min_hash, item_path, item_query, param, self.param_values.get((item_path, param), set())) # Update indexes for param, value in item_query.items(): self.urls_by_path_q[item_path, _q_key(item_query)].add(item_url) item_qwp_key = _q_key(_without_key(item_query, param)) self.urls_by_path_qwp[item_path, param, item_qwp_key].add(item_url) self.params_by_path[item_path].add(param) self.param_values[item_path, param].add(value) if not item_query: self.urls_by_path_q[item_path, ()].add(item_url) self.urls_by_path[item_path].add(item_url) if item_url in self.lsh: self.lsh.remove(item_url) self.lsh.insert(item_url, min_hash) self.seen_urls[item_url] = URLMeta(item_path, item_query, min_hash) if len(self.seen_urls) % 100 == 0: self.log_dupstats() return all_duplicates
[ "def", "update_model", "(", "self", ",", "url", ",", "text", ")", ":", "min_hash", "=", "get_min_hash", "(", "text", ",", "self", ".", "too_common_shingles", ",", "self", ".", "num_perm", ")", "item_url", "=", "canonicalize_url", "(", "url", ")", "item_pat...
Update prediction model with a page by given url and text content. Return a list of item duplicates (for testing purposes).
[ "Update", "prediction", "model", "with", "a", "page", "by", "given", "url", "and", "text", "content", ".", "Return", "a", "list", "of", "item", "duplicates", "(", "for", "testing", "purposes", ")", "." ]
train
https://github.com/TeamHG-Memex/MaybeDont/blob/34721f67b69d426adda324a0ed905d3860828af9/maybedont/predict.py#L104-L146
TeamHG-Memex/MaybeDont
maybedont/predict.py
DupePredictor._nodup_filter
def _nodup_filter(self, min_hash, all_urls, max_sample=200): """ This filters results that are considered not duplicates. But we really need to check that, because lsh.query does not always return ALL duplicates, esp. when there are a lot of them, so here we double-check and return only urls that are NOT duplicates. Return estimated number of not duplicates. """ if not all_urls: return 0 urls = random.sample(all_urls, max_sample) \ if len(all_urls) > max_sample else all_urls filtered = [ url for url in urls if min_hash.jaccard(self.seen_urls[url].min_hash) < self.jaccard_threshold] return int(len(filtered) / len(urls) * len(all_urls))
python
def _nodup_filter(self, min_hash, all_urls, max_sample=200): """ This filters results that are considered not duplicates. But we really need to check that, because lsh.query does not always return ALL duplicates, esp. when there are a lot of them, so here we double-check and return only urls that are NOT duplicates. Return estimated number of not duplicates. """ if not all_urls: return 0 urls = random.sample(all_urls, max_sample) \ if len(all_urls) > max_sample else all_urls filtered = [ url for url in urls if min_hash.jaccard(self.seen_urls[url].min_hash) < self.jaccard_threshold] return int(len(filtered) / len(urls) * len(all_urls))
[ "def", "_nodup_filter", "(", "self", ",", "min_hash", ",", "all_urls", ",", "max_sample", "=", "200", ")", ":", "if", "not", "all_urls", ":", "return", "0", "urls", "=", "random", ".", "sample", "(", "all_urls", ",", "max_sample", ")", "if", "len", "("...
This filters results that are considered not duplicates. But we really need to check that, because lsh.query does not always return ALL duplicates, esp. when there are a lot of them, so here we double-check and return only urls that are NOT duplicates. Return estimated number of not duplicates.
[ "This", "filters", "results", "that", "are", "considered", "not", "duplicates", ".", "But", "we", "really", "need", "to", "check", "that", "because", "lsh", ".", "query", "does", "not", "always", "return", "ALL", "duplicates", "esp", ".", "when", "there", ...
train
https://github.com/TeamHG-Memex/MaybeDont/blob/34721f67b69d426adda324a0ed905d3860828af9/maybedont/predict.py#L199-L214
orcasgit/django-template-field
templatefield/admin.py
UnrenderedAdmin.get_queryset
def get_queryset(self, request): """ Remove ``show_rendered`` from the context, if it's there. """ qs = super(UnrenderedAdmin, self).get_queryset(request) if 'show_rendered' in qs.query.context: del qs.query.context['show_rendered'] return qs
python
def get_queryset(self, request): """ Remove ``show_rendered`` from the context, if it's there. """ qs = super(UnrenderedAdmin, self).get_queryset(request) if 'show_rendered' in qs.query.context: del qs.query.context['show_rendered'] return qs
[ "def", "get_queryset", "(", "self", ",", "request", ")", ":", "qs", "=", "super", "(", "UnrenderedAdmin", ",", "self", ")", ".", "get_queryset", "(", "request", ")", "if", "'show_rendered'", "in", "qs", ".", "query", ".", "context", ":", "del", "qs", "...
Remove ``show_rendered`` from the context, if it's there.
[ "Remove", "show_rendered", "from", "the", "context", "if", "it", "s", "there", "." ]
train
https://github.com/orcasgit/django-template-field/blob/1b72f1767edb3cfbb70fbcf07fa39a974995961c/templatefield/admin.py#L7-L12
pavetok/cases
cases/casegen.py
Cases.get_one
def get_one(self, cls=None, **kwargs): """Returns a one case.""" case = cls() if cls else self._CasesClass() for attr, value in kwargs.iteritems(): setattr(case, attr, value) return case
python
def get_one(self, cls=None, **kwargs): """Returns a one case.""" case = cls() if cls else self._CasesClass() for attr, value in kwargs.iteritems(): setattr(case, attr, value) return case
[ "def", "get_one", "(", "self", ",", "cls", "=", "None", ",", "*", "*", "kwargs", ")", ":", "case", "=", "cls", "(", ")", "if", "cls", "else", "self", ".", "_CasesClass", "(", ")", "for", "attr", ",", "value", "in", "kwargs", ".", "iteritems", "("...
Returns a one case.
[ "Returns", "a", "one", "case", "." ]
train
https://github.com/pavetok/cases/blob/967b47758e309fa59ad4fc35da072bb1196bda9b/cases/casegen.py#L20-L25
pavetok/cases
cases/casegen.py
Cases.get_each_choice
def get_each_choice(self, cls=None, **kwargs): """Returns a generator that generates positive cases by "each choice" algorithm. """ defaults = {attr: kwargs[attr][0] for attr in kwargs} for set_of_values in izip_longest(*kwargs.values()): case = cls() if cls else self._CasesClass() for attr, value in izip(kwargs.keys(), set_of_values): if value is None: value = defaults[attr] setattr(case, attr, value) yield case
python
def get_each_choice(self, cls=None, **kwargs): """Returns a generator that generates positive cases by "each choice" algorithm. """ defaults = {attr: kwargs[attr][0] for attr in kwargs} for set_of_values in izip_longest(*kwargs.values()): case = cls() if cls else self._CasesClass() for attr, value in izip(kwargs.keys(), set_of_values): if value is None: value = defaults[attr] setattr(case, attr, value) yield case
[ "def", "get_each_choice", "(", "self", ",", "cls", "=", "None", ",", "*", "*", "kwargs", ")", ":", "defaults", "=", "{", "attr", ":", "kwargs", "[", "attr", "]", "[", "0", "]", "for", "attr", "in", "kwargs", "}", "for", "set_of_values", "in", "izip...
Returns a generator that generates positive cases by "each choice" algorithm.
[ "Returns", "a", "generator", "that", "generates", "positive", "cases", "by", "each", "choice", "algorithm", "." ]
train
https://github.com/pavetok/cases/blob/967b47758e309fa59ad4fc35da072bb1196bda9b/cases/casegen.py#L27-L38
pavetok/cases
cases/casegen.py
Cases.get_pairwise
def get_pairwise(self, cls=None, **kwargs): """Returns a generator that generates positive cases by "pairwise" algorithm. """ for set_of_values in allpairs(kwargs.values()): case = cls() if cls else self._CasesClass() for attr, value in izip(kwargs.keys(), set_of_values): setattr(case, attr, value) yield case
python
def get_pairwise(self, cls=None, **kwargs): """Returns a generator that generates positive cases by "pairwise" algorithm. """ for set_of_values in allpairs(kwargs.values()): case = cls() if cls else self._CasesClass() for attr, value in izip(kwargs.keys(), set_of_values): setattr(case, attr, value) yield case
[ "def", "get_pairwise", "(", "self", ",", "cls", "=", "None", ",", "*", "*", "kwargs", ")", ":", "for", "set_of_values", "in", "allpairs", "(", "kwargs", ".", "values", "(", ")", ")", ":", "case", "=", "cls", "(", ")", "if", "cls", "else", "self", ...
Returns a generator that generates positive cases by "pairwise" algorithm.
[ "Returns", "a", "generator", "that", "generates", "positive", "cases", "by", "pairwise", "algorithm", "." ]
train
https://github.com/pavetok/cases/blob/967b47758e309fa59ad4fc35da072bb1196bda9b/cases/casegen.py#L40-L48
pavetok/cases
cases/casegen.py
Cases.get_negative
def get_negative(self, cls=None, **kwargs): """Returns a generator that generates negative cases by "each negative value in separate case" algorithm. """ for attr, set_of_values in kwargs.iteritems(): defaults = {key: kwargs[key][-1]["default"] for key in kwargs} defaults.pop(attr) for value in set_of_values[:-1]: case = cls() if cls else self._CasesClass() setattr(case, attr, value) for key in defaults: setattr(case, key, defaults[key]) yield case
python
def get_negative(self, cls=None, **kwargs): """Returns a generator that generates negative cases by "each negative value in separate case" algorithm. """ for attr, set_of_values in kwargs.iteritems(): defaults = {key: kwargs[key][-1]["default"] for key in kwargs} defaults.pop(attr) for value in set_of_values[:-1]: case = cls() if cls else self._CasesClass() setattr(case, attr, value) for key in defaults: setattr(case, key, defaults[key]) yield case
[ "def", "get_negative", "(", "self", ",", "cls", "=", "None", ",", "*", "*", "kwargs", ")", ":", "for", "attr", ",", "set_of_values", "in", "kwargs", ".", "iteritems", "(", ")", ":", "defaults", "=", "{", "key", ":", "kwargs", "[", "key", "]", "[", ...
Returns a generator that generates negative cases by "each negative value in separate case" algorithm.
[ "Returns", "a", "generator", "that", "generates", "negative", "cases", "by", "each", "negative", "value", "in", "separate", "case", "algorithm", "." ]
train
https://github.com/pavetok/cases/blob/967b47758e309fa59ad4fc35da072bb1196bda9b/cases/casegen.py#L50-L62
pavetok/cases
cases/casegen.py
Cases.get_mix_gen
def get_mix_gen(self, sample): """Returns function that returns sequence of characters of a given length from a given sample """ def mix(length): result = "".join(random.choice(sample) for _ in xrange(length)).strip() if len(result) == length: return result return mix(length) return mix
python
def get_mix_gen(self, sample): """Returns function that returns sequence of characters of a given length from a given sample """ def mix(length): result = "".join(random.choice(sample) for _ in xrange(length)).strip() if len(result) == length: return result return mix(length) return mix
[ "def", "get_mix_gen", "(", "self", ",", "sample", ")", ":", "def", "mix", "(", "length", ")", ":", "result", "=", "\"\"", ".", "join", "(", "random", ".", "choice", "(", "sample", ")", "for", "_", "in", "xrange", "(", "length", ")", ")", ".", "st...
Returns function that returns sequence of characters of a given length from a given sample
[ "Returns", "function", "that", "returns", "sequence", "of", "characters", "of", "a", "given", "length", "from", "a", "given", "sample" ]
train
https://github.com/pavetok/cases/blob/967b47758e309fa59ad4fc35da072bb1196bda9b/cases/casegen.py#L64-L73
cebel/pyctd
src/pyctd/manager/database.py
update
def update(connection=None, urls=None, force_download=False): """Updates CTD database :param iter[str] urls: list of urls to download :param str connection: custom database connection string :param bool force_download: force method to download """ db = DbManager(connection) db.db_import(urls=urls, force_download=force_download) db.session.close()
python
def update(connection=None, urls=None, force_download=False): """Updates CTD database :param iter[str] urls: list of urls to download :param str connection: custom database connection string :param bool force_download: force method to download """ db = DbManager(connection) db.db_import(urls=urls, force_download=force_download) db.session.close()
[ "def", "update", "(", "connection", "=", "None", ",", "urls", "=", "None", ",", "force_download", "=", "False", ")", ":", "db", "=", "DbManager", "(", "connection", ")", "db", ".", "db_import", "(", "urls", "=", "urls", ",", "force_download", "=", "for...
Updates CTD database :param iter[str] urls: list of urls to download :param str connection: custom database connection string :param bool force_download: force method to download
[ "Updates", "CTD", "database" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L442-L451
cebel/pyctd
src/pyctd/manager/database.py
set_mysql_connection
def set_mysql_connection(host='localhost', user='pyctd_user', password='pyctd_passwd', db='pyctd', charset='utf8'): """Sets the connection using MySQL Parameters""" set_connection('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'.format( host=host, user=user, passwd=password, db=db, charset=charset) )
python
def set_mysql_connection(host='localhost', user='pyctd_user', password='pyctd_passwd', db='pyctd', charset='utf8'): """Sets the connection using MySQL Parameters""" set_connection('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'.format( host=host, user=user, passwd=password, db=db, charset=charset) )
[ "def", "set_mysql_connection", "(", "host", "=", "'localhost'", ",", "user", "=", "'pyctd_user'", ",", "password", "=", "'pyctd_passwd'", ",", "db", "=", "'pyctd'", ",", "charset", "=", "'utf8'", ")", ":", "set_connection", "(", "'mysql+pymysql://{user}:{passwd}@{...
Sets the connection using MySQL Parameters
[ "Sets", "the", "connection", "using", "MySQL", "Parameters" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L454-L462
cebel/pyctd
src/pyctd/manager/database.py
set_connection
def set_connection(connection=defaults.sqlalchemy_connection_string_default): """Set the connection string for SQLAlchemy :param str connection: SQLAlchemy connection string """ cfp = defaults.config_file_path config = RawConfigParser() if not os.path.exists(cfp): with open(cfp, 'w') as config_file: config['database'] = {'sqlalchemy_connection_string': connection} config.write(config_file) log.info('create configuration file %s', cfp) else: config.read(cfp) config.set('database', 'sqlalchemy_connection_string', connection) with open(cfp, 'w') as configfile: config.write(configfile)
python
def set_connection(connection=defaults.sqlalchemy_connection_string_default): """Set the connection string for SQLAlchemy :param str connection: SQLAlchemy connection string """ cfp = defaults.config_file_path config = RawConfigParser() if not os.path.exists(cfp): with open(cfp, 'w') as config_file: config['database'] = {'sqlalchemy_connection_string': connection} config.write(config_file) log.info('create configuration file %s', cfp) else: config.read(cfp) config.set('database', 'sqlalchemy_connection_string', connection) with open(cfp, 'w') as configfile: config.write(configfile)
[ "def", "set_connection", "(", "connection", "=", "defaults", ".", "sqlalchemy_connection_string_default", ")", ":", "cfp", "=", "defaults", ".", "config_file_path", "config", "=", "RawConfigParser", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", ...
Set the connection string for SQLAlchemy :param str connection: SQLAlchemy connection string
[ "Set", "the", "connection", "string", "for", "SQLAlchemy" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L470-L487
cebel/pyctd
src/pyctd/manager/database.py
BaseDbManager.set_connection_string_by_user_input
def set_connection_string_by_user_input(self): """Prompts the user to input a connection string""" user_connection = input( bcolors.WARNING + "\nFor any reason connection to " + bcolors.ENDC + bcolors.FAIL + "{}".format(self.connection) + bcolors.ENDC + bcolors.WARNING + " is not possible.\n\n" + bcolors.ENDC + "For more information about SQLAlchemy connection strings go to:\n" + "http://docs.sqlalchemy.org/en/latest/core/engines.html\n\n" "Please insert a valid connection string:\n" + bcolors.UNDERLINE + "Examples:\n\n" + bcolors.ENDC + "MySQL (recommended):\n" + bcolors.OKGREEN + "\tmysql+pymysql://user:passwd@localhost/database?charset=utf8\n" + bcolors.ENDC + "PostgreSQL:\n" + bcolors.OKGREEN + "\tpostgresql://scott:tiger@localhost/mydatabase\n" + bcolors.ENDC + "MsSQL (pyodbc have to be installed):\n" + bcolors.OKGREEN + "\tmssql+pyodbc://user:passwd@database\n" + bcolors.ENDC + "SQLite (always works):\n" + " - Linux:\n" + bcolors.OKGREEN + "\tsqlite:////absolute/path/to/database.db\n" + bcolors.ENDC + " - Windows:\n" + bcolors.OKGREEN + "\tsqlite:///C:\\path\\to\\database.db\n" + bcolors.ENDC + "Oracle:\n" + bcolors.OKGREEN + "\toracle://user:passwd@127.0.0.1:1521/database\n\n" + bcolors.ENDC + "[RETURN] for standard connection {}:\n".format(defaults.sqlalchemy_connection_string_default) ) if not (user_connection or user_connection.strip()): user_connection = defaults.sqlalchemy_connection_string_default set_connection(user_connection.strip())
python
def set_connection_string_by_user_input(self): """Prompts the user to input a connection string""" user_connection = input( bcolors.WARNING + "\nFor any reason connection to " + bcolors.ENDC + bcolors.FAIL + "{}".format(self.connection) + bcolors.ENDC + bcolors.WARNING + " is not possible.\n\n" + bcolors.ENDC + "For more information about SQLAlchemy connection strings go to:\n" + "http://docs.sqlalchemy.org/en/latest/core/engines.html\n\n" "Please insert a valid connection string:\n" + bcolors.UNDERLINE + "Examples:\n\n" + bcolors.ENDC + "MySQL (recommended):\n" + bcolors.OKGREEN + "\tmysql+pymysql://user:passwd@localhost/database?charset=utf8\n" + bcolors.ENDC + "PostgreSQL:\n" + bcolors.OKGREEN + "\tpostgresql://scott:tiger@localhost/mydatabase\n" + bcolors.ENDC + "MsSQL (pyodbc have to be installed):\n" + bcolors.OKGREEN + "\tmssql+pyodbc://user:passwd@database\n" + bcolors.ENDC + "SQLite (always works):\n" + " - Linux:\n" + bcolors.OKGREEN + "\tsqlite:////absolute/path/to/database.db\n" + bcolors.ENDC + " - Windows:\n" + bcolors.OKGREEN + "\tsqlite:///C:\\path\\to\\database.db\n" + bcolors.ENDC + "Oracle:\n" + bcolors.OKGREEN + "\toracle://user:passwd@127.0.0.1:1521/database\n\n" + bcolors.ENDC + "[RETURN] for standard connection {}:\n".format(defaults.sqlalchemy_connection_string_default) ) if not (user_connection or user_connection.strip()): user_connection = defaults.sqlalchemy_connection_string_default set_connection(user_connection.strip())
[ "def", "set_connection_string_by_user_input", "(", "self", ")", ":", "user_connection", "=", "input", "(", "bcolors", ".", "WARNING", "+", "\"\\nFor any reason connection to \"", "+", "bcolors", ".", "ENDC", "+", "bcolors", ".", "FAIL", "+", "\"{}\"", ".", "format...
Prompts the user to input a connection string
[ "Prompts", "the", "user", "to", "input", "a", "connection", "string" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L94-L121
cebel/pyctd
src/pyctd/manager/database.py
BaseDbManager.create_all
def create_all(self, checkfirst=True): """Creates all tables from models in the database :param bool checkfirst: Check if tables already exists """ log.info('creating tables in %s', self.engine.url) models.Base.metadata.create_all(self.engine, checkfirst=checkfirst)
python
def create_all(self, checkfirst=True): """Creates all tables from models in the database :param bool checkfirst: Check if tables already exists """ log.info('creating tables in %s', self.engine.url) models.Base.metadata.create_all(self.engine, checkfirst=checkfirst)
[ "def", "create_all", "(", "self", ",", "checkfirst", "=", "True", ")", ":", "log", ".", "info", "(", "'creating tables in %s'", ",", "self", ".", "engine", ".", "url", ")", "models", ".", "Base", ".", "metadata", ".", "create_all", "(", "self", ".", "e...
Creates all tables from models in the database :param bool checkfirst: Check if tables already exists
[ "Creates", "all", "tables", "from", "models", "in", "the", "database", ":", "param", "bool", "checkfirst", ":", "Check", "if", "tables", "already", "exists" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L123-L129
cebel/pyctd
src/pyctd/manager/database.py
BaseDbManager.drop_all
def drop_all(self): """Drops all tables in the database""" log.info('dropping tables in %s', self.engine.url) self.session.commit() models.Base.metadata.drop_all(self.engine) self.session.commit()
python
def drop_all(self): """Drops all tables in the database""" log.info('dropping tables in %s', self.engine.url) self.session.commit() models.Base.metadata.drop_all(self.engine) self.session.commit()
[ "def", "drop_all", "(", "self", ")", ":", "log", ".", "info", "(", "'dropping tables in %s'", ",", "self", ".", "engine", ".", "url", ")", "self", ".", "session", ".", "commit", "(", ")", "models", ".", "Base", ".", "metadata", ".", "drop_all", "(", ...
Drops all tables in the database
[ "Drops", "all", "tables", "in", "the", "database" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L131-L136
cebel/pyctd
src/pyctd/manager/database.py
DbManager.db_import
def db_import(self, urls=None, force_download=False): """Updates the CTD database 1. downloads all files from CTD 2. drops all tables in database 3. creates all tables in database 4. import all data from CTD files :param iter[str] urls: An iterable of URL strings :param bool force_download: force method to download """ if not urls: urls = [ defaults.url_base + table_conf.tables[model]['file_name'] for model in table_conf.tables ] log.info('Update CTD database from %s', urls) self.drop_all() self.download_urls(urls=urls, force_download=force_download) self.create_all() self.import_tables() self.session.close()
python
def db_import(self, urls=None, force_download=False): """Updates the CTD database 1. downloads all files from CTD 2. drops all tables in database 3. creates all tables in database 4. import all data from CTD files :param iter[str] urls: An iterable of URL strings :param bool force_download: force method to download """ if not urls: urls = [ defaults.url_base + table_conf.tables[model]['file_name'] for model in table_conf.tables ] log.info('Update CTD database from %s', urls) self.drop_all() self.download_urls(urls=urls, force_download=force_download) self.create_all() self.import_tables() self.session.close()
[ "def", "db_import", "(", "self", ",", "urls", "=", "None", ",", "force_download", "=", "False", ")", ":", "if", "not", "urls", ":", "urls", "=", "[", "defaults", ".", "url_base", "+", "table_conf", ".", "tables", "[", "model", "]", "[", "'file_name'", ...
Updates the CTD database 1. downloads all files from CTD 2. drops all tables in database 3. creates all tables in database 4. import all data from CTD files :param iter[str] urls: An iterable of URL strings :param bool force_download: force method to download
[ "Updates", "the", "CTD", "database", "1", ".", "downloads", "all", "files", "from", "CTD", "2", ".", "drops", "all", "tables", "in", "database", "3", ".", "creates", "all", "tables", "in", "database", "4", ".", "import", "all", "data", "from", "CTD", "...
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L155-L178
cebel/pyctd
src/pyctd/manager/database.py
DbManager.mapper
def mapper(self): """returns a dictionary with keys of pyctd.manager.table_con.domains_to_map and pandas.DataFrame as values. DataFrames column names: - domain_id (represents the domain identifier of e.g. chemical) - domain__id (represents the primary key in domain table) :return: dict of pandas DataFrames (keys:domain_name, values:DataFrame) :rtype: dict of pandas.DataFrame """ if not self.__mapper: for model in table_conf.models_to_map: domain = model.table_suffix tab_conf = table_conf.tables[model] file_path = os.path.join(self.pyctd_data_dir, tab_conf['file_name']) col_name_in_file, col_name_in_db = tab_conf['domain_id_column'] column_index = self.get_index_of_column(col_name_in_file, file_path) df = pd.read_table( file_path, names=[col_name_in_db], header=None, usecols=[column_index], comment='#', index_col=False, dtype=self.get_dtypes(model) ) if domain == 'chemical': df[col_name_in_db] = df[col_name_in_db].str.replace('MESH:', '').str.strip() df[domain + '__id'] = df.index + 1 self.__mapper[domain] = df return self.__mapper
python
def mapper(self): """returns a dictionary with keys of pyctd.manager.table_con.domains_to_map and pandas.DataFrame as values. DataFrames column names: - domain_id (represents the domain identifier of e.g. chemical) - domain__id (represents the primary key in domain table) :return: dict of pandas DataFrames (keys:domain_name, values:DataFrame) :rtype: dict of pandas.DataFrame """ if not self.__mapper: for model in table_conf.models_to_map: domain = model.table_suffix tab_conf = table_conf.tables[model] file_path = os.path.join(self.pyctd_data_dir, tab_conf['file_name']) col_name_in_file, col_name_in_db = tab_conf['domain_id_column'] column_index = self.get_index_of_column(col_name_in_file, file_path) df = pd.read_table( file_path, names=[col_name_in_db], header=None, usecols=[column_index], comment='#', index_col=False, dtype=self.get_dtypes(model) ) if domain == 'chemical': df[col_name_in_db] = df[col_name_in_db].str.replace('MESH:', '').str.strip() df[domain + '__id'] = df.index + 1 self.__mapper[domain] = df return self.__mapper
[ "def", "mapper", "(", "self", ")", ":", "if", "not", "self", ".", "__mapper", ":", "for", "model", "in", "table_conf", ".", "models_to_map", ":", "domain", "=", "model", ".", "table_suffix", "tab_conf", "=", "table_conf", ".", "tables", "[", "model", "]"...
returns a dictionary with keys of pyctd.manager.table_con.domains_to_map and pandas.DataFrame as values. DataFrames column names: - domain_id (represents the domain identifier of e.g. chemical) - domain__id (represents the primary key in domain table) :return: dict of pandas DataFrames (keys:domain_name, values:DataFrame) :rtype: dict of pandas.DataFrame
[ "returns", "a", "dictionary", "with", "keys", "of", "pyctd", ".", "manager", ".", "table_con", ".", "domains_to_map", "and", "pandas", ".", "DataFrame", "as", "values", ".", "DataFrames", "column", "names", ":" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L181-L218
cebel/pyctd
src/pyctd/manager/database.py
DbManager.import_tables
def import_tables(self, only_tables=None, exclude_tables=None): """Imports all data in database tables :param set[str] only_tables: names of tables to be imported :param set[str] exclude_tables: names of tables to be excluded """ for table in self.tables: if only_tables is not None and table.name not in only_tables: continue if exclude_tables is not None and table.name in exclude_tables: continue self.import_table(table)
python
def import_tables(self, only_tables=None, exclude_tables=None): """Imports all data in database tables :param set[str] only_tables: names of tables to be imported :param set[str] exclude_tables: names of tables to be excluded """ for table in self.tables: if only_tables is not None and table.name not in only_tables: continue if exclude_tables is not None and table.name in exclude_tables: continue self.import_table(table)
[ "def", "import_tables", "(", "self", ",", "only_tables", "=", "None", ",", "exclude_tables", "=", "None", ")", ":", "for", "table", "in", "self", ".", "tables", ":", "if", "only_tables", "is", "not", "None", "and", "table", ".", "name", "not", "in", "o...
Imports all data in database tables :param set[str] only_tables: names of tables to be imported :param set[str] exclude_tables: names of tables to be excluded
[ "Imports", "all", "data", "in", "database", "tables" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L220-L232
cebel/pyctd
src/pyctd/manager/database.py
DbManager.get_index_of_column
def get_index_of_column(cls, column, file_path): """Get index of a specific column name in a CTD file :param column: :param file_path: :return: Optional[int] """ columns = cls.get_column_names_from_file(file_path) if column in columns: return columns.index(column)
python
def get_index_of_column(cls, column, file_path): """Get index of a specific column name in a CTD file :param column: :param file_path: :return: Optional[int] """ columns = cls.get_column_names_from_file(file_path) if column in columns: return columns.index(column)
[ "def", "get_index_of_column", "(", "cls", ",", "column", ",", "file_path", ")", ":", "columns", "=", "cls", ".", "get_column_names_from_file", "(", "file_path", ")", "if", "column", "in", "columns", ":", "return", "columns", ".", "index", "(", "column", ")" ...
Get index of a specific column name in a CTD file :param column: :param file_path: :return: Optional[int]
[ "Get", "index", "of", "a", "specific", "column", "name", "in", "a", "CTD", "file", ":", "param", "column", ":", ":", "param", "file_path", ":", ":", "return", ":", "Optional", "[", "int", "]" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L235-L244
cebel/pyctd
src/pyctd/manager/database.py
DbManager.get_index_and_columns_order
def get_index_and_columns_order(cls, columns_in_file_expected, columns_dict, file_path): """ :param columns_in_file_expected: :param columns_dict: :param file_path: :rtype: tuple[list,list] """ use_columns_with_index = [] column_names_in_db = [] column_names_from_file = cls.get_column_names_from_file(file_path) if not set(columns_in_file_expected).issubset(column_names_from_file): log.exception( '%s columns are not a subset of columns %s in file %s', columns_in_file_expected, column_names_from_file, file_path ) else: for index, column in enumerate(column_names_from_file): if column in columns_dict: use_columns_with_index.append(index) column_names_in_db.append(columns_dict[column]) return use_columns_with_index, column_names_in_db
python
def get_index_and_columns_order(cls, columns_in_file_expected, columns_dict, file_path): """ :param columns_in_file_expected: :param columns_dict: :param file_path: :rtype: tuple[list,list] """ use_columns_with_index = [] column_names_in_db = [] column_names_from_file = cls.get_column_names_from_file(file_path) if not set(columns_in_file_expected).issubset(column_names_from_file): log.exception( '%s columns are not a subset of columns %s in file %s', columns_in_file_expected, column_names_from_file, file_path ) else: for index, column in enumerate(column_names_from_file): if column in columns_dict: use_columns_with_index.append(index) column_names_in_db.append(columns_dict[column]) return use_columns_with_index, column_names_in_db
[ "def", "get_index_and_columns_order", "(", "cls", ",", "columns_in_file_expected", ",", "columns_dict", ",", "file_path", ")", ":", "use_columns_with_index", "=", "[", "]", "column_names_in_db", "=", "[", "]", "column_names_from_file", "=", "cls", ".", "get_column_nam...
:param columns_in_file_expected: :param columns_dict: :param file_path: :rtype: tuple[list,list]
[ ":", "param", "columns_in_file_expected", ":", ":", "param", "columns_dict", ":", ":", "param", "file_path", ":", ":", "rtype", ":", "tuple", "[", "list", "list", "]" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L247-L271
cebel/pyctd
src/pyctd/manager/database.py
DbManager.import_table
def import_table(self, table): """import table by Table object :param `manager.table_conf.Table` table: Table object """ file_path = os.path.join(self.pyctd_data_dir, table.file_name) log.info('importing %s data into table %s', file_path, table.name) table_import_timer = time.time() use_columns_with_index, column_names_in_db = self.get_index_and_columns_order( table.columns_in_file_expected, table.columns_dict, file_path ) self.import_table_in_db(file_path, use_columns_with_index, column_names_in_db, table) for column_in_file, column_in_one2many_table in table.one_to_many: o2m_column_index = self.get_index_of_column(column_in_file, file_path) self.import_one_to_many(file_path, o2m_column_index, table, column_in_one2many_table) log.info('done importing %s in %.2f seconds', table.name, time.time() - table_import_timer)
python
def import_table(self, table): """import table by Table object :param `manager.table_conf.Table` table: Table object """ file_path = os.path.join(self.pyctd_data_dir, table.file_name) log.info('importing %s data into table %s', file_path, table.name) table_import_timer = time.time() use_columns_with_index, column_names_in_db = self.get_index_and_columns_order( table.columns_in_file_expected, table.columns_dict, file_path ) self.import_table_in_db(file_path, use_columns_with_index, column_names_in_db, table) for column_in_file, column_in_one2many_table in table.one_to_many: o2m_column_index = self.get_index_of_column(column_in_file, file_path) self.import_one_to_many(file_path, o2m_column_index, table, column_in_one2many_table) log.info('done importing %s in %.2f seconds', table.name, time.time() - table_import_timer)
[ "def", "import_table", "(", "self", ",", "table", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "pyctd_data_dir", ",", "table", ".", "file_name", ")", "log", ".", "info", "(", "'importing %s data into table %s'", ",", "file_...
import table by Table object :param `manager.table_conf.Table` table: Table object
[ "import", "table", "by", "Table", "object", ":", "param", "manager", ".", "table_conf", ".", "Table", "table", ":", "Table", "object" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L273-L295
cebel/pyctd
src/pyctd/manager/database.py
DbManager.import_one_to_many
def import_one_to_many(self, file_path, column_index, parent_table, column_in_one2many_table): """ :param file_path: :param column_index: :param parent_table: :param column_in_one2many_table: """ chunks = pd.read_table( file_path, usecols=[column_index], header=None, comment='#', index_col=False, chunksize=1000000, dtype=self.get_dtypes(parent_table.model) ) for chunk in chunks: child_values = [] parent_id_values = [] chunk.dropna(inplace=True) chunk.index += 1 for parent_id, values in chunk.iterrows(): entry = values[column_index] if not isinstance(entry, str): entry = str(entry) for value in entry.split("|"): parent_id_values.append(parent_id) child_values.append(value.strip()) parent_id_column_name = parent_table.name + '__id' o2m_table_name = defaults.TABLE_PREFIX + parent_table.name + '__' + column_in_one2many_table pd.DataFrame({ parent_id_column_name: parent_id_values, column_in_one2many_table: child_values }).to_sql(name=o2m_table_name, if_exists='append', con=self.engine, index=False)
python
def import_one_to_many(self, file_path, column_index, parent_table, column_in_one2many_table): """ :param file_path: :param column_index: :param parent_table: :param column_in_one2many_table: """ chunks = pd.read_table( file_path, usecols=[column_index], header=None, comment='#', index_col=False, chunksize=1000000, dtype=self.get_dtypes(parent_table.model) ) for chunk in chunks: child_values = [] parent_id_values = [] chunk.dropna(inplace=True) chunk.index += 1 for parent_id, values in chunk.iterrows(): entry = values[column_index] if not isinstance(entry, str): entry = str(entry) for value in entry.split("|"): parent_id_values.append(parent_id) child_values.append(value.strip()) parent_id_column_name = parent_table.name + '__id' o2m_table_name = defaults.TABLE_PREFIX + parent_table.name + '__' + column_in_one2many_table pd.DataFrame({ parent_id_column_name: parent_id_values, column_in_one2many_table: child_values }).to_sql(name=o2m_table_name, if_exists='append', con=self.engine, index=False)
[ "def", "import_one_to_many", "(", "self", ",", "file_path", ",", "column_index", ",", "parent_table", ",", "column_in_one2many_table", ")", ":", "chunks", "=", "pd", ".", "read_table", "(", "file_path", ",", "usecols", "=", "[", "column_index", "]", ",", "head...
:param file_path: :param column_index: :param parent_table: :param column_in_one2many_table:
[ ":", "param", "file_path", ":", ":", "param", "column_index", ":", ":", "param", "parent_table", ":", ":", "param", "column_in_one2many_table", ":" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L297-L336
cebel/pyctd
src/pyctd/manager/database.py
DbManager.import_table_in_db
def import_table_in_db(self, file_path, use_columns_with_index, column_names_in_db, table): """Imports data from CTD file into database :param str file_path: path to file :param list[int] use_columns_with_index: list of column indices in file :param list[str] column_names_in_db: list of column names (have to fit to models except domain_id column name) :param table: `manager.table.Table` object """ chunks = pd.read_table( file_path, usecols=use_columns_with_index, names=column_names_in_db, header=None, comment='#', index_col=False, chunksize=1000000, dtype=self.get_dtypes(table.model) ) for chunk in chunks: # this is an evil hack because CTD is not using the MESH prefix in this table if table.name == 'exposure_event': chunk.disease_id = 'MESH:' + chunk.disease_id chunk['id'] = chunk.index + 1 if table.model not in table_conf.models_to_map: for model in table_conf.models_to_map: domain = model.table_suffix domain_id = domain + "_id" if domain_id in column_names_in_db: chunk = pd.merge(chunk, self.mapper[domain], on=domain_id, how='left') del chunk[domain_id] chunk.set_index('id', inplace=True) table_with_prefix = defaults.TABLE_PREFIX + table.name chunk.to_sql(name=table_with_prefix, if_exists='append', con=self.engine) del chunks
python
def import_table_in_db(self, file_path, use_columns_with_index, column_names_in_db, table): """Imports data from CTD file into database :param str file_path: path to file :param list[int] use_columns_with_index: list of column indices in file :param list[str] column_names_in_db: list of column names (have to fit to models except domain_id column name) :param table: `manager.table.Table` object """ chunks = pd.read_table( file_path, usecols=use_columns_with_index, names=column_names_in_db, header=None, comment='#', index_col=False, chunksize=1000000, dtype=self.get_dtypes(table.model) ) for chunk in chunks: # this is an evil hack because CTD is not using the MESH prefix in this table if table.name == 'exposure_event': chunk.disease_id = 'MESH:' + chunk.disease_id chunk['id'] = chunk.index + 1 if table.model not in table_conf.models_to_map: for model in table_conf.models_to_map: domain = model.table_suffix domain_id = domain + "_id" if domain_id in column_names_in_db: chunk = pd.merge(chunk, self.mapper[domain], on=domain_id, how='left') del chunk[domain_id] chunk.set_index('id', inplace=True) table_with_prefix = defaults.TABLE_PREFIX + table.name chunk.to_sql(name=table_with_prefix, if_exists='append', con=self.engine) del chunks
[ "def", "import_table_in_db", "(", "self", ",", "file_path", ",", "use_columns_with_index", ",", "column_names_in_db", ",", "table", ")", ":", "chunks", "=", "pd", ".", "read_table", "(", "file_path", ",", "usecols", "=", "use_columns_with_index", ",", "names", "...
Imports data from CTD file into database :param str file_path: path to file :param list[int] use_columns_with_index: list of column indices in file :param list[str] column_names_in_db: list of column names (have to fit to models except domain_id column name) :param table: `manager.table.Table` object
[ "Imports", "data", "from", "CTD", "file", "into", "database", ":", "param", "str", "file_path", ":", "path", "to", "file", ":", "param", "list", "[", "int", "]", "use_columns_with_index", ":", "list", "of", "column", "indices", "in", "file", ":", "param", ...
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L353-L391
cebel/pyctd
src/pyctd/manager/database.py
DbManager.get_column_names_from_file
def get_column_names_from_file(file_path): """returns column names from CTD download file :param str file_path: path to CTD download file """ if file_path.endswith('.gz'): file_handler = io.TextIOWrapper(io.BufferedReader(gzip.open(file_path))) else: file_handler = open(file_path, 'r') fields_line = False with file_handler as file: for line in file: line = line.strip() if not fields_line and re.search('#\s*Fields\s*:$', line): fields_line = True elif fields_line and not (line == '' or line == '#'): return [column.strip() for column in line[1:].split("\t")]
python
def get_column_names_from_file(file_path): """returns column names from CTD download file :param str file_path: path to CTD download file """ if file_path.endswith('.gz'): file_handler = io.TextIOWrapper(io.BufferedReader(gzip.open(file_path))) else: file_handler = open(file_path, 'r') fields_line = False with file_handler as file: for line in file: line = line.strip() if not fields_line and re.search('#\s*Fields\s*:$', line): fields_line = True elif fields_line and not (line == '' or line == '#'): return [column.strip() for column in line[1:].split("\t")]
[ "def", "get_column_names_from_file", "(", "file_path", ")", ":", "if", "file_path", ".", "endswith", "(", "'.gz'", ")", ":", "file_handler", "=", "io", ".", "TextIOWrapper", "(", "io", ".", "BufferedReader", "(", "gzip", ".", "open", "(", "file_path", ")", ...
returns column names from CTD download file :param str file_path: path to CTD download file
[ "returns", "column", "names", "from", "CTD", "download", "file" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L394-L411
cebel/pyctd
src/pyctd/manager/database.py
DbManager.download_urls
def download_urls(cls, urls, force_download=False): """Downloads all CTD URLs that don't exist :param iter[str] urls: iterable of URL of CTD :param bool force_download: force method to download """ for url in urls: file_path = cls.get_path_to_file_from_url(url) if os.path.exists(file_path) and not force_download: log.info('already downloaded %s to %s', url, file_path) else: log.info('downloading %s to %s', url, file_path) download_timer = time.time() urlretrieve(url, file_path) log.info('downloaded in %.2f seconds', time.time() - download_timer)
python
def download_urls(cls, urls, force_download=False): """Downloads all CTD URLs that don't exist :param iter[str] urls: iterable of URL of CTD :param bool force_download: force method to download """ for url in urls: file_path = cls.get_path_to_file_from_url(url) if os.path.exists(file_path) and not force_download: log.info('already downloaded %s to %s', url, file_path) else: log.info('downloading %s to %s', url, file_path) download_timer = time.time() urlretrieve(url, file_path) log.info('downloaded in %.2f seconds', time.time() - download_timer)
[ "def", "download_urls", "(", "cls", ",", "urls", ",", "force_download", "=", "False", ")", ":", "for", "url", "in", "urls", ":", "file_path", "=", "cls", ".", "get_path_to_file_from_url", "(", "url", ")", "if", "os", ".", "path", ".", "exists", "(", "f...
Downloads all CTD URLs that don't exist :param iter[str] urls: iterable of URL of CTD :param bool force_download: force method to download
[ "Downloads", "all", "CTD", "URLs", "that", "don", "t", "exist", ":", "param", "iter", "[", "str", "]", "urls", ":", "iterable", "of", "URL", "of", "CTD", ":", "param", "bool", "force_download", ":", "force", "method", "to", "download" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L414-L429
fedora-infra/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/bz.py
comma_join
def comma_join(fields, oxford=True): """ Join together words. """ def fmt(field): return "'%s'" % field if not fields: return "nothing" elif len(fields) == 1: return fmt(fields[0]) elif len(fields) == 2: return " and ".join([fmt(f) for f in fields]) else: result = ", ".join([fmt(f) for f in fields[:-1]]) if oxford: result += "," result += " and %s" % fmt(fields[-1]) return result
python
def comma_join(fields, oxford=True): """ Join together words. """ def fmt(field): return "'%s'" % field if not fields: return "nothing" elif len(fields) == 1: return fmt(fields[0]) elif len(fields) == 2: return " and ".join([fmt(f) for f in fields]) else: result = ", ".join([fmt(f) for f in fields[:-1]]) if oxford: result += "," result += " and %s" % fmt(fields[-1]) return result
[ "def", "comma_join", "(", "fields", ",", "oxford", "=", "True", ")", ":", "def", "fmt", "(", "field", ")", ":", "return", "\"'%s'\"", "%", "field", "if", "not", "fields", ":", "return", "\"nothing\"", "elif", "len", "(", "fields", ")", "==", "1", ":"...
Join together words.
[ "Join", "together", "words", "." ]
train
https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/blob/85bf4162692e3042c7dbcc12dfafaca4764b4ae6/fedmsg_meta_fedora_infrastructure/bz.py#L32-L49
fedora-infra/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/fasshim.py
avatar_url_from_openid
def avatar_url_from_openid(openid, size=64, default='retro', dns=False): """ Our own implementation since fas doesn't support this nicely yet. """ if dns: # This makes an extra DNS SRV query, which can slow down our webapps. # It is necessary for libravatar federation, though. import libravatar return libravatar.libravatar_url( openid=openid, size=size, default=default, ) else: params = _ordered_query_params([('s', size), ('d', default)]) query = parse.urlencode(params) hash = sha256(openid.encode('utf-8')).hexdigest() return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
python
def avatar_url_from_openid(openid, size=64, default='retro', dns=False): """ Our own implementation since fas doesn't support this nicely yet. """ if dns: # This makes an extra DNS SRV query, which can slow down our webapps. # It is necessary for libravatar federation, though. import libravatar return libravatar.libravatar_url( openid=openid, size=size, default=default, ) else: params = _ordered_query_params([('s', size), ('d', default)]) query = parse.urlencode(params) hash = sha256(openid.encode('utf-8')).hexdigest() return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
[ "def", "avatar_url_from_openid", "(", "openid", ",", "size", "=", "64", ",", "default", "=", "'retro'", ",", "dns", "=", "False", ")", ":", "if", "dns", ":", "# This makes an extra DNS SRV query, which can slow down our webapps.", "# It is necessary for libravatar federat...
Our own implementation since fas doesn't support this nicely yet.
[ "Our", "own", "implementation", "since", "fas", "doesn", "t", "support", "this", "nicely", "yet", "." ]
train
https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/blob/85bf4162692e3042c7dbcc12dfafaca4764b4ae6/fedmsg_meta_fedora_infrastructure/fasshim.py#L52-L70
fedora-infra/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/fasshim.py
avatar_url_from_email
def avatar_url_from_email(email, size=64, default='retro', dns=False): """ Our own implementation since fas doesn't support this nicely yet. """ if dns: # This makes an extra DNS SRV query, which can slow down our webapps. # It is necessary for libravatar federation, though. import libravatar return libravatar.libravatar_url( email=email, size=size, default=default, ) else: params = _ordered_query_params([('s', size), ('d', default)]) query = parse.urlencode(params) hash = md5(email.encode('utf-8')).hexdigest() return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
python
def avatar_url_from_email(email, size=64, default='retro', dns=False): """ Our own implementation since fas doesn't support this nicely yet. """ if dns: # This makes an extra DNS SRV query, which can slow down our webapps. # It is necessary for libravatar federation, though. import libravatar return libravatar.libravatar_url( email=email, size=size, default=default, ) else: params = _ordered_query_params([('s', size), ('d', default)]) query = parse.urlencode(params) hash = md5(email.encode('utf-8')).hexdigest() return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
[ "def", "avatar_url_from_email", "(", "email", ",", "size", "=", "64", ",", "default", "=", "'retro'", ",", "dns", "=", "False", ")", ":", "if", "dns", ":", "# This makes an extra DNS SRV query, which can slow down our webapps.", "# It is necessary for libravatar federatio...
Our own implementation since fas doesn't support this nicely yet.
[ "Our", "own", "implementation", "since", "fas", "doesn", "t", "support", "this", "nicely", "yet", "." ]
train
https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/blob/85bf4162692e3042c7dbcc12dfafaca4764b4ae6/fedmsg_meta_fedora_infrastructure/fasshim.py#L73-L91
snipsco/snipsmanagercore
snipsmanagercore/thread_handler.py
ThreadHandler.run
def run(self, target, args=()): """ Run a function in a separate thread. :param target: the function to run. :param args: the parameters to pass to the function. """ run_event = threading.Event() run_event.set() thread = threading.Thread(target=target, args=args + (run_event, )) self.thread_pool.append(thread) self.run_events.append(run_event) thread.start()
python
def run(self, target, args=()): """ Run a function in a separate thread. :param target: the function to run. :param args: the parameters to pass to the function. """ run_event = threading.Event() run_event.set() thread = threading.Thread(target=target, args=args + (run_event, )) self.thread_pool.append(thread) self.run_events.append(run_event) thread.start()
[ "def", "run", "(", "self", ",", "target", ",", "args", "=", "(", ")", ")", ":", "run_event", "=", "threading", ".", "Event", "(", ")", "run_event", ".", "set", "(", ")", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "target", ",", ...
Run a function in a separate thread. :param target: the function to run. :param args: the parameters to pass to the function.
[ "Run", "a", "function", "in", "a", "separate", "thread", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/thread_handler.py#L19-L30
snipsco/snipsmanagercore
snipsmanagercore/thread_handler.py
ThreadHandler.stop
def stop(self): """ Stop all functions running in the thread handler.""" for run_event in self.run_events: run_event.clear() for thread in self.thread_pool: thread.join()
python
def stop(self): """ Stop all functions running in the thread handler.""" for run_event in self.run_events: run_event.clear() for thread in self.thread_pool: thread.join()
[ "def", "stop", "(", "self", ")", ":", "for", "run_event", "in", "self", ".", "run_events", ":", "run_event", ".", "clear", "(", ")", "for", "thread", "in", "self", ".", "thread_pool", ":", "thread", ".", "join", "(", ")" ]
Stop all functions running in the thread handler.
[ "Stop", "all", "functions", "running", "in", "the", "thread", "handler", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/thread_handler.py#L42-L48
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/noise.py
NoiseGenerator.generate
def generate(self, labels, split_idx): """Generate peak-specific noise abstract method, must be reimplemented in a subclass. :param tuple labels: Dimension labels of a peak. :param int split_idx: Index specifying which peak list split parameters to use. :return: List of noise values for dimensions ordered as they appear in a peak. :rtype: :py:class:`list` """ atom_labels = [label[0] for label in labels] noise = [] distribution_function = distributions[self.distribution_name]["function"] for label in atom_labels: params = [self.parameters["{}_{}".format(label, param)][split_idx] for param in self.distribution_parameter_names] if None in params: dim_noise = 0.0 else: try: dim_noise = distribution_function(*params) except ValueError: raise ValueError noise.append(dim_noise) return noise
python
def generate(self, labels, split_idx): """Generate peak-specific noise abstract method, must be reimplemented in a subclass. :param tuple labels: Dimension labels of a peak. :param int split_idx: Index specifying which peak list split parameters to use. :return: List of noise values for dimensions ordered as they appear in a peak. :rtype: :py:class:`list` """ atom_labels = [label[0] for label in labels] noise = [] distribution_function = distributions[self.distribution_name]["function"] for label in atom_labels: params = [self.parameters["{}_{}".format(label, param)][split_idx] for param in self.distribution_parameter_names] if None in params: dim_noise = 0.0 else: try: dim_noise = distribution_function(*params) except ValueError: raise ValueError noise.append(dim_noise) return noise
[ "def", "generate", "(", "self", ",", "labels", ",", "split_idx", ")", ":", "atom_labels", "=", "[", "label", "[", "0", "]", "for", "label", "in", "labels", "]", "noise", "=", "[", "]", "distribution_function", "=", "distributions", "[", "self", ".", "d...
Generate peak-specific noise abstract method, must be reimplemented in a subclass. :param tuple labels: Dimension labels of a peak. :param int split_idx: Index specifying which peak list split parameters to use. :return: List of noise values for dimensions ordered as they appear in a peak. :rtype: :py:class:`list`
[ "Generate", "peak", "-", "specific", "noise", "abstract", "method", "must", "be", "reimplemented", "in", "a", "subclass", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/noise.py#L134-L160
brunobord/md2ebook
md2ebook/generators.py
Generator.load_cover
def load_cover(self): """Load the cover out of the config, options and conventions. Priority goes this way: 1. if a --cover option is set, use it. 2. if there's a "cover" key in the config file, use it. 3. if a cover.(png|jpg|jpeg|svg) exists in the directory, use it. Once the choice is set, the program will check if the file exists before using it. If it doesn't exist, you'll be warned and the default (ugly) cover will be used. """ if not self.args: return False filename = self.args.get('--cover', None) \ or self.config.get('cover', None) \ or None if not filename: for extension in ('png', 'jpg', 'jpeg', 'svg'): temp_filename = 'cover.%s' % extension if exists(temp_filename): filename = temp_filename break if filename: if filename.startswith('http://') or \ filename.startswith('https://'): return filename if not exists(filename): print error('The designated cover (%s) does not exists.' ' Please check your settings.' % filename) filename = None if not filename: print warning('No cover is set, will use the default (ugly) one.') return False return abspath(filename)
python
def load_cover(self): """Load the cover out of the config, options and conventions. Priority goes this way: 1. if a --cover option is set, use it. 2. if there's a "cover" key in the config file, use it. 3. if a cover.(png|jpg|jpeg|svg) exists in the directory, use it. Once the choice is set, the program will check if the file exists before using it. If it doesn't exist, you'll be warned and the default (ugly) cover will be used. """ if not self.args: return False filename = self.args.get('--cover', None) \ or self.config.get('cover', None) \ or None if not filename: for extension in ('png', 'jpg', 'jpeg', 'svg'): temp_filename = 'cover.%s' % extension if exists(temp_filename): filename = temp_filename break if filename: if filename.startswith('http://') or \ filename.startswith('https://'): return filename if not exists(filename): print error('The designated cover (%s) does not exists.' ' Please check your settings.' % filename) filename = None if not filename: print warning('No cover is set, will use the default (ugly) one.') return False return abspath(filename)
[ "def", "load_cover", "(", "self", ")", ":", "if", "not", "self", ".", "args", ":", "return", "False", "filename", "=", "self", ".", "args", ".", "get", "(", "'--cover'", ",", "None", ")", "or", "self", ".", "config", ".", "get", "(", "'cover'", ","...
Load the cover out of the config, options and conventions. Priority goes this way: 1. if a --cover option is set, use it. 2. if there's a "cover" key in the config file, use it. 3. if a cover.(png|jpg|jpeg|svg) exists in the directory, use it. Once the choice is set, the program will check if the file exists before using it. If it doesn't exist, you'll be warned and the default (ugly) cover will be used.
[ "Load", "the", "cover", "out", "of", "the", "config", "options", "and", "conventions", ".", "Priority", "goes", "this", "way", ":" ]
train
https://github.com/brunobord/md2ebook/blob/31e0d06b77f2d986e6af1115c9e613dfec0591a9/md2ebook/generators.py#L76-L110
yunojuno-archive/django-package-monitor
package_monitor/pypi.py
parse_python
def parse_python(classifiers): """Parse out the versions of python supported a/c classifiers.""" prefix = 'Programming Language :: Python ::' python_classifiers = [c.split('::')[2].strip() for c in classifiers if c.startswith(prefix)] return ', '.join([c for c in python_classifiers if parse_version(c)])
python
def parse_python(classifiers): """Parse out the versions of python supported a/c classifiers.""" prefix = 'Programming Language :: Python ::' python_classifiers = [c.split('::')[2].strip() for c in classifiers if c.startswith(prefix)] return ', '.join([c for c in python_classifiers if parse_version(c)])
[ "def", "parse_python", "(", "classifiers", ")", ":", "prefix", "=", "'Programming Language :: Python ::'", "python_classifiers", "=", "[", "c", ".", "split", "(", "'::'", ")", "[", "2", "]", ".", "strip", "(", ")", "for", "c", "in", "classifiers", "if", "c...
Parse out the versions of python supported a/c classifiers.
[ "Parse", "out", "the", "versions", "of", "python", "supported", "a", "/", "c", "classifiers", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/pypi.py#L32-L36
yunojuno-archive/django-package-monitor
package_monitor/pypi.py
parse_django
def parse_django(classifiers): """Parse out the versions of django supported a/c classifiers.""" prefix = 'Framework :: Django ::' django_classifiers = [c.split('::')[2].strip() for c in classifiers if c.startswith(prefix)] return ', '.join([c for c in django_classifiers if parse_version(c)])
python
def parse_django(classifiers): """Parse out the versions of django supported a/c classifiers.""" prefix = 'Framework :: Django ::' django_classifiers = [c.split('::')[2].strip() for c in classifiers if c.startswith(prefix)] return ', '.join([c for c in django_classifiers if parse_version(c)])
[ "def", "parse_django", "(", "classifiers", ")", ":", "prefix", "=", "'Framework :: Django ::'", "django_classifiers", "=", "[", "c", ".", "split", "(", "'::'", ")", "[", "2", "]", ".", "strip", "(", ")", "for", "c", "in", "classifiers", "if", "c", ".", ...
Parse out the versions of django supported a/c classifiers.
[ "Parse", "out", "the", "versions", "of", "django", "supported", "a", "/", "c", "classifiers", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/pypi.py#L39-L43
yunojuno-archive/django-package-monitor
package_monitor/pypi.py
version_diff
def version_diff(version1, version2): """Return string representing the diff between package versions. We're interested in whether this is a major, minor, patch or 'other' update. This method will compare the two versions and return None if they are the same, else it will return a string value indicating the type of diff - 'major', 'minor', 'patch', 'other'. Args: version1: the Version object we are interested in (e.g. current) version2: the Version object to compare against (e.g. latest) Returns a string - 'major', 'minor', 'patch', 'other', or None if the two are identical. """ if version1 is None or version2 is None: return 'unknown' if version1 == version2: return 'none' for v in ('major', 'minor', 'patch'): if getattr(version1, v) != getattr(version2, v): return v return 'other'
python
def version_diff(version1, version2): """Return string representing the diff between package versions. We're interested in whether this is a major, minor, patch or 'other' update. This method will compare the two versions and return None if they are the same, else it will return a string value indicating the type of diff - 'major', 'minor', 'patch', 'other'. Args: version1: the Version object we are interested in (e.g. current) version2: the Version object to compare against (e.g. latest) Returns a string - 'major', 'minor', 'patch', 'other', or None if the two are identical. """ if version1 is None or version2 is None: return 'unknown' if version1 == version2: return 'none' for v in ('major', 'minor', 'patch'): if getattr(version1, v) != getattr(version2, v): return v return 'other'
[ "def", "version_diff", "(", "version1", ",", "version2", ")", ":", "if", "version1", "is", "None", "or", "version2", "is", "None", ":", "return", "'unknown'", "if", "version1", "==", "version2", ":", "return", "'none'", "for", "v", "in", "(", "'major'", ...
Return string representing the diff between package versions. We're interested in whether this is a major, minor, patch or 'other' update. This method will compare the two versions and return None if they are the same, else it will return a string value indicating the type of diff - 'major', 'minor', 'patch', 'other'. Args: version1: the Version object we are interested in (e.g. current) version2: the Version object to compare against (e.g. latest) Returns a string - 'major', 'minor', 'patch', 'other', or None if the two are identical.
[ "Return", "string", "representing", "the", "diff", "between", "package", "versions", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/pypi.py#L46-L71
yunojuno-archive/django-package-monitor
package_monitor/pypi.py
Package.data
def data(self): """Fetch latest data from PyPI, and cache for 30s.""" key = cache_key(self.name) data = cache.get(key) if data is None: logger.debug("Updating package info for %s from PyPI.", self.name) data = requests.get(self.url).json() cache.set(key, data, PYPI_CACHE_EXPIRY) return data
python
def data(self): """Fetch latest data from PyPI, and cache for 30s.""" key = cache_key(self.name) data = cache.get(key) if data is None: logger.debug("Updating package info for %s from PyPI.", self.name) data = requests.get(self.url).json() cache.set(key, data, PYPI_CACHE_EXPIRY) return data
[ "def", "data", "(", "self", ")", ":", "key", "=", "cache_key", "(", "self", ".", "name", ")", "data", "=", "cache", ".", "get", "(", "key", ")", "if", "data", "is", "None", ":", "logger", ".", "debug", "(", "\"Updating package info for %s from PyPI.\"", ...
Fetch latest data from PyPI, and cache for 30s.
[ "Fetch", "latest", "data", "from", "PyPI", "and", "cache", "for", "30s", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/pypi.py#L85-L93
beetbox/pylastfp
lastfp/__init__.py
_query_wrap
def _query_wrap(fun, *args, **kwargs): """Wait until at least QUERY_WAIT_TIME seconds have passed since the last invocation of this function, then call the given function with the given arguments. """ with _query_lock: global _last_query_time since_last_query = time.time() - _last_query_time if since_last_query < QUERY_WAIT_TIME: time.sleep(QUERY_WAIT_TIME - since_last_query) _last_query_time = time.time() return fun(*args, **kwargs)
python
def _query_wrap(fun, *args, **kwargs): """Wait until at least QUERY_WAIT_TIME seconds have passed since the last invocation of this function, then call the given function with the given arguments. """ with _query_lock: global _last_query_time since_last_query = time.time() - _last_query_time if since_last_query < QUERY_WAIT_TIME: time.sleep(QUERY_WAIT_TIME - since_last_query) _last_query_time = time.time() return fun(*args, **kwargs)
[ "def", "_query_wrap", "(", "fun", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "_query_lock", ":", "global", "_last_query_time", "since_last_query", "=", "time", ".", "time", "(", ")", "-", "_last_query_time", "if", "since_last_query", "<", ...
Wait until at least QUERY_WAIT_TIME seconds have passed since the last invocation of this function, then call the given function with the given arguments.
[ "Wait", "until", "at", "least", "QUERY_WAIT_TIME", "seconds", "have", "passed", "since", "the", "last", "invocation", "of", "this", "function", "then", "call", "the", "given", "function", "with", "the", "given", "arguments", "." ]
train
https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L44-L55
beetbox/pylastfp
lastfp/__init__.py
formdata_encode
def formdata_encode(fields): """Encode fields (a dict) as a multipart/form-data HTTP request payload. Returns a (content type, request body) pair. """ BOUNDARY = '----form-data-boundary-ZmRkNzJkMjUtMjkyMC00' out = [] for (key, value) in fields.items(): out.append('--' + BOUNDARY) out.append('Content-Disposition: form-data; name="%s"' % key) out.append('') out.append(value) out.append('--' + BOUNDARY + '--') out.append('') body = '\r\n'.join(out) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body
python
def formdata_encode(fields): """Encode fields (a dict) as a multipart/form-data HTTP request payload. Returns a (content type, request body) pair. """ BOUNDARY = '----form-data-boundary-ZmRkNzJkMjUtMjkyMC00' out = [] for (key, value) in fields.items(): out.append('--' + BOUNDARY) out.append('Content-Disposition: form-data; name="%s"' % key) out.append('') out.append(value) out.append('--' + BOUNDARY + '--') out.append('') body = '\r\n'.join(out) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body
[ "def", "formdata_encode", "(", "fields", ")", ":", "BOUNDARY", "=", "'----form-data-boundary-ZmRkNzJkMjUtMjkyMC00'", "out", "=", "[", "]", "for", "(", "key", ",", "value", ")", "in", "fields", ".", "items", "(", ")", ":", "out", ".", "append", "(", "'--'",...
Encode fields (a dict) as a multipart/form-data HTTP request payload. Returns a (content type, request body) pair.
[ "Encode", "fields", "(", "a", "dict", ")", "as", "a", "multipart", "/", "form", "-", "data", "HTTP", "request", "payload", ".", "Returns", "a", "(", "content", "type", "request", "body", ")", "pair", "." ]
train
https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L61-L76
beetbox/pylastfp
lastfp/__init__.py
formdata_post
def formdata_post(url, fields): """Send an HTTP request with a multipart/form-data body for the given URL and return the data returned by the server. """ content_type, data = formdata_encode(fields) req = urllib2.Request(url, data) req.add_header('Content-Type', content_type) return urllib2.urlopen(req).read()
python
def formdata_post(url, fields): """Send an HTTP request with a multipart/form-data body for the given URL and return the data returned by the server. """ content_type, data = formdata_encode(fields) req = urllib2.Request(url, data) req.add_header('Content-Type', content_type) return urllib2.urlopen(req).read()
[ "def", "formdata_post", "(", "url", ",", "fields", ")", ":", "content_type", ",", "data", "=", "formdata_encode", "(", "fields", ")", "req", "=", "urllib2", ".", "Request", "(", "url", ",", "data", ")", "req", ".", "add_header", "(", "'Content-Type'", ",...
Send an HTTP request with a multipart/form-data body for the given URL and return the data returned by the server.
[ "Send", "an", "HTTP", "request", "with", "a", "multipart", "/", "form", "-", "data", "body", "for", "the", "given", "URL", "and", "return", "the", "data", "returned", "by", "the", "server", "." ]
train
https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L77-L84
beetbox/pylastfp
lastfp/__init__.py
fpid_query
def fpid_query(duration, fpdata, metadata=None): """Send fingerprint data to Last.fm to get the corresponding fingerprint ID, which can then be used to fetch metadata. duration is the length of the track in (integral) seconds. If metadata is provided, it is a dictionary with three optional fields reflecting the current metadata for the file: "artist", "album", and "title". These values are optional but might help improve the database. Returns the fpid, an integer, or raises a QueryError. """ metadata = metadata or {} params = { 'artist': metadata.get('artist', ''), 'album': metadata.get('album', ''), 'track': metadata.get('title', ''), 'duration': duration, } url = '%s?%s' % (URL_FPID, urllib.urlencode(params)) try: res = _query_wrap(formdata_post, url, {'fpdata': fpdata}) except urllib2.HTTPError: raise CommunicationError('ID query failed') except httplib.BadStatusLine: raise CommunicationError('bad response in ID query') except IOError: raise CommunicationError('ID query failed') try: fpid, status = res.split()[:2] fpid = int(fpid) except ValueError: raise BadResponseError('malformed response: ' + res) if status == 'NEW': raise NotFoundError() elif status == 'FOUND': return fpid else: raise BadResponseError('unknown status: ' + res)
python
def fpid_query(duration, fpdata, metadata=None): """Send fingerprint data to Last.fm to get the corresponding fingerprint ID, which can then be used to fetch metadata. duration is the length of the track in (integral) seconds. If metadata is provided, it is a dictionary with three optional fields reflecting the current metadata for the file: "artist", "album", and "title". These values are optional but might help improve the database. Returns the fpid, an integer, or raises a QueryError. """ metadata = metadata or {} params = { 'artist': metadata.get('artist', ''), 'album': metadata.get('album', ''), 'track': metadata.get('title', ''), 'duration': duration, } url = '%s?%s' % (URL_FPID, urllib.urlencode(params)) try: res = _query_wrap(formdata_post, url, {'fpdata': fpdata}) except urllib2.HTTPError: raise CommunicationError('ID query failed') except httplib.BadStatusLine: raise CommunicationError('bad response in ID query') except IOError: raise CommunicationError('ID query failed') try: fpid, status = res.split()[:2] fpid = int(fpid) except ValueError: raise BadResponseError('malformed response: ' + res) if status == 'NEW': raise NotFoundError() elif status == 'FOUND': return fpid else: raise BadResponseError('unknown status: ' + res)
[ "def", "fpid_query", "(", "duration", ",", "fpdata", ",", "metadata", "=", "None", ")", ":", "metadata", "=", "metadata", "or", "{", "}", "params", "=", "{", "'artist'", ":", "metadata", ".", "get", "(", "'artist'", ",", "''", ")", ",", "'album'", ":...
Send fingerprint data to Last.fm to get the corresponding fingerprint ID, which can then be used to fetch metadata. duration is the length of the track in (integral) seconds. If metadata is provided, it is a dictionary with three optional fields reflecting the current metadata for the file: "artist", "album", and "title". These values are optional but might help improve the database. Returns the fpid, an integer, or raises a QueryError.
[ "Send", "fingerprint", "data", "to", "Last", ".", "fm", "to", "get", "the", "corresponding", "fingerprint", "ID", "which", "can", "then", "be", "used", "to", "fetch", "metadata", ".", "duration", "is", "the", "length", "of", "the", "track", "in", "(", "i...
train
https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L95-L133
beetbox/pylastfp
lastfp/__init__.py
metadata_query
def metadata_query(fpid, apikey): """Queries the Last.fm servers for metadata about a given fingerprint ID (an integer). Returns the XML response (a string). """ params = { 'method': 'track.getFingerprintMetadata', 'fingerprintid': fpid, 'api_key': apikey, } url = '%s?%s' % (URL_METADATA, urllib.urlencode(params)) try: fh = _query_wrap(urllib.urlopen, url) except urllib2.HTTPError: raise CommunicationError('metadata query failed') except httplib.BadStatusLine: raise CommunicationError('bad response in metadata query') except IOError: raise CommunicationError('metadata query failed') return fh.read()
python
def metadata_query(fpid, apikey): """Queries the Last.fm servers for metadata about a given fingerprint ID (an integer). Returns the XML response (a string). """ params = { 'method': 'track.getFingerprintMetadata', 'fingerprintid': fpid, 'api_key': apikey, } url = '%s?%s' % (URL_METADATA, urllib.urlencode(params)) try: fh = _query_wrap(urllib.urlopen, url) except urllib2.HTTPError: raise CommunicationError('metadata query failed') except httplib.BadStatusLine: raise CommunicationError('bad response in metadata query') except IOError: raise CommunicationError('metadata query failed') return fh.read()
[ "def", "metadata_query", "(", "fpid", ",", "apikey", ")", ":", "params", "=", "{", "'method'", ":", "'track.getFingerprintMetadata'", ",", "'fingerprintid'", ":", "fpid", ",", "'api_key'", ":", "apikey", ",", "}", "url", "=", "'%s?%s'", "%", "(", "URL_METADA...
Queries the Last.fm servers for metadata about a given fingerprint ID (an integer). Returns the XML response (a string).
[ "Queries", "the", "Last", ".", "fm", "servers", "for", "metadata", "about", "a", "given", "fingerprint", "ID", "(", "an", "integer", ")", ".", "Returns", "the", "XML", "response", "(", "a", "string", ")", "." ]
train
https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L135-L153
beetbox/pylastfp
lastfp/__init__.py
extract
def extract(pcmiter, samplerate, channels, duration = -1): """Given a PCM data stream, extract fingerprint data from the audio. Returns a byte string of fingerprint data. Raises an ExtractionError if fingerprinting fails. """ extractor = _fplib.Extractor(samplerate, channels, duration) # Get first block. try: next_block = next(pcmiter) except StopIteration: raise ExtractionError() # Get and process subsequent blocks. while True: # Shift over blocks. cur_block = next_block try: next_block = next(pcmiter) except StopIteration: next_block = None done = next_block is None # Process the block. try: if extractor.process(cur_block, done): # Success! break except RuntimeError as exc: # Exception from fplib. Most likely the file is too short. raise ExtractionError(exc.args[0]) # End of file but processor never became ready? if done: raise ExtractionError() # Get resulting fingerprint data. out = extractor.result() if out is None: raise ExtractionError() # Free extractor memory. extractor.free() return out
python
def extract(pcmiter, samplerate, channels, duration = -1): """Given a PCM data stream, extract fingerprint data from the audio. Returns a byte string of fingerprint data. Raises an ExtractionError if fingerprinting fails. """ extractor = _fplib.Extractor(samplerate, channels, duration) # Get first block. try: next_block = next(pcmiter) except StopIteration: raise ExtractionError() # Get and process subsequent blocks. while True: # Shift over blocks. cur_block = next_block try: next_block = next(pcmiter) except StopIteration: next_block = None done = next_block is None # Process the block. try: if extractor.process(cur_block, done): # Success! break except RuntimeError as exc: # Exception from fplib. Most likely the file is too short. raise ExtractionError(exc.args[0]) # End of file but processor never became ready? if done: raise ExtractionError() # Get resulting fingerprint data. out = extractor.result() if out is None: raise ExtractionError() # Free extractor memory. extractor.free() return out
[ "def", "extract", "(", "pcmiter", ",", "samplerate", ",", "channels", ",", "duration", "=", "-", "1", ")", ":", "extractor", "=", "_fplib", ".", "Extractor", "(", "samplerate", ",", "channels", ",", "duration", ")", "# Get first block.", "try", ":", "next_...
Given a PCM data stream, extract fingerprint data from the audio. Returns a byte string of fingerprint data. Raises an ExtractionError if fingerprinting fails.
[ "Given", "a", "PCM", "data", "stream", "extract", "fingerprint", "data", "from", "the", "audio", ".", "Returns", "a", "byte", "string", "of", "fingerprint", "data", ".", "Raises", "an", "ExtractionError", "if", "fingerprinting", "fails", "." ]
train
https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L157-L201
beetbox/pylastfp
lastfp/__init__.py
match
def match(apikey, pcmiter, samplerate, duration, channels=2, metadata=None): """Given a PCM data stream, perform fingerprinting and look up the metadata for the audio. pcmiter must be an iterable of blocks of PCM data (buffers). duration is the total length of the track in seconds (an integer). metadata may be a dictionary containing existing metadata for the file (optional keys: "artist", "album", and "title"). Returns a list of track info dictionaries describing the candidate metadata returned by Last.fm. Raises a subclass of FingerprintError if any step fails. """ fpdata = extract(pcmiter, samplerate, channels) fpid = fpid_query(duration, fpdata, metadata) return metadata_query(fpid, apikey)
python
def match(apikey, pcmiter, samplerate, duration, channels=2, metadata=None): """Given a PCM data stream, perform fingerprinting and look up the metadata for the audio. pcmiter must be an iterable of blocks of PCM data (buffers). duration is the total length of the track in seconds (an integer). metadata may be a dictionary containing existing metadata for the file (optional keys: "artist", "album", and "title"). Returns a list of track info dictionaries describing the candidate metadata returned by Last.fm. Raises a subclass of FingerprintError if any step fails. """ fpdata = extract(pcmiter, samplerate, channels) fpid = fpid_query(duration, fpdata, metadata) return metadata_query(fpid, apikey)
[ "def", "match", "(", "apikey", ",", "pcmiter", ",", "samplerate", ",", "duration", ",", "channels", "=", "2", ",", "metadata", "=", "None", ")", ":", "fpdata", "=", "extract", "(", "pcmiter", ",", "samplerate", ",", "channels", ")", "fpid", "=", "fpid_...
Given a PCM data stream, perform fingerprinting and look up the metadata for the audio. pcmiter must be an iterable of blocks of PCM data (buffers). duration is the total length of the track in seconds (an integer). metadata may be a dictionary containing existing metadata for the file (optional keys: "artist", "album", and "title"). Returns a list of track info dictionaries describing the candidate metadata returned by Last.fm. Raises a subclass of FingerprintError if any step fails.
[ "Given", "a", "PCM", "data", "stream", "perform", "fingerprinting", "and", "look", "up", "the", "metadata", "for", "the", "audio", ".", "pcmiter", "must", "be", "an", "iterable", "of", "blocks", "of", "PCM", "data", "(", "buffers", ")", ".", "duration", ...
train
https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L206-L218
beetbox/pylastfp
lastfp/__init__.py
parse_metadata
def parse_metadata(xml): """Given an XML document (string) returned from metadata_query(), parse the response into a list of track info dicts. May raise an APIError if the lookup fails. """ try: root = etree.fromstring(xml) except (ExpatError, etree.ParseError): # The Last.fm API occasionally generates malformed XML when its # includes an illegal character (UTF8-legal but prohibited by # the XML standard). raise CommunicationError('malformed XML response') status = root.attrib['status'] if status == 'failed': error = root.find('error') raise APIError(int(error.attrib['code']), error.text) out = [] for track in root.find('tracks').findall('track'): out.append({ 'rank': float(track.attrib['rank']), 'artist': track.find('artist').find('name').text, 'artist_mbid': track.find('artist').find('mbid').text, 'title': track.find('name').text, 'track_mbid': track.find('mbid').text, }) return out
python
def parse_metadata(xml): """Given an XML document (string) returned from metadata_query(), parse the response into a list of track info dicts. May raise an APIError if the lookup fails. """ try: root = etree.fromstring(xml) except (ExpatError, etree.ParseError): # The Last.fm API occasionally generates malformed XML when its # includes an illegal character (UTF8-legal but prohibited by # the XML standard). raise CommunicationError('malformed XML response') status = root.attrib['status'] if status == 'failed': error = root.find('error') raise APIError(int(error.attrib['code']), error.text) out = [] for track in root.find('tracks').findall('track'): out.append({ 'rank': float(track.attrib['rank']), 'artist': track.find('artist').find('name').text, 'artist_mbid': track.find('artist').find('mbid').text, 'title': track.find('name').text, 'track_mbid': track.find('mbid').text, }) return out
[ "def", "parse_metadata", "(", "xml", ")", ":", "try", ":", "root", "=", "etree", ".", "fromstring", "(", "xml", ")", "except", "(", "ExpatError", ",", "etree", ".", "ParseError", ")", ":", "# The Last.fm API occasionally generates malformed XML when its", "# inclu...
Given an XML document (string) returned from metadata_query(), parse the response into a list of track info dicts. May raise an APIError if the lookup fails.
[ "Given", "an", "XML", "document", "(", "string", ")", "returned", "from", "metadata_query", "()", "parse", "the", "response", "into", "a", "list", "of", "track", "info", "dicts", ".", "May", "raise", "an", "APIError", "if", "the", "lookup", "fails", "." ]
train
https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L226-L253
beetbox/pylastfp
lastfp/__init__.py
match_file
def match_file(apikey, path, metadata=None): """Uses the audioread library to decode an audio file and match it. """ import audioread with audioread.audio_open(path) as f: return match(apikey, iter(f), f.samplerate, int(f.duration), f.channels, metadata)
python
def match_file(apikey, path, metadata=None): """Uses the audioread library to decode an audio file and match it. """ import audioread with audioread.audio_open(path) as f: return match(apikey, iter(f), f.samplerate, int(f.duration), f.channels, metadata)
[ "def", "match_file", "(", "apikey", ",", "path", ",", "metadata", "=", "None", ")", ":", "import", "audioread", "with", "audioread", ".", "audio_open", "(", "path", ")", "as", "f", ":", "return", "match", "(", "apikey", ",", "iter", "(", "f", ")", ",...
Uses the audioread library to decode an audio file and match it.
[ "Uses", "the", "audioread", "library", "to", "decode", "an", "audio", "file", "and", "match", "it", "." ]
train
https://github.com/beetbox/pylastfp/blob/55edfad638bb1c849cbbd7406355385e8b1ea3d8/lastfp/__init__.py#L258-L264
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
update_constants
def update_constants(nmrstar2cfg="", nmrstar3cfg="", resonance_classes_cfg="", spectrum_descriptions_cfg=""): """Update constant variables. :return: None :rtype: :py:obj:`None` """ nmrstar_constants = {} resonance_classes = {} spectrum_descriptions = {} this_directory = os.path.dirname(__file__) nmrstar2_config_filepath = os.path.join(this_directory, "conf/constants_nmrstar2.json") nmrstar3_config_filepath = os.path.join(this_directory, "conf/constants_nmrstar3.json") resonance_classes_config_filepath = os.path.join(this_directory, "conf/resonance_classes.json") spectrum_descriptions_config_filepath = os.path.join(this_directory, "conf/spectrum_descriptions.json") with open(nmrstar2_config_filepath, "r") as nmrstar2config, open(nmrstar3_config_filepath, "r") as nmrstar3config: nmrstar_constants["2"] = json.load(nmrstar2config) nmrstar_constants["3"] = json.load(nmrstar3config) with open(resonance_classes_config_filepath, "r") as config: resonance_classes.update(json.load(config)) with open(spectrum_descriptions_config_filepath, "r") as config: spectrum_descriptions.update(json.load(config)) if nmrstar2cfg: with open(nmrstar2cfg, "r") as nmrstar2config: nmrstar_constants["2"].update(json.load(nmrstar2config)) if nmrstar3cfg: with open(nmrstar2cfg, "r") as nmrstar3config: nmrstar_constants["3"].update(json.load(nmrstar3config)) if resonance_classes_cfg: with open(nmrstar2cfg, "r") as config: resonance_classes.update(json.load(config)) if spectrum_descriptions_cfg: with open(spectrum_descriptions_cfg, "r") as config: spectrum_descriptions.update(json.load(config)) NMRSTAR_CONSTANTS.update(nmrstar_constants) RESONANCE_CLASSES.update(resonance_classes) SPECTRUM_DESCRIPTIONS.update(spectrum_descriptions)
python
def update_constants(nmrstar2cfg="", nmrstar3cfg="", resonance_classes_cfg="", spectrum_descriptions_cfg=""): """Update constant variables. :return: None :rtype: :py:obj:`None` """ nmrstar_constants = {} resonance_classes = {} spectrum_descriptions = {} this_directory = os.path.dirname(__file__) nmrstar2_config_filepath = os.path.join(this_directory, "conf/constants_nmrstar2.json") nmrstar3_config_filepath = os.path.join(this_directory, "conf/constants_nmrstar3.json") resonance_classes_config_filepath = os.path.join(this_directory, "conf/resonance_classes.json") spectrum_descriptions_config_filepath = os.path.join(this_directory, "conf/spectrum_descriptions.json") with open(nmrstar2_config_filepath, "r") as nmrstar2config, open(nmrstar3_config_filepath, "r") as nmrstar3config: nmrstar_constants["2"] = json.load(nmrstar2config) nmrstar_constants["3"] = json.load(nmrstar3config) with open(resonance_classes_config_filepath, "r") as config: resonance_classes.update(json.load(config)) with open(spectrum_descriptions_config_filepath, "r") as config: spectrum_descriptions.update(json.load(config)) if nmrstar2cfg: with open(nmrstar2cfg, "r") as nmrstar2config: nmrstar_constants["2"].update(json.load(nmrstar2config)) if nmrstar3cfg: with open(nmrstar2cfg, "r") as nmrstar3config: nmrstar_constants["3"].update(json.load(nmrstar3config)) if resonance_classes_cfg: with open(nmrstar2cfg, "r") as config: resonance_classes.update(json.load(config)) if spectrum_descriptions_cfg: with open(spectrum_descriptions_cfg, "r") as config: spectrum_descriptions.update(json.load(config)) NMRSTAR_CONSTANTS.update(nmrstar_constants) RESONANCE_CLASSES.update(resonance_classes) SPECTRUM_DESCRIPTIONS.update(spectrum_descriptions)
[ "def", "update_constants", "(", "nmrstar2cfg", "=", "\"\"", ",", "nmrstar3cfg", "=", "\"\"", ",", "resonance_classes_cfg", "=", "\"\"", ",", "spectrum_descriptions_cfg", "=", "\"\"", ")", ":", "nmrstar_constants", "=", "{", "}", "resonance_classes", "=", "{", "}...
Update constant variables. :return: None :rtype: :py:obj:`None`
[ "Update", "constant", "variables", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L54-L99
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
list_spectrum_descriptions
def list_spectrum_descriptions(*args): """List all available spectrum descriptions that can be used for peak list simulation. :param str args: Spectrum name(s), e.g. list_spectrum_descriptions("HNCO", "HNcoCACB"), leave empty to list everything. :return: None :rtype: :py:obj:`None` """ if args: for spectrum_name in args: pprint.pprint({spectrum_name: SPECTRUM_DESCRIPTIONS.get(spectrum_name, None)}, width=120) else: pprint.pprint(SPECTRUM_DESCRIPTIONS, width=120)
python
def list_spectrum_descriptions(*args): """List all available spectrum descriptions that can be used for peak list simulation. :param str args: Spectrum name(s), e.g. list_spectrum_descriptions("HNCO", "HNcoCACB"), leave empty to list everything. :return: None :rtype: :py:obj:`None` """ if args: for spectrum_name in args: pprint.pprint({spectrum_name: SPECTRUM_DESCRIPTIONS.get(spectrum_name, None)}, width=120) else: pprint.pprint(SPECTRUM_DESCRIPTIONS, width=120)
[ "def", "list_spectrum_descriptions", "(", "*", "args", ")", ":", "if", "args", ":", "for", "spectrum_name", "in", "args", ":", "pprint", ".", "pprint", "(", "{", "spectrum_name", ":", "SPECTRUM_DESCRIPTIONS", ".", "get", "(", "spectrum_name", ",", "None", ")...
List all available spectrum descriptions that can be used for peak list simulation. :param str args: Spectrum name(s), e.g. list_spectrum_descriptions("HNCO", "HNcoCACB"), leave empty to list everything. :return: None :rtype: :py:obj:`None`
[ "List", "all", "available", "spectrum", "descriptions", "that", "can", "be", "used", "for", "peak", "list", "simulation", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L759-L770