repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
ets-labs/python-dependency-injector | examples/miniapps/password_hashing/example.py | UsersService.create_user | python | def create_user(self, name, password):
hashed_password = self._password_hasher(password)
return dict(name=name, password=hashed_password) | Create user with hashed password. | train | https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/miniapps/password_hashing/example.py#L16-L19 | null | class UsersService(object):
"""Users service."""
def __init__(self, password_hasher):
"""Initializer."""
self._password_hasher = password_hasher
|
ets-labs/python-dependency-injector | examples/miniapps/api_client/api.py | ApiClient.call | python | def call(self, operation, data):
print('API call [{0}:{1}], method - {2}, data - {3}'.format(
self.host, self.api_key, operation, repr(data))) | Make some network operations. | train | https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/miniapps/api_client/api.py#L12-L15 | null | class ApiClient(object):
"""Some API client."""
def __init__(self, host, api_key):
"""Initializer."""
self.host = host
self.api_key = api_key
|
ets-labs/python-dependency-injector | examples/miniapps/movie_lister/movies/listers.py | MovieLister.movies_directed_by | python | def movies_directed_by(self, director):
return [movie for movie in self._movie_finder.find_all()
if movie.director == director] | Return list of movies that were directed by certain person.
:param director: Director's name
:type director: str
:rtype: list[movies.models.Movie]
:return: List of movie instances. | train | https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/miniapps/movie_lister/movies/listers.py#L22-L32 | null | class MovieLister(object):
"""Movie lister component.
Movie lister component provides several methods for filtering movies by
specific criteria.
"""
def __init__(self, movie_finder):
"""Initializer.
:param movie_finder: Movie finder instance
:type movie_finder: movies.finders.MovieFinder
"""
self._movie_finder = movie_finder
def movies_released_in(self, year):
"""Return list of movies that were released in certain year.
:param year: Release year
:type year: int
:rtype: list[movies.models.Movie]
:return: List of movie instances.
"""
return [movie for movie in self._movie_finder.find_all()
if movie.year == year]
|
ets-labs/python-dependency-injector | examples/miniapps/movie_lister/movies/listers.py | MovieLister.movies_released_in | python | def movies_released_in(self, year):
return [movie for movie in self._movie_finder.find_all()
if movie.year == year] | Return list of movies that were released in certain year.
:param year: Release year
:type year: int
:rtype: list[movies.models.Movie]
:return: List of movie instances. | train | https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/miniapps/movie_lister/movies/listers.py#L34-L44 | null | class MovieLister(object):
"""Movie lister component.
Movie lister component provides several methods for filtering movies by
specific criteria.
"""
def __init__(self, movie_finder):
"""Initializer.
:param movie_finder: Movie finder instance
:type movie_finder: movies.finders.MovieFinder
"""
self._movie_finder = movie_finder
def movies_directed_by(self, director):
"""Return list of movies that were directed by certain person.
:param director: Director's name
:type director: str
:rtype: list[movies.models.Movie]
:return: List of movie instances.
"""
return [movie for movie in self._movie_finder.find_all()
if movie.director == director]
|
ets-labs/python-dependency-injector | examples/providers/dependency.py | UsersService.init_database | python | def init_database(self):
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute("""
CREATE TABLE IF NOT EXISTS users(
id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(32)
)
""") | Initialize database, if it has not been initialized yet. | train | https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/providers/dependency.py#L24-L32 | null | class UsersService(object):
"""Example class UsersService.
UsersService has dependency on DBAPI 2.0 database connection.
"""
def __init__(self, database):
"""Initializer.
:param database: Database connection.
:type database: sqlite3.dbapi2.Connection
"""
self.database = database
self.database.row_factory = sqlite3.dbapi2.Row
def create(self, name):
"""Create user with provided name and return his id."""
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute('INSERT INTO users(name) VALUES (?)', (name,))
return cursor.lastrowid
def get_by_id(self, id):
"""Return user info by user id."""
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute('SELECT id, name FROM users WHERE id=?', (id,))
return cursor.fetchone()
|
ets-labs/python-dependency-injector | examples/providers/dependency.py | UsersService.create | python | def create(self, name):
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute('INSERT INTO users(name) VALUES (?)', (name,))
return cursor.lastrowid | Create user with provided name and return his id. | train | https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/providers/dependency.py#L34-L38 | null | class UsersService(object):
"""Example class UsersService.
UsersService has dependency on DBAPI 2.0 database connection.
"""
def __init__(self, database):
"""Initializer.
:param database: Database connection.
:type database: sqlite3.dbapi2.Connection
"""
self.database = database
self.database.row_factory = sqlite3.dbapi2.Row
def init_database(self):
"""Initialize database, if it has not been initialized yet."""
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute("""
CREATE TABLE IF NOT EXISTS users(
id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(32)
)
""")
def get_by_id(self, id):
"""Return user info by user id."""
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute('SELECT id, name FROM users WHERE id=?', (id,))
return cursor.fetchone()
|
ets-labs/python-dependency-injector | examples/providers/dependency.py | UsersService.get_by_id | python | def get_by_id(self, id):
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute('SELECT id, name FROM users WHERE id=?', (id,))
return cursor.fetchone() | Return user info by user id. | train | https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/providers/dependency.py#L40-L44 | null | class UsersService(object):
"""Example class UsersService.
UsersService has dependency on DBAPI 2.0 database connection.
"""
def __init__(self, database):
"""Initializer.
:param database: Database connection.
:type database: sqlite3.dbapi2.Connection
"""
self.database = database
self.database.row_factory = sqlite3.dbapi2.Row
def init_database(self):
"""Initialize database, if it has not been initialized yet."""
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute("""
CREATE TABLE IF NOT EXISTS users(
id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(32)
)
""")
def create(self, name):
"""Create user with provided name and return his id."""
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute('INSERT INTO users(name) VALUES (?)', (name,))
return cursor.lastrowid
|
Alexis-benoist/eralchemy | eralchemy/parser.py | filter_lines_from_comments | python | def filter_lines_from_comments(lines):
for line_nb, raw_line in enumerate(lines):
clean_line = remove_comments_from_line(raw_line)
if clean_line == '':
continue
yield line_nb, clean_line, raw_line | Filter the lines from comments and non code lines. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/parser.py#L43-L49 | [
"def remove_comments_from_line(line):\n if '#' not in line:\n return line.strip()\n return line[:line.index('#')].strip()\n"
] | # -*- coding: utf-8 -*-
from eralchemy.models import Table, Relation, Column
class ParsingException(Exception):
base_traceback = 'Error on line {line_nb}: {line}\n{error}'
hint = None
@property
def traceback(self):
rv = self.base_traceback.format(
line_nb=getattr(self, 'line_nb', '?'),
line=getattr(self, 'line', ''),
error=self.args[0],
)
if self.hint is not None:
rv += '\nHINT: {}'.format(self.hint)
return rv
class DuplicateTableException(ParsingException):
pass
class DuplicateColumnException(ParsingException):
pass
class RelationNoColException(ParsingException):
hint = 'Try to declare the tables before the relationships.'
class NoCurrentTableException(ParsingException):
hint = 'Try to declare the tables before the relationships and columns.'
def remove_comments_from_line(line):
if '#' not in line:
return line.strip()
return line[:line.index('#')].strip()
def parse_line(line):
for typ in [Table, Relation, Column]:
match = typ.RE.match(line)
if match:
return typ.make_from_match(match)
msg = 'Line "{}" could not be parsed to an object.'
raise ValueError(msg.format(line))
def _check_no_current_table(new_obj, current_table):
""" Raises exception if we try to add a relation or a column
with no current table. """
if current_table is None:
msg = 'Cannot add {} before adding table'
if isinstance(new_obj, Relation):
raise NoCurrentTableException(msg.format('relation'))
if isinstance(new_obj, Column):
raise NoCurrentTableException(msg.format('column'))
def _update_check_inputs(current_table, tables, relations):
assert current_table is None or isinstance(current_table, Table)
assert isinstance(tables, list)
assert all(isinstance(t, Table) for t in tables)
assert all(isinstance(r, Relation) for r in relations)
assert current_table is None or current_table in tables
def _check_colname_in_lst(column_name, columns_names):
if column_name not in columns_names:
msg = 'Cannot add a relation with column "{}" which is undefined'
raise RelationNoColException(msg.format(column_name))
def _check_not_creating_duplicates(new_name, names, type, exc):
if new_name in names:
msg = 'Cannot add {} named "{}" which is ' \
'already present in the schema.'
raise exc(msg.format(type, new_name))
def update_models(new_obj, current_table, tables, relations):
""" Update the state of the parsing. """
_update_check_inputs(current_table, tables, relations)
_check_no_current_table(new_obj, current_table)
if isinstance(new_obj, Table):
tables_names = [t.name for t in tables]
_check_not_creating_duplicates(new_obj.name, tables_names, 'table', DuplicateTableException)
return new_obj, tables + [new_obj], relations
if isinstance(new_obj, Relation):
tables_names = [t.name for t in tables]
_check_colname_in_lst(new_obj.right_col, tables_names)
_check_colname_in_lst(new_obj.left_col, tables_names)
return current_table, tables, relations + [new_obj]
if isinstance(new_obj, Column):
columns_names = [c.name for c in current_table.columns]
_check_not_creating_duplicates(new_obj.name, columns_names, 'column', DuplicateColumnException)
current_table.columns.append(new_obj)
return current_table, tables, relations
msg = "new_obj cannot be of type {}"
raise ValueError(msg.format(new_obj.__class__.__name__))
def markdown_file_to_intermediary(filename):
""" Parse a file and return to intermediary syntax. """
with open(filename) as f:
lines = f.readlines()
return line_iterator_to_intermediary(lines)
def line_iterator_to_intermediary(line_iterator):
""" Parse an iterator of str (one string per line) to the intermediary syntax"""
current_table = None
tables = []
relations = []
errors = []
for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):
try:
new_obj = parse_line(line)
current_table, tables, relations = update_models(new_obj, current_table, tables, relations)
except ParsingException as e:
e.line_nb = line_nb
e.line = raw_line
errors.append(e)
if len(errors) != 0:
msg = 'ERAlchemy couldn\'t complete the generation due the {} following errors'.format(len(errors))
raise ParsingException(msg + '\n\n'.join(e.traceback for e in errors))
return tables, relations
|
Alexis-benoist/eralchemy | eralchemy/parser.py | _check_no_current_table | python | def _check_no_current_table(new_obj, current_table):
if current_table is None:
msg = 'Cannot add {} before adding table'
if isinstance(new_obj, Relation):
raise NoCurrentTableException(msg.format('relation'))
if isinstance(new_obj, Column):
raise NoCurrentTableException(msg.format('column')) | Raises exception if we try to add a relation or a column
with no current table. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/parser.py#L61-L69 | null | # -*- coding: utf-8 -*-
from eralchemy.models import Table, Relation, Column
class ParsingException(Exception):
base_traceback = 'Error on line {line_nb}: {line}\n{error}'
hint = None
@property
def traceback(self):
rv = self.base_traceback.format(
line_nb=getattr(self, 'line_nb', '?'),
line=getattr(self, 'line', ''),
error=self.args[0],
)
if self.hint is not None:
rv += '\nHINT: {}'.format(self.hint)
return rv
class DuplicateTableException(ParsingException):
pass
class DuplicateColumnException(ParsingException):
pass
class RelationNoColException(ParsingException):
hint = 'Try to declare the tables before the relationships.'
class NoCurrentTableException(ParsingException):
hint = 'Try to declare the tables before the relationships and columns.'
def remove_comments_from_line(line):
if '#' not in line:
return line.strip()
return line[:line.index('#')].strip()
def filter_lines_from_comments(lines):
""" Filter the lines from comments and non code lines. """
for line_nb, raw_line in enumerate(lines):
clean_line = remove_comments_from_line(raw_line)
if clean_line == '':
continue
yield line_nb, clean_line, raw_line
def parse_line(line):
for typ in [Table, Relation, Column]:
match = typ.RE.match(line)
if match:
return typ.make_from_match(match)
msg = 'Line "{}" could not be parsed to an object.'
raise ValueError(msg.format(line))
def _update_check_inputs(current_table, tables, relations):
assert current_table is None or isinstance(current_table, Table)
assert isinstance(tables, list)
assert all(isinstance(t, Table) for t in tables)
assert all(isinstance(r, Relation) for r in relations)
assert current_table is None or current_table in tables
def _check_colname_in_lst(column_name, columns_names):
if column_name not in columns_names:
msg = 'Cannot add a relation with column "{}" which is undefined'
raise RelationNoColException(msg.format(column_name))
def _check_not_creating_duplicates(new_name, names, type, exc):
if new_name in names:
msg = 'Cannot add {} named "{}" which is ' \
'already present in the schema.'
raise exc(msg.format(type, new_name))
def update_models(new_obj, current_table, tables, relations):
""" Update the state of the parsing. """
_update_check_inputs(current_table, tables, relations)
_check_no_current_table(new_obj, current_table)
if isinstance(new_obj, Table):
tables_names = [t.name for t in tables]
_check_not_creating_duplicates(new_obj.name, tables_names, 'table', DuplicateTableException)
return new_obj, tables + [new_obj], relations
if isinstance(new_obj, Relation):
tables_names = [t.name for t in tables]
_check_colname_in_lst(new_obj.right_col, tables_names)
_check_colname_in_lst(new_obj.left_col, tables_names)
return current_table, tables, relations + [new_obj]
if isinstance(new_obj, Column):
columns_names = [c.name for c in current_table.columns]
_check_not_creating_duplicates(new_obj.name, columns_names, 'column', DuplicateColumnException)
current_table.columns.append(new_obj)
return current_table, tables, relations
msg = "new_obj cannot be of type {}"
raise ValueError(msg.format(new_obj.__class__.__name__))
def markdown_file_to_intermediary(filename):
""" Parse a file and return to intermediary syntax. """
with open(filename) as f:
lines = f.readlines()
return line_iterator_to_intermediary(lines)
def line_iterator_to_intermediary(line_iterator):
""" Parse an iterator of str (one string per line) to the intermediary syntax"""
current_table = None
tables = []
relations = []
errors = []
for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):
try:
new_obj = parse_line(line)
current_table, tables, relations = update_models(new_obj, current_table, tables, relations)
except ParsingException as e:
e.line_nb = line_nb
e.line = raw_line
errors.append(e)
if len(errors) != 0:
msg = 'ERAlchemy couldn\'t complete the generation due the {} following errors'.format(len(errors))
raise ParsingException(msg + '\n\n'.join(e.traceback for e in errors))
return tables, relations
|
Alexis-benoist/eralchemy | eralchemy/parser.py | update_models | python | def update_models(new_obj, current_table, tables, relations):
_update_check_inputs(current_table, tables, relations)
_check_no_current_table(new_obj, current_table)
if isinstance(new_obj, Table):
tables_names = [t.name for t in tables]
_check_not_creating_duplicates(new_obj.name, tables_names, 'table', DuplicateTableException)
return new_obj, tables + [new_obj], relations
if isinstance(new_obj, Relation):
tables_names = [t.name for t in tables]
_check_colname_in_lst(new_obj.right_col, tables_names)
_check_colname_in_lst(new_obj.left_col, tables_names)
return current_table, tables, relations + [new_obj]
if isinstance(new_obj, Column):
columns_names = [c.name for c in current_table.columns]
_check_not_creating_duplicates(new_obj.name, columns_names, 'column', DuplicateColumnException)
current_table.columns.append(new_obj)
return current_table, tables, relations
msg = "new_obj cannot be of type {}"
raise ValueError(msg.format(new_obj.__class__.__name__)) | Update the state of the parsing. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/parser.py#L93-L116 | [
"def _check_no_current_table(new_obj, current_table):\n \"\"\" Raises exception if we try to add a relation or a column\n with no current table. \"\"\"\n if current_table is None:\n msg = 'Cannot add {} before adding table'\n if isinstance(new_obj, Relation):\n raise NoCurrentTable... | # -*- coding: utf-8 -*-
from eralchemy.models import Table, Relation, Column
class ParsingException(Exception):
base_traceback = 'Error on line {line_nb}: {line}\n{error}'
hint = None
@property
def traceback(self):
rv = self.base_traceback.format(
line_nb=getattr(self, 'line_nb', '?'),
line=getattr(self, 'line', ''),
error=self.args[0],
)
if self.hint is not None:
rv += '\nHINT: {}'.format(self.hint)
return rv
class DuplicateTableException(ParsingException):
pass
class DuplicateColumnException(ParsingException):
pass
class RelationNoColException(ParsingException):
hint = 'Try to declare the tables before the relationships.'
class NoCurrentTableException(ParsingException):
hint = 'Try to declare the tables before the relationships and columns.'
def remove_comments_from_line(line):
if '#' not in line:
return line.strip()
return line[:line.index('#')].strip()
def filter_lines_from_comments(lines):
""" Filter the lines from comments and non code lines. """
for line_nb, raw_line in enumerate(lines):
clean_line = remove_comments_from_line(raw_line)
if clean_line == '':
continue
yield line_nb, clean_line, raw_line
def parse_line(line):
for typ in [Table, Relation, Column]:
match = typ.RE.match(line)
if match:
return typ.make_from_match(match)
msg = 'Line "{}" could not be parsed to an object.'
raise ValueError(msg.format(line))
def _check_no_current_table(new_obj, current_table):
""" Raises exception if we try to add a relation or a column
with no current table. """
if current_table is None:
msg = 'Cannot add {} before adding table'
if isinstance(new_obj, Relation):
raise NoCurrentTableException(msg.format('relation'))
if isinstance(new_obj, Column):
raise NoCurrentTableException(msg.format('column'))
def _update_check_inputs(current_table, tables, relations):
assert current_table is None or isinstance(current_table, Table)
assert isinstance(tables, list)
assert all(isinstance(t, Table) for t in tables)
assert all(isinstance(r, Relation) for r in relations)
assert current_table is None or current_table in tables
def _check_colname_in_lst(column_name, columns_names):
if column_name not in columns_names:
msg = 'Cannot add a relation with column "{}" which is undefined'
raise RelationNoColException(msg.format(column_name))
def _check_not_creating_duplicates(new_name, names, type, exc):
if new_name in names:
msg = 'Cannot add {} named "{}" which is ' \
'already present in the schema.'
raise exc(msg.format(type, new_name))
def markdown_file_to_intermediary(filename):
""" Parse a file and return to intermediary syntax. """
with open(filename) as f:
lines = f.readlines()
return line_iterator_to_intermediary(lines)
def line_iterator_to_intermediary(line_iterator):
""" Parse an iterator of str (one string per line) to the intermediary syntax"""
current_table = None
tables = []
relations = []
errors = []
for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):
try:
new_obj = parse_line(line)
current_table, tables, relations = update_models(new_obj, current_table, tables, relations)
except ParsingException as e:
e.line_nb = line_nb
e.line = raw_line
errors.append(e)
if len(errors) != 0:
msg = 'ERAlchemy couldn\'t complete the generation due the {} following errors'.format(len(errors))
raise ParsingException(msg + '\n\n'.join(e.traceback for e in errors))
return tables, relations
|
Alexis-benoist/eralchemy | eralchemy/parser.py | markdown_file_to_intermediary | python | def markdown_file_to_intermediary(filename):
with open(filename) as f:
lines = f.readlines()
return line_iterator_to_intermediary(lines) | Parse a file and return to intermediary syntax. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/parser.py#L119-L123 | [
"def line_iterator_to_intermediary(line_iterator):\n \"\"\" Parse an iterator of str (one string per line) to the intermediary syntax\"\"\"\n current_table = None\n tables = []\n relations = []\n errors = []\n for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):\n try:\... | # -*- coding: utf-8 -*-
from eralchemy.models import Table, Relation, Column
class ParsingException(Exception):
base_traceback = 'Error on line {line_nb}: {line}\n{error}'
hint = None
@property
def traceback(self):
rv = self.base_traceback.format(
line_nb=getattr(self, 'line_nb', '?'),
line=getattr(self, 'line', ''),
error=self.args[0],
)
if self.hint is not None:
rv += '\nHINT: {}'.format(self.hint)
return rv
class DuplicateTableException(ParsingException):
pass
class DuplicateColumnException(ParsingException):
pass
class RelationNoColException(ParsingException):
hint = 'Try to declare the tables before the relationships.'
class NoCurrentTableException(ParsingException):
hint = 'Try to declare the tables before the relationships and columns.'
def remove_comments_from_line(line):
if '#' not in line:
return line.strip()
return line[:line.index('#')].strip()
def filter_lines_from_comments(lines):
""" Filter the lines from comments and non code lines. """
for line_nb, raw_line in enumerate(lines):
clean_line = remove_comments_from_line(raw_line)
if clean_line == '':
continue
yield line_nb, clean_line, raw_line
def parse_line(line):
for typ in [Table, Relation, Column]:
match = typ.RE.match(line)
if match:
return typ.make_from_match(match)
msg = 'Line "{}" could not be parsed to an object.'
raise ValueError(msg.format(line))
def _check_no_current_table(new_obj, current_table):
""" Raises exception if we try to add a relation or a column
with no current table. """
if current_table is None:
msg = 'Cannot add {} before adding table'
if isinstance(new_obj, Relation):
raise NoCurrentTableException(msg.format('relation'))
if isinstance(new_obj, Column):
raise NoCurrentTableException(msg.format('column'))
def _update_check_inputs(current_table, tables, relations):
assert current_table is None or isinstance(current_table, Table)
assert isinstance(tables, list)
assert all(isinstance(t, Table) for t in tables)
assert all(isinstance(r, Relation) for r in relations)
assert current_table is None or current_table in tables
def _check_colname_in_lst(column_name, columns_names):
if column_name not in columns_names:
msg = 'Cannot add a relation with column "{}" which is undefined'
raise RelationNoColException(msg.format(column_name))
def _check_not_creating_duplicates(new_name, names, type, exc):
if new_name in names:
msg = 'Cannot add {} named "{}" which is ' \
'already present in the schema.'
raise exc(msg.format(type, new_name))
def update_models(new_obj, current_table, tables, relations):
""" Update the state of the parsing. """
_update_check_inputs(current_table, tables, relations)
_check_no_current_table(new_obj, current_table)
if isinstance(new_obj, Table):
tables_names = [t.name for t in tables]
_check_not_creating_duplicates(new_obj.name, tables_names, 'table', DuplicateTableException)
return new_obj, tables + [new_obj], relations
if isinstance(new_obj, Relation):
tables_names = [t.name for t in tables]
_check_colname_in_lst(new_obj.right_col, tables_names)
_check_colname_in_lst(new_obj.left_col, tables_names)
return current_table, tables, relations + [new_obj]
if isinstance(new_obj, Column):
columns_names = [c.name for c in current_table.columns]
_check_not_creating_duplicates(new_obj.name, columns_names, 'column', DuplicateColumnException)
current_table.columns.append(new_obj)
return current_table, tables, relations
msg = "new_obj cannot be of type {}"
raise ValueError(msg.format(new_obj.__class__.__name__))
def line_iterator_to_intermediary(line_iterator):
""" Parse an iterator of str (one string per line) to the intermediary syntax"""
current_table = None
tables = []
relations = []
errors = []
for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):
try:
new_obj = parse_line(line)
current_table, tables, relations = update_models(new_obj, current_table, tables, relations)
except ParsingException as e:
e.line_nb = line_nb
e.line = raw_line
errors.append(e)
if len(errors) != 0:
msg = 'ERAlchemy couldn\'t complete the generation due the {} following errors'.format(len(errors))
raise ParsingException(msg + '\n\n'.join(e.traceback for e in errors))
return tables, relations
|
Alexis-benoist/eralchemy | eralchemy/parser.py | line_iterator_to_intermediary | python | def line_iterator_to_intermediary(line_iterator):
current_table = None
tables = []
relations = []
errors = []
for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):
try:
new_obj = parse_line(line)
current_table, tables, relations = update_models(new_obj, current_table, tables, relations)
except ParsingException as e:
e.line_nb = line_nb
e.line = raw_line
errors.append(e)
if len(errors) != 0:
msg = 'ERAlchemy couldn\'t complete the generation due the {} following errors'.format(len(errors))
raise ParsingException(msg + '\n\n'.join(e.traceback for e in errors))
return tables, relations | Parse an iterator of str (one string per line) to the intermediary syntax | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/parser.py#L126-L143 | [
"def filter_lines_from_comments(lines):\n \"\"\" Filter the lines from comments and non code lines. \"\"\"\n for line_nb, raw_line in enumerate(lines):\n clean_line = remove_comments_from_line(raw_line)\n if clean_line == '':\n continue\n yield line_nb, clean_line, raw_line\n",... | # -*- coding: utf-8 -*-
from eralchemy.models import Table, Relation, Column
class ParsingException(Exception):
base_traceback = 'Error on line {line_nb}: {line}\n{error}'
hint = None
@property
def traceback(self):
rv = self.base_traceback.format(
line_nb=getattr(self, 'line_nb', '?'),
line=getattr(self, 'line', ''),
error=self.args[0],
)
if self.hint is not None:
rv += '\nHINT: {}'.format(self.hint)
return rv
class DuplicateTableException(ParsingException):
pass
class DuplicateColumnException(ParsingException):
pass
class RelationNoColException(ParsingException):
hint = 'Try to declare the tables before the relationships.'
class NoCurrentTableException(ParsingException):
hint = 'Try to declare the tables before the relationships and columns.'
def remove_comments_from_line(line):
if '#' not in line:
return line.strip()
return line[:line.index('#')].strip()
def filter_lines_from_comments(lines):
""" Filter the lines from comments and non code lines. """
for line_nb, raw_line in enumerate(lines):
clean_line = remove_comments_from_line(raw_line)
if clean_line == '':
continue
yield line_nb, clean_line, raw_line
def parse_line(line):
for typ in [Table, Relation, Column]:
match = typ.RE.match(line)
if match:
return typ.make_from_match(match)
msg = 'Line "{}" could not be parsed to an object.'
raise ValueError(msg.format(line))
def _check_no_current_table(new_obj, current_table):
""" Raises exception if we try to add a relation or a column
with no current table. """
if current_table is None:
msg = 'Cannot add {} before adding table'
if isinstance(new_obj, Relation):
raise NoCurrentTableException(msg.format('relation'))
if isinstance(new_obj, Column):
raise NoCurrentTableException(msg.format('column'))
def _update_check_inputs(current_table, tables, relations):
assert current_table is None or isinstance(current_table, Table)
assert isinstance(tables, list)
assert all(isinstance(t, Table) for t in tables)
assert all(isinstance(r, Relation) for r in relations)
assert current_table is None or current_table in tables
def _check_colname_in_lst(column_name, columns_names):
if column_name not in columns_names:
msg = 'Cannot add a relation with column "{}" which is undefined'
raise RelationNoColException(msg.format(column_name))
def _check_not_creating_duplicates(new_name, names, type, exc):
if new_name in names:
msg = 'Cannot add {} named "{}" which is ' \
'already present in the schema.'
raise exc(msg.format(type, new_name))
def update_models(new_obj, current_table, tables, relations):
""" Update the state of the parsing. """
_update_check_inputs(current_table, tables, relations)
_check_no_current_table(new_obj, current_table)
if isinstance(new_obj, Table):
tables_names = [t.name for t in tables]
_check_not_creating_duplicates(new_obj.name, tables_names, 'table', DuplicateTableException)
return new_obj, tables + [new_obj], relations
if isinstance(new_obj, Relation):
tables_names = [t.name for t in tables]
_check_colname_in_lst(new_obj.right_col, tables_names)
_check_colname_in_lst(new_obj.left_col, tables_names)
return current_table, tables, relations + [new_obj]
if isinstance(new_obj, Column):
columns_names = [c.name for c in current_table.columns]
_check_not_creating_duplicates(new_obj.name, columns_names, 'column', DuplicateColumnException)
current_table.columns.append(new_obj)
return current_table, tables, relations
msg = "new_obj cannot be of type {}"
raise ValueError(msg.format(new_obj.__class__.__name__))
def markdown_file_to_intermediary(filename):
""" Parse a file and return to intermediary syntax. """
with open(filename) as f:
lines = f.readlines()
return line_iterator_to_intermediary(lines)
|
Alexis-benoist/eralchemy | eralchemy/helpers.py | check_args | python | def check_args(args):
check_args_has_attributes(args)
if args.v:
non_version_attrs = [v for k, v in args.__dict__.items() if k != 'v']
print('non_version_attrs', non_version_attrs)
if len([v for v in non_version_attrs if v is not None]) != 0:
fail('Cannot show the version number with another command.')
return
if args.i is None:
fail('Cannot draw ER diagram of no database.')
if args.o is None:
fail('Cannot draw ER diagram with no output file.') | Checks that the args are coherent. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/helpers.py#L11-L23 | [
"def fail(message, *args):\n print('Error:', message % args, file=sys.stderr)\n sys.exit(1)\n",
"def check_args_has_attributes(args):\n check_args_has_attribute(args, 'i')\n check_args_has_attribute(args, 'o')\n check_args_has_attribute(args, 'include_tables')\n check_args_has_attribute(args, 'i... | from __future__ import print_function
import sys
# from https://github.com/mitsuhiko/flask/blob/master/scripts/make-release.py L92
def fail(message, *args):
    """Print a %-formatted error message to stderr and exit with status 1."""
    rendered = message % args
    print('Error:', rendered, file=sys.stderr)
    sys.exit(1)
def check_args_has_attributes(args):
    """Verify that every CLI attribute the tool relies on is present on *args*."""
    for attr in ('i', 'o', 'include_tables', 'include_columns',
                 'exclude_tables', 'exclude_columns', 's'):
        check_args_has_attribute(args, attr)
def check_args_has_attribute(args, name):
    """Raise an Exception when *args* lacks the attribute *name*."""
    if hasattr(args, name):
        return
    raise Exception('{} should be set'.format(name))
|
Alexis-benoist/eralchemy | eralchemy/sqla.py | relation_to_intermediary | python | def relation_to_intermediary(fk):
return Relation(
right_col=format_name(fk.parent.table.fullname),
left_col=format_name(fk._column_tokens[1]),
right_cardinality='?',
left_cardinality='*',
) | Transform an SQLAlchemy ForeignKey object to it's intermediary representation. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/sqla.py#L14-L21 | [
"def format_name(name):\n \"\"\" Transforms the name into a nice string representation. \"\"\"\n return unicode(name)\n"
] | # -*- coding: utf-8 -*-
"""
This class allow to transform SQLAlchemy metadata to the intermediary syntax.
"""
from eralchemy.models import Relation, Column, Table
import sys
from sqlalchemy.exc import CompileError
if sys.version_info[0] == 3:
unicode = str
def format_type(typ):
    """Render an SQLAlchemy column type as text.

    Falls back to 'Null' when the type cannot be compiled to a string.
    """
    try:
        rendered = unicode(typ)
    except CompileError:
        return 'Null'
    return rendered
def format_name(name):
    """Coerce *name* to a text (unicode) string for display.

    `unicode` is aliased to `str` on Python 3 at module level.
    """
    return unicode(name)
def column_to_intermediary(col, type_formatter=format_type):
    """Convert an SQLAlchemy Column into the intermediary Column model.

    :param col: SQLAlchemy column object.
    :param type_formatter: callable turning the column type into text.
    """
    kwargs = {
        'name': col.name,
        'type': type_formatter(col.type),
        'is_key': col.primary_key,
    }
    return Column(**kwargs)
def table_to_intermediary(table):
    """Convert an SQLAlchemy Table into the intermediary Table model."""
    sqla_columns = table.c._data.values()
    return Table(
        name=table.fullname,
        columns=[column_to_intermediary(c) for c in sqla_columns],
    )
def metadata_to_intermediary(metadata):
    """Convert SQLAlchemy metadata into (tables, relationships) lists."""
    all_tables = list(metadata.tables.values())
    tables = [table_to_intermediary(t) for t in all_tables]
    relationships = []
    for t in all_tables:
        for fk in t.foreign_keys:
            relationships.append(relation_to_intermediary(fk))
    return tables, relationships
def declarative_to_intermediary(base):
    """Convert an SQLAlchemy declarative base to the intermediary
    representation by delegating to its bound metadata."""
    return metadata_to_intermediary(base.metadata)
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
    """Automap naming override: call scalar relationships '<referred>_ref'."""
    return referred_cls.__name__.lower() + "_ref"
def database_to_intermediary(database_uri, schema=None):
    """Reflect the database at *database_uri* and return the intermediary form.

    :param database_uri: SQLAlchemy connection URI.
    :param schema: optional schema name to reflect instead of the default.
    """
    from sqlalchemy import create_engine
    from sqlalchemy.ext.automap import automap_base
    Base = automap_base()
    engine = create_engine(database_uri)
    if schema is not None:
        Base.metadata.schema = schema
    # Reflect all tables, using the custom scalar-relationship naming above.
    Base.prepare(engine, reflect=True,
                 name_for_scalar_relationship=name_for_scalar_relationship)
    return declarative_to_intermediary(Base)
|
Alexis-benoist/eralchemy | eralchemy/sqla.py | column_to_intermediary | python | def column_to_intermediary(col, type_formatter=format_type):
return Column(
name=col.name,
type=type_formatter(col.type),
is_key=col.primary_key,
) | Transform an SQLAlchemy Column object to it's intermediary representation. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/sqla.py#L37-L43 | [
"def format_type(typ):\n \"\"\" Transforms the type into a nice string representation. \"\"\"\n try:\n return unicode(typ)\n except CompileError:\n return 'Null'\n"
] | # -*- coding: utf-8 -*-
"""
This class allow to transform SQLAlchemy metadata to the intermediary syntax.
"""
from eralchemy.models import Relation, Column, Table
import sys
from sqlalchemy.exc import CompileError
if sys.version_info[0] == 3:
unicode = str
def relation_to_intermediary(fk):
"""Transform an SQLAlchemy ForeignKey object to it's intermediary representation. """
return Relation(
right_col=format_name(fk.parent.table.fullname),
left_col=format_name(fk._column_tokens[1]),
right_cardinality='?',
left_cardinality='*',
)
def format_type(typ):
""" Transforms the type into a nice string representation. """
try:
return unicode(typ)
except CompileError:
return 'Null'
def format_name(name):
""" Transforms the name into a nice string representation. """
return unicode(name)
def table_to_intermediary(table):
"""Transform an SQLAlchemy Table object to it's intermediary representation. """
return Table(
name=table.fullname,
columns=[column_to_intermediary(col) for col in table.c._data.values()]
)
def metadata_to_intermediary(metadata):
""" Transforms SQLAlchemy metadata to the intermediary representation. """
tables = [table_to_intermediary(table) for table in metadata.tables.values()]
relationships = [relation_to_intermediary(fk) for table in metadata.tables.values() for fk in table.foreign_keys]
return tables, relationships
def declarative_to_intermediary(base):
""" Transform an SQLAlchemy Declarative Base to the intermediary representation. """
return metadata_to_intermediary(base.metadata)
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
""" Overriding naming schemes. """
name = referred_cls.__name__.lower() + "_ref"
return name
def database_to_intermediary(database_uri, schema=None):
""" Introspect from the database (given the database_uri) to create the intermediary representation. """
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
Base = automap_base()
engine = create_engine(database_uri)
if schema is not None:
Base.metadata.schema = schema
# reflect the tables
Base.prepare(engine, reflect=True, name_for_scalar_relationship=name_for_scalar_relationship)
return declarative_to_intermediary(Base)
|
Alexis-benoist/eralchemy | eralchemy/sqla.py | table_to_intermediary | python | def table_to_intermediary(table):
return Table(
name=table.fullname,
columns=[column_to_intermediary(col) for col in table.c._data.values()]
) | Transform an SQLAlchemy Table object to it's intermediary representation. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/sqla.py#L46-L51 | null | # -*- coding: utf-8 -*-
"""
This class allow to transform SQLAlchemy metadata to the intermediary syntax.
"""
from eralchemy.models import Relation, Column, Table
import sys
from sqlalchemy.exc import CompileError
if sys.version_info[0] == 3:
unicode = str
def relation_to_intermediary(fk):
"""Transform an SQLAlchemy ForeignKey object to it's intermediary representation. """
return Relation(
right_col=format_name(fk.parent.table.fullname),
left_col=format_name(fk._column_tokens[1]),
right_cardinality='?',
left_cardinality='*',
)
def format_type(typ):
""" Transforms the type into a nice string representation. """
try:
return unicode(typ)
except CompileError:
return 'Null'
def format_name(name):
""" Transforms the name into a nice string representation. """
return unicode(name)
def column_to_intermediary(col, type_formatter=format_type):
"""Transform an SQLAlchemy Column object to it's intermediary representation. """
return Column(
name=col.name,
type=type_formatter(col.type),
is_key=col.primary_key,
)
def metadata_to_intermediary(metadata):
""" Transforms SQLAlchemy metadata to the intermediary representation. """
tables = [table_to_intermediary(table) for table in metadata.tables.values()]
relationships = [relation_to_intermediary(fk) for table in metadata.tables.values() for fk in table.foreign_keys]
return tables, relationships
def declarative_to_intermediary(base):
""" Transform an SQLAlchemy Declarative Base to the intermediary representation. """
return metadata_to_intermediary(base.metadata)
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
""" Overriding naming schemes. """
name = referred_cls.__name__.lower() + "_ref"
return name
def database_to_intermediary(database_uri, schema=None):
""" Introspect from the database (given the database_uri) to create the intermediary representation. """
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
Base = automap_base()
engine = create_engine(database_uri)
if schema is not None:
Base.metadata.schema = schema
# reflect the tables
Base.prepare(engine, reflect=True, name_for_scalar_relationship=name_for_scalar_relationship)
return declarative_to_intermediary(Base)
|
Alexis-benoist/eralchemy | eralchemy/sqla.py | metadata_to_intermediary | python | def metadata_to_intermediary(metadata):
tables = [table_to_intermediary(table) for table in metadata.tables.values()]
relationships = [relation_to_intermediary(fk) for table in metadata.tables.values() for fk in table.foreign_keys]
return tables, relationships | Transforms SQLAlchemy metadata to the intermediary representation. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/sqla.py#L54-L58 | null | # -*- coding: utf-8 -*-
"""
This class allow to transform SQLAlchemy metadata to the intermediary syntax.
"""
from eralchemy.models import Relation, Column, Table
import sys
from sqlalchemy.exc import CompileError
if sys.version_info[0] == 3:
unicode = str
def relation_to_intermediary(fk):
"""Transform an SQLAlchemy ForeignKey object to it's intermediary representation. """
return Relation(
right_col=format_name(fk.parent.table.fullname),
left_col=format_name(fk._column_tokens[1]),
right_cardinality='?',
left_cardinality='*',
)
def format_type(typ):
""" Transforms the type into a nice string representation. """
try:
return unicode(typ)
except CompileError:
return 'Null'
def format_name(name):
""" Transforms the name into a nice string representation. """
return unicode(name)
def column_to_intermediary(col, type_formatter=format_type):
"""Transform an SQLAlchemy Column object to it's intermediary representation. """
return Column(
name=col.name,
type=type_formatter(col.type),
is_key=col.primary_key,
)
def table_to_intermediary(table):
"""Transform an SQLAlchemy Table object to it's intermediary representation. """
return Table(
name=table.fullname,
columns=[column_to_intermediary(col) for col in table.c._data.values()]
)
def declarative_to_intermediary(base):
""" Transform an SQLAlchemy Declarative Base to the intermediary representation. """
return metadata_to_intermediary(base.metadata)
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
""" Overriding naming schemes. """
name = referred_cls.__name__.lower() + "_ref"
return name
def database_to_intermediary(database_uri, schema=None):
""" Introspect from the database (given the database_uri) to create the intermediary representation. """
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
Base = automap_base()
engine = create_engine(database_uri)
if schema is not None:
Base.metadata.schema = schema
# reflect the tables
Base.prepare(engine, reflect=True, name_for_scalar_relationship=name_for_scalar_relationship)
return declarative_to_intermediary(Base)
|
Alexis-benoist/eralchemy | eralchemy/sqla.py | name_for_scalar_relationship | python | def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
name = referred_cls.__name__.lower() + "_ref"
return name | Overriding naming schemes. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/sqla.py#L66-L69 | null | # -*- coding: utf-8 -*-
"""
This class allow to transform SQLAlchemy metadata to the intermediary syntax.
"""
from eralchemy.models import Relation, Column, Table
import sys
from sqlalchemy.exc import CompileError
if sys.version_info[0] == 3:
unicode = str
def relation_to_intermediary(fk):
"""Transform an SQLAlchemy ForeignKey object to it's intermediary representation. """
return Relation(
right_col=format_name(fk.parent.table.fullname),
left_col=format_name(fk._column_tokens[1]),
right_cardinality='?',
left_cardinality='*',
)
def format_type(typ):
""" Transforms the type into a nice string representation. """
try:
return unicode(typ)
except CompileError:
return 'Null'
def format_name(name):
""" Transforms the name into a nice string representation. """
return unicode(name)
def column_to_intermediary(col, type_formatter=format_type):
"""Transform an SQLAlchemy Column object to it's intermediary representation. """
return Column(
name=col.name,
type=type_formatter(col.type),
is_key=col.primary_key,
)
def table_to_intermediary(table):
"""Transform an SQLAlchemy Table object to it's intermediary representation. """
return Table(
name=table.fullname,
columns=[column_to_intermediary(col) for col in table.c._data.values()]
)
def metadata_to_intermediary(metadata):
""" Transforms SQLAlchemy metadata to the intermediary representation. """
tables = [table_to_intermediary(table) for table in metadata.tables.values()]
relationships = [relation_to_intermediary(fk) for table in metadata.tables.values() for fk in table.foreign_keys]
return tables, relationships
def declarative_to_intermediary(base):
""" Transform an SQLAlchemy Declarative Base to the intermediary representation. """
return metadata_to_intermediary(base.metadata)
def database_to_intermediary(database_uri, schema=None):
""" Introspect from the database (given the database_uri) to create the intermediary representation. """
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
Base = automap_base()
engine = create_engine(database_uri)
if schema is not None:
Base.metadata.schema = schema
# reflect the tables
Base.prepare(engine, reflect=True, name_for_scalar_relationship=name_for_scalar_relationship)
return declarative_to_intermediary(Base)
|
Alexis-benoist/eralchemy | eralchemy/sqla.py | database_to_intermediary | python | def database_to_intermediary(database_uri, schema=None):
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
Base = automap_base()
engine = create_engine(database_uri)
if schema is not None:
Base.metadata.schema = schema
# reflect the tables
Base.prepare(engine, reflect=True, name_for_scalar_relationship=name_for_scalar_relationship)
return declarative_to_intermediary(Base) | Introspect from the database (given the database_uri) to create the intermediary representation. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/sqla.py#L72-L84 | [
"def declarative_to_intermediary(base):\n \"\"\" Transform an SQLAlchemy Declarative Base to the intermediary representation. \"\"\"\n return metadata_to_intermediary(base.metadata)\n"
] | # -*- coding: utf-8 -*-
"""
This class allow to transform SQLAlchemy metadata to the intermediary syntax.
"""
from eralchemy.models import Relation, Column, Table
import sys
from sqlalchemy.exc import CompileError
if sys.version_info[0] == 3:
unicode = str
def relation_to_intermediary(fk):
"""Transform an SQLAlchemy ForeignKey object to it's intermediary representation. """
return Relation(
right_col=format_name(fk.parent.table.fullname),
left_col=format_name(fk._column_tokens[1]),
right_cardinality='?',
left_cardinality='*',
)
def format_type(typ):
""" Transforms the type into a nice string representation. """
try:
return unicode(typ)
except CompileError:
return 'Null'
def format_name(name):
""" Transforms the name into a nice string representation. """
return unicode(name)
def column_to_intermediary(col, type_formatter=format_type):
"""Transform an SQLAlchemy Column object to it's intermediary representation. """
return Column(
name=col.name,
type=type_formatter(col.type),
is_key=col.primary_key,
)
def table_to_intermediary(table):
"""Transform an SQLAlchemy Table object to it's intermediary representation. """
return Table(
name=table.fullname,
columns=[column_to_intermediary(col) for col in table.c._data.values()]
)
def metadata_to_intermediary(metadata):
""" Transforms SQLAlchemy metadata to the intermediary representation. """
tables = [table_to_intermediary(table) for table in metadata.tables.values()]
relationships = [relation_to_intermediary(fk) for table in metadata.tables.values() for fk in table.foreign_keys]
return tables, relationships
def declarative_to_intermediary(base):
""" Transform an SQLAlchemy Declarative Base to the intermediary representation. """
return metadata_to_intermediary(base.metadata)
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
""" Overriding naming schemes. """
name = referred_cls.__name__.lower() + "_ref"
return name
|
Alexis-benoist/eralchemy | script/make_release.py | parse_args | python | def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-M', action='store_true')
parser.add_argument('-m', action='store_true')
parser.add_argument('-f', action='store_true')
args = parser.parse_args()
major, minor, fix = args.M, args.m, args.f
if major + minor + fix != 1:
fail('Please select one and only one action.')
return major, minor, fix | Parse the args, returns if the type of update:
Major, minor, fix | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/script/make_release.py#L74-L86 | null | from __future__ import print_function
import os
import sys
from subprocess import Popen, PIPE
from getpass import getpass
from shutil import rmtree
import argparse
# inspired by https://github.com/mitsuhiko/flask/blob/master/scripts/make-release.py
def set_filename_version(filename, version_number):
    """Overwrite *filename* with a single ``version = '...'`` line."""
    version_line = "version = '{}'\n".format(version_number)
    with open(filename, 'w+') as f:
        f.write(version_line)
def set_init_version(version_str):
    """Write *version_str* into eralchemy/version.py (logged via info)."""
    info('Setting __init__.py version to %s', version_str)
    set_filename_version('eralchemy/version.py', version_str)
def rm(filename):
    """Recursively delete *filename*, ignoring missing paths."""
    info('Delete {}'.format(filename))
    rmtree(filename, ignore_errors=True)
def build_and_upload():
    """Rebuild the distribution artefacts, upload them to PyPI and push tags."""
    # Start from a clean slate: drop previous build artefacts.
    rm('ERAlchemy.egg-info')
    rm('build')
    rm('dist')
    # Convert the markdown readme to reST (PyPI long description format).
    Popen(['pandoc', '--from=markdown', '--to=rst', 'readme.md', '--output=readme.rst'],
          stdout=PIPE).wait()
    Popen([sys.executable, 'setup.py', 'bdist_wheel', '--universal'], stdout=PIPE).wait()
    Popen([sys.executable, 'setup.py', 'sdist'], stdout=PIPE).wait()
    pypi_pwd = getpass(prompt='Pypi Password: ')
    Popen(['twine', 'upload', 'dist/*', '-u', 'alexis.benoist', '-p', pypi_pwd]).wait()
    # NOTE(review): 'open' is a macOS command — this step is platform-specific.
    Popen(['open', 'https://pypi.python.org/pypi/ERAlchemy'])
    # NOTE(review): the result of `git tag` below is discarded; presumably
    # only the side effect of running it was intended — confirm.
    Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines()
    Popen(['git', 'push', '--tags']).wait()
def fail(message, *args):
    """Print a %-formatted error message to stderr and exit with status 1."""
    print('Error:', message % args, file=sys.stderr)
    sys.exit(1)
def info(message, *args):
    """Print an informational %-formatted message to stderr.

    Bug fix: this previously printed the 'Error:' prefix (copy-pasted from
    fail()), mislabelling informational output — e.g. version bumps and git
    tagging — as errors.
    """
    print('Info:', message % args, file=sys.stderr)
def git_is_clean():
    """Return True when `git diff --quiet` exits 0 (no unstaged changes)."""
    return Popen(['git', 'diff', '--quiet']).wait() == 0
def make_git_commit(message, *args):
    """Commit all tracked changes with a %-formatted commit message."""
    message = message % args
    Popen(['git', 'commit', '-am', message]).wait()
def make_git_tag(tag):
    """Create a git tag named *tag* (logged via info)."""
    info('Tagging "%s"', tag)
    Popen(['git', 'tag', tag]).wait()
def version_str_to_lst(v):
    """Parse a dotted version string such as '1.2.3' into [1, 2, 3]."""
    parts = v.split('.')
    return list(map(int, parts))
def version_lst_to_str(v):
    """Join version components such as [1, 2, 3] back into '1.2.3'."""
    return '.'.join([str(component) for component in v])
def get_current_version():
    """Read eralchemy/version.py and return the version as [major, minor, fix]."""
    with open('eralchemy/version.py') as f:
        lines = f.readlines()
    namespace = {}
    # The file's first line is `version = '...'`; exec it to extract the value.
    exec(lines[0], namespace)
    return version_str_to_lst(namespace['version'])
def get_git_tags():
    """Return the set of existing git tag lines from `git tag` output.

    NOTE(review): on Python 3 Popen returns bytes by default, so the set
    elements are bytes, not str — confirm against callers.
    """
    return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines())
def get_next_version(major, minor, fix, current_version):
    """Bump *current_version* ([major, minor, fix]) per the selected flag.

    Exactly one of *major*, *minor*, *fix* is expected to be truthy;
    raises UserWarning when none is set.
    """
    if major:
        return [current_version[0] + 1, 0, 0]
    elif minor:
        return [current_version[0], current_version[1] + 1, 0]
    elif fix:
        return [current_version[0], current_version[1], current_version[2] + 1]
    else:
        raise UserWarning()
def main():
    """Drive the release: bump version, commit, tag, then build and upload."""
    # Run from the repository root regardless of invocation directory.
    os.chdir(os.path.join(os.path.dirname(__file__), '..'))
    current_version = get_current_version()
    major, minor, fix = parse_args()
    next_version = get_next_version(major, minor, fix, current_version)
    next_version_str = version_lst_to_str(next_version)
    tags = get_git_tags()
    # NOTE(review): get_git_tags yields bytes on Python 3 while
    # next_version_str is str, so this membership test may never match
    # there — confirm the intended interpreter.
    if next_version_str in tags:
        fail('Version "%s" is already tagged', next_version_str)
    if not git_is_clean():
        fail('You have uncommitted changes in git')
    set_init_version(next_version_str)
    make_git_commit('Bump version number to %s', next_version_str)
    make_git_tag('v' + next_version_str)
    build_and_upload()
if __name__ == '__main__':
main()
|
Alexis-benoist/eralchemy | eralchemy/main.py | cli | python | def cli():
parser = get_argparser()
args = parser.parse_args()
check_args(args)
if args.v:
print('ERAlchemy version {}.'.format(__version__))
exit(0)
render_er(
args.i,
args.o,
include_tables=args.include_tables,
include_columns=args.include_columns,
exclude_tables=args.exclude_tables,
exclude_columns=args.exclude_columns,
schema=args.s
) | Entry point for the application script | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L22-L39 | [
"def render_er(input, output, mode='auto', include_tables=None, include_columns=None,\n exclude_tables=None, exclude_columns=None, schema=None):\n \"\"\"\n Transform the metadata into a representation.\n :param input: Possible inputs are instances of:\n MetaData: SQLAlchemy Metadata\n ... | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
try:
basestring
except NameError:
basestring = str
def get_argparser():
    """Build the ERAlchemy command-line argument parser."""
    parser = argparse.ArgumentParser(prog='ERAlchemy')
    # Optional single-value flags.
    single_valued = [
        ('-i', 'Database URI to process.'),
        ('-o', 'Name of the file to write.'),
        ('-s', 'Name of the schema.'),
    ]
    for flag, help_text in single_valued:
        parser.add_argument(flag, nargs='?', help=help_text)
    # Table/column filters accept one or more names.
    parser.add_argument('--exclude-tables', '-x', nargs='+',
                        help='Name of tables not to be displayed.')
    parser.add_argument('--exclude-columns', nargs='+',
                        help='Name of columns not to be displayed (for all tables).')
    parser.add_argument('--include-tables', nargs='+',
                        help='Name of tables to be displayed alone.')
    parser.add_argument('--include-columns', nargs='+',
                        help='Name of columns to be displayed alone (for all tables).')
    parser.add_argument('-v', help='Prints version number.', action='store_true')
    return parser
def intermediary_to_markdown(tables, relationships, output):
    """Render the intermediary representation as ER markup and save it to *output*."""
    markup = _intermediary_to_markdown(tables, relationships)
    with open(output, "w") as file_out:
        file_out.write(markup)
def intermediary_to_dot(tables, relationships, output):
    """Render the intermediary representation as dot source and save it to *output*."""
    dot_source = _intermediary_to_dot(tables, relationships)
    with open(output, "w") as file_out:
        file_out.write(dot_source)
def intermediary_to_schema(tables, relationships, output):
    """Draw the ER diagram to *output*; the format comes from the file extension."""
    dot_source = _intermediary_to_dot(tables, relationships)
    graph = AGraph().from_string(dot_source)
    extension = output.split('.')[-1]
    graph.draw(path=output, prog='dot', format=extension)
def _intermediary_to_markdown(tables, relationships):
""" Returns the er markup source in a string. """
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r)
def _intermediary_to_dot(tables, relationships):
    """Return the Graphviz dot source representing the database."""
    table_part = '\n'.join(item.to_dot() for item in tables)
    rel_part = '\n'.join(item.to_dot() for item in relationships)
    # GRAPH_BEGINNING opens the graph; the trailing '}' closes it.
    return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, table_part, rel_part)
# Routes from the class name to the function transforming this class in
# the intermediary representation.
switch_input_class_to_method = {
'MetaData': metadata_to_intermediary,
'DeclarativeMeta': declarative_to_intermediary,
# For compatibility with Flask-SQLAlchemy
'_BoundDeclarativeMeta': declarative_to_intermediary,
# Renamed in Flask-SQLAlchemy 2.3
'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output.
switch_output_mode_auto = {
'er': intermediary_to_markdown,
'graph': intermediary_to_schema,
'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output.
switch_output_mode = {
'er': intermediary_to_markdown,
'dot': intermediary_to_dot,
}
def all_to_intermediary(filename_or_input, schema=None):
    """Dispatch *filename_or_input* to the right converter and return
    (tables, relationships) in the intermediary syntax.

    Supported inputs: instances whose class name is registered in
    `switch_input_class_to_method`, a '.er' markdown filename, an iterable
    of markdown lines, or a database URI.
    """
    # Try to convert from the name of the class
    input_class_name = filename_or_input.__class__.__name__
    try:
        this_to_intermediary = switch_input_class_to_method[input_class_name]
        # NOTE(review): a KeyError raised inside this_to_intermediary would
        # also be swallowed here — confirm that is acceptable.
        tables, relationships = this_to_intermediary(filename_or_input)
        return tables, relationships
    except KeyError:
        pass
    # try to read markdown file.
    if isinstance(filename_or_input, basestring):
        if filename_or_input.split('.')[-1] == 'er':
            return markdown_file_to_intermediary(filename_or_input)
    # try to read a markdown in a string
    if not isinstance(filename_or_input, basestring):
        if all(isinstance(e, basestring) for e in filename_or_input):
            return line_iterator_to_intermediary(filename_or_input)
    # try to read DB URI.
    try:
        make_url(filename_or_input)
        return database_to_intermediary(filename_or_input, schema=schema)
    except ArgumentError:
        pass
    msg = 'Cannot process filename_or_input {}'.format(input_class_name)
    raise ValueError(msg)
def get_output_mode(output, mode):
    """Return the renderer for the requested output.

    :param output: target filename; its extension selects the renderer when
        *mode* is 'auto'.
    :param mode: 'er', 'graph', 'dot' or 'auto'.
    :raises ValueError: when an explicit *mode* is not supported.
    """
    if mode != 'auto':
        try:
            return switch_output_mode_auto[mode]
        except KeyError:
            # Bug fix: the '{}' placeholder was previously emitted verbatim
            # because .format(mode) was missing.
            raise ValueError('Mode "{}" is not supported.'.format(mode))
    extension = output.split('.')[-1]
    try:
        return switch_output_mode[extension]
    except KeyError:
        # Unknown extension: default to drawing the graph image.
        return intermediary_to_schema
def filter_resources(tables, relationships,
                     include_tables=None, include_columns=None,
                     exclude_tables=None, exclude_columns=None):
    """Return deep copies of (tables, relationships) restricted by the filters.

    Include the following:
    1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
    2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
    Exclude the following:
    1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
    2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
    Disclosure note:
    All relationships are taken into consideration before ignoring columns.
    In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables
    """
    # Work on copies so the caller's objects are never mutated.
    _tables = copy.deepcopy(tables)
    _relationships = copy.deepcopy(relationships)
    # None means "no restriction": default to including everything /
    # excluding nothing.
    include_tables = include_tables or [t.name for t in _tables]
    include_columns = include_columns or [c.name for t in _tables for c in t.columns]
    exclude_tables = exclude_tables or list()
    exclude_columns = exclude_columns or list()
    _tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
    # A relationship survives only if both of its endpoints survive.
    _relationships = [r for r in _relationships
                      if r.right_col not in exclude_tables
                      and r.left_col not in exclude_tables
                      and r.right_col in include_tables
                      and r.left_col in include_tables]
    # Column filtering happens last (see disclosure note above).
    for t in _tables:
        t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
    return _tables, _relationships
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
              exclude_tables=None, exclude_columns=None, schema=None):
    """
    Transform the metadata into a representation.
    :param input: Possible inputs are instances of:
    MetaData: SQLAlchemy Metadata
    DeclarativeMeta: SQLAlchemy declarative Base
    :param output: name of the file to output the
    :param mode: str in list:
    'er': writes to a file the markup to generate an ER style diagram.
    'graph': writes the image of the ER diagram.
    'dot': write to file the diagram in dot format.
    'auto': choose from the filename:
    '*.er': writes to a file the markup to generate an ER style diagram.
    '.dot': returns the graph in the dot syntax.
    else: return a graph to the format graph
    :param include_tables: lst of str, table names to include, None means include all
    :param include_columns: lst of str, column names to include, None means include all
    :param exclude_tables: lst of str, table names to exclude, None means exclude nothing
    :param exclude_columns: lst of str, field names to exclude, None means exclude nothing
    :param schema: name of the schema
    """
    try:
        # Convert, filter, then hand off to the renderer chosen by mode/extension.
        tables, relationships = all_to_intermediary(input, schema=schema)
        tables, relationships = filter_resources(tables, relationships,
                                                 include_tables=include_tables, include_columns=include_columns,
                                                 exclude_tables=exclude_tables, exclude_columns=exclude_columns)
        intermediary_to_output = get_output_mode(output, mode)
        intermediary_to_output(tables, relationships, output)
    except ImportError as e:
        # NOTE(review): Exception.message was removed in Python 3; this
        # handler would raise AttributeError there — confirm target version.
        module_name = e.message.split()[-1]
        print('Please install {0} using "pip install {0}".'.format(module_name))
    except ParsingException as e:
        sys.stderr.write(e.message)
if __name__ == '__main__':
cli()
|
Alexis-benoist/eralchemy | eralchemy/main.py | intermediary_to_markdown | python | def intermediary_to_markdown(tables, relationships, output):
er_markup = _intermediary_to_markdown(tables, relationships)
with open(output, "w") as file_out:
file_out.write(er_markup) | Saves the intermediary representation to markdown. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L55-L59 | [
"def _intermediary_to_markdown(tables, relationships):\n \"\"\" Returns the er markup source in a string. \"\"\"\n t = '\\n'.join(t.to_markdown() for t in tables)\n r = '\\n'.join(r.to_markdown() for r in relationships)\n return '{}\\n{}'.format(t, r)\n"
] | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
try:
basestring
except NameError:
basestring = str
def cli():
    """Command-line entry point: parse arguments, validate them, then render."""
    args = get_argparser().parse_args()
    check_args(args)
    if args.v:
        print('ERAlchemy version {}.'.format(__version__))
        exit(0)
    render_er(
        args.i,
        args.o,
        include_tables=args.include_tables,
        include_columns=args.include_columns,
        exclude_tables=args.exclude_tables,
        exclude_columns=args.exclude_columns,
        schema=args.s,
    )
def get_argparser():
parser = argparse.ArgumentParser(prog='ERAlchemy')
parser.add_argument('-i', nargs='?', help='Database URI to process.')
parser.add_argument('-o', nargs='?', help='Name of the file to write.')
parser.add_argument('-s', nargs='?', help='Name of the schema.')
parser.add_argument('--exclude-tables', '-x', nargs='+', help='Name of tables not to be displayed.')
parser.add_argument('--exclude-columns', nargs='+', help='Name of columns not to be displayed (for all tables).')
parser.add_argument('--include-tables', nargs='+', help='Name of tables to be displayed alone.')
parser.add_argument('--include-columns', nargs='+', help='Name of columns to be displayed alone (for all tables).')
parser.add_argument('-v', help='Prints version number.', action='store_true')
return parser
def intermediary_to_dot(tables, relationships, output):
""" Save the intermediary representation to dot format. """
dot_file = _intermediary_to_dot(tables, relationships)
with open(output, "w") as file_out:
file_out.write(dot_file)
def intermediary_to_schema(tables, relationships, output):
""" Transforms and save the intermediary representation to the file chosen. """
dot_file = _intermediary_to_dot(tables, relationships)
graph = AGraph()
graph = graph.from_string(dot_file)
extension = output.split('.')[-1]
graph.draw(path=output, prog='dot', format=extension)
def _intermediary_to_markdown(tables, relationships):
""" Returns the er markup source in a string. """
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r)
def _intermediary_to_dot(tables, relationships):
""" Returns the dot source representing the database in a string. """
t = '\n'.join(t.to_dot() for t in tables)
r = '\n'.join(r.to_dot() for r in relationships)
return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r)
# Routes from the class name to the function transforming this class in
# the intermediary representation.
switch_input_class_to_method = {
'MetaData': metadata_to_intermediary,
'DeclarativeMeta': declarative_to_intermediary,
# For compatibility with Flask-SQLAlchemy
'_BoundDeclarativeMeta': declarative_to_intermediary,
# Renamed in Flask-SQLAlchemy 2.3
'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output.
switch_output_mode_auto = {
'er': intermediary_to_markdown,
'graph': intermediary_to_schema,
'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output.
switch_output_mode = {
'er': intermediary_to_markdown,
'dot': intermediary_to_dot,
}
def all_to_intermediary(filename_or_input, schema=None):
""" Dispatch the filename_or_input to the different function to produce the intermediary syntax.
All the supported classes names are in `swich_input_class_to_method`.
The input can also be a list of strings in markdown format or a filename finishing by '.er' containing markdown
format.
"""
# Try to convert from the name of the class
input_class_name = filename_or_input.__class__.__name__
try:
this_to_intermediary = switch_input_class_to_method[input_class_name]
tables, relationships = this_to_intermediary(filename_or_input)
return tables, relationships
except KeyError:
pass
# try to read markdown file.
if isinstance(filename_or_input, basestring):
if filename_or_input.split('.')[-1] == 'er':
return markdown_file_to_intermediary(filename_or_input)
# try to read a markdown in a string
if not isinstance(filename_or_input, basestring):
if all(isinstance(e, basestring) for e in filename_or_input):
return line_iterator_to_intermediary(filename_or_input)
# try to read DB URI.
try:
make_url(filename_or_input)
return database_to_intermediary(filename_or_input, schema=schema)
except ArgumentError:
pass
msg = 'Cannot process filename_or_input {}'.format(input_class_name)
raise ValueError(msg)
def get_output_mode(output, mode):
    """
    From the output name and the mode, return the function that will
    transform the intermediary representation to the output.
    """
    if mode != 'auto':
        try:
            return switch_output_mode_auto[mode]
        except KeyError:
            # Interpolate the offending mode into the message; previously the
            # '{}' placeholder was emitted literally (missing .format call).
            raise ValueError('Mode "{}" is not supported.'.format(mode))
    # 'auto' mode: infer the output function from the file extension.
    extension = output.split('.')[-1]
    try:
        return switch_output_mode[extension]
    except KeyError:
        # Unknown extensions fall back to rendering an image via graphviz.
        return intermediary_to_schema
def filter_resources(tables, relationships,
include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None):
"""
Include the following:
1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
Exclude the following:
1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
Disclosure note:
All relationships are taken into consideration before ignoring columns.
In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables
"""
_tables = copy.deepcopy(tables)
_relationships = copy.deepcopy(relationships)
include_tables = include_tables or [t.name for t in _tables]
include_columns = include_columns or [c.name for t in _tables for c in t.columns]
exclude_tables = exclude_tables or list()
exclude_columns = exclude_columns or list()
_tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
_relationships = [r for r in _relationships
if r.right_col not in exclude_tables
and r.left_col not in exclude_tables
and r.right_col in include_tables
and r.left_col in include_tables]
for t in _tables:
t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
return _tables, _relationships
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
              exclude_tables=None, exclude_columns=None, schema=None):
    """
    Transform the metadata into a representation.
    :param input: Possible inputs are instances of:
        MetaData: SQLAlchemy Metadata
        DeclarativeMeta: SQLAlchemy declarative Base
    :param output: name of the file to output to.
    :param mode: str in list:
        'er': writes to a file the markup to generate an ER style diagram.
        'graph': writes the image of the ER diagram.
        'dot': write to file the diagram in dot format.
        'auto': choose from the filename:
            '*.er': writes to a file the markup to generate an ER style diagram.
            '.dot': returns the graph in the dot syntax.
            else: return a graph to the format graph
    :param include_tables: lst of str, table names to include, None means include all
    :param include_columns: lst of str, column names to include, None means include all
    :param exclude_tables: lst of str, table names to exclude, None means exclude nothing
    :param exclude_columns: lst of str, field names to exclude, None means exclude nothing
    :param schema: name of the schema
    """
    try:
        tables, relationships = all_to_intermediary(input, schema=schema)
        tables, relationships = filter_resources(tables, relationships,
                                                 include_tables=include_tables, include_columns=include_columns,
                                                 exclude_tables=exclude_tables, exclude_columns=exclude_columns)
        intermediary_to_output = get_output_mode(output, mode)
        intermediary_to_output(tables, relationships, output)
    except ImportError as e:
        # `Exception.message` was removed in Python 3 (PEP 352); prefer
        # `ImportError.name`, falling back to the last word of the message.
        module_name = getattr(e, 'name', None) or str(e).split()[-1]
        print('Please install {0} using "pip install {0}".'.format(module_name))
    except ParsingException as e:
        sys.stderr.write(str(e))
if __name__ == '__main__':
cli()
|
Alexis-benoist/eralchemy | eralchemy/main.py | intermediary_to_dot | python | def intermediary_to_dot(tables, relationships, output):
dot_file = _intermediary_to_dot(tables, relationships)
with open(output, "w") as file_out:
file_out.write(dot_file) | Save the intermediary representation to dot format. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L62-L66 | [
"def _intermediary_to_dot(tables, relationships):\n \"\"\" Returns the dot source representing the database in a string. \"\"\"\n t = '\\n'.join(t.to_dot() for t in tables)\n r = '\\n'.join(r.to_dot() for r in relationships)\n return '{}\\n{}\\n{}\\n}}'.format(GRAPH_BEGINNING, t, r)\n"
] | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
try:
basestring
except NameError:
basestring = str
def cli():
"""Entry point for the application script"""
parser = get_argparser()
args = parser.parse_args()
check_args(args)
if args.v:
print('ERAlchemy version {}.'.format(__version__))
exit(0)
render_er(
args.i,
args.o,
include_tables=args.include_tables,
include_columns=args.include_columns,
exclude_tables=args.exclude_tables,
exclude_columns=args.exclude_columns,
schema=args.s
)
def get_argparser():
parser = argparse.ArgumentParser(prog='ERAlchemy')
parser.add_argument('-i', nargs='?', help='Database URI to process.')
parser.add_argument('-o', nargs='?', help='Name of the file to write.')
parser.add_argument('-s', nargs='?', help='Name of the schema.')
parser.add_argument('--exclude-tables', '-x', nargs='+', help='Name of tables not to be displayed.')
parser.add_argument('--exclude-columns', nargs='+', help='Name of columns not to be displayed (for all tables).')
parser.add_argument('--include-tables', nargs='+', help='Name of tables to be displayed alone.')
parser.add_argument('--include-columns', nargs='+', help='Name of columns to be displayed alone (for all tables).')
parser.add_argument('-v', help='Prints version number.', action='store_true')
return parser
def intermediary_to_markdown(tables, relationships, output):
""" Saves the intermediary representation to markdown. """
er_markup = _intermediary_to_markdown(tables, relationships)
with open(output, "w") as file_out:
file_out.write(er_markup)
def intermediary_to_schema(tables, relationships, output):
""" Transforms and save the intermediary representation to the file chosen. """
dot_file = _intermediary_to_dot(tables, relationships)
graph = AGraph()
graph = graph.from_string(dot_file)
extension = output.split('.')[-1]
graph.draw(path=output, prog='dot', format=extension)
def _intermediary_to_markdown(tables, relationships):
""" Returns the er markup source in a string. """
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r)
def _intermediary_to_dot(tables, relationships):
""" Returns the dot source representing the database in a string. """
t = '\n'.join(t.to_dot() for t in tables)
r = '\n'.join(r.to_dot() for r in relationships)
return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r)
# Routes from the class name to the function transforming this class in
# the intermediary representation.
switch_input_class_to_method = {
'MetaData': metadata_to_intermediary,
'DeclarativeMeta': declarative_to_intermediary,
# For compatibility with Flask-SQLAlchemy
'_BoundDeclarativeMeta': declarative_to_intermediary,
# Renamed in Flask-SQLAlchemy 2.3
'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output.
switch_output_mode_auto = {
'er': intermediary_to_markdown,
'graph': intermediary_to_schema,
'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output.
switch_output_mode = {
'er': intermediary_to_markdown,
'dot': intermediary_to_dot,
}
def all_to_intermediary(filename_or_input, schema=None):
""" Dispatch the filename_or_input to the different function to produce the intermediary syntax.
All the supported classes names are in `swich_input_class_to_method`.
The input can also be a list of strings in markdown format or a filename finishing by '.er' containing markdown
format.
"""
# Try to convert from the name of the class
input_class_name = filename_or_input.__class__.__name__
try:
this_to_intermediary = switch_input_class_to_method[input_class_name]
tables, relationships = this_to_intermediary(filename_or_input)
return tables, relationships
except KeyError:
pass
# try to read markdown file.
if isinstance(filename_or_input, basestring):
if filename_or_input.split('.')[-1] == 'er':
return markdown_file_to_intermediary(filename_or_input)
# try to read a markdown in a string
if not isinstance(filename_or_input, basestring):
if all(isinstance(e, basestring) for e in filename_or_input):
return line_iterator_to_intermediary(filename_or_input)
# try to read DB URI.
try:
make_url(filename_or_input)
return database_to_intermediary(filename_or_input, schema=schema)
except ArgumentError:
pass
msg = 'Cannot process filename_or_input {}'.format(input_class_name)
raise ValueError(msg)
def get_output_mode(output, mode):
    """
    From the output name and the mode, return the function that will
    transform the intermediary representation to the output.
    """
    if mode != 'auto':
        try:
            return switch_output_mode_auto[mode]
        except KeyError:
            # Interpolate the offending mode into the message; previously the
            # '{}' placeholder was emitted literally (missing .format call).
            raise ValueError('Mode "{}" is not supported.'.format(mode))
    # 'auto' mode: infer the output function from the file extension.
    extension = output.split('.')[-1]
    try:
        return switch_output_mode[extension]
    except KeyError:
        # Unknown extensions fall back to rendering an image via graphviz.
        return intermediary_to_schema
def filter_resources(tables, relationships,
include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None):
"""
Include the following:
1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
Exclude the following:
1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
Disclosure note:
All relationships are taken into consideration before ignoring columns.
In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables
"""
_tables = copy.deepcopy(tables)
_relationships = copy.deepcopy(relationships)
include_tables = include_tables or [t.name for t in _tables]
include_columns = include_columns or [c.name for t in _tables for c in t.columns]
exclude_tables = exclude_tables or list()
exclude_columns = exclude_columns or list()
_tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
_relationships = [r for r in _relationships
if r.right_col not in exclude_tables
and r.left_col not in exclude_tables
and r.right_col in include_tables
and r.left_col in include_tables]
for t in _tables:
t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
return _tables, _relationships
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
              exclude_tables=None, exclude_columns=None, schema=None):
    """
    Transform the metadata into a representation.
    :param input: Possible inputs are instances of:
        MetaData: SQLAlchemy Metadata
        DeclarativeMeta: SQLAlchemy declarative Base
    :param output: name of the file to output to.
    :param mode: str in list:
        'er': writes to a file the markup to generate an ER style diagram.
        'graph': writes the image of the ER diagram.
        'dot': write to file the diagram in dot format.
        'auto': choose from the filename:
            '*.er': writes to a file the markup to generate an ER style diagram.
            '.dot': returns the graph in the dot syntax.
            else: return a graph to the format graph
    :param include_tables: lst of str, table names to include, None means include all
    :param include_columns: lst of str, column names to include, None means include all
    :param exclude_tables: lst of str, table names to exclude, None means exclude nothing
    :param exclude_columns: lst of str, field names to exclude, None means exclude nothing
    :param schema: name of the schema
    """
    try:
        tables, relationships = all_to_intermediary(input, schema=schema)
        tables, relationships = filter_resources(tables, relationships,
                                                 include_tables=include_tables, include_columns=include_columns,
                                                 exclude_tables=exclude_tables, exclude_columns=exclude_columns)
        intermediary_to_output = get_output_mode(output, mode)
        intermediary_to_output(tables, relationships, output)
    except ImportError as e:
        # `Exception.message` was removed in Python 3 (PEP 352); prefer
        # `ImportError.name`, falling back to the last word of the message.
        module_name = getattr(e, 'name', None) or str(e).split()[-1]
        print('Please install {0} using "pip install {0}".'.format(module_name))
    except ParsingException as e:
        sys.stderr.write(str(e))
if __name__ == '__main__':
cli()
|
Alexis-benoist/eralchemy | eralchemy/main.py | intermediary_to_schema | python | def intermediary_to_schema(tables, relationships, output):
dot_file = _intermediary_to_dot(tables, relationships)
graph = AGraph()
graph = graph.from_string(dot_file)
extension = output.split('.')[-1]
graph.draw(path=output, prog='dot', format=extension) | Transforms and save the intermediary representation to the file chosen. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L69-L75 | [
"def _intermediary_to_dot(tables, relationships):\n \"\"\" Returns the dot source representing the database in a string. \"\"\"\n t = '\\n'.join(t.to_dot() for t in tables)\n r = '\\n'.join(r.to_dot() for r in relationships)\n return '{}\\n{}\\n{}\\n}}'.format(GRAPH_BEGINNING, t, r)\n"
] | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
try:
basestring
except NameError:
basestring = str
def cli():
"""Entry point for the application script"""
parser = get_argparser()
args = parser.parse_args()
check_args(args)
if args.v:
print('ERAlchemy version {}.'.format(__version__))
exit(0)
render_er(
args.i,
args.o,
include_tables=args.include_tables,
include_columns=args.include_columns,
exclude_tables=args.exclude_tables,
exclude_columns=args.exclude_columns,
schema=args.s
)
def get_argparser():
parser = argparse.ArgumentParser(prog='ERAlchemy')
parser.add_argument('-i', nargs='?', help='Database URI to process.')
parser.add_argument('-o', nargs='?', help='Name of the file to write.')
parser.add_argument('-s', nargs='?', help='Name of the schema.')
parser.add_argument('--exclude-tables', '-x', nargs='+', help='Name of tables not to be displayed.')
parser.add_argument('--exclude-columns', nargs='+', help='Name of columns not to be displayed (for all tables).')
parser.add_argument('--include-tables', nargs='+', help='Name of tables to be displayed alone.')
parser.add_argument('--include-columns', nargs='+', help='Name of columns to be displayed alone (for all tables).')
parser.add_argument('-v', help='Prints version number.', action='store_true')
return parser
def intermediary_to_markdown(tables, relationships, output):
""" Saves the intermediary representation to markdown. """
er_markup = _intermediary_to_markdown(tables, relationships)
with open(output, "w") as file_out:
file_out.write(er_markup)
def intermediary_to_dot(tables, relationships, output):
""" Save the intermediary representation to dot format. """
dot_file = _intermediary_to_dot(tables, relationships)
with open(output, "w") as file_out:
file_out.write(dot_file)
def _intermediary_to_markdown(tables, relationships):
""" Returns the er markup source in a string. """
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r)
def _intermediary_to_dot(tables, relationships):
""" Returns the dot source representing the database in a string. """
t = '\n'.join(t.to_dot() for t in tables)
r = '\n'.join(r.to_dot() for r in relationships)
return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r)
# Routes from the class name to the function transforming this class in
# the intermediary representation.
switch_input_class_to_method = {
'MetaData': metadata_to_intermediary,
'DeclarativeMeta': declarative_to_intermediary,
# For compatibility with Flask-SQLAlchemy
'_BoundDeclarativeMeta': declarative_to_intermediary,
# Renamed in Flask-SQLAlchemy 2.3
'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output.
switch_output_mode_auto = {
'er': intermediary_to_markdown,
'graph': intermediary_to_schema,
'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output.
switch_output_mode = {
'er': intermediary_to_markdown,
'dot': intermediary_to_dot,
}
def all_to_intermediary(filename_or_input, schema=None):
""" Dispatch the filename_or_input to the different function to produce the intermediary syntax.
All the supported classes names are in `swich_input_class_to_method`.
The input can also be a list of strings in markdown format or a filename finishing by '.er' containing markdown
format.
"""
# Try to convert from the name of the class
input_class_name = filename_or_input.__class__.__name__
try:
this_to_intermediary = switch_input_class_to_method[input_class_name]
tables, relationships = this_to_intermediary(filename_or_input)
return tables, relationships
except KeyError:
pass
# try to read markdown file.
if isinstance(filename_or_input, basestring):
if filename_or_input.split('.')[-1] == 'er':
return markdown_file_to_intermediary(filename_or_input)
# try to read a markdown in a string
if not isinstance(filename_or_input, basestring):
if all(isinstance(e, basestring) for e in filename_or_input):
return line_iterator_to_intermediary(filename_or_input)
# try to read DB URI.
try:
make_url(filename_or_input)
return database_to_intermediary(filename_or_input, schema=schema)
except ArgumentError:
pass
msg = 'Cannot process filename_or_input {}'.format(input_class_name)
raise ValueError(msg)
def get_output_mode(output, mode):
    """
    From the output name and the mode, return the function that will
    transform the intermediary representation to the output.
    """
    if mode != 'auto':
        try:
            return switch_output_mode_auto[mode]
        except KeyError:
            # Interpolate the offending mode into the message; previously the
            # '{}' placeholder was emitted literally (missing .format call).
            raise ValueError('Mode "{}" is not supported.'.format(mode))
    # 'auto' mode: infer the output function from the file extension.
    extension = output.split('.')[-1]
    try:
        return switch_output_mode[extension]
    except KeyError:
        # Unknown extensions fall back to rendering an image via graphviz.
        return intermediary_to_schema
def filter_resources(tables, relationships,
include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None):
"""
Include the following:
1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
Exclude the following:
1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
Disclosure note:
All relationships are taken into consideration before ignoring columns.
In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables
"""
_tables = copy.deepcopy(tables)
_relationships = copy.deepcopy(relationships)
include_tables = include_tables or [t.name for t in _tables]
include_columns = include_columns or [c.name for t in _tables for c in t.columns]
exclude_tables = exclude_tables or list()
exclude_columns = exclude_columns or list()
_tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
_relationships = [r for r in _relationships
if r.right_col not in exclude_tables
and r.left_col not in exclude_tables
and r.right_col in include_tables
and r.left_col in include_tables]
for t in _tables:
t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
return _tables, _relationships
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
              exclude_tables=None, exclude_columns=None, schema=None):
    """
    Transform the metadata into a representation.
    :param input: Possible inputs are instances of:
        MetaData: SQLAlchemy Metadata
        DeclarativeMeta: SQLAlchemy declarative Base
    :param output: name of the file to output to.
    :param mode: str in list:
        'er': writes to a file the markup to generate an ER style diagram.
        'graph': writes the image of the ER diagram.
        'dot': write to file the diagram in dot format.
        'auto': choose from the filename:
            '*.er': writes to a file the markup to generate an ER style diagram.
            '.dot': returns the graph in the dot syntax.
            else: return a graph to the format graph
    :param include_tables: lst of str, table names to include, None means include all
    :param include_columns: lst of str, column names to include, None means include all
    :param exclude_tables: lst of str, table names to exclude, None means exclude nothing
    :param exclude_columns: lst of str, field names to exclude, None means exclude nothing
    :param schema: name of the schema
    """
    try:
        tables, relationships = all_to_intermediary(input, schema=schema)
        tables, relationships = filter_resources(tables, relationships,
                                                 include_tables=include_tables, include_columns=include_columns,
                                                 exclude_tables=exclude_tables, exclude_columns=exclude_columns)
        intermediary_to_output = get_output_mode(output, mode)
        intermediary_to_output(tables, relationships, output)
    except ImportError as e:
        # `Exception.message` was removed in Python 3 (PEP 352); prefer
        # `ImportError.name`, falling back to the last word of the message.
        module_name = getattr(e, 'name', None) or str(e).split()[-1]
        print('Please install {0} using "pip install {0}".'.format(module_name))
    except ParsingException as e:
        sys.stderr.write(str(e))
if __name__ == '__main__':
cli()
|
Alexis-benoist/eralchemy | eralchemy/main.py | _intermediary_to_markdown | python | def _intermediary_to_markdown(tables, relationships):
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r) | Returns the er markup source in a string. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L78-L82 | null | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
# Python 2/3 compatibility shim: `basestring` does not exist on Python 3,
# so the isinstance checks below fall back to `str`.
try:
    basestring
except NameError:
    basestring = str
def cli():
    """Entry point for the application script.

    Parses the command-line arguments, validates them, and either prints
    the version (``-v``) or renders the requested ER diagram.
    """
    parser = get_argparser()
    args = parser.parse_args()
    check_args(args)
    if args.v:
        print('ERAlchemy version {}.'.format(__version__))
        # `sys.exit` is always available; the bare `exit()` builtin is
        # injected by the `site` module and is not guaranteed to exist
        # (e.g. when running with `python -S` or under some launchers).
        sys.exit(0)
    render_er(
        args.i,
        args.o,
        include_tables=args.include_tables,
        include_columns=args.include_columns,
        exclude_tables=args.exclude_tables,
        exclude_columns=args.exclude_columns,
        schema=args.s
    )
def get_argparser():
    """Build and return the argparse parser for the ERAlchemy CLI."""
    arg_parser = argparse.ArgumentParser(prog='ERAlchemy')
    # Single-valued options share the same nargs='?' shape.
    for flag, help_text in (
        ('-i', 'Database URI to process.'),
        ('-o', 'Name of the file to write.'),
        ('-s', 'Name of the schema.'),
    ):
        arg_parser.add_argument(flag, nargs='?', help=help_text)
    # List-valued filtering options.
    arg_parser.add_argument('--exclude-tables', '-x', nargs='+', help='Name of tables not to be displayed.')
    arg_parser.add_argument('--exclude-columns', nargs='+', help='Name of columns not to be displayed (for all tables).')
    arg_parser.add_argument('--include-tables', nargs='+', help='Name of tables to be displayed alone.')
    arg_parser.add_argument('--include-columns', nargs='+', help='Name of columns to be displayed alone (for all tables).')
    arg_parser.add_argument('-v', help='Prints version number.', action='store_true')
    return arg_parser
def intermediary_to_markdown(tables, relationships, output):
    """Write the ER markup for *tables* and *relationships* to the file *output*."""
    with open(output, "w") as markdown_out:
        markdown_out.write(_intermediary_to_markdown(tables, relationships))
def intermediary_to_dot(tables, relationships, output):
    """Write the dot-format source for *tables* and *relationships* to the file *output*."""
    with open(output, "w") as dot_out:
        dot_out.write(_intermediary_to_dot(tables, relationships))
def intermediary_to_schema(tables, relationships, output):
    """Render *tables* and *relationships* as a diagram file at *output*.

    The image format is inferred from the file extension of *output* and
    the drawing is delegated to graphviz via pygraphviz.
    """
    dot_source = _intermediary_to_dot(tables, relationships)
    drawing = AGraph().from_string(dot_source)
    drawing.draw(path=output, prog='dot', format=output.split('.')[-1])
def _intermediary_to_dot(tables, relationships):
    """Return the dot source representing the database as a string."""
    tables_dot = '\n'.join(table.to_dot() for table in tables)
    relations_dot = '\n'.join(relation.to_dot() for relation in relationships)
    # Opening boilerplate, node/edge definitions, then the closing brace.
    return '\n'.join([GRAPH_BEGINNING, tables_dot, relations_dot, '}'])
# Routes from the class name to the function transforming this class in
# the intermediary representation. Used by `all_to_intermediary` to
# dispatch SQLAlchemy inputs by their class name.
switch_input_class_to_method = {
    'MetaData': metadata_to_intermediary,
    'DeclarativeMeta': declarative_to_intermediary,
    # For compatibility with Flask-SQLAlchemy
    '_BoundDeclarativeMeta': declarative_to_intermediary,
    # Renamed in Flask-SQLAlchemy 2.3
    'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output. Consulted by `get_output_mode`
# when an explicit (non-'auto') mode is requested.
switch_output_mode_auto = {
    'er': intermediary_to_markdown,
    'graph': intermediary_to_schema,
    'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output. Consulted by
# `get_output_mode` in 'auto' mode; unknown extensions fall back to
# `intermediary_to_schema`.
switch_output_mode = {
    'er': intermediary_to_markdown,
    'dot': intermediary_to_dot,
}
def all_to_intermediary(filename_or_input, schema=None):
    """Dispatch *filename_or_input* to the right converter and return the
    intermediary representation as a ``(tables, relationships)`` tuple.

    Supported inputs, tried in order:
      1. Instances of the classes listed in `switch_input_class_to_method`
         (SQLAlchemy MetaData / declarative base classes).
      2. A filename ending in '.er' containing markdown-format markup.
      3. An iterable of markdown-format strings.
      4. A database URI.

    :param schema: optional schema name, forwarded when the input is a
        database URI.
    :raises ValueError: when none of the strategies above applies.
    """
    # 1. Try to convert from the name of the class.
    input_class_name = filename_or_input.__class__.__name__
    # Only the dispatch lookup is guarded: a KeyError raised *inside* a
    # converter is a genuine error and must propagate instead of being
    # silently masked by the fall-through.
    try:
        this_to_intermediary = switch_input_class_to_method[input_class_name]
    except KeyError:
        pass
    else:
        return this_to_intermediary(filename_or_input)
    # 2. Try to read a markdown ('.er') file.
    if isinstance(filename_or_input, basestring):
        if filename_or_input.split('.')[-1] == 'er':
            return markdown_file_to_intermediary(filename_or_input)
    # 3. Try to read markdown lines from an iterable of strings.
    if not isinstance(filename_or_input, basestring):
        if all(isinstance(e, basestring) for e in filename_or_input):
            return line_iterator_to_intermediary(filename_or_input)
    # 4. Try to interpret the input as a database URI.
    try:
        make_url(filename_or_input)
        return database_to_intermediary(filename_or_input, schema=schema)
    except ArgumentError:
        pass
    msg = 'Cannot process filename_or_input {}'.format(input_class_name)
    raise ValueError(msg)
def get_output_mode(output, mode):
    """
    From the output name and the mode, return the function that will
    transform the intermediary representation to the output.
    """
    if mode != 'auto':
        try:
            return switch_output_mode_auto[mode]
        except KeyError:
            # Interpolate the offending mode into the message; previously the
            # '{}' placeholder was emitted literally (missing .format call).
            raise ValueError('Mode "{}" is not supported.'.format(mode))
    # 'auto' mode: infer the output function from the file extension.
    extension = output.split('.')[-1]
    try:
        return switch_output_mode[extension]
    except KeyError:
        # Unknown extensions fall back to rendering an image via graphviz.
        return intermediary_to_schema
def filter_resources(tables, relationships,
                     include_tables=None, include_columns=None,
                     exclude_tables=None, exclude_columns=None):
    """Return filtered deep copies of *tables* and *relationships*.

    Keeps only tables (and relationships between tables) whose names are in
    `include_tables` and not in `exclude_tables`, and only columns whose
    names are in `include_columns` and not in `exclude_columns`.
    `include_*` default to "everything"; `exclude_*` default to "nothing".

    Note: relationships are filtered on table names *before* any columns
    are dropped, so excluding a primary/foreign key column does not remove
    the relation displayed between the tables. The caller's objects are
    never mutated (deep copies are filtered).
    """
    tables_copy = copy.deepcopy(tables)
    relationships_copy = copy.deepcopy(relationships)
    include_tables = include_tables or [tbl.name for tbl in tables_copy]
    include_columns = include_columns or [col.name for tbl in tables_copy for col in tbl.columns]
    exclude_tables = exclude_tables or []
    exclude_columns = exclude_columns or []

    def keep_table(name):
        # A table survives when it is included and not excluded.
        return name in include_tables and name not in exclude_tables

    def keep_column(name):
        return name in include_columns and name not in exclude_columns

    tables_copy = [tbl for tbl in tables_copy if keep_table(tbl.name)]
    relationships_copy = [rel for rel in relationships_copy
                          if keep_table(rel.right_col) and keep_table(rel.left_col)]
    for tbl in tables_copy:
        tbl.columns = [col for col in tbl.columns if keep_column(col.name)]
    return tables_copy, relationships_copy
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None, schema=None):
"""
Transform the metadata into a representation.
:param input: Possible inputs are instances of:
MetaData: SQLAlchemy Metadata
DeclarativeMeta: SQLAlchemy declarative Base
:param output: name of the file to output the
:param mode: str in list:
'er': writes to a file the markup to generate an ER style diagram.
'graph': writes the image of the ER diagram.
'dot': write to file the diagram in dot format.
'auto': choose from the filename:
'*.er': writes to a file the markup to generate an ER style diagram.
'.dot': returns the graph in the dot syntax.
else: return a graph to the format graph
:param include_tables: lst of str, table names to include, None means include all
:param include_columns: lst of str, column names to include, None means include all
:param exclude_tables: lst of str, table names to exclude, None means exclude nothing
:param exclude_columns: lst of str, field names to exclude, None means exclude nothing
:param schema: name of the schema
"""
try:
tables, relationships = all_to_intermediary(input, schema=schema)
tables, relationships = filter_resources(tables, relationships,
include_tables=include_tables, include_columns=include_columns,
exclude_tables=exclude_tables, exclude_columns=exclude_columns)
intermediary_to_output = get_output_mode(output, mode)
intermediary_to_output(tables, relationships, output)
except ImportError as e:
module_name = e.message.split()[-1]
print('Please install {0} using "pip install {0}".'.format(module_name))
except ParsingException as e:
sys.stderr.write(e.message)
if __name__ == '__main__':
cli()
|
Alexis-benoist/eralchemy | eralchemy/main.py | _intermediary_to_dot | python | def _intermediary_to_dot(tables, relationships):
t = '\n'.join(t.to_dot() for t in tables)
r = '\n'.join(r.to_dot() for r in relationships)
return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r) | Returns the dot source representing the database in a string. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L85-L89 | null | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
try:
basestring
except NameError:
basestring = str
def cli():
"""Entry point for the application script"""
parser = get_argparser()
args = parser.parse_args()
check_args(args)
if args.v:
print('ERAlchemy version {}.'.format(__version__))
exit(0)
render_er(
args.i,
args.o,
include_tables=args.include_tables,
include_columns=args.include_columns,
exclude_tables=args.exclude_tables,
exclude_columns=args.exclude_columns,
schema=args.s
)
def get_argparser():
parser = argparse.ArgumentParser(prog='ERAlchemy')
parser.add_argument('-i', nargs='?', help='Database URI to process.')
parser.add_argument('-o', nargs='?', help='Name of the file to write.')
parser.add_argument('-s', nargs='?', help='Name of the schema.')
parser.add_argument('--exclude-tables', '-x', nargs='+', help='Name of tables not to be displayed.')
parser.add_argument('--exclude-columns', nargs='+', help='Name of columns not to be displayed (for all tables).')
parser.add_argument('--include-tables', nargs='+', help='Name of tables to be displayed alone.')
parser.add_argument('--include-columns', nargs='+', help='Name of columns to be displayed alone (for all tables).')
parser.add_argument('-v', help='Prints version number.', action='store_true')
return parser
def intermediary_to_markdown(tables, relationships, output):
""" Saves the intermediary representation to markdown. """
er_markup = _intermediary_to_markdown(tables, relationships)
with open(output, "w") as file_out:
file_out.write(er_markup)
def intermediary_to_dot(tables, relationships, output):
""" Save the intermediary representation to dot format. """
dot_file = _intermediary_to_dot(tables, relationships)
with open(output, "w") as file_out:
file_out.write(dot_file)
def intermediary_to_schema(tables, relationships, output):
""" Transforms and save the intermediary representation to the file chosen. """
dot_file = _intermediary_to_dot(tables, relationships)
graph = AGraph()
graph = graph.from_string(dot_file)
extension = output.split('.')[-1]
graph.draw(path=output, prog='dot', format=extension)
def _intermediary_to_markdown(tables, relationships):
""" Returns the er markup source in a string. """
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r)
# Routes from the class name to the function transforming this class in
# the intermediary representation.
switch_input_class_to_method = {
'MetaData': metadata_to_intermediary,
'DeclarativeMeta': declarative_to_intermediary,
# For compatibility with Flask-SQLAlchemy
'_BoundDeclarativeMeta': declarative_to_intermediary,
# Renamed in Flask-SQLAlchemy 2.3
'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output.
switch_output_mode_auto = {
'er': intermediary_to_markdown,
'graph': intermediary_to_schema,
'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output.
switch_output_mode = {
'er': intermediary_to_markdown,
'dot': intermediary_to_dot,
}
def all_to_intermediary(filename_or_input, schema=None):
""" Dispatch the filename_or_input to the different function to produce the intermediary syntax.
All the supported classes names are in `swich_input_class_to_method`.
The input can also be a list of strings in markdown format or a filename finishing by '.er' containing markdown
format.
"""
# Try to convert from the name of the class
input_class_name = filename_or_input.__class__.__name__
try:
this_to_intermediary = switch_input_class_to_method[input_class_name]
tables, relationships = this_to_intermediary(filename_or_input)
return tables, relationships
except KeyError:
pass
# try to read markdown file.
if isinstance(filename_or_input, basestring):
if filename_or_input.split('.')[-1] == 'er':
return markdown_file_to_intermediary(filename_or_input)
# try to read a markdown in a string
if not isinstance(filename_or_input, basestring):
if all(isinstance(e, basestring) for e in filename_or_input):
return line_iterator_to_intermediary(filename_or_input)
# try to read DB URI.
try:
make_url(filename_or_input)
return database_to_intermediary(filename_or_input, schema=schema)
except ArgumentError:
pass
msg = 'Cannot process filename_or_input {}'.format(input_class_name)
raise ValueError(msg)
def get_output_mode(output, mode):
"""
From the output name and the mode returns a the function that will transform the intermediary
representation to the output.
"""
if mode != 'auto':
try:
return switch_output_mode_auto[mode]
except KeyError:
raise ValueError('Mode "{}" is not supported.')
extension = output.split('.')[-1]
try:
return switch_output_mode[extension]
except KeyError:
return intermediary_to_schema
def filter_resources(tables, relationships,
include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None):
"""
Include the following:
1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
Exclude the following:
1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
Disclosure note:
All relationships are taken into consideration before ignoring columns.
In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables
"""
_tables = copy.deepcopy(tables)
_relationships = copy.deepcopy(relationships)
include_tables = include_tables or [t.name for t in _tables]
include_columns = include_columns or [c.name for t in _tables for c in t.columns]
exclude_tables = exclude_tables or list()
exclude_columns = exclude_columns or list()
_tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
_relationships = [r for r in _relationships
if r.right_col not in exclude_tables
and r.left_col not in exclude_tables
and r.right_col in include_tables
and r.left_col in include_tables]
for t in _tables:
t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
return _tables, _relationships
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None, schema=None):
"""
Transform the metadata into a representation.
:param input: Possible inputs are instances of:
MetaData: SQLAlchemy Metadata
DeclarativeMeta: SQLAlchemy declarative Base
:param output: name of the file to output the
:param mode: str in list:
'er': writes to a file the markup to generate an ER style diagram.
'graph': writes the image of the ER diagram.
'dot': write to file the diagram in dot format.
'auto': choose from the filename:
'*.er': writes to a file the markup to generate an ER style diagram.
'.dot': returns the graph in the dot syntax.
else: return a graph to the format graph
:param include_tables: lst of str, table names to include, None means include all
:param include_columns: lst of str, column names to include, None means include all
:param exclude_tables: lst of str, table names to exclude, None means exclude nothing
:param exclude_columns: lst of str, field names to exclude, None means exclude nothing
:param schema: name of the schema
"""
try:
tables, relationships = all_to_intermediary(input, schema=schema)
tables, relationships = filter_resources(tables, relationships,
include_tables=include_tables, include_columns=include_columns,
exclude_tables=exclude_tables, exclude_columns=exclude_columns)
intermediary_to_output = get_output_mode(output, mode)
intermediary_to_output(tables, relationships, output)
except ImportError as e:
module_name = e.message.split()[-1]
print('Please install {0} using "pip install {0}".'.format(module_name))
except ParsingException as e:
sys.stderr.write(e.message)
if __name__ == '__main__':
cli()
|
Alexis-benoist/eralchemy | eralchemy/main.py | all_to_intermediary | python | def all_to_intermediary(filename_or_input, schema=None):
# Try to convert from the name of the class
input_class_name = filename_or_input.__class__.__name__
try:
this_to_intermediary = switch_input_class_to_method[input_class_name]
tables, relationships = this_to_intermediary(filename_or_input)
return tables, relationships
except KeyError:
pass
# try to read markdown file.
if isinstance(filename_or_input, basestring):
if filename_or_input.split('.')[-1] == 'er':
return markdown_file_to_intermediary(filename_or_input)
# try to read a markdown in a string
if not isinstance(filename_or_input, basestring):
if all(isinstance(e, basestring) for e in filename_or_input):
return line_iterator_to_intermediary(filename_or_input)
# try to read DB URI.
try:
make_url(filename_or_input)
return database_to_intermediary(filename_or_input, schema=schema)
except ArgumentError:
pass
msg = 'Cannot process filename_or_input {}'.format(input_class_name)
raise ValueError(msg) | Dispatch the filename_or_input to the different function to produce the intermediary syntax.
All the supported classes names are in `swich_input_class_to_method`.
The input can also be a list of strings in markdown format or a filename finishing by '.er' containing markdown
format. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L119-L152 | [
"def database_to_intermediary(database_uri, schema=None):\n \"\"\" Introspect from the database (given the database_uri) to create the intermediary representation. \"\"\"\n from sqlalchemy.ext.automap import automap_base\n from sqlalchemy import create_engine\n\n Base = automap_base()\n engine = crea... | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
try:
basestring
except NameError:
basestring = str
def cli():
"""Entry point for the application script"""
parser = get_argparser()
args = parser.parse_args()
check_args(args)
if args.v:
print('ERAlchemy version {}.'.format(__version__))
exit(0)
render_er(
args.i,
args.o,
include_tables=args.include_tables,
include_columns=args.include_columns,
exclude_tables=args.exclude_tables,
exclude_columns=args.exclude_columns,
schema=args.s
)
def get_argparser():
parser = argparse.ArgumentParser(prog='ERAlchemy')
parser.add_argument('-i', nargs='?', help='Database URI to process.')
parser.add_argument('-o', nargs='?', help='Name of the file to write.')
parser.add_argument('-s', nargs='?', help='Name of the schema.')
parser.add_argument('--exclude-tables', '-x', nargs='+', help='Name of tables not to be displayed.')
parser.add_argument('--exclude-columns', nargs='+', help='Name of columns not to be displayed (for all tables).')
parser.add_argument('--include-tables', nargs='+', help='Name of tables to be displayed alone.')
parser.add_argument('--include-columns', nargs='+', help='Name of columns to be displayed alone (for all tables).')
parser.add_argument('-v', help='Prints version number.', action='store_true')
return parser
def intermediary_to_markdown(tables, relationships, output):
""" Saves the intermediary representation to markdown. """
er_markup = _intermediary_to_markdown(tables, relationships)
with open(output, "w") as file_out:
file_out.write(er_markup)
def intermediary_to_dot(tables, relationships, output):
""" Save the intermediary representation to dot format. """
dot_file = _intermediary_to_dot(tables, relationships)
with open(output, "w") as file_out:
file_out.write(dot_file)
def intermediary_to_schema(tables, relationships, output):
""" Transforms and save the intermediary representation to the file chosen. """
dot_file = _intermediary_to_dot(tables, relationships)
graph = AGraph()
graph = graph.from_string(dot_file)
extension = output.split('.')[-1]
graph.draw(path=output, prog='dot', format=extension)
def _intermediary_to_markdown(tables, relationships):
""" Returns the er markup source in a string. """
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r)
def _intermediary_to_dot(tables, relationships):
""" Returns the dot source representing the database in a string. """
t = '\n'.join(t.to_dot() for t in tables)
r = '\n'.join(r.to_dot() for r in relationships)
return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r)
# Routes from the class name to the function transforming this class in
# the intermediary representation.
switch_input_class_to_method = {
'MetaData': metadata_to_intermediary,
'DeclarativeMeta': declarative_to_intermediary,
# For compatibility with Flask-SQLAlchemy
'_BoundDeclarativeMeta': declarative_to_intermediary,
# Renamed in Flask-SQLAlchemy 2.3
'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output.
switch_output_mode_auto = {
'er': intermediary_to_markdown,
'graph': intermediary_to_schema,
'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output.
switch_output_mode = {
'er': intermediary_to_markdown,
'dot': intermediary_to_dot,
}
def get_output_mode(output, mode):
"""
From the output name and the mode returns a the function that will transform the intermediary
representation to the output.
"""
if mode != 'auto':
try:
return switch_output_mode_auto[mode]
except KeyError:
raise ValueError('Mode "{}" is not supported.')
extension = output.split('.')[-1]
try:
return switch_output_mode[extension]
except KeyError:
return intermediary_to_schema
def filter_resources(tables, relationships,
include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None):
"""
Include the following:
1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
Exclude the following:
1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
Disclosure note:
All relationships are taken into consideration before ignoring columns.
In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables
"""
_tables = copy.deepcopy(tables)
_relationships = copy.deepcopy(relationships)
include_tables = include_tables or [t.name for t in _tables]
include_columns = include_columns or [c.name for t in _tables for c in t.columns]
exclude_tables = exclude_tables or list()
exclude_columns = exclude_columns or list()
_tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
_relationships = [r for r in _relationships
if r.right_col not in exclude_tables
and r.left_col not in exclude_tables
and r.right_col in include_tables
and r.left_col in include_tables]
for t in _tables:
t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
return _tables, _relationships
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None, schema=None):
"""
Transform the metadata into a representation.
:param input: Possible inputs are instances of:
MetaData: SQLAlchemy Metadata
DeclarativeMeta: SQLAlchemy declarative Base
:param output: name of the file to output the
:param mode: str in list:
'er': writes to a file the markup to generate an ER style diagram.
'graph': writes the image of the ER diagram.
'dot': write to file the diagram in dot format.
'auto': choose from the filename:
'*.er': writes to a file the markup to generate an ER style diagram.
'.dot': returns the graph in the dot syntax.
else: return a graph to the format graph
:param include_tables: lst of str, table names to include, None means include all
:param include_columns: lst of str, column names to include, None means include all
:param exclude_tables: lst of str, table names to exclude, None means exclude nothing
:param exclude_columns: lst of str, field names to exclude, None means exclude nothing
:param schema: name of the schema
"""
try:
tables, relationships = all_to_intermediary(input, schema=schema)
tables, relationships = filter_resources(tables, relationships,
include_tables=include_tables, include_columns=include_columns,
exclude_tables=exclude_tables, exclude_columns=exclude_columns)
intermediary_to_output = get_output_mode(output, mode)
intermediary_to_output(tables, relationships, output)
except ImportError as e:
module_name = e.message.split()[-1]
print('Please install {0} using "pip install {0}".'.format(module_name))
except ParsingException as e:
sys.stderr.write(e.message)
if __name__ == '__main__':
cli()
|
Alexis-benoist/eralchemy | eralchemy/main.py | get_output_mode | python | def get_output_mode(output, mode):
if mode != 'auto':
try:
return switch_output_mode_auto[mode]
except KeyError:
raise ValueError('Mode "{}" is not supported.')
extension = output.split('.')[-1]
try:
return switch_output_mode[extension]
except KeyError:
return intermediary_to_schema | From the output name and the mode returns a the function that will transform the intermediary
representation to the output. | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L155-L170 | null | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
try:
basestring
except NameError:
basestring = str
def cli():
"""Entry point for the application script"""
parser = get_argparser()
args = parser.parse_args()
check_args(args)
if args.v:
print('ERAlchemy version {}.'.format(__version__))
exit(0)
render_er(
args.i,
args.o,
include_tables=args.include_tables,
include_columns=args.include_columns,
exclude_tables=args.exclude_tables,
exclude_columns=args.exclude_columns,
schema=args.s
)
def get_argparser():
parser = argparse.ArgumentParser(prog='ERAlchemy')
parser.add_argument('-i', nargs='?', help='Database URI to process.')
parser.add_argument('-o', nargs='?', help='Name of the file to write.')
parser.add_argument('-s', nargs='?', help='Name of the schema.')
parser.add_argument('--exclude-tables', '-x', nargs='+', help='Name of tables not to be displayed.')
parser.add_argument('--exclude-columns', nargs='+', help='Name of columns not to be displayed (for all tables).')
parser.add_argument('--include-tables', nargs='+', help='Name of tables to be displayed alone.')
parser.add_argument('--include-columns', nargs='+', help='Name of columns to be displayed alone (for all tables).')
parser.add_argument('-v', help='Prints version number.', action='store_true')
return parser
def intermediary_to_markdown(tables, relationships, output):
""" Saves the intermediary representation to markdown. """
er_markup = _intermediary_to_markdown(tables, relationships)
with open(output, "w") as file_out:
file_out.write(er_markup)
def intermediary_to_dot(tables, relationships, output):
""" Save the intermediary representation to dot format. """
dot_file = _intermediary_to_dot(tables, relationships)
with open(output, "w") as file_out:
file_out.write(dot_file)
def intermediary_to_schema(tables, relationships, output):
""" Transforms and save the intermediary representation to the file chosen. """
dot_file = _intermediary_to_dot(tables, relationships)
graph = AGraph()
graph = graph.from_string(dot_file)
extension = output.split('.')[-1]
graph.draw(path=output, prog='dot', format=extension)
def _intermediary_to_markdown(tables, relationships):
""" Returns the er markup source in a string. """
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r)
def _intermediary_to_dot(tables, relationships):
""" Returns the dot source representing the database in a string. """
t = '\n'.join(t.to_dot() for t in tables)
r = '\n'.join(r.to_dot() for r in relationships)
return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r)
# Routes from the class name to the function transforming this class in
# the intermediary representation.
switch_input_class_to_method = {
'MetaData': metadata_to_intermediary,
'DeclarativeMeta': declarative_to_intermediary,
# For compatibility with Flask-SQLAlchemy
'_BoundDeclarativeMeta': declarative_to_intermediary,
# Renamed in Flask-SQLAlchemy 2.3
'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output.
switch_output_mode_auto = {
'er': intermediary_to_markdown,
'graph': intermediary_to_schema,
'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output.
switch_output_mode = {
'er': intermediary_to_markdown,
'dot': intermediary_to_dot,
}
def all_to_intermediary(filename_or_input, schema=None):
""" Dispatch the filename_or_input to the different function to produce the intermediary syntax.
All the supported classes names are in `swich_input_class_to_method`.
The input can also be a list of strings in markdown format or a filename finishing by '.er' containing markdown
format.
"""
# Try to convert from the name of the class
input_class_name = filename_or_input.__class__.__name__
try:
this_to_intermediary = switch_input_class_to_method[input_class_name]
tables, relationships = this_to_intermediary(filename_or_input)
return tables, relationships
except KeyError:
pass
# try to read markdown file.
if isinstance(filename_or_input, basestring):
if filename_or_input.split('.')[-1] == 'er':
return markdown_file_to_intermediary(filename_or_input)
# try to read a markdown in a string
if not isinstance(filename_or_input, basestring):
if all(isinstance(e, basestring) for e in filename_or_input):
return line_iterator_to_intermediary(filename_or_input)
# try to read DB URI.
try:
make_url(filename_or_input)
return database_to_intermediary(filename_or_input, schema=schema)
except ArgumentError:
pass
msg = 'Cannot process filename_or_input {}'.format(input_class_name)
raise ValueError(msg)
def filter_resources(tables, relationships,
include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None):
"""
Include the following:
1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
Exclude the following:
1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
Disclosure note:
All relationships are taken into consideration before ignoring columns.
In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables
"""
_tables = copy.deepcopy(tables)
_relationships = copy.deepcopy(relationships)
include_tables = include_tables or [t.name for t in _tables]
include_columns = include_columns or [c.name for t in _tables for c in t.columns]
exclude_tables = exclude_tables or list()
exclude_columns = exclude_columns or list()
_tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
_relationships = [r for r in _relationships
if r.right_col not in exclude_tables
and r.left_col not in exclude_tables
and r.right_col in include_tables
and r.left_col in include_tables]
for t in _tables:
t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
return _tables, _relationships
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None, schema=None):
"""
Transform the metadata into a representation.
:param input: Possible inputs are instances of:
MetaData: SQLAlchemy Metadata
DeclarativeMeta: SQLAlchemy declarative Base
:param output: name of the file to output the
:param mode: str in list:
'er': writes to a file the markup to generate an ER style diagram.
'graph': writes the image of the ER diagram.
'dot': write to file the diagram in dot format.
'auto': choose from the filename:
'*.er': writes to a file the markup to generate an ER style diagram.
'.dot': returns the graph in the dot syntax.
else: return a graph to the format graph
:param include_tables: lst of str, table names to include, None means include all
:param include_columns: lst of str, column names to include, None means include all
:param exclude_tables: lst of str, table names to exclude, None means exclude nothing
:param exclude_columns: lst of str, field names to exclude, None means exclude nothing
:param schema: name of the schema
"""
try:
tables, relationships = all_to_intermediary(input, schema=schema)
tables, relationships = filter_resources(tables, relationships,
include_tables=include_tables, include_columns=include_columns,
exclude_tables=exclude_tables, exclude_columns=exclude_columns)
intermediary_to_output = get_output_mode(output, mode)
intermediary_to_output(tables, relationships, output)
except ImportError as e:
module_name = e.message.split()[-1]
print('Please install {0} using "pip install {0}".'.format(module_name))
except ParsingException as e:
sys.stderr.write(e.message)
if __name__ == '__main__':
cli()
|
Alexis-benoist/eralchemy | eralchemy/main.py | filter_resources | python | def filter_resources(tables, relationships,
include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None):
_tables = copy.deepcopy(tables)
_relationships = copy.deepcopy(relationships)
include_tables = include_tables or [t.name for t in _tables]
include_columns = include_columns or [c.name for t in _tables for c in t.columns]
exclude_tables = exclude_tables or list()
exclude_columns = exclude_columns or list()
_tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
_relationships = [r for r in _relationships
if r.right_col not in exclude_tables
and r.left_col not in exclude_tables
and r.right_col in include_tables
and r.left_col in include_tables]
for t in _tables:
t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
return _tables, _relationships | Include the following:
1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
Exclude the following:
1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
Disclosure note:
All relationships are taken into consideration before ignoring columns.
In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L173-L205 | null | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
try:
basestring
except NameError:
basestring = str
def cli():
"""Entry point for the application script"""
parser = get_argparser()
args = parser.parse_args()
check_args(args)
if args.v:
print('ERAlchemy version {}.'.format(__version__))
exit(0)
render_er(
args.i,
args.o,
include_tables=args.include_tables,
include_columns=args.include_columns,
exclude_tables=args.exclude_tables,
exclude_columns=args.exclude_columns,
schema=args.s
)
def get_argparser():
parser = argparse.ArgumentParser(prog='ERAlchemy')
parser.add_argument('-i', nargs='?', help='Database URI to process.')
parser.add_argument('-o', nargs='?', help='Name of the file to write.')
parser.add_argument('-s', nargs='?', help='Name of the schema.')
parser.add_argument('--exclude-tables', '-x', nargs='+', help='Name of tables not to be displayed.')
parser.add_argument('--exclude-columns', nargs='+', help='Name of columns not to be displayed (for all tables).')
parser.add_argument('--include-tables', nargs='+', help='Name of tables to be displayed alone.')
parser.add_argument('--include-columns', nargs='+', help='Name of columns to be displayed alone (for all tables).')
parser.add_argument('-v', help='Prints version number.', action='store_true')
return parser
def intermediary_to_markdown(tables, relationships, output):
""" Saves the intermediary representation to markdown. """
er_markup = _intermediary_to_markdown(tables, relationships)
with open(output, "w") as file_out:
file_out.write(er_markup)
def intermediary_to_dot(tables, relationships, output):
""" Save the intermediary representation to dot format. """
dot_file = _intermediary_to_dot(tables, relationships)
with open(output, "w") as file_out:
file_out.write(dot_file)
def intermediary_to_schema(tables, relationships, output):
""" Transforms and save the intermediary representation to the file chosen. """
dot_file = _intermediary_to_dot(tables, relationships)
graph = AGraph()
graph = graph.from_string(dot_file)
extension = output.split('.')[-1]
graph.draw(path=output, prog='dot', format=extension)
def _intermediary_to_markdown(tables, relationships):
""" Returns the er markup source in a string. """
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r)
def _intermediary_to_dot(tables, relationships):
""" Returns the dot source representing the database in a string. """
t = '\n'.join(t.to_dot() for t in tables)
r = '\n'.join(r.to_dot() for r in relationships)
return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r)
# Routes from the class name to the function transforming this class in
# the intermediary representation.
switch_input_class_to_method = {
'MetaData': metadata_to_intermediary,
'DeclarativeMeta': declarative_to_intermediary,
# For compatibility with Flask-SQLAlchemy
'_BoundDeclarativeMeta': declarative_to_intermediary,
# Renamed in Flask-SQLAlchemy 2.3
'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output.
switch_output_mode_auto = {
'er': intermediary_to_markdown,
'graph': intermediary_to_schema,
'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output.
switch_output_mode = {
'er': intermediary_to_markdown,
'dot': intermediary_to_dot,
}
def all_to_intermediary(filename_or_input, schema=None):
""" Dispatch the filename_or_input to the different function to produce the intermediary syntax.
All the supported classes names are in `swich_input_class_to_method`.
The input can also be a list of strings in markdown format or a filename finishing by '.er' containing markdown
format.
"""
# Try to convert from the name of the class
input_class_name = filename_or_input.__class__.__name__
try:
this_to_intermediary = switch_input_class_to_method[input_class_name]
tables, relationships = this_to_intermediary(filename_or_input)
return tables, relationships
except KeyError:
pass
# try to read markdown file.
if isinstance(filename_or_input, basestring):
if filename_or_input.split('.')[-1] == 'er':
return markdown_file_to_intermediary(filename_or_input)
# try to read a markdown in a string
if not isinstance(filename_or_input, basestring):
if all(isinstance(e, basestring) for e in filename_or_input):
return line_iterator_to_intermediary(filename_or_input)
# try to read DB URI.
try:
make_url(filename_or_input)
return database_to_intermediary(filename_or_input, schema=schema)
except ArgumentError:
pass
msg = 'Cannot process filename_or_input {}'.format(input_class_name)
raise ValueError(msg)
def get_output_mode(output, mode):
"""
From the output name and the mode returns a the function that will transform the intermediary
representation to the output.
"""
if mode != 'auto':
try:
return switch_output_mode_auto[mode]
except KeyError:
raise ValueError('Mode "{}" is not supported.')
extension = output.split('.')[-1]
try:
return switch_output_mode[extension]
except KeyError:
return intermediary_to_schema
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None, schema=None):
"""
Transform the metadata into a representation.
:param input: Possible inputs are instances of:
MetaData: SQLAlchemy Metadata
DeclarativeMeta: SQLAlchemy declarative Base
:param output: name of the file to output the
:param mode: str in list:
'er': writes to a file the markup to generate an ER style diagram.
'graph': writes the image of the ER diagram.
'dot': write to file the diagram in dot format.
'auto': choose from the filename:
'*.er': writes to a file the markup to generate an ER style diagram.
'.dot': returns the graph in the dot syntax.
else: return a graph to the format graph
:param include_tables: lst of str, table names to include, None means include all
:param include_columns: lst of str, column names to include, None means include all
:param exclude_tables: lst of str, table names to exclude, None means exclude nothing
:param exclude_columns: lst of str, field names to exclude, None means exclude nothing
:param schema: name of the schema
"""
try:
tables, relationships = all_to_intermediary(input, schema=schema)
tables, relationships = filter_resources(tables, relationships,
include_tables=include_tables, include_columns=include_columns,
exclude_tables=exclude_tables, exclude_columns=exclude_columns)
intermediary_to_output = get_output_mode(output, mode)
intermediary_to_output(tables, relationships, output)
except ImportError as e:
module_name = e.message.split()[-1]
print('Please install {0} using "pip install {0}".'.format(module_name))
except ParsingException as e:
sys.stderr.write(e.message)
if __name__ == '__main__':
cli()
|
Alexis-benoist/eralchemy | eralchemy/main.py | render_er | python | def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None, schema=None):
try:
tables, relationships = all_to_intermediary(input, schema=schema)
tables, relationships = filter_resources(tables, relationships,
include_tables=include_tables, include_columns=include_columns,
exclude_tables=exclude_tables, exclude_columns=exclude_columns)
intermediary_to_output = get_output_mode(output, mode)
intermediary_to_output(tables, relationships, output)
except ImportError as e:
module_name = e.message.split()[-1]
print('Please install {0} using "pip install {0}".'.format(module_name))
except ParsingException as e:
sys.stderr.write(e.message) | Transform the metadata into a representation.
:param input: Possible inputs are instances of:
MetaData: SQLAlchemy Metadata
DeclarativeMeta: SQLAlchemy declarative Base
:param output: name of the file to output the
:param mode: str in list:
'er': writes to a file the markup to generate an ER style diagram.
'graph': writes the image of the ER diagram.
'dot': write to file the diagram in dot format.
'auto': choose from the filename:
'*.er': writes to a file the markup to generate an ER style diagram.
'.dot': returns the graph in the dot syntax.
else: return a graph to the format graph
:param include_tables: lst of str, table names to include, None means include all
:param include_columns: lst of str, column names to include, None means include all
:param exclude_tables: lst of str, table names to exclude, None means exclude nothing
:param exclude_columns: lst of str, field names to exclude, None means exclude nothing
:param schema: name of the schema | train | https://github.com/Alexis-benoist/eralchemy/blob/d6fcdc67d6d413bb174bf008fd360044e1dff5a7/eralchemy/main.py#L208-L241 | [
"def intermediary_to_schema(tables, relationships, output):\n \"\"\" Transforms and save the intermediary representation to the file chosen. \"\"\"\n dot_file = _intermediary_to_dot(tables, relationships)\n graph = AGraph()\n graph = graph.from_string(dot_file)\n extension = output.split('.')[-1]\n ... | # -*- coding: utf-8 -*-
import argparse
import sys
import copy
from pygraphviz.agraph import AGraph
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from eralchemy.version import version as __version__
from eralchemy.cst import GRAPH_BEGINNING
from eralchemy.sqla import metadata_to_intermediary, declarative_to_intermediary, database_to_intermediary
from eralchemy.helpers import check_args
from eralchemy.parser import markdown_file_to_intermediary, line_iterator_to_intermediary, ParsingException
try:
basestring
except NameError:
basestring = str
def cli():
"""Entry point for the application script"""
parser = get_argparser()
args = parser.parse_args()
check_args(args)
if args.v:
print('ERAlchemy version {}.'.format(__version__))
exit(0)
render_er(
args.i,
args.o,
include_tables=args.include_tables,
include_columns=args.include_columns,
exclude_tables=args.exclude_tables,
exclude_columns=args.exclude_columns,
schema=args.s
)
def get_argparser():
parser = argparse.ArgumentParser(prog='ERAlchemy')
parser.add_argument('-i', nargs='?', help='Database URI to process.')
parser.add_argument('-o', nargs='?', help='Name of the file to write.')
parser.add_argument('-s', nargs='?', help='Name of the schema.')
parser.add_argument('--exclude-tables', '-x', nargs='+', help='Name of tables not to be displayed.')
parser.add_argument('--exclude-columns', nargs='+', help='Name of columns not to be displayed (for all tables).')
parser.add_argument('--include-tables', nargs='+', help='Name of tables to be displayed alone.')
parser.add_argument('--include-columns', nargs='+', help='Name of columns to be displayed alone (for all tables).')
parser.add_argument('-v', help='Prints version number.', action='store_true')
return parser
def intermediary_to_markdown(tables, relationships, output):
""" Saves the intermediary representation to markdown. """
er_markup = _intermediary_to_markdown(tables, relationships)
with open(output, "w") as file_out:
file_out.write(er_markup)
def intermediary_to_dot(tables, relationships, output):
""" Save the intermediary representation to dot format. """
dot_file = _intermediary_to_dot(tables, relationships)
with open(output, "w") as file_out:
file_out.write(dot_file)
def intermediary_to_schema(tables, relationships, output):
""" Transforms and save the intermediary representation to the file chosen. """
dot_file = _intermediary_to_dot(tables, relationships)
graph = AGraph()
graph = graph.from_string(dot_file)
extension = output.split('.')[-1]
graph.draw(path=output, prog='dot', format=extension)
def _intermediary_to_markdown(tables, relationships):
""" Returns the er markup source in a string. """
t = '\n'.join(t.to_markdown() for t in tables)
r = '\n'.join(r.to_markdown() for r in relationships)
return '{}\n{}'.format(t, r)
def _intermediary_to_dot(tables, relationships):
""" Returns the dot source representing the database in a string. """
t = '\n'.join(t.to_dot() for t in tables)
r = '\n'.join(r.to_dot() for r in relationships)
return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r)
# Routes from the class name to the function transforming this class in
# the intermediary representation.
switch_input_class_to_method = {
'MetaData': metadata_to_intermediary,
'DeclarativeMeta': declarative_to_intermediary,
# For compatibility with Flask-SQLAlchemy
'_BoundDeclarativeMeta': declarative_to_intermediary,
# Renamed in Flask-SQLAlchemy 2.3
'DefaultMeta': declarative_to_intermediary
}
# Routes from the mode to the method to transform the intermediary
# representation to the desired output.
switch_output_mode_auto = {
'er': intermediary_to_markdown,
'graph': intermediary_to_schema,
'dot': intermediary_to_dot
}
# Routes from the file extension to the method to transform
# the intermediary representation to the desired output.
switch_output_mode = {
'er': intermediary_to_markdown,
'dot': intermediary_to_dot,
}
def all_to_intermediary(filename_or_input, schema=None):
""" Dispatch the filename_or_input to the different function to produce the intermediary syntax.
All the supported classes names are in `swich_input_class_to_method`.
The input can also be a list of strings in markdown format or a filename finishing by '.er' containing markdown
format.
"""
# Try to convert from the name of the class
input_class_name = filename_or_input.__class__.__name__
try:
this_to_intermediary = switch_input_class_to_method[input_class_name]
tables, relationships = this_to_intermediary(filename_or_input)
return tables, relationships
except KeyError:
pass
# try to read markdown file.
if isinstance(filename_or_input, basestring):
if filename_or_input.split('.')[-1] == 'er':
return markdown_file_to_intermediary(filename_or_input)
# try to read a markdown in a string
if not isinstance(filename_or_input, basestring):
if all(isinstance(e, basestring) for e in filename_or_input):
return line_iterator_to_intermediary(filename_or_input)
# try to read DB URI.
try:
make_url(filename_or_input)
return database_to_intermediary(filename_or_input, schema=schema)
except ArgumentError:
pass
msg = 'Cannot process filename_or_input {}'.format(input_class_name)
raise ValueError(msg)
def get_output_mode(output, mode):
"""
From the output name and the mode returns a the function that will transform the intermediary
representation to the output.
"""
if mode != 'auto':
try:
return switch_output_mode_auto[mode]
except KeyError:
raise ValueError('Mode "{}" is not supported.')
extension = output.split('.')[-1]
try:
return switch_output_mode[extension]
except KeyError:
return intermediary_to_schema
def filter_resources(tables, relationships,
include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None):
"""
Include the following:
1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
Exclude the following:
1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
Disclosure note:
All relationships are taken into consideration before ignoring columns.
In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables
"""
_tables = copy.deepcopy(tables)
_relationships = copy.deepcopy(relationships)
include_tables = include_tables or [t.name for t in _tables]
include_columns = include_columns or [c.name for t in _tables for c in t.columns]
exclude_tables = exclude_tables or list()
exclude_columns = exclude_columns or list()
_tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
_relationships = [r for r in _relationships
if r.right_col not in exclude_tables
and r.left_col not in exclude_tables
and r.right_col in include_tables
and r.left_col in include_tables]
for t in _tables:
t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
return _tables, _relationships
if __name__ == '__main__':
cli()
|
nuSTORM/gnomon | gnomon/JsonToROOT.py | JsonToROOTConverter.objwalk | python | def objwalk(self, obj, path=(), memo=None):
# dual python 2/3 compatability, inspired by the "six" library
string_types = (str, unicode) if str is bytes else (str, bytes)
iteritems = lambda mapping: getattr(mapping, 'iteritems', mapping.items)()
if memo is None:
memo = set()
iterator = None
if isinstance(obj, Mapping):
iterator = iteritems
elif isinstance(obj, (Sequence, Set)) and not isinstance(obj, string_types):
iterator = enumerate
if iterator:
if id(obj) not in memo:
memo.add(id(obj))
for path_component, value in iterator(obj):
for result in self.objwalk(value, path + (path_component,), memo):
yield result
memo.remove(id(obj))
else:
yield path, obj | Traverse a dictionary recursively and save path
Taken from:
http://code.activestate.com/recipes/577982-recursively-walk-python-objects/ | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/JsonToROOT.py#L149-L175 | null | class JsonToROOTConverter(object):
"""Convert JSON output to a ROOT file
Start with a JSON schema which maps keys to their type and also where in the ROOT
file they should go. Then get passed each event. There are two main initialization
events: a struct is created in memory using ROOT and CINT, then a TTree is bound to
the branches within the struct."""
def __init__(self, schema):
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.schema = schema
# Create ROOT file and TTree
filename = 'gnomon.root'
self.file = ROOT.TFile(filename, 'RECREATE')
self.t = ROOT.TTree('t', '')
# This code will be passed into CINT
self.names_lookup, self.types_lookup = self.make_lookups(schema)
my_struct_code = self.form_cint(self.names_lookup, self.types_lookup)
self.log.info('Using following structure for converting Python: %s' % my_struct_code)
ROOT.gROOT.ProcessLine(my_struct_code)
from ROOT import MyStruct
self.my_struct = MyStruct()
for key, val in self.names_lookup.iteritems():
name = val
my_type = self.types_lookup[key]
code = None
if my_type == 'string':
code = 'C'
elif my_type == 'integer':
code = 'I'
elif my_type == 'number' or my_type == 'array':
code = 'F'
elif my_type == 'object':
pass
elif my_type == 'boolean':
code = 'O'
else:
raise ValueError
x = ROOT.AddressOf(self.my_struct, str(name))
if my_type == 'array':
self.t.Branch(name, x, '%s[%d]/%s' % (name, BUFFER_SIZE, code))
else:
self.t.Branch(name, x, '%s/%s' % (name, code))
def Process(self, doc):
try:
validictory.validate(doc, self.schema, required_by_default=False)
except:
raise
default = -99999
for key in self.names_lookup.values():
if isinstance(getattr(self.my_struct, key), (float, int)):
setattr(self.my_struct, key, default) # not defined, ie. default
elif isinstance(getattr(self.my_struct, key), (str)):
setattr(self.my_struct, key, str(-9999)) # not defined, ie. default
for key, val in self.objwalk(doc):
if key in self.names_lookup.keys():
setattr(self.my_struct,
self.names_lookup[key],
val)
# check if list
if len(key) > 1 and isinstance(key[-1], int) and key[0:-1] in self.names_lookup.keys():
temp = getattr(self.my_struct,
self.names_lookup[key[0:-1]])
temp[key[-1]] = val
setattr(self.my_struct,
self.names_lookup[key[0:-1]],
temp)
self.t.Fill()
def Shutdown(self):
self.file.cd()
self.t.Write("treegnome")
#self.file.Write()
self.file.Close()
def make_lookups(self, my_dict):
# This will map paths in the event tree to ROOT variable names
names_lookup = {}
types_lookup = {}
for key, val in self.objwalk(my_dict):
trunc_key = key[:-1] # trunacte off end
trunc_key = tuple([x for x in list(trunc_key) if x != 'properties'])
if key[-1] == 'description':
names_lookup[trunc_key] = val
elif key[-1] == 'type':
types_lookup[trunc_key] = val
else:
pass
self.log.info("Names lookup", names_lookup)
return names_lookup, types_lookup
def form_cint(self, names_lookup, types_lookup):
my_struct_code = 'struct MyStruct {'
for key, val in names_lookup.iteritems():
name = val
my_type = types_lookup[key]
if my_type == "string":
my_struct_code += 'char %s[256];' % name
elif my_type == "integer":
my_struct_code += 'int %s;' % name
elif my_type == "number":
my_struct_code += 'float %s;' % name
elif my_type == "boolean":
my_struct_code += 'bool %s;' % name
elif my_type == 'array':
my_struct_code += 'float %s[%d];' % (name, BUFFER_SIZE)
my_struct_code += '};'
return my_struct_code
|
nuSTORM/gnomon | gnomon/GeneratorAction.py | lookup_cc_partner | python | def lookup_cc_partner(nu_pid):
neutrino_type = math.fabs(nu_pid)
assert neutrino_type in [12, 14, 16]
cc_partner = neutrino_type - 1 # get e, mu, tau
cc_partner = math.copysign(
cc_partner, nu_pid) # make sure matter/antimatter
cc_partner = int(cc_partner) # convert to int
return cc_partner | Lookup the charge current partner
Takes as an input neutrino nu_pid is a PDG code, then returns
the charged lepton partner. So 12 (nu_e) returns 11. Keeps sign | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/GeneratorAction.py#L24-L39 | null | """Generator actions
These classes are used to tell Geant4 what particles it is meant to simulate.
One always has to inherit from a UserPrimaryGeneratorAction base class in
Geant4 and then define the function GeneratePrimaries.
"""
import Geant4 as G4
import ROOT
import logging
import sys
import os
import random
import tempfile
import subprocess
import math
import gnomon.Configuration as Configuration
from gnomon.Configuration import RUNTIME_CONFIG as rc
from scipy.stats.distributions import rv_frozen
import scipy
def convert_3vector_to_dict(value):
if not isinstance(value, list):
raise ValueError('Wrong type for 3-vector since not list', value)
if len(value) != 3:
raise ValueError('Wrong dimensions for 3-vector')
new_dict = {}
new_dict['x'] = value[0]
new_dict['y'] = value[1]
new_dict['z'] = value[2]
return new_dict
def convert_dict_to_g4vector(value, new_vector=G4.G4ThreeVector()):
new_vector.x = value['x']
new_vector.y = value['y']
new_vector.z = value['z']
return new_vector
def is_neutrino_code(pdg_code):
if math.fabs(pdg_code) in [12, 14, 16]:
return True
return False
class GnomonGeneratorAction(G4.G4VUserPrimaryGeneratorAction):
"""Geant4 interface class"""
def __init__(self, generator):
G4.G4VUserPrimaryGeneratorAction.__init__(self)
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.log.debug('Initialized %s', self.__class__.__name__)
self.particle_generator = generator
self.config = Configuration.GLOBAL_CONFIG
def setMCInfo(self, info):
rc['generator'] = info
def GeneratePrimaries(self, event):
particles = self.particle_generator.generate()
for particle in particles:
pp = G4.G4PrimaryParticle()
pp.SetPDGcode(particle['pid'])
pp.SetMomentum(particle['momentum']['x'],
particle['momentum']['y'],
particle['momentum']['z'])
v = G4.G4PrimaryVertex()
v.SetPosition(particle['position']['x'],
particle['position']['y'],
particle['position']['z'])
v.SetPrimary(pp)
event.AddPrimaryVertex(v)
# Write particleS information to the runtime configuration so the Truth
# processor can find it in order to output it
self.setMCInfo(particles)
class composite_z():
"""Deriving from scipy.stats failed, so just overloaded rvs.
This really should hook into the GDML or something since it is geo
dependent this way"""
def __init__(self, config):
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.log.debug('Initialized %s', self.__class__.__name__)
self.layers = config['layers']
self.z_extent = config['layers'] * config['thickness_layer']
self.thickness_layer = config['thickness_layer']
self.thickness_bar = config['thickness_bar']
self.density = {}
self.density['Iron'] = config['density_iron']
self.density['Scint.'] = config['density_scint']
self.material = None
def get_material(self): # hack for Genie to know material
"""Return material of last rvs call"""
return self.material
def rvs(self):
layer = random.randint(-self.layers / 2, self.layers / 2 - 1) # inclusive
d_sc = self.density['Scint.']
t_sc = 2 * self.thickness_bar
t_fe = self.thickness_layer - t_sc
d_fe = self.density['Iron']
z = layer * self.thickness_layer
# How much material is there
my_max = t_fe * d_fe + t_sc * d_sc
# Choose random by gram
my_choice = random.uniform(0, my_max)
if my_choice < t_fe * d_fe: # Is iron
z += random.uniform(0, t_fe)
self.material = 'Iron'
else: # is scint
z += t_fe
z += random.uniform(0, t_sc)
self.material = 'Scint.'
#self.log.debug('Material is %s' % self.material)
return z
class Distribution():
def __init__(self, some_obj):
self.static_value = None
self.scipy_dist = None
if isinstance(some_obj, (float, int)):
self.static_value = some_obj
elif hasattr(some_obj, 'rvs'):
self.scipy_dist = some_obj
else:
raise ValueError("Do not understand", some_obj)
self.cache = None
def dist(self):
# Horrible HACK. Since we don't have Genie know about GDML we have
# to let GenieGenerator have a hook to know what material to simulate
if self.scipy_dist is None:
return None
return self.scipy_dist
def is_static(self):
if self.static_value is not None:
return True
return False
def get_cache(self):
return self.cache
def set_cache(self, value):
self.cache = value
def get(self):
if self.static_value is not None:
self.set_cache(self.static_value)
elif self.scipy_dist is not None:
self.set_cache(self.scipy_dist.rvs())
else:
raise RuntimeError("Should never get here")
return self.get_cache()
class Generator():
"""Generator base class"""
def __init__(self, position, momentum, pid):
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.log.debug('Initialized %s', self.__class__.__name__)
self.config = Configuration.GLOBAL_CONFIG
self.particle = {}
self.set_position(position)
self.set_momentum(momentum)
self.set_pid(pid) # Can be neutrino with forced interaction
def set_position(self, position):
self._set_vector_value('position', position)
def set_momentum(self, momentum):
self._set_vector_value('momentum', momentum)
def set_pid(self, pid):
assert isinstance(pid, int)
self.particle['pid'] = Distribution(pid)
def _set_vector_value(self, var_name, value):
"""Private"""
self.particle[var_name] = convert_3vector_to_dict(value)
for coord in self.particle[var_name].keys():
new_value = Distribution(self.particle[var_name][coord])
self.particle[var_name][coord] = new_value
class ParticleGenerator(Generator):
"""Baseclass for gnomon particle generators"""
def __init__(self, position, momentum, pid):
Generator.__init__(self, position, momentum, pid)
def generate(self):
new_particle = {}
for key, value in self.particle.iteritems():
if isinstance(value, dict): # restricts 2 depth, recurse instead?
new_particle[key] = {}
for key2, value2 in value.iteritems():
new_particle[key][key2] = value2.get()
else:
new_particle[key] = value.get()
self.log.info("Generated particle:")
self.log.info(new_particle)
return [new_particle]
class GenieGenerator(Generator):
"""Generate events from a Genie ntuple
A Genie ntuple that already knew the GDML would be useful. Otherwise,
we run gevgen per material and have to do nasty geometry stuff here
"""
def __init__(self, position, momentum, pid):
Generator.__init__(self, position, momentum, pid)
# The event list is a generator so requires calling 'next' on it.
# The key is a material 'Iron', 'Scint.' etc
self.event_list = {}
self.filenames = {}
self.genie_temp_dir = tempfile.mkdtemp()
def __del__(self):
for key in self.filenames.keys():
os.remove(self.filenames[key])
os.rmdir(self.genie_temp_dir)
def _create_file(self, material):
my_id, filename = tempfile.mkstemp(suffix='.root')
seed = random.randint(1, sys.maxint)
max_energy = self.config['generator']['max_energy_GeV']
xsec_filename = os.path.join(self.config['data_dir'], 'xsec.xml')
# Environmental variables need to be set to tell Genie where cross
# section files are and a repeatable random number seed. Also, disable
# any X windows popping up (was causing crashes...)
env_vars = 'DISPLAY= GSPLOAD=%s GSEED=%d' % (xsec_filename, seed)
command = '%s gevgen' % env_vars
command += ' -p %d' % self.particle['pid'].get()
command += ' -r %d' % self.config['run_number']
pdg_codes = {}
pdg_codes['Iron'] = '1000260560'
pdg_codes['Scint.'] = '1000010010[0.085]+1000060120[0.915]'
command += ' -t %s' % pdg_codes[material]
command += ' -n %d' % self.config['generator']['size_of_genie_buffer']
self.energy_distribution = self.config['distribution']
self.log.info('Neutrino energy distribution: %s' % self.energy_distribution)
if self.energy_distribution == 'muon' or\
self.energy_distribution == 'electron' or\
self.energy_distribution == 'flat':
# muon, electron, or flat
command += ' -e 0.1,%f' % max_energy
# Just use the file associated with the neutrino distribution of
# muon decay without any accelerator effects. This is a good
# approximation in the far detector limit ONLY.
flux_filename = 'flux_file_%s.dat' % self.energy_distribution[0]
flux_filename = os.path.join(self.config['data_dir'],
flux_filename)
command += ' -f %s' % flux_filename
elif type(self.energy_distribution) == float:
command += ' -e %f' % self.energy_distribution
else:
raise ValueError('bad energy distribution')
self.log.info('Running the command: %s', command)
print filename
intermediate_file = os.path.join(self.genie_temp_dir,
"gntp.%d.ghep.root" % self.config['run_number'])
command = """cd %(tmpdir)s
%(command)s
DISPLAY= gntpc -i %(int_file)s -o %(filename)s -f gst
""" % {"tmpdir": self.genie_temp_dir,
"command": command,
"int_file": intermediate_file,
"filename": filename}
self.log.info('Running the command: %s', command)
s = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True) # unsafe, but no easy way TODO
# Trick to send stdout -> debug
while True:
line = s.stdout.readline()
if not line:
break
self.log.debug(line)
# Trick to send stderr -> error
while True:
line = s.stdout.readline()
if not line:
break
self.log.error(line)
os.remove(intermediate_file)
self.filenames[material] = filename
self.event_list[material] = self._get_next_events(material)
def _get_next_events(self, material):
"""Get next events from Genie ROOT file
Looks over the generator"""
f = ROOT.TFile(self.filenames[material])
try:
t = f.Get('gst')
n = t.GetEntries()
except:
self.log.critical('Could not open the ROOT file with Genie events')
raise
for i in range(n):
t.GetEntry(i)
next_events = []
position = convert_3vector_to_dict([self.particle['position']['x'].get_cache(),
self.particle['position']['y'].get_cache(),
self.particle['position']['z'].get_cache()])
lepton_event = {}
if t.El ** 2 - (t.pxl ** 2 + t.pyl ** 2 + t.pzl ** 2) < 1e-7:
lepton_event['pid'] = self.particle[
'pid'].get() # Either NC or ES
else:
lepton_event['pid'] = lookup_cc_partner(
self.particle['pid'].get())
# units: GeV -> MeV
momentum_vector = [1000 * x for x in [t.pxl, t.pyl, t.pzl]]
lepton_event['momentum'] = convert_3vector_to_dict(momentum_vector)
lepton_event['position'] = position
next_events.append(lepton_event)
for j in range(t.nf): # nf, number final hadronic states
hadron_event = {}
hadron_event['pid'] = t.pdgf[j]
hadron_event['position'] = position
# units: GeV -> MeV
momentum_vector = [1000 * x for x in [t.pxf[j], t.pyf[j], t.pzf[j]]]
hadron_event['momentum'] = convert_3vector_to_dict(momentum_vector)
next_events.append(hadron_event)
event_type = {}
event_type['vertex'] = position
to_save = {} # maps our names to Genie gst names
to_save['incoming_neutrino'] = 'neu'
to_save['neutrino_energy'] = 'Ev'
to_save['target_material'] = 'tgt'
for key, value in to_save.iteritems():
self.log.info('%s : %s' % (key, str(t.__getattr__(value))))
event_type[key] = t.__getattr__(value)
self.log.debug('Event type:')
for my_type in ['qel', 'res', 'dis', 'coh', 'dfr',
'imd', 'nuel', 'em']:
if t.__getattr__(my_type) == 1:
self.log.debug('\t%s', my_type)
event_type[my_type] = t.__getattr__(my_type)
self.log.debug('Propogator:')
for prop in ['nc', 'cc']:
if t.__getattr__(prop) == 1:
self.log.debug('\t%s', prop)
event_type[prop] = t.__getattr__(prop)
yield next_events, event_type
f.Close()
os.remove(self.filenames[material])
def generate(self):
if self.particle['pid'].is_static() == False:
raise ValueError("PID must be static")
if not is_neutrino_code(self.particle['pid'].get()):
raise ValueError("PID must be neutrino PDG code")
material = 'Iron'
# More hack: need to know position to know material...
position = convert_3vector_to_dict([self.particle['position']['x'].get(),
self.particle['position']['y'].get(),
self.particle['position']['z'].get()])
# Is this a distribution? Need material hook HACK
dist = self.particle['position']['z'].dist()
if dist.__class__.__name__ == 'composite_z':
material = dist.get_material()
self.log.info("Choosing material %s" % material)
if material not in self.event_list:
self.event_list[material] = None
try:
if self.event_list[material] == None:
self.log.info('Empty event list, populating with Genie...')
self._create_file(material)
particles, event_type = next(self.event_list[material])
except StopIteration:
self.log.info("Generating more Genie events")
self._create_file(material)
particles, event_type = next(self.event_list[material])
rc['event_type'] = event_type
return particles
|
nuSTORM/gnomon | gnomon/GeneratorAction.py | Generator._set_vector_value | python | def _set_vector_value(self, var_name, value):
self.particle[var_name] = convert_3vector_to_dict(value)
for coord in self.particle[var_name].keys():
new_value = Distribution(self.particle[var_name][coord])
self.particle[var_name][coord] = new_value | Private | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/GeneratorAction.py#L235-L241 | [
"def convert_3vector_to_dict(value):\n if not isinstance(value, list):\n raise ValueError('Wrong type for 3-vector since not list', value)\n if len(value) != 3:\n raise ValueError('Wrong dimensions for 3-vector')\n\n new_dict = {}\n new_dict['x'] = value[0]\n new_dict['y'] = value[1]\n ... | class Generator():
"""Generator base class"""
def __init__(self, position, momentum, pid):
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.log.debug('Initialized %s', self.__class__.__name__)
self.config = Configuration.GLOBAL_CONFIG
self.particle = {}
self.set_position(position)
self.set_momentum(momentum)
self.set_pid(pid) # Can be neutrino with forced interaction
def set_position(self, position):
self._set_vector_value('position', position)
def set_momentum(self, momentum):
self._set_vector_value('momentum', momentum)
def set_pid(self, pid):
assert isinstance(pid, int)
self.particle['pid'] = Distribution(pid)
|
nuSTORM/gnomon | gnomon/GeneratorAction.py | GenieGenerator._get_next_events | python | def _get_next_events(self, material):
f = ROOT.TFile(self.filenames[material])
try:
t = f.Get('gst')
n = t.GetEntries()
except:
self.log.critical('Could not open the ROOT file with Genie events')
raise
for i in range(n):
t.GetEntry(i)
next_events = []
position = convert_3vector_to_dict([self.particle['position']['x'].get_cache(),
self.particle['position']['y'].get_cache(),
self.particle['position']['z'].get_cache()])
lepton_event = {}
if t.El ** 2 - (t.pxl ** 2 + t.pyl ** 2 + t.pzl ** 2) < 1e-7:
lepton_event['pid'] = self.particle[
'pid'].get() # Either NC or ES
else:
lepton_event['pid'] = lookup_cc_partner(
self.particle['pid'].get())
# units: GeV -> MeV
momentum_vector = [1000 * x for x in [t.pxl, t.pyl, t.pzl]]
lepton_event['momentum'] = convert_3vector_to_dict(momentum_vector)
lepton_event['position'] = position
next_events.append(lepton_event)
for j in range(t.nf): # nf, number final hadronic states
hadron_event = {}
hadron_event['pid'] = t.pdgf[j]
hadron_event['position'] = position
# units: GeV -> MeV
momentum_vector = [1000 * x for x in [t.pxf[j], t.pyf[j], t.pzf[j]]]
hadron_event['momentum'] = convert_3vector_to_dict(momentum_vector)
next_events.append(hadron_event)
event_type = {}
event_type['vertex'] = position
to_save = {} # maps our names to Genie gst names
to_save['incoming_neutrino'] = 'neu'
to_save['neutrino_energy'] = 'Ev'
to_save['target_material'] = 'tgt'
for key, value in to_save.iteritems():
self.log.info('%s : %s' % (key, str(t.__getattr__(value))))
event_type[key] = t.__getattr__(value)
self.log.debug('Event type:')
for my_type in ['qel', 'res', 'dis', 'coh', 'dfr',
'imd', 'nuel', 'em']:
if t.__getattr__(my_type) == 1:
self.log.debug('\t%s', my_type)
event_type[my_type] = t.__getattr__(my_type)
self.log.debug('Propogator:')
for prop in ['nc', 'cc']:
if t.__getattr__(prop) == 1:
self.log.debug('\t%s', prop)
event_type[prop] = t.__getattr__(prop)
yield next_events, event_type
f.Close()
os.remove(self.filenames[material]) | Get next events from Genie ROOT file
Looks over the generator | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/GeneratorAction.py#L383-L462 | null | class GenieGenerator(Generator):
"""Generate events from a Genie ntuple
A Genie ntuple that already knew the GDML would be useful. Otherwise,
we run gevgen per material and have to do nasty geometry stuff here
"""
def __init__(self, position, momentum, pid):
Generator.__init__(self, position, momentum, pid)
# The event list is a generator so requires calling 'next' on it.
# The key is a material 'Iron', 'Scint.' etc
self.event_list = {}
self.filenames = {}
self.genie_temp_dir = tempfile.mkdtemp()
def __del__(self):
for key in self.filenames.keys():
os.remove(self.filenames[key])
os.rmdir(self.genie_temp_dir)
def _create_file(self, material):
my_id, filename = tempfile.mkstemp(suffix='.root')
seed = random.randint(1, sys.maxint)
max_energy = self.config['generator']['max_energy_GeV']
xsec_filename = os.path.join(self.config['data_dir'], 'xsec.xml')
# Environmental variables need to be set to tell Genie where cross
# section files are and a repeatable random number seed. Also, disable
# any X windows popping up (was causing crashes...)
env_vars = 'DISPLAY= GSPLOAD=%s GSEED=%d' % (xsec_filename, seed)
command = '%s gevgen' % env_vars
command += ' -p %d' % self.particle['pid'].get()
command += ' -r %d' % self.config['run_number']
pdg_codes = {}
pdg_codes['Iron'] = '1000260560'
pdg_codes['Scint.'] = '1000010010[0.085]+1000060120[0.915]'
command += ' -t %s' % pdg_codes[material]
command += ' -n %d' % self.config['generator']['size_of_genie_buffer']
self.energy_distribution = self.config['distribution']
self.log.info('Neutrino energy distribution: %s' % self.energy_distribution)
if self.energy_distribution == 'muon' or\
self.energy_distribution == 'electron' or\
self.energy_distribution == 'flat':
# muon, electron, or flat
command += ' -e 0.1,%f' % max_energy
# Just use the file associated with the neutrino distribution of
# muon decay without any accelerator effects. This is a good
# approximation in the far detector limit ONLY.
flux_filename = 'flux_file_%s.dat' % self.energy_distribution[0]
flux_filename = os.path.join(self.config['data_dir'],
flux_filename)
command += ' -f %s' % flux_filename
elif type(self.energy_distribution) == float:
command += ' -e %f' % self.energy_distribution
else:
raise ValueError('bad energy distribution')
self.log.info('Running the command: %s', command)
print filename
intermediate_file = os.path.join(self.genie_temp_dir,
"gntp.%d.ghep.root" % self.config['run_number'])
command = """cd %(tmpdir)s
%(command)s
DISPLAY= gntpc -i %(int_file)s -o %(filename)s -f gst
""" % {"tmpdir": self.genie_temp_dir,
"command": command,
"int_file": intermediate_file,
"filename": filename}
self.log.info('Running the command: %s', command)
s = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True) # unsafe, but no easy way TODO
# Trick to send stdout -> debug
while True:
line = s.stdout.readline()
if not line:
break
self.log.debug(line)
# Trick to send stderr -> error
while True:
line = s.stdout.readline()
if not line:
break
self.log.error(line)
os.remove(intermediate_file)
self.filenames[material] = filename
self.event_list[material] = self._get_next_events(material)
def generate(self):
if self.particle['pid'].is_static() == False:
raise ValueError("PID must be static")
if not is_neutrino_code(self.particle['pid'].get()):
raise ValueError("PID must be neutrino PDG code")
material = 'Iron'
# More hack: need to know position to know material...
position = convert_3vector_to_dict([self.particle['position']['x'].get(),
self.particle['position']['y'].get(),
self.particle['position']['z'].get()])
# Is this a distribution? Need material hook HACK
dist = self.particle['position']['z'].dist()
if dist.__class__.__name__ == 'composite_z':
material = dist.get_material()
self.log.info("Choosing material %s" % material)
if material not in self.event_list:
self.event_list[material] = None
try:
if self.event_list[material] == None:
self.log.info('Empty event list, populating with Genie...')
self._create_file(material)
particles, event_type = next(self.event_list[material])
except StopIteration:
self.log.info("Generating more Genie events")
self._create_file(material)
particles, event_type = next(self.event_list[material])
rc['event_type'] = event_type
return particles
|
nuSTORM/gnomon | gnomon/Graph.py | Graph.CreateVertices | python | def CreateVertices(self, points):
gr = digraph()
for z, x, Q in points:
node = (z, x, Q)
gr.add_nodes([node])
return gr | Returns a dictionary object with keys that are 2tuples
represnting a point. | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/Graph.py#L29-L40 | null | class Graph():
def FindParentNode(self, gr):
transitive_closure = accessibility.accessibility(gr)
most_accesisible_node = None
for node_in, nodes_out in transitive_closure.iteritems():
if most_accesisible_node is None:
most_accesisible_node = node_in
max_value = len(transitive_closure[most_accesisible_node])
this_value = len(nodes_out)
if this_value > max_value:
most_accesisible_node = node_in
return most_accesisible_node
def CreateDirectedEdges(self, points, gr, layer_width):
"""
Take each key (ie. point) in the graph and for that point
create an edge to every point downstream of it where the weight
of the edge is the tuple (distance, angle)
"""
for z0, x0, Q0 in points:
for z1, x1, Q1 in points:
dz = z1 - z0 # no fabs because we check arrow direction
if dz > 0.0: # make sure arrow in right direction
if dz - layer_width < distance_threshold: # only adjacents
dx = math.fabs(x1 - x0)
if dx > 5 * bar_width:
continue
# Weights are negative to in order to use shortest path
# algorithms on the graph.
weight = -1 * math.hypot(dz, dx)
edge = ((z0, x0, Q0), (z1, x1, Q1))
gr.add_edge(edge, wt=weight)
# Ensure that it is already transitively reduced
assert len(critical.transitive_edges(gr)) == 0
return gr
def GetFarthestNode(self, gr, node):
"""node is start node"""
# Remember: weights are negative
distance = minmax.shortest_path_bellman_ford(gr, node)[1]
# Find the farthest node, which is end of track
min_key = None
for key, value in distance.iteritems():
if min_key is None or value < distance[min_key]:
min_key = key
return min_key
def NegateGraph(self, gr):
for edge in gr.edges():
weight = gr.edge_weight(edge)
gr.set_edge_weight(edge, -1 * weight)
return gr
def ComputeLongestPath(self, gr, parent_node):
parent_node = self.FindParentNode(gr)
farthest_node = self.GetFarthestNode(gr, parent_node)
gr = self.NegateGraph(gr)
st, distance = minmax.shortest_path_bellman_ford(gr, parent_node)
gr = self.NegateGraph(gr)
max_distance = distance[farthest_node]
# Then swim back to the parent node. Record the path.
node_list = [farthest_node]
i = 0
while parent_node not in node_list:
node_list.append(st[node_list[-1]])
i += 1
if i > 10000:
raise ValueError()
# Grab doublets
node_list2 = []
for node1 in node_list:
for node2 in gr.nodes():
z1, x1, Q1 = node1
z2, x2, Q2 = node2
dx = math.fabs(x1 - x2)
if z1 == z2 and math.fabs(dx - bar_width) < distance_threshold:
node_list2.append(node2)
for node in node_list + node_list2:
if node:
gr.del_node(node)
node_list = [x for x in node_list if x is not None]
return node_list, max_distance, gr
|
nuSTORM/gnomon | gnomon/Graph.py | Graph.CreateDirectedEdges | python | def CreateDirectedEdges(self, points, gr, layer_width):
for z0, x0, Q0 in points:
for z1, x1, Q1 in points:
dz = z1 - z0 # no fabs because we check arrow direction
if dz > 0.0: # make sure arrow in right direction
if dz - layer_width < distance_threshold: # only adjacents
dx = math.fabs(x1 - x0)
if dx > 5 * bar_width:
continue
# Weights are negative to in order to use shortest path
# algorithms on the graph.
weight = -1 * math.hypot(dz, dx)
edge = ((z0, x0, Q0), (z1, x1, Q1))
gr.add_edge(edge, wt=weight)
# Ensure that it is already transitively reduced
assert len(critical.transitive_edges(gr)) == 0
return gr | Take each key (ie. point) in the graph and for that point
create an edge to every point downstream of it where the weight
of the edge is the tuple (distance, angle) | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/Graph.py#L42-L69 | null | class Graph():
def FindParentNode(self, gr):
transitive_closure = accessibility.accessibility(gr)
most_accesisible_node = None
for node_in, nodes_out in transitive_closure.iteritems():
if most_accesisible_node is None:
most_accesisible_node = node_in
max_value = len(transitive_closure[most_accesisible_node])
this_value = len(nodes_out)
if this_value > max_value:
most_accesisible_node = node_in
return most_accesisible_node
def CreateVertices(self, points):
"""
Returns a dictionary object with keys that are 2tuples
represnting a point.
"""
gr = digraph()
for z, x, Q in points:
node = (z, x, Q)
gr.add_nodes([node])
return gr
def GetFarthestNode(self, gr, node):
"""node is start node"""
# Remember: weights are negative
distance = minmax.shortest_path_bellman_ford(gr, node)[1]
# Find the farthest node, which is end of track
min_key = None
for key, value in distance.iteritems():
if min_key is None or value < distance[min_key]:
min_key = key
return min_key
def NegateGraph(self, gr):
for edge in gr.edges():
weight = gr.edge_weight(edge)
gr.set_edge_weight(edge, -1 * weight)
return gr
def ComputeLongestPath(self, gr, parent_node):
parent_node = self.FindParentNode(gr)
farthest_node = self.GetFarthestNode(gr, parent_node)
gr = self.NegateGraph(gr)
st, distance = minmax.shortest_path_bellman_ford(gr, parent_node)
gr = self.NegateGraph(gr)
max_distance = distance[farthest_node]
# Then swim back to the parent node. Record the path.
node_list = [farthest_node]
i = 0
while parent_node not in node_list:
node_list.append(st[node_list[-1]])
i += 1
if i > 10000:
raise ValueError()
# Grab doublets
node_list2 = []
for node1 in node_list:
for node2 in gr.nodes():
z1, x1, Q1 = node1
z2, x2, Q2 = node2
dx = math.fabs(x1 - x2)
if z1 == z2 and math.fabs(dx - bar_width) < distance_threshold:
node_list2.append(node2)
for node in node_list + node_list2:
if node:
gr.del_node(node)
node_list = [x for x in node_list if x is not None]
return node_list, max_distance, gr
|
nuSTORM/gnomon | gnomon/Graph.py | Graph.GetFarthestNode | python | def GetFarthestNode(self, gr, node):
# Remember: weights are negative
distance = minmax.shortest_path_bellman_ford(gr, node)[1]
# Find the farthest node, which is end of track
min_key = None
for key, value in distance.iteritems():
if min_key is None or value < distance[min_key]:
min_key = key
return min_key | node is start node | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/Graph.py#L71-L82 | null | class Graph():
def FindParentNode(self, gr):
transitive_closure = accessibility.accessibility(gr)
most_accesisible_node = None
for node_in, nodes_out in transitive_closure.iteritems():
if most_accesisible_node is None:
most_accesisible_node = node_in
max_value = len(transitive_closure[most_accesisible_node])
this_value = len(nodes_out)
if this_value > max_value:
most_accesisible_node = node_in
return most_accesisible_node
def CreateVertices(self, points):
"""
Returns a dictionary object with keys that are 2tuples
represnting a point.
"""
gr = digraph()
for z, x, Q in points:
node = (z, x, Q)
gr.add_nodes([node])
return gr
def CreateDirectedEdges(self, points, gr, layer_width):
"""
Take each key (ie. point) in the graph and for that point
create an edge to every point downstream of it where the weight
of the edge is the tuple (distance, angle)
"""
for z0, x0, Q0 in points:
for z1, x1, Q1 in points:
dz = z1 - z0 # no fabs because we check arrow direction
if dz > 0.0: # make sure arrow in right direction
if dz - layer_width < distance_threshold: # only adjacents
dx = math.fabs(x1 - x0)
if dx > 5 * bar_width:
continue
# Weights are negative to in order to use shortest path
# algorithms on the graph.
weight = -1 * math.hypot(dz, dx)
edge = ((z0, x0, Q0), (z1, x1, Q1))
gr.add_edge(edge, wt=weight)
# Ensure that it is already transitively reduced
assert len(critical.transitive_edges(gr)) == 0
return gr
def NegateGraph(self, gr):
for edge in gr.edges():
weight = gr.edge_weight(edge)
gr.set_edge_weight(edge, -1 * weight)
return gr
def ComputeLongestPath(self, gr, parent_node):
parent_node = self.FindParentNode(gr)
farthest_node = self.GetFarthestNode(gr, parent_node)
gr = self.NegateGraph(gr)
st, distance = minmax.shortest_path_bellman_ford(gr, parent_node)
gr = self.NegateGraph(gr)
max_distance = distance[farthest_node]
# Then swim back to the parent node. Record the path.
node_list = [farthest_node]
i = 0
while parent_node not in node_list:
node_list.append(st[node_list[-1]])
i += 1
if i > 10000:
raise ValueError()
# Grab doublets
node_list2 = []
for node1 in node_list:
for node2 in gr.nodes():
z1, x1, Q1 = node1
z2, x2, Q2 = node2
dx = math.fabs(x1 - x2)
if z1 == z2 and math.fabs(dx - bar_width) < distance_threshold:
node_list2.append(node2)
for node in node_list + node_list2:
if node:
gr.del_node(node)
node_list = [x for x in node_list if x is not None]
return node_list, max_distance, gr
|
nuSTORM/gnomon | gnomon/Configuration.py | fetch_config | python | def fetch_config(filename):
# This trick gets the directory of *this* file Configuration.py thus
# allowing to find the schema files relative to this file.
dir_name = get_source_dir()
# Append json
filename = os.path.join('json', filename)
fileobj = open(os.path.join(dir_name, filename), 'r')
my_dict = json.loads(fileobj.read())
return my_dict | Fetch the Configuration schema information
Finds the schema file, loads the file and reads the JSON, then converts to a dictionary that is returned | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/Configuration.py#L117-L132 | [
"def get_source_dir():\n \"\"\"Find where the truth path to the directory containing the Configuration module source code\n\n It can be useful to know the full path to the Configuration module's source code in order to try to guess where the\n data and log files are stored. It does this by inspecting the ... | """Manage how gnomon configures itself and its proceesors
This class is used within gnomon to tell various classes how to configure
themselves. Each Configuration class will generate or retrieve a JSON
file that is used afterwards by various other classes.
Any proposed configuration JSON file is compared against a configuration schema. The schema requires certain attributes
be specified in order to start gnomon. The schema checker is forgiving in that new configuration keys are allowed
without changing the schema; the schema only requires certain things be defined, it doesn't prevent you from defining
new things.
Any new configuration must inherit from ConfigurationBase.
"""
import os
import inspect
import random
import sys
import json
import validictory
# Note: Do not use the python logging library! This class gets called before
# the loggers are setup.
class ConfigurationBase(object):
"""Base class for all configuration classes
If the run number is zero, replace it with a random run number"""
def __init__(self, name, run, overload=None): # pylint: disable-msg=W0613
if run == 0:
run = random.randint(1, sys.maxint)
self.name = name
self.run = run
self.configuration_dict = None
def set_json(self, config_json):
"""Permanently set the JSON configuration
Unable to call twice."""
if self.configuration_dict is not None:
raise RuntimeError("Can only set configuration once", self.configuration_dict)
schema = fetch_config('ConfigurationSchema.json')
validictory.validate(config_json, schema)
config_json['name'] = self.name
config_json['run_number'] = self.run
config_json['src_dir'] = get_source_dir()
config_json['data_dir'] = get_data_dir()
config_json['log_dir'] = get_log_dir()
self.configuration_dict = config_json
def get_configuration_dict(self):
"""Return configuration as a dictionary
"""
return self.configuration_dict
class LocalConfiguration(ConfigurationBase):
"""Read a configuration from disk and overload if necessary
"""
def __init__(self, name, run=0, overload=None,
filename='ConfigurationDefaults.json'):
ConfigurationBase.__init__(self, name, run)
defaults = fetch_config(filename)
if overload:
for key, val in overload.iteritems():
if val is not None:
defaults[key] = val
self.set_json(defaults)
class MockConfiguration(LocalConfiguration):
"""Mock configuration for testing
This is just a copy of LocalConfiguration for now
"""
pass
def get_source_dir():
"""Find where the truth path to the directory containing the Configuration module source code
It can be useful to know the full path to the Configuration module's source code in order to try to guess where the
data and log files are stored. It does this by inspecting the current running python instance."""
# This trick gets the directory of *this* file Configuration.py thus
# allowing to find the schema files relative to this file.
return os.path.dirname(inspect.getfile(inspect.currentframe()))
def get_data_dir():
"""Find the data directory that stores geometries, cross sections, etc."""
src_dir = get_source_dir()
return os.path.join(src_dir, '../data')
def get_log_dir():
"""Find the directory used for saving log files"""
src_dir = get_source_dir()
return os.path.join(src_dir, '../log')
def populate_args(parser):
"""Add commandline arguments to parser from schema
"""
schema = fetch_config('ConfigurationSchema.json')
populate_args_level(schema, parser)
def populate_args_level(schema, parser):
"""Use a schema to populate a command line argument parser"""
for key, value in schema['properties'].iteritems():
if key == 'name':
continue
arg = '--%s' % key
desc = value['description']
if 'type' in value:
if value['type'] == 'string':
if 'enum' in value:
parser.add_argument(arg, help=desc, type=str,
choices=value['enum'])
else:
parser.add_argument(arg, help=desc, type=str)
elif value['type'] == 'number':
parser.add_argument(arg, help=desc, type=float)
elif value['type'] == 'integer':
parser.add_argument(arg, help=desc, type=int)
elif str(value['type']) == 'array':
assert value['minItems'] == value['maxItems']
if value['items']['type'] != 'number':
raise NotImplementedError("Only float arrays work")
parser.add_argument(arg, help=desc, type=float,
nargs=value['maxItems'], metavar='N')
elif value['type'] == 'object':
#group = parser.add_argument_group(key, value['description'])
#populate_args_level(value, group)
pass
DEFAULT = LocalConfiguration
GLOBAL_CONFIG = None
RUNTIME_CONFIG = {}
|
nuSTORM/gnomon | gnomon/Configuration.py | populate_args_level | python | def populate_args_level(schema, parser):
for key, value in schema['properties'].iteritems():
if key == 'name':
continue
arg = '--%s' % key
desc = value['description']
if 'type' in value:
if value['type'] == 'string':
if 'enum' in value:
parser.add_argument(arg, help=desc, type=str,
choices=value['enum'])
else:
parser.add_argument(arg, help=desc, type=str)
elif value['type'] == 'number':
parser.add_argument(arg, help=desc, type=float)
elif value['type'] == 'integer':
parser.add_argument(arg, help=desc, type=int)
elif str(value['type']) == 'array':
assert value['minItems'] == value['maxItems']
if value['items']['type'] != 'number':
raise NotImplementedError("Only float arrays work")
parser.add_argument(arg, help=desc, type=float,
nargs=value['maxItems'], metavar='N')
elif value['type'] == 'object':
#group = parser.add_argument_group(key, value['description'])
#populate_args_level(value, group)
pass | Use a schema to populate a command line argument parser | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/Configuration.py#L143-L173 | null | """Manage how gnomon configures itself and its proceesors
This class is used within gnomon to tell various classes how to configure
themselves. Each Configuration class will generate or retrieve a JSON
file that is used afterwards by various other classes.
Any proposed configuration JSON file is compared against a configuration schema. The schema requires certain attributes
be specified in order to start gnomon. The schema checker is forgiving in that new configuration keys are allowed
without changing the schema; the schema only requires certain things be defined, it doesn't prevent you from defining
new things.
Any new configuration must inherit from ConfigurationBase.
"""
import os
import inspect
import random
import sys
import json
import validictory
# Note: Do not use the python logging library! This class gets called before
# the loggers are setup.
class ConfigurationBase(object):
"""Base class for all configuration classes
If the run number is zero, replace it with a random run number"""
def __init__(self, name, run, overload=None): # pylint: disable-msg=W0613
if run == 0:
run = random.randint(1, sys.maxint)
self.name = name
self.run = run
self.configuration_dict = None
def set_json(self, config_json):
"""Permanently set the JSON configuration
Unable to call twice."""
if self.configuration_dict is not None:
raise RuntimeError("Can only set configuration once", self.configuration_dict)
schema = fetch_config('ConfigurationSchema.json')
validictory.validate(config_json, schema)
config_json['name'] = self.name
config_json['run_number'] = self.run
config_json['src_dir'] = get_source_dir()
config_json['data_dir'] = get_data_dir()
config_json['log_dir'] = get_log_dir()
self.configuration_dict = config_json
def get_configuration_dict(self):
"""Return configuration as a dictionary
"""
return self.configuration_dict
class LocalConfiguration(ConfigurationBase):
"""Read a configuration from disk and overload if necessary
"""
def __init__(self, name, run=0, overload=None,
filename='ConfigurationDefaults.json'):
ConfigurationBase.__init__(self, name, run)
defaults = fetch_config(filename)
if overload:
for key, val in overload.iteritems():
if val is not None:
defaults[key] = val
self.set_json(defaults)
class MockConfiguration(LocalConfiguration):
"""Mock configuration for testing
This is just a copy of LocalConfiguration for now
"""
pass
def get_source_dir():
"""Find where the truth path to the directory containing the Configuration module source code
It can be useful to know the full path to the Configuration module's source code in order to try to guess where the
data and log files are stored. It does this by inspecting the current running python instance."""
# This trick gets the directory of *this* file Configuration.py thus
# allowing to find the schema files relative to this file.
return os.path.dirname(inspect.getfile(inspect.currentframe()))
def get_data_dir():
"""Find the data directory that stores geometries, cross sections, etc."""
src_dir = get_source_dir()
return os.path.join(src_dir, '../data')
def get_log_dir():
"""Find the directory used for saving log files"""
src_dir = get_source_dir()
return os.path.join(src_dir, '../log')
def fetch_config(filename):
"""Fetch the Configuration schema information
Finds the schema file, loads the file and reads the JSON, then converts to a dictionary that is returned
"""
# This trick gets the directory of *this* file Configuration.py thus
# allowing to find the schema files relative to this file.
dir_name = get_source_dir()
# Append json
filename = os.path.join('json', filename)
fileobj = open(os.path.join(dir_name, filename), 'r')
my_dict = json.loads(fileobj.read())
return my_dict
def populate_args(parser):
"""Add commandline arguments to parser from schema
"""
schema = fetch_config('ConfigurationSchema.json')
populate_args_level(schema, parser)
DEFAULT = LocalConfiguration
GLOBAL_CONFIG = None
RUNTIME_CONFIG = {}
|
nuSTORM/gnomon | gnomon/Configuration.py | ConfigurationBase.set_json | python | def set_json(self, config_json):
if self.configuration_dict is not None:
raise RuntimeError("Can only set configuration once", self.configuration_dict)
schema = fetch_config('ConfigurationSchema.json')
validictory.validate(config_json, schema)
config_json['name'] = self.name
config_json['run_number'] = self.run
config_json['src_dir'] = get_source_dir()
config_json['data_dir'] = get_data_dir()
config_json['log_dir'] = get_log_dir()
self.configuration_dict = config_json | Permanently set the JSON configuration
Unable to call twice. | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/Configuration.py#L43-L60 | [
"def fetch_config(filename):\n \"\"\"Fetch the Configuration schema information\n\n Finds the schema file, loads the file and reads the JSON, then converts to a dictionary that is returned\n \"\"\"\n\n # This trick gets the directory of *this* file Configuration.py thus\n # allowing to find the sche... | class ConfigurationBase(object):
"""Base class for all configuration classes
If the run number is zero, replace it with a random run number"""
def __init__(self, name, run, overload=None): # pylint: disable-msg=W0613
if run == 0:
run = random.randint(1, sys.maxint)
self.name = name
self.run = run
self.configuration_dict = None
def get_configuration_dict(self):
"""Return configuration as a dictionary
"""
return self.configuration_dict
|
nuSTORM/gnomon | gnomon/processors/Fitter.py | CombineViews.sort_points | python | def sort_points(self, points):
new_points = []
z_lookup = {}
for z, x, Q in points:
z_lookup[z] = (z, x, Q)
z_keys = z_lookup.keys()
z_keys.sort()
for key in z_keys:
new_points.append(z_lookup[key])
return new_points | Take points (z,x,q) and sort by increasing z | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/processors/Fitter.py#L175-L188 | null | class CombineViews(Base.Processor):
"""Combine x and y views"""
def process(self, docs):
new_docs = []
for doc in docs:
clsf = doc['classification']
# Throw away if not analyzable track
if 'type' not in doc or\
doc['type'] != 'track' or\
doc['analyzable'] is False:
new_docs.append(doc)
continue
# Require extracted tracks
if 'lengths' not in clsf or\
'x' not in clsf['lengths'] or\
'y' not in clsf['lengths']:
new_docs.append(doc)
continue
tracks = doc['tracks']
lx = clsf['lengths']['x'][0]
pts_x = tracks['x'][lx]
pts_x = self.sort_points(pts_x)
ly = clsf['lengths']['y'][0]
pts_y = tracks['y'][ly]
pts_y = self.sort_points(pts_y)
new_points = []
while len(pts_y) > 0 and len(pts_x) > 0:
# While there are still points to match up
z_x, trans_x, Q_x = pts_x.pop()
z_y, trans_y, Q_y = pts_y.pop()
if math.fabs(z_x - z_y) < rc['thickness_layer']:
# If they are in the same layer
new_z = (z_x + z_y) / 2.0
new_r = math.hypot(trans_x, trans_y)
new_theta = math.atan2(trans_y, trans_x) # y first according to pydocs
new_Q = Q_x + Q_y
new_points.append((new_z, new_r, new_Q))
else:
# Otherwise, toss out most downstream point and keep the
# upstream point.
if z_x > z_y:
pts_y.append((z_y, trans_y, Q_y))
else:
pts_x.append((z_x, trans_x, Q_x))
tracks['combined'] = new_points
# Save our modified tracks
doc['tracks'] = tracks
new_docs.append(doc)
return new_docs
|
nuSTORM/gnomon | gnomon/processors/Fitter.py | VlenfPolynomialFitter.Fit | python | def Fit(self, zxq):
z, trans, Q = zip(*zxq)
assert len(trans) == len(z)
ndf = len(z) - 3
z = np.array(z)
trans = np.array(trans)
def dbexpl(t, p):
return(p[0] - p[1] * t + p[2] * t ** 2)
def residuals(p, data, t):
err = data - dbexpl(t, p)
return err
doc = {}
try:
assert ndf > 0
p0 = [1, 0, 0] # initial guesses
pbest = leastsq(residuals, p0, args=(trans, z), full_output=1)
bestparams = pbest[0]
good_of_fit = sum(pbest[2]['fvec'] ** 2)
good_of_fit = float(good_of_fit / ndf)
doc['params'] = list(bestparams)
doc['gof'] = good_of_fit
except:
doc['gof'] = 'FAIL'
doc['params'] = [0, 0, 0]
return doc | Perform a 2D fit on 2D points then return parameters
:param zxq: A list where each element is (z, transverse, charge) | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/processors/Fitter.py#L317-L353 | null | class VlenfPolynomialFitter(Base.Processor):
def __init__(self):
Base.Processor.__init__(self)
self.field = MagneticField.WandsToroidField()
def _rotate(self, x, y):
self.log.debug('Rotating using x=%s and y=%s' % (str(x), str(y)))
value = [0, 0, 0]
for i in range(3):
value[i] = x[0] * x[i] + y[0] * y[i]
value[i] /= math.hypot(x[0], y[0])
return value
def _get_last_transverse_over_list(self, zxq):
""" Get transverse coord at highest z
:param zx: A list where each element is (z, transverse, charge)
"""
z_max = None
x_of_interest = None
for z, x, q in zxq:
if z == None or z > z_max:
x_of_interest = x
return x_of_interest
def process(self, docs):
new_docs = []
for doc in docs:
# Checking state
if 'type' not in doc or\
doc['type'] != 'track' or\
doc['analyzable'] is False:
new_docs.append(doc)
continue
clsf = doc['classification']
if 'tracks' not in doc:
self.log.info('Skipping since no tracks found...')
new_docs.append(doc)
continue
if 'lengths' not in clsf:
self.log.info('Skipping no extracted tracks to fit')
new_docs.append(doc)
continue
tracks = doc['tracks']
rf = {} # raw fit
rf['gof'] = {}
for view in ['x', 'y', 'combined']:
if view == 'combined':
points = tracks[view]
else:
l = clsf['lengths'][view][0]
points = tracks[view][l]
try:
fit_doc = self.Fit(points)
except Exception, err:
self.log.exception('Error from polynomial fitter:')
# Something bad happened... mark event not analyzable
doc['analyzable'] = False
if fit_doc['gof'] != 'FAIL':
rf['gof'][view] = fit_doc['gof']
else: # fail
self.log.warning("Fit in %s failed" % view)
doc['analyzable'] = False
continue
if len(fit_doc['params']) == 3:
rf[view] = fit_doc['params']
else: # fail
self.log.error("Fit in %s failed; didn't receive params" % view)
doc['analyzable'] = False
continue
if doc['analyzable']:
# Rotate fits to bending plane
x0, x1, x2 = rf['x']
y0, y1, y2 = rf['y']
rf['u'] = self._rotate(rf['x'], rf['y'])
# Save
clsf['fit_poly'] = rf
doc['classification'] = clsf
new_docs.append(doc)
return new_docs
|
nuSTORM/gnomon | gnomon/processors/Fitter.py | VlenfPolynomialFitter._get_last_transverse_over_list | python | def _get_last_transverse_over_list(self, zxq):
z_max = None
x_of_interest = None
for z, x, q in zxq:
if z == None or z > z_max:
x_of_interest = x
return x_of_interest | Get transverse coord at highest z
:param zx: A list where each element is (z, transverse, charge) | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/processors/Fitter.py#L363-L375 | null | class VlenfPolynomialFitter(Base.Processor):
def __init__(self):
Base.Processor.__init__(self)
self.field = MagneticField.WandsToroidField()
def Fit(self, zxq):
"""Perform a 2D fit on 2D points then return parameters
:param zxq: A list where each element is (z, transverse, charge)
"""
z, trans, Q = zip(*zxq)
assert len(trans) == len(z)
ndf = len(z) - 3
z = np.array(z)
trans = np.array(trans)
def dbexpl(t, p):
return(p[0] - p[1] * t + p[2] * t ** 2)
def residuals(p, data, t):
err = data - dbexpl(t, p)
return err
doc = {}
try:
assert ndf > 0
p0 = [1, 0, 0] # initial guesses
pbest = leastsq(residuals, p0, args=(trans, z), full_output=1)
bestparams = pbest[0]
good_of_fit = sum(pbest[2]['fvec'] ** 2)
good_of_fit = float(good_of_fit / ndf)
doc['params'] = list(bestparams)
doc['gof'] = good_of_fit
except:
doc['gof'] = 'FAIL'
doc['params'] = [0, 0, 0]
return doc
def _rotate(self, x, y):
self.log.debug('Rotating using x=%s and y=%s' % (str(x), str(y)))
value = [0, 0, 0]
for i in range(3):
value[i] = x[0] * x[i] + y[0] * y[i]
value[i] /= math.hypot(x[0], y[0])
return value
def process(self, docs):
new_docs = []
for doc in docs:
# Checking state
if 'type' not in doc or\
doc['type'] != 'track' or\
doc['analyzable'] is False:
new_docs.append(doc)
continue
clsf = doc['classification']
if 'tracks' not in doc:
self.log.info('Skipping since no tracks found...')
new_docs.append(doc)
continue
if 'lengths' not in clsf:
self.log.info('Skipping no extracted tracks to fit')
new_docs.append(doc)
continue
tracks = doc['tracks']
rf = {} # raw fit
rf['gof'] = {}
for view in ['x', 'y', 'combined']:
if view == 'combined':
points = tracks[view]
else:
l = clsf['lengths'][view][0]
points = tracks[view][l]
try:
fit_doc = self.Fit(points)
except Exception, err:
self.log.exception('Error from polynomial fitter:')
# Something bad happened... mark event not analyzable
doc['analyzable'] = False
if fit_doc['gof'] != 'FAIL':
rf['gof'][view] = fit_doc['gof']
else: # fail
self.log.warning("Fit in %s failed" % view)
doc['analyzable'] = False
continue
if len(fit_doc['params']) == 3:
rf[view] = fit_doc['params']
else: # fail
self.log.error("Fit in %s failed; didn't receive params" % view)
doc['analyzable'] = False
continue
if doc['analyzable']:
# Rotate fits to bending plane
x0, x1, x2 = rf['x']
y0, y1, y2 = rf['y']
rf['u'] = self._rotate(rf['x'], rf['y'])
# Save
clsf['fit_poly'] = rf
doc['classification'] = clsf
new_docs.append(doc)
return new_docs
|
nuSTORM/gnomon | gnomon/EventAction.py | EventAction.BeginOfEventAction | python | def BeginOfEventAction(self, event):
self.log.info("Simulating event %s", event.GetEventID())
self.sd.setEventNumber(event.GetEventID()) | Save event number | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/EventAction.py#L32-L35 | null | class EventAction(G4.G4UserEventAction):
"""A Geant4 interface that subclasses G4UserEventAction and runs processors over Geant4 events"""
def __init__(self, processor_names): # pga=None):
G4.G4UserEventAction.__init__(self)
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.processors = []
for name in processor_names:
try:
my_class = processors.lookupProcessor(name)
self.processors.append(my_class())
except:
self.log.error('Failed loading processor %s' % name)
raise
# used to fetch mchits, only way given geant
self.sd = None
def setSD(self, sd):
"""Hook to the sensitive detector class
User for fetching hits from sensitive detector to pass to processor loop"""
self.sd = sd
def EndOfEventAction(self, event):
"""At the end of an event, grab sensitive detector hits then run processor loop"""
self.log.debug('Processesing simulated event %d', event.GetEventID())
docs = self.sd.getDocs()
self.sd.clearDocs()
for processor in self.processors:
docs = processor.process(docs)
if not docs:
self.log.warning('%s did not return documents in process()!',
processor.__class__.__name__)
def shutdown(self):
"""Shutdown each processor"""
for processor in self.processors:
processor.shutdown()
|
nuSTORM/gnomon | gnomon/EventAction.py | EventAction.EndOfEventAction | python | def EndOfEventAction(self, event):
self.log.debug('Processesing simulated event %d', event.GetEventID())
docs = self.sd.getDocs()
self.sd.clearDocs()
for processor in self.processors:
docs = processor.process(docs)
if not docs:
self.log.warning('%s did not return documents in process()!',
processor.__class__.__name__) | At the end of an event, grab sensitive detector hits then run processor loop | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/EventAction.py#L43-L54 | null | class EventAction(G4.G4UserEventAction):
"""A Geant4 interface that subclasses G4UserEventAction and runs processors over Geant4 events"""
def __init__(self, processor_names): # pga=None):
G4.G4UserEventAction.__init__(self)
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.processors = []
for name in processor_names:
try:
my_class = processors.lookupProcessor(name)
self.processors.append(my_class())
except:
self.log.error('Failed loading processor %s' % name)
raise
# used to fetch mchits, only way given geant
self.sd = None
def BeginOfEventAction(self, event):
"""Save event number"""
self.log.info("Simulating event %s", event.GetEventID())
self.sd.setEventNumber(event.GetEventID())
def setSD(self, sd):
"""Hook to the sensitive detector class
User for fetching hits from sensitive detector to pass to processor loop"""
self.sd = sd
def shutdown(self):
"""Shutdown each processor"""
for processor in self.processors:
processor.shutdown()
|
nuSTORM/gnomon | gnomon/processors/DataManager.py | CouchManager.setup_db | python | def setup_db(self, couch, dbname):
# Avoid race condition of two creating db
my_db = None
self.log.debug('Setting up DB: %s' % dbname)
if dbname not in couch:
self.log.info("DB doesn't exist so creating DB: %s", dbname)
try:
my_db = couch.create(dbname)
except:
self.log.critical("Race condition caught")
raise RuntimeError("Race condition caught when creating DB")
try:
auth_doc = {}
auth_doc['_id'] = '_design/auth'
auth_doc['language'] = 'javascript'
auth_doc['validate_doc_update'] = """
function(newDoc, oldDoc, userCtx) {
if (userCtx.roles.indexOf('_admin') !== -1) {
return;
} else {
throw({forbidden: 'Only admins may edit the database'});
}
}
"""
my_db.save(auth_doc)
except:
self.log.error('Could not set permissions of %s' % dbname)
else:
my_db = couch[dbname]
return my_db | Setup and configure DB | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/processors/DataManager.py#L62-L101 | null | class CouchManager(Manager):
"""Output to CouchDB
This class handles sending events to CouchDB.
"""
def __init__(self):
"""Initialize
Setup connect to Couch
"""
Manager.__init__(self)
self.server_url = self.config['couchdb']['url']
if os.getenv('COUCHDB_URL'):
value = os.getenv('COUCHDB_URL')
self.log.info('Environmental variable COUCHDB_URL: %s' % value)
self.server_url = value
self.server = couchdb.Server(self.server_url)
self.server.version()
self.my_db = self.setup_db(self.server, self.config['name'])
self.commit_threshold = self.config['couchdb']['commit_threshold']
self.docs = []
def commit(self, force=False):
"""Commit data to couchdb
Compared to threshold (unless forced) then sends data to couch
"""
self.log.debug('Bulk commit requested')
size = sys.getsizeof(self.docs)
self.log.debug('Size of docs in KB: %d', size)
if size > self.commit_threshold or force:
self.log.info('Commiting %d KB to CouchDB' % size)
self.my_db.update(self.docs)
self.docs = []
def save(self, doc):
"""Save a doc to cache
"""
self.log.debug('save()')
self.docs.append(doc)
self.commit()
def shutdown(self):
"""Shutdown and commit rest
"""
self.log.debug('shutdown()')
self.commit(force=True)
|
nuSTORM/gnomon | gnomon/processors/DataManager.py | CouchManager.commit | python | def commit(self, force=False):
self.log.debug('Bulk commit requested')
size = sys.getsizeof(self.docs)
self.log.debug('Size of docs in KB: %d', size)
if size > self.commit_threshold or force:
self.log.info('Commiting %d KB to CouchDB' % size)
self.my_db.update(self.docs)
self.docs = [] | Commit data to couchdb
Compared to threshold (unless forced) then sends data to couch | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/processors/DataManager.py#L103-L115 | null | class CouchManager(Manager):
"""Output to CouchDB
This class handles sending events to CouchDB.
"""
def __init__(self):
"""Initialize
Setup connect to Couch
"""
Manager.__init__(self)
self.server_url = self.config['couchdb']['url']
if os.getenv('COUCHDB_URL'):
value = os.getenv('COUCHDB_URL')
self.log.info('Environmental variable COUCHDB_URL: %s' % value)
self.server_url = value
self.server = couchdb.Server(self.server_url)
self.server.version()
self.my_db = self.setup_db(self.server, self.config['name'])
self.commit_threshold = self.config['couchdb']['commit_threshold']
self.docs = []
def setup_db(self, couch, dbname):
"""Setup and configure DB
"""
# Avoid race condition of two creating db
my_db = None
self.log.debug('Setting up DB: %s' % dbname)
if dbname not in couch:
self.log.info("DB doesn't exist so creating DB: %s", dbname)
try:
my_db = couch.create(dbname)
except:
self.log.critical("Race condition caught")
raise RuntimeError("Race condition caught when creating DB")
try:
auth_doc = {}
auth_doc['_id'] = '_design/auth'
auth_doc['language'] = 'javascript'
auth_doc['validate_doc_update'] = """
function(newDoc, oldDoc, userCtx) {
if (userCtx.roles.indexOf('_admin') !== -1) {
return;
} else {
throw({forbidden: 'Only admins may edit the database'});
}
}
"""
my_db.save(auth_doc)
except:
self.log.error('Could not set permissions of %s' % dbname)
else:
my_db = couch[dbname]
return my_db
def save(self, doc):
"""Save a doc to cache
"""
self.log.debug('save()')
self.docs.append(doc)
self.commit()
def shutdown(self):
"""Shutdown and commit rest
"""
self.log.debug('shutdown()')
self.commit(force=True)
|
nuSTORM/gnomon | gnomon/processors/DataManager.py | CouchManager.save | python | def save(self, doc):
self.log.debug('save()')
self.docs.append(doc)
self.commit() | Save a doc to cache | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/processors/DataManager.py#L117-L122 | [
"def commit(self, force=False):\n \"\"\"Commit data to couchdb\n\n Compared to threshold (unless forced) then sends data to couch\n \"\"\"\n self.log.debug('Bulk commit requested')\n size = sys.getsizeof(self.docs)\n\n self.log.debug('Size of docs in KB: %d', size)\n if size > self.commit_thres... | class CouchManager(Manager):
"""Output to CouchDB
This class handles sending events to CouchDB.
"""
def __init__(self):
"""Initialize
Setup connect to Couch
"""
Manager.__init__(self)
self.server_url = self.config['couchdb']['url']
if os.getenv('COUCHDB_URL'):
value = os.getenv('COUCHDB_URL')
self.log.info('Environmental variable COUCHDB_URL: %s' % value)
self.server_url = value
self.server = couchdb.Server(self.server_url)
self.server.version()
self.my_db = self.setup_db(self.server, self.config['name'])
self.commit_threshold = self.config['couchdb']['commit_threshold']
self.docs = []
def setup_db(self, couch, dbname):
"""Setup and configure DB
"""
# Avoid race condition of two creating db
my_db = None
self.log.debug('Setting up DB: %s' % dbname)
if dbname not in couch:
self.log.info("DB doesn't exist so creating DB: %s", dbname)
try:
my_db = couch.create(dbname)
except:
self.log.critical("Race condition caught")
raise RuntimeError("Race condition caught when creating DB")
try:
auth_doc = {}
auth_doc['_id'] = '_design/auth'
auth_doc['language'] = 'javascript'
auth_doc['validate_doc_update'] = """
function(newDoc, oldDoc, userCtx) {
if (userCtx.roles.indexOf('_admin') !== -1) {
return;
} else {
throw({forbidden: 'Only admins may edit the database'});
}
}
"""
my_db.save(auth_doc)
except:
self.log.error('Could not set permissions of %s' % dbname)
else:
my_db = couch[dbname]
return my_db
def commit(self, force=False):
"""Commit data to couchdb
Compared to threshold (unless forced) then sends data to couch
"""
self.log.debug('Bulk commit requested')
size = sys.getsizeof(self.docs)
self.log.debug('Size of docs in KB: %d', size)
if size > self.commit_threshold or force:
self.log.info('Commiting %d KB to CouchDB' % size)
self.my_db.update(self.docs)
self.docs = []
def shutdown(self):
"""Shutdown and commit rest
"""
self.log.debug('shutdown()')
self.commit(force=True)
|
nuSTORM/gnomon | gnomon/SensitiveDetector.py | ScintSD.getView | python | def getView(self, lv):
view = None
if str(lv.GetName())[-1] == 'X':
return 'X'
elif str(lv.GetName())[-1] == 'Y':
return 'Y'
self.log.error('Cannot determine view for %s', lv.GetName())
raise 'Cannot determine view for %s' % lv.GetName()
return view | Determine the detector view starting with a G4LogicalVolume | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/SensitiveDetector.py#L78-L88 | null | class ScintSD(G4.G4VSensitiveDetector):
"SD for scint bar"
def __init__(self):
G4.G4VSensitiveDetector.__init__(self, "Scintillator")
random.seed()
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.layers = rc['layers']
self.bars = rc['bars']
self.width = rc['width']
self.thickness_bar = rc['thickness_bar']
self.log.debug('layers: %f', self.layers)
self.log.debug('bars: %f', self.bars)
self.log.debug('width: %f', self.width)
self.log.debug('thickness_bar: %f', self.thickness_bar)
self.config = Configuration.GLOBAL_CONFIG
self.docs = []
self.event = 0
def getDocs(self):
return self.docs
def clearDocs(self):
self.docs = []
def setEventNumber(self, number):
if isinstance(number, int):
self.event = number
else:
raise TypeError('Not an int event number')
def getNumberOfBars(self):
"""Return the number of bars per view"""
return self.bars
def getNumberOfLayers(self):
"""Return the number of layers in z, where a layer is steel plus
two views"""
return self.layers
def getMCHitBarPosition(self, layer_number, bar_number, view,
position, guess_z):
doc = {}
doc['z'] = guess_z
diff = math.fabs(guess_z - position.z)
threshold = self.thickness_bar / 2 + 0.1 * G4.mm # 0.1 mm tolerance
try:
assert diff <= threshold
except:
self.log.error('Bad longitudinal position:')
self.log.error('Guess in z: %f', guess_z)
self.log.error('Position in z: %f, View: %s', position.z, view)
raise
guess_trans = bar_number
guess_trans = self.width * (guess_trans - self.bars / 2) +\
self.width / 2
if view == 'X':
trans = position.x
doc['x'] = guess_trans
doc['y'] = 0
elif view == 'Y':
trans = position.y
doc['y'] = guess_trans
doc['x'] = 0
diff = math.fabs(trans - guess_trans)
threshold = self.width / 2 + 1 * G4.mm # 0.1 mm tolerance
try:
assert diff <= threshold
except:
self.log.error('Bad transverse position:')
self.log.error('Guess in z: %f, Position in z: %f',
guess_trans, trans)
raise
return doc
def ProcessHits(self, step, rohist):
preStepPoint = step.GetPreStepPoint()
if step.GetTotalEnergyDeposit() == 0.0:
return
theTouchable = preStepPoint.GetTouchable()
copyNo = theTouchable.GetCopyNumber(0)
motherCopyNo = theTouchable.GetCopyNumber(1)
pv = preStepPoint.GetPhysicalVolume()
dedx = step.GetTotalEnergyDeposit()
lv = pv.GetMotherLogical()
position = step.GetPostStepPoint().GetPosition()
view = self.getView(lv)
doc = {}
doc['type'] = 'mchit'
doc['dedx'] = dedx
doc['position'] = {'x': position.x,
'y': position.y,
'z': position.z}
doc['bar'] = theTouchable.GetCopyNumber(0)
doc['layer'] = theTouchable.GetCopyNumber(2)
doc['view'] = view
doc['run'] = self.config['run_number']
doc['event'] = self.event
my_z = theTouchable.GetTranslation(0).z
doc['position_bar'] = self.getMCHitBarPosition(doc['layer'],
doc['bar'],
doc['view'],
position,
my_z)
self.docs.append(doc)
|
nuSTORM/gnomon | gnomon/MagneticField.py | WandsToroidField.PhenomModel | python | def PhenomModel(self, r):
if r <= 0:
raise ValueError
field = self.B0 + self.B1 * G4.m / r + self.B2 * math.exp(-1 * self.H * r / G4.m)
return field | Fit to field map
A phenomenological fit by Ryan Bayes (Glasgow) to a field map
generated by Bob Wands (FNAL). It assumes a 1 cm plate. This is dated
January 30th, 2012. Not defined for r <= 0 | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/MagneticField.py#L37-L46 | null | class WandsToroidField(G4.G4MagneticField):
"""Toroid Field from Bob Wands simulation parameterization
scale has the sign of the field. Focus signal with scale = 1, focus
background with scale = -1. Have 80% field with scale = 0.8
B0, B1, B2, and H are fit parameters.
Default values from Ryan Bayes, March 15th, 2012, talk to Valencia grp.
B0 = 1.36 # T
B1 = 0.0406 # T m
B2 = 0.8 # T
H = 0.16 # 1/m
Field to field map from Bob Wands, 1 cm plate, Jan. 30, 2012
"""
def __init__(self, scale=1.0, B0=1.53, B1=0.032, B2=0.64, H=0.28):
G4.G4MagneticField.__init__(self)
self.scale = float(scale)
self.B0 = B0
self.B1 = B1
self.B2 = B2
self.H = H
# Save field
rc['field'] = self.scale
def GetFieldValue(self, pos, time):
bfield = G4.G4ThreeVector()
# Set to zero, only change if r != 0 and sign != 0
bfield.x = 0
bfield.y = 0
bfield.z = 0.
if self.scale == 0.0:
return bfield * G4.tesla
r = math.sqrt(pos.x ** 2 + pos.y ** 2)
if r != 0.0:
B = self.scale * self.PhenomModel(r)
bfield.x = -1 * (pos.y / r) * B
bfield.y = 1 * (pos.x / r) * B
return bfield * G4.tesla
|
nuSTORM/gnomon | gnomon/DetectorConstruction.py | BoxDetectorConstruction.Construct | python | def Construct(self): # pylint: disable-msg=C0103
# Parse the GDML
self.gdml_parser.Read(self.filename)
self.world = self.gdml_parser.GetWorldVolume()
self.log.info("Materials:")
self.log.info(G4.G4Material.GetMaterialTable())
# Return pointer to world volume
return self.world | Construct a cuboid from a GDML file without sensitive detector | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/DetectorConstruction.py#L38-L48 | null | class BoxDetectorConstruction(G4.G4VUserDetectorConstruction):
"""Create a cuboid geometry of uniform material
Useful for testing particle interactions with uniform materials"""
def __init__(self, name):
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.log.debug('Initialized %s', self.__class__.__name__)
G4.G4VUserDetectorConstruction.__init__(self)
self.world = None
self.gdml_parser = G4.G4GDMLParser()
self.sensitive_detector = None
self.config = Configuration.GLOBAL_CONFIG
self.filename = os.path.join(self.config['data_dir'], name)
|
nuSTORM/gnomon | gnomon/DetectorConstruction.py | MagIronSamplingCaloDetectorConstruction.Construct | python | def Construct(self): # pylint: disable-msg=C0103
# Parse the GDML
self.world = self.gdml_parser.GetWorldVolume()
# Create sensitive detector
self.sensitive_detector = ScintSD()
# Get logical volume for X view, then attach SD
my_lv = G4.G4LogicalVolumeStore.GetInstance().GetVolumeID(1)
assert my_lv.GetName() == "ScintillatorBarX"
my_lv.SetSensitiveDetector(self.sensitive_detector)
# Get logical volume for Y view, then attach SD
my_lv = G4.G4LogicalVolumeStore.GetInstance().GetVolumeID(2)
assert my_lv.GetName() == "ScintillatorBarY"
my_lv.SetSensitiveDetector(self.sensitive_detector)
my_lv = G4.G4LogicalVolumeStore.GetInstance().GetVolumeID(0)
assert my_lv.GetName() == "SteelPlane"
# field
self.field_manager = G4.G4FieldManager()
self.my_field = MagneticField.WandsToroidField(self.field_polarity)
self.field_manager.SetDetectorField(self.my_field)
self.field_manager.CreateChordFinder(self.my_field)
my_lv.SetFieldManager(self.field_manager, False)
self.log.info("Materials:")
self.log.info(G4.G4Material.GetMaterialTable())
# Return pointer to world volume
return self.world | Construct nuSTORM from a GDML file | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/DetectorConstruction.py#L94-L126 | null | class MagIronSamplingCaloDetectorConstruction(G4.G4VUserDetectorConstruction):
"""Create a magnetized iron sampling calorimeter"""
def __init__(self, field_polarity):
self.log = logging.getLogger('root')
self.log = self.log.getChild(self.__class__.__name__)
self.log.debug('Initialized %s', self.__class__.__name__)
G4.G4VUserDetectorConstruction.__init__(self)
self.world = None
self.gdml_parser = G4.G4GDMLParser()
self.sensitive_detector = None
self.config = Configuration.GLOBAL_CONFIG
self.filename = os.path.join(self.config['data_dir'],
'iron_scint_bars.gdml')
self.field_manager = None
self.my_field = None
self.field_polarity = field_polarity
self.gdml_parser.Read(self.filename)
# Grab constants from the GDML <define>
rc['layers'] = int(self.gdml_parser.GetConstant("layers"))
rc['bars'] = int(self.gdml_parser.GetConstant("bars"))
for name in ["width", "thickness_layer", "thickness_bar",
"density_scint", "density_iron"]:
rc[name] = self.gdml_parser.GetConstant(name)
det_width = rc['width'] * rc['bars']
iron_volume = det_width * det_width * (rc['layers'] / 2 * (rc['thickness_layer'] - rc['thickness_bar']))
scint_volume = det_width * det_width * (rc['layers'] / 2 * rc['thickness_bar'])
self.mass = iron_volume * rc['density_iron'] + scint_volume * rc['density_scint']
self.mass /= 10 ** 3 # mm^2 -> cm^3, density in /cm^3 but distances in mm
self.log.info("Mass [g]: %f" % self.mass)
def __del__(self):
pass
def get_sensitive_detector(self):
"""Return the SD"""
return self.sensitive_detector
|
nuSTORM/gnomon | gnomon/processors/__init__.py | lookupProcessor | python | def lookupProcessor(name):
if name in _proc_lookup:
return _proc_lookup[name]
else:
error_string = 'If you are creating a new processor, please read the\
documentation on creating a new processor'
raise LookupError("Unknown processor %s\n%s" % (name, error_string)) | Lookup processor class object by its name | train | https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/processors/__init__.py#L39-L46 | null | """Catologue processors such that they can be imported
TODO: Cleanup by using '__all__'?
#http://stackoverflow.com/questions/1057431/loading-all-modules-in-a-folder-in-python/1057534#1057534 ??
"""
from gnomon.processors.Digitizer import VlenfSimpleDigitizer
from gnomon.processors.Fitter import EmptyTrackFromDigits
from gnomon.processors.Fitter import CombineViews
from gnomon.processors.Fitter import ExtractTracks
from gnomon.processors.Fitter import VlenfPolynomialFitter
from gnomon.processors.Fitter import ClassifyVariables
from gnomon.processors.Truth import AppendTruth
from gnomon.processors.DataManager import CouchManager
from gnomon.processors.Filter import SaveInteresting
from gnomon.processors.Filter import AppearanceCuts
from gnomon.processors.Fiducial import FiducialCuts
from gnomon.processors.CreateROOTFile import CreateROOTDigitizedHits
_processors = [VlenfSimpleDigitizer,
EmptyTrackFromDigits,
CombineViews,
ExtractTracks,
VlenfPolynomialFitter,
ClassifyVariables,
AppendTruth,
CouchManager,
AppearanceCuts,
SaveInteresting,
FiducialCuts,
CreateROOTDigitizedHits]
# Build lookup table of class names to class objects
_proc_lookup = {}
for processor in _processors:
_proc_lookup[processor.__name__] = processor
|
shawnsilva/steamwebapi | steamwebapi/api.py | _SteamWebAPI.create_request_url | python | def create_request_url(self, interface, method, version, parameters):
if 'format' in parameters:
parameters['key'] = self.apikey
else:
parameters.update({'key' : self.apikey, 'format' : self.format})
version = "v%04d" % (version)
url = "http://api.steampowered.com/%s/%s/%s/?%s" % (interface, method,
version, urlencode(parameters))
return url | Create the URL to submit to the Steam Web API
interface: Steam Web API interface containing methods.
method: The method to call.
version: The version of the method.
paramters: Parameters to supply to the method. | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L56-L72 | null | class _SteamWebAPI(object):
def __init__(self, steam_api_key=None):
if steam_api_key:
self.apikey = steam_api_key
elif not APIKEY:
print("Steam Web API key environment variable not set, and the key wasn't supplied elsewhere.")
sys.exit(1)
else:
self.apikey = APIKEY
self.format = DEFAULTFORMAT
self.language = DEFAULTLANG
def retrieve_request(self, url):
"""Open the given url and decode and return the response
url: The url to open.
"""
try:
data = urlopen(url)
except:
print("Error Retrieving Data from Steam")
sys.exit(2)
return data.read().decode('utf-8')
def return_data(self, data, format=None):
"""Format and return data appropriate to the requested API format.
data: The data retured by the api request
"""
if format is None:
format = self.format
if format == "json":
formatted_data = json.loads(data)
else:
formatted_data = data
return formatted_data
|
shawnsilva/steamwebapi | steamwebapi/api.py | _SteamWebAPI.retrieve_request | python | def retrieve_request(self, url):
try:
data = urlopen(url)
except:
print("Error Retrieving Data from Steam")
sys.exit(2)
return data.read().decode('utf-8') | Open the given url and decode and return the response
url: The url to open. | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L74-L85 | null | class _SteamWebAPI(object):
def __init__(self, steam_api_key=None):
if steam_api_key:
self.apikey = steam_api_key
elif not APIKEY:
print("Steam Web API key environment variable not set, and the key wasn't supplied elsewhere.")
sys.exit(1)
else:
self.apikey = APIKEY
self.format = DEFAULTFORMAT
self.language = DEFAULTLANG
def create_request_url(self, interface, method, version, parameters):
"""Create the URL to submit to the Steam Web API
interface: Steam Web API interface containing methods.
method: The method to call.
version: The version of the method.
paramters: Parameters to supply to the method.
"""
if 'format' in parameters:
parameters['key'] = self.apikey
else:
parameters.update({'key' : self.apikey, 'format' : self.format})
version = "v%04d" % (version)
url = "http://api.steampowered.com/%s/%s/%s/?%s" % (interface, method,
version, urlencode(parameters))
return url
def return_data(self, data, format=None):
"""Format and return data appropriate to the requested API format.
data: The data retured by the api request
"""
if format is None:
format = self.format
if format == "json":
formatted_data = json.loads(data)
else:
formatted_data = data
return formatted_data
|
shawnsilva/steamwebapi | steamwebapi/api.py | _SteamWebAPI.return_data | python | def return_data(self, data, format=None):
if format is None:
format = self.format
if format == "json":
formatted_data = json.loads(data)
else:
formatted_data = data
return formatted_data | Format and return data appropriate to the requested API format.
data: The data retured by the api request | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L87-L99 | null | class _SteamWebAPI(object):
def __init__(self, steam_api_key=None):
if steam_api_key:
self.apikey = steam_api_key
elif not APIKEY:
print("Steam Web API key environment variable not set, and the key wasn't supplied elsewhere.")
sys.exit(1)
else:
self.apikey = APIKEY
self.format = DEFAULTFORMAT
self.language = DEFAULTLANG
def create_request_url(self, interface, method, version, parameters):
"""Create the URL to submit to the Steam Web API
interface: Steam Web API interface containing methods.
method: The method to call.
version: The version of the method.
paramters: Parameters to supply to the method.
"""
if 'format' in parameters:
parameters['key'] = self.apikey
else:
parameters.update({'key' : self.apikey, 'format' : self.format})
version = "v%04d" % (version)
url = "http://api.steampowered.com/%s/%s/%s/?%s" % (interface, method,
version, urlencode(parameters))
return url
def retrieve_request(self, url):
"""Open the given url and decode and return the response
url: The url to open.
"""
try:
data = urlopen(url)
except:
print("Error Retrieving Data from Steam")
sys.exit(2)
return data.read().decode('utf-8')
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUser.get_friends_list | python | def get_friends_list(self, steamID, relationship='all', format=None):
parameters = {'steamid' : steamID, 'relationship' : relationship}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetFriendsList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the friends list of a given steam ID filtered by role.
steamID: The user ID
relationship: Type of friend to request (all, friend)
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L106-L120 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUser(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUser'
super(ISteamUser, self).__init__(**kwargs)
def get_player_bans(self, steamIDS, format=None):
"""Request the communities a steam id is banned in.
steamIDS: Comma-delimited list of SteamIDs
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerBans', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_summaries(self, steamIDS, format=None):
"""
Get summaries of steam accounts.
steamIDS: Comma-delimited list of SteamIDs (max: 100)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerSummaries', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_user_group_list(self, steamID, format=None):
"""Request a list of groups a user is subscribed to.
steamID: User ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserGroupList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def resolve_vanity_url(self, vanityURL, url_type=1, format=None):
"""Request the steam id associated with a vanity url.
vanityURL: The users vanity URL
url_type: The type of vanity URL. 1 (default): Individual profile,
2: Group, 3: Official game group
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'vanityurl' : vanityURL, "url_type" : url_type}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'ResolveVanityUrl', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUser.get_player_bans | python | def get_player_bans(self, steamIDS, format=None):
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerBans', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the communities a steam id is banned in.
steamIDS: Comma-delimited list of SteamIDs
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L122-L135 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUser(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUser'
super(ISteamUser, self).__init__(**kwargs)
def get_friends_list(self, steamID, relationship='all', format=None):
"""Request the friends list of a given steam ID filtered by role.
steamID: The user ID
relationship: Type of friend to request (all, friend)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'relationship' : relationship}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetFriendsList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_summaries(self, steamIDS, format=None):
"""
Get summaries of steam accounts.
steamIDS: Comma-delimited list of SteamIDs (max: 100)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerSummaries', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_user_group_list(self, steamID, format=None):
"""Request a list of groups a user is subscribed to.
steamID: User ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserGroupList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def resolve_vanity_url(self, vanityURL, url_type=1, format=None):
"""Request the steam id associated with a vanity url.
vanityURL: The users vanity URL
url_type: The type of vanity URL. 1 (default): Individual profile,
2: Group, 3: Official game group
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'vanityurl' : vanityURL, "url_type" : url_type}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'ResolveVanityUrl', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUser.get_user_group_list | python | def get_user_group_list(self, steamID, format=None):
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserGroupList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request a list of groups a user is subscribed to.
steamID: User ID
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L153-L166 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUser(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUser'
super(ISteamUser, self).__init__(**kwargs)
def get_friends_list(self, steamID, relationship='all', format=None):
"""Request the friends list of a given steam ID filtered by role.
steamID: The user ID
relationship: Type of friend to request (all, friend)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'relationship' : relationship}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetFriendsList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_bans(self, steamIDS, format=None):
"""Request the communities a steam id is banned in.
steamIDS: Comma-delimited list of SteamIDs
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerBans', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_summaries(self, steamIDS, format=None):
"""
Get summaries of steam accounts.
steamIDS: Comma-delimited list of SteamIDs (max: 100)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerSummaries', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def resolve_vanity_url(self, vanityURL, url_type=1, format=None):
"""Request the steam id associated with a vanity url.
vanityURL: The users vanity URL
url_type: The type of vanity URL. 1 (default): Individual profile,
2: Group, 3: Official game group
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'vanityurl' : vanityURL, "url_type" : url_type}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'ResolveVanityUrl', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUser.resolve_vanity_url | python | def resolve_vanity_url(self, vanityURL, url_type=1, format=None):
parameters = {'vanityurl' : vanityURL, "url_type" : url_type}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'ResolveVanityUrl', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the steam id associated with a vanity url.
vanityURL: The users vanity URL
url_type: The type of vanity URL. 1 (default): Individual profile,
2: Group, 3: Official game group
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L168-L183 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUser(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUser'
super(ISteamUser, self).__init__(**kwargs)
def get_friends_list(self, steamID, relationship='all', format=None):
"""Request the friends list of a given steam ID filtered by role.
steamID: The user ID
relationship: Type of friend to request (all, friend)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'relationship' : relationship}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetFriendsList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_bans(self, steamIDS, format=None):
"""Request the communities a steam id is banned in.
steamIDS: Comma-delimited list of SteamIDs
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerBans', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_summaries(self, steamIDS, format=None):
"""
Get summaries of steam accounts.
steamIDS: Comma-delimited list of SteamIDs (max: 100)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerSummaries', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_user_group_list(self, steamID, format=None):
"""Request a list of groups a user is subscribed to.
steamID: User ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserGroupList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUserStats.get_global_achievement_percentages_for_app | python | def get_global_achievement_percentages_for_app(self, gameID, format=None):
parameters = {'gameid' : gameID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetGlobalAchievementPercentagesForApp', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request statistics showing global achievements that have been
unlocked.
gameID: The id of the game.
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L190-L204 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUserStats(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUserStats'
super(ISteamUserStats, self).__init__(**kwargs)
def get_global_stats_for_game(self, appID, count, names, startdate,
enddate, format=None):
"""Request global stats for a given game.
appID: The app ID
count: Number of stats to get.
names: A list of names of stats to get.
startdate: The start time to gather stats. Unix timestamp
enddate: The end time to gather stats. Unix timestamp
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {
'appid' : appID,
'count' : count,
'startdate' : startdate,
'enddate' : enddate
}
count = 0
for name in names:
param = "name[" + str(count) + "]"
parameters[param] = name
count += 1
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetGlobalStatsForGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_number_of_current_players(self, appID, format=None):
"""Request the current number of players for a given app.
appID: The app ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetNumberOfCurrentPlayers', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_achievements(self, steamID, appID, language=None,
format=None):
"""Request the achievements for a given app and steam id.
steamID: Users steam ID
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetPlayerAchievements', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_schema_for_game(self, appID, language=None, format=None):
"""Request the available achievements and stats for a game.
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetSchemaForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_user_stats_for_game(self, steamID, appID, format=None):
"""Request the user stats for a given game.
steamID: The users ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUserStats.get_global_stats_for_game | python | def get_global_stats_for_game(self, appID, count, names, startdate,
enddate, format=None):
parameters = {
'appid' : appID,
'count' : count,
'startdate' : startdate,
'enddate' : enddate
}
count = 0
for name in names:
param = "name[" + str(count) + "]"
parameters[param] = name
count += 1
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetGlobalStatsForGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request global stats for a given game.
appID: The app ID
count: Number of stats to get.
names: A list of names of stats to get.
startdate: The start time to gather stats. Unix timestamp
enddate: The end time to gather stats. Unix timestamp
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L206-L234 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUserStats(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUserStats'
super(ISteamUserStats, self).__init__(**kwargs)
def get_global_achievement_percentages_for_app(self, gameID, format=None):
"""Request statistics showing global achievements that have been
unlocked.
gameID: The id of the game.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'gameid' : gameID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetGlobalAchievementPercentagesForApp', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_number_of_current_players(self, appID, format=None):
"""Request the current number of players for a given app.
appID: The app ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetNumberOfCurrentPlayers', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_achievements(self, steamID, appID, language=None,
format=None):
"""Request the achievements for a given app and steam id.
steamID: Users steam ID
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetPlayerAchievements', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_schema_for_game(self, appID, language=None, format=None):
"""Request the available achievements and stats for a game.
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetSchemaForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_user_stats_for_game(self, steamID, appID, format=None):
"""Request the user stats for a given game.
steamID: The users ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUserStats.get_number_of_current_players | python | def get_number_of_current_players(self, appID, format=None):
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetNumberOfCurrentPlayers', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the current number of players for a given app.
appID: The app ID
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L236-L249 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUserStats(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUserStats'
super(ISteamUserStats, self).__init__(**kwargs)
def get_global_achievement_percentages_for_app(self, gameID, format=None):
"""Request statistics showing global achievements that have been
unlocked.
gameID: The id of the game.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'gameid' : gameID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetGlobalAchievementPercentagesForApp', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_global_stats_for_game(self, appID, count, names, startdate,
enddate, format=None):
"""Request global stats for a given game.
appID: The app ID
count: Number of stats to get.
names: A list of names of stats to get.
startdate: The start time to gather stats. Unix timestamp
enddate: The end time to gather stats. Unix timestamp
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {
'appid' : appID,
'count' : count,
'startdate' : startdate,
'enddate' : enddate
}
count = 0
for name in names:
param = "name[" + str(count) + "]"
parameters[param] = name
count += 1
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetGlobalStatsForGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_achievements(self, steamID, appID, language=None,
format=None):
"""Request the achievements for a given app and steam id.
steamID: Users steam ID
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetPlayerAchievements', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_schema_for_game(self, appID, language=None, format=None):
"""Request the available achievements and stats for a game.
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetSchemaForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_user_stats_for_game(self, steamID, appID, format=None):
"""Request the user stats for a given game.
steamID: The users ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUserStats.get_player_achievements | python | def get_player_achievements(self, steamID, appID, language=None,
format=None):
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetPlayerAchievements', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the achievements for a given app and steam id.
steamID: Users steam ID
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L251-L271 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUserStats(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUserStats'
super(ISteamUserStats, self).__init__(**kwargs)
def get_global_achievement_percentages_for_app(self, gameID, format=None):
"""Request statistics showing global achievements that have been
unlocked.
gameID: The id of the game.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'gameid' : gameID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetGlobalAchievementPercentagesForApp', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_global_stats_for_game(self, appID, count, names, startdate,
enddate, format=None):
"""Request global stats for a given game.
appID: The app ID
count: Number of stats to get.
names: A list of names of stats to get.
startdate: The start time to gather stats. Unix timestamp
enddate: The end time to gather stats. Unix timestamp
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {
'appid' : appID,
'count' : count,
'startdate' : startdate,
'enddate' : enddate
}
count = 0
for name in names:
param = "name[" + str(count) + "]"
parameters[param] = name
count += 1
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetGlobalStatsForGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_number_of_current_players(self, appID, format=None):
"""Request the current number of players for a given app.
appID: The app ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetNumberOfCurrentPlayers', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_schema_for_game(self, appID, language=None, format=None):
"""Request the available achievements and stats for a game.
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetSchemaForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_user_stats_for_game(self, steamID, appID, format=None):
"""Request the user stats for a given game.
steamID: The users ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUserStats.get_schema_for_game | python | def get_schema_for_game(self, appID, language=None, format=None):
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetSchemaForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the available achievements and stats for a game.
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L273-L291 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUserStats(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUserStats'
super(ISteamUserStats, self).__init__(**kwargs)
def get_global_achievement_percentages_for_app(self, gameID, format=None):
"""Request statistics showing global achievements that have been
unlocked.
gameID: The id of the game.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'gameid' : gameID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetGlobalAchievementPercentagesForApp', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_global_stats_for_game(self, appID, count, names, startdate,
enddate, format=None):
"""Request global stats for a given game.
appID: The app ID
count: Number of stats to get.
names: A list of names of stats to get.
startdate: The start time to gather stats. Unix timestamp
enddate: The end time to gather stats. Unix timestamp
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {
'appid' : appID,
'count' : count,
'startdate' : startdate,
'enddate' : enddate
}
count = 0
for name in names:
param = "name[" + str(count) + "]"
parameters[param] = name
count += 1
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetGlobalStatsForGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_number_of_current_players(self, appID, format=None):
"""Request the current number of players for a given app.
appID: The app ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetNumberOfCurrentPlayers', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_achievements(self, steamID, appID, language=None,
format=None):
"""Request the achievements for a given app and steam id.
steamID: Users steam ID
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetPlayerAchievements', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_user_stats_for_game(self, steamID, appID, format=None):
"""Request the user stats for a given game.
steamID: The users ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUserStats.get_user_stats_for_game | python | def get_user_stats_for_game(self, steamID, appID, format=None):
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the user stats for a given game.
steamID: The users ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L293-L307 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamUserStats(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamUserStats'
super(ISteamUserStats, self).__init__(**kwargs)
def get_global_achievement_percentages_for_app(self, gameID, format=None):
"""Request statistics showing global achievements that have been
unlocked.
gameID: The id of the game.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'gameid' : gameID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetGlobalAchievementPercentagesForApp', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_global_stats_for_game(self, appID, count, names, startdate,
enddate, format=None):
"""Request global stats for a give game.
appID: The app ID
count: Number of stats to get.
names: A list of names of stats to get.
startdate: The start time to gather stats. Unix timestamp
enddate: The end time to gather stats. Unix timestamp
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {
'appid' : appID,
'count' : count,
'startdate' : startdate,
'enddate' : enddate
}
count = 0
for name in names:
param = "name[" + str(count) + "]"
parameters[param] = name
count += 1
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetGlobalStatsForGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_number_of_current_players(self, appID, format=None):
"""Request the current number of players for a given app.
appID: The app ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetNumberOfCurrentPlayers', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_player_achievements(self, steamID, appID, language=None,
format=None):
"""Request the achievements for a given app and steam id.
steamID: Users steam ID
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetPlayerAchievements', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_schema_for_game(self, appID, language=None, format=None):
"""Request the available achievements and stats for a game.
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetSchemaForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | IPlayerService.get_recently_played_games | python | def get_recently_played_games(self, steamID, count=0, format=None):
parameters = {'steamid' : steamID, 'count' : count}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetRecentlyPlayedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request a list of recently played games by a given steam id.
steamID: The users ID
count: Number of games to return. (0 is all recent games.)
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L316-L330 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class IPlayerService(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'IPlayerService'
super(IPlayerService, self).__init__(**kwargs)
# RecordOfflinePlaytime, requires auth ticket
def get_owned_games(self, steamID, include_appinfo=1,
include_played_free_games=0, appids_filter=None, format=None):
"""Request a list of games owned by a given steam id.
steamID: The users id
include_appinfo: boolean.
include_played_free_games: boolean.
appids_filter: a json encoded list of app ids.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {
'steamid' : steamID,
'include_appinfo' : include_appinfo,
'include_played_free_games' : include_played_free_games
}
if format is not None:
parameters['format'] = format
if appids_filter is not None:
parameters['appids_filter'] = appids_filter
url = self.create_request_url(self.interface, 'GetOwnedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_steam_level(self, steamID, format=None):
"""Returns the Steam Level of a user.
steamID: The users ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetSteamLevel', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_badges(self, steamID, format=None):
"""Gets badges that are owned by a specific user
steamID: The users ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetBadges', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_community_badge_progress(self, steamID, badgeID, format=None):
"""Gets all the quests needed to get the specified badge, and which are completed.
steamID: The users ID
badgeID: The badge we're asking about
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'badgeid' : badgeID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetCommunityBadgeProgress', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def is_playing_shared_game(self, steamID, appid_playing, format=None):
"""Returns valid lender SteamID if game currently played is borrowed.
steamID: The users ID
appid_playing: The game player is currently playing
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid_playing' : appid_playing}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'IsPlayingSharedGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | IPlayerService.get_owned_games | python | def get_owned_games(self, steamID, include_appinfo=1,
include_played_free_games=0, appids_filter=None, format=None):
parameters = {
'steamid' : steamID,
'include_appinfo' : include_appinfo,
'include_played_free_games' : include_played_free_games
}
if format is not None:
parameters['format'] = format
if appids_filter is not None:
parameters['appids_filter'] = appids_filter
url = self.create_request_url(self.interface, 'GetOwnedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request a list of games owned by a given steam id.
steamID: The users id
include_appinfo: boolean.
include_played_free_games: boolean.
appids_filter: a json encoded list of app ids.
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L332-L355 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class IPlayerService(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'IPlayerService'
super(IPlayerService, self).__init__(**kwargs)
# RecordOfflinePlaytime, requires auth ticket
def get_recently_played_games(self, steamID, count=0, format=None):
"""Request a list of recently played games by a given steam id.
steamID: The users ID
count: Number of games to return. (0 is all recent games.)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'count' : count}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetRecentlyPlayedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_steam_level(self, steamID, format=None):
"""Returns the Steam Level of a user.
steamID: The users ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetSteamLevel', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_badges(self, steamID, format=None):
"""Gets badges that are owned by a specific user
steamID: The users ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetBadges', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_community_badge_progress(self, steamID, badgeID, format=None):
"""Gets all the quests needed to get the specified badge, and which are completed.
steamID: The users ID
badgeID: The badge we're asking about
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'badgeid' : badgeID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetCommunityBadgeProgress', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def is_playing_shared_game(self, steamID, appid_playing, format=None):
"""Returns valid lender SteamID if game currently played is borrowed.
steamID: The users ID
appid_playing: The game player is currently playing
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid_playing' : appid_playing}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'IsPlayingSharedGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | IPlayerService.get_community_badge_progress | python | def get_community_badge_progress(self, steamID, badgeID, format=None):
parameters = {'steamid' : steamID, 'badgeid' : badgeID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetCommunityBadgeProgress', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Gets all the quests needed to get the specified badge, and which are completed.
steamID: The users ID
badgeID: The badge we're asking about
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L387-L401 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class IPlayerService(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'IPlayerService'
super(IPlayerService, self).__init__(**kwargs)
# RecordOfflinePlaytime, requires auth ticket
def get_recently_played_games(self, steamID, count=0, format=None):
"""Request a list of recently played games by a given steam id.
steamID: The users ID
count: Number of games to return. (0 is all recent games.)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'count' : count}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetRecentlyPlayedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_owned_games(self, steamID, include_appinfo=1,
include_played_free_games=0, appids_filter=None, format=None):
"""Request a list of games owned by a given steam id.
steamID: The users id
include_appinfo: boolean.
include_played_free_games: boolean.
appids_filter: a json encoded list of app ids.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {
'steamid' : steamID,
'include_appinfo' : include_appinfo,
'include_played_free_games' : include_played_free_games
}
if format is not None:
parameters['format'] = format
if appids_filter is not None:
parameters['appids_filter'] = appids_filter
url = self.create_request_url(self.interface, 'GetOwnedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_steam_level(self, steamID, format=None):
"""Returns the Steam Level of a user.
steamID: The users ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetSteamLevel', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_badges(self, steamID, format=None):
"""Gets badges that are owned by a specific user
steamID: The users ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetBadges', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def is_playing_shared_game(self, steamID, appid_playing, format=None):
"""Returns valid lender SteamID if game currently played is borrowed.
steamID: The users ID
appid_playing: The game player is currently playing
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid_playing' : appid_playing}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'IsPlayingSharedGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | IPlayerService.is_playing_shared_game | python | def is_playing_shared_game(self, steamID, appid_playing, format=None):
parameters = {'steamid' : steamID, 'appid_playing' : appid_playing}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'IsPlayingSharedGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Returns valid lender SteamID if game currently played is borrowed.
steamID: The users ID
appid_playing: The game player is currently playing
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L403-L417 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class IPlayerService(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'IPlayerService'
super(IPlayerService, self).__init__(**kwargs)
# RecordOfflinePlaytime, requires auth ticket
def get_recently_played_games(self, steamID, count=0, format=None):
"""Request a list of recently played games by a given steam id.
steamID: The users ID
count: Number of games to return. (0 is all recent games.)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'count' : count}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetRecentlyPlayedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_owned_games(self, steamID, include_appinfo=1,
include_played_free_games=0, appids_filter=None, format=None):
"""Request a list of games owned by a given steam id.
steamID: The users id
include_appinfo: boolean.
include_played_free_games: boolean.
appids_filter: a json encoded list of app ids.
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {
'steamid' : steamID,
'include_appinfo' : include_appinfo,
'include_played_free_games' : include_played_free_games
}
if format is not None:
parameters['format'] = format
if appids_filter is not None:
parameters['appids_filter'] = appids_filter
url = self.create_request_url(self.interface, 'GetOwnedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_steam_level(self, steamID, format=None):
"""Returns the Steam Level of a user.
steamID: The users ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetSteamLevel', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_badges(self, steamID, format=None):
"""Gets badges that are owned by a specific user
steamID: The users ID
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetBadges', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_community_badge_progress(self, steamID, badgeID, format=None):
"""Gets all the quests needed to get the specified badge, and which are completed.
steamID: The users ID
badgeID: The badge we're asking about
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'badgeid' : badgeID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetCommunityBadgeProgress', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | ISteamWebAPIUtil.get_server_info | python | def get_server_info(self, format=None):
parameters = {}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetServerInfo', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the Steam Web API status and time.
format: Return format. None defaults to json. (json, xml, vdf) | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L424-L436 | [
"def create_request_url(self, interface, method, version, parameters):\n \"\"\"Create the URL to submit to the Steam Web API\n\n interface: Steam Web API interface containing methods.\n method: The method to call.\n version: The version of the method.\n paramters: Parameters to supply to the method.\... | class ISteamWebAPIUtil(_SteamWebAPI):
def __init__(self,**kwargs):
self.interface = 'ISteamWebAPIUtil'
super(ISteamWebAPIUtil, self).__init__(**kwargs)
def get_supported_API_list(self, format=None):
"""Request a list of APIs that can be accessed with your APIKEY
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetSupportedAPIList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
shawnsilva/steamwebapi | steamwebapi/api.py | SteamCommunityXML.create_request_url | python | def create_request_url(self, profile_type, steamID):
regex = re.compile('^\d{17,}$')
if regex.match(steamID):
if profile_type == self.USER:
url = "http://steamcommunity.com/profiles/%s/?xml=1" % (steamID)
if profile_type == self.GROUP:
url = "http://steamcommunity.com/gid/%s/memberslistxml/?xml=1" % (steamID)
else:
if profile_type == self.USER:
url = "http://steamcommunity.com/id/%s/?xml=1" % (steamID)
if profile_type == self.GROUP:
url = "http://steamcommunity.com/groups/%s/memberslistxml/?xml=1" % (steamID)
return url | Create the url to submit to the Steam Community XML feed. | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L459-L472 | null | class SteamCommunityXML(_SteamWebAPI):
USER = 0
GROUP = 1
def __init__(self,**kwargs):
super(SteamCommunityXML, self).__init__(**kwargs)
def retrieve_request(self, url):
"""Open the given url and return the response
url: The url to open.
"""
try:
data = urlopen(url)
except:
print("Error Retrieving Data from Steam")
sys.exit(2)
return data.read()
def get_user_info(self, steamID):
"""Request the Steam Community XML feed for a specific user."""
url = self.create_request_url(self.USER, steamID)
data = self.retrieve_request(url)
return self.return_data(data, format='xml')
def get_group_info(self, steamID):
"""Request the Steam Community XML feed for a specific group."""
url = self.create_request_url(self.GROUP, steamID)
data = self.retrieve_request(url)
return self.return_data(data, format='xml')
|
shawnsilva/steamwebapi | steamwebapi/api.py | SteamCommunityXML.get_user_info | python | def get_user_info(self, steamID):
url = self.create_request_url(self.USER, steamID)
data = self.retrieve_request(url)
return self.return_data(data, format='xml') | Request the Steam Community XML feed for a specific user. | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L487-L491 | [
"def return_data(self, data, format=None):\n \"\"\"Format and return data appropriate to the requested API format.\n\n data: The data retured by the api request\n\n \"\"\"\n if format is None:\n format = self.format\n if format == \"json\":\n formatted_data = json.loads(data)\n else:... | class SteamCommunityXML(_SteamWebAPI):
USER = 0
GROUP = 1
def __init__(self,**kwargs):
super(SteamCommunityXML, self).__init__(**kwargs)
def create_request_url(self, profile_type, steamID):
"""Create the url to submit to the Steam Community XML feed."""
regex = re.compile('^\d{17,}$')
if regex.match(steamID):
if profile_type == self.USER:
url = "http://steamcommunity.com/profiles/%s/?xml=1" % (steamID)
if profile_type == self.GROUP:
url = "http://steamcommunity.com/gid/%s/memberslistxml/?xml=1" % (steamID)
else:
if profile_type == self.USER:
url = "http://steamcommunity.com/id/%s/?xml=1" % (steamID)
if profile_type == self.GROUP:
url = "http://steamcommunity.com/groups/%s/memberslistxml/?xml=1" % (steamID)
return url
def retrieve_request(self, url):
"""Open the given url and return the response
url: The url to open.
"""
try:
data = urlopen(url)
except:
print("Error Retrieving Data from Steam")
sys.exit(2)
return data.read()
def get_group_info(self, steamID):
"""Request the Steam Community XML feed for a specific group."""
url = self.create_request_url(self.GROUP, steamID)
data = self.retrieve_request(url)
return self.return_data(data, format='xml')
|
shawnsilva/steamwebapi | steamwebapi/api.py | SteamCommunityXML.get_group_info | python | def get_group_info(self, steamID):
url = self.create_request_url(self.GROUP, steamID)
data = self.retrieve_request(url)
return self.return_data(data, format='xml') | Request the Steam Community XML feed for a specific group. | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L493-L497 | [
"def return_data(self, data, format=None):\n \"\"\"Format and return data appropriate to the requested API format.\n\n data: The data retured by the api request\n\n \"\"\"\n if format is None:\n format = self.format\n if format == \"json\":\n formatted_data = json.loads(data)\n else:... | class SteamCommunityXML(_SteamWebAPI):
USER = 0
GROUP = 1
def __init__(self,**kwargs):
super(SteamCommunityXML, self).__init__(**kwargs)
def create_request_url(self, profile_type, steamID):
"""Create the url to submit to the Steam Community XML feed."""
regex = re.compile('^\d{17,}$')
if regex.match(steamID):
if profile_type == self.USER:
url = "http://steamcommunity.com/profiles/%s/?xml=1" % (steamID)
if profile_type == self.GROUP:
url = "http://steamcommunity.com/gid/%s/memberslistxml/?xml=1" % (steamID)
else:
if profile_type == self.USER:
url = "http://steamcommunity.com/id/%s/?xml=1" % (steamID)
if profile_type == self.GROUP:
url = "http://steamcommunity.com/groups/%s/memberslistxml/?xml=1" % (steamID)
return url
def retrieve_request(self, url):
"""Open the given url and return the response
url: The url to open.
"""
try:
data = urlopen(url)
except:
print("Error Retrieving Data from Steam")
sys.exit(2)
return data.read()
def get_user_info(self, steamID):
"""Request the Steam Community XML feed for a specific user."""
url = self.create_request_url(self.USER, steamID)
data = self.retrieve_request(url)
return self.return_data(data, format='xml')
|
shawnsilva/steamwebapi | steamwebapi/profiles.py | User.communityvisibilitystate | python | def communityvisibilitystate(self):
if self._communityvisibilitystate == None:
return None
elif self._communityvisibilitystate in self.VisibilityState:
return self.VisibilityState[self._communityvisibilitystate]
else:
#Invalid State
return None | Return the Visibility State of the Users Profile | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/profiles.py#L61-L69 | null | class User(object):
VisibilityState = {1 : "Private", 2 : "Friends Only", 3 : "Friends of Friends", 4 : "Users Only", 5 : "Public"}
PersonaState = {0 : "Offline", 1 : "Online", 2 : "Busy", 3 : "Away", 4 : "Snooze", 5 : "Looking to Trade", 6 : "Looking to Play"}
def __init__(self):
self.steamid = None #The user's 64 bit ID
self._communityvisibilitystate = None #An integer that describes the access setting of the profile
self.profilestate = None #If set to 1 the user has configured the profile.
self.personaname = None #User's display name.
self.lastlogoff = None #A unix timestamp of when the user was last online.
self.profileurl = None #The URL to the user's Steam Community profile.
self.avatar = None #A 32x32 image
self.avatarmedium = None #A 64x64 image
self.avatarfull = None #A 184x184 image
self._personastate = None #The user's status
#The Following may not be present
self.commentpermission = None #If present the profile allows public comments.
self.realname = None #The user's real name.
self.primaryclanid = None #The 64 bit ID of the user's primary group.
self.timecreated = None #A unix timestamp of the date the profile was created.
self.loccountrycode = None #ISO 3166 code of where the user is located.
self.locstatecode = None #Variable length code representing the state the user is located in.
self.loccityid = None #An integer ID internal to Steam representing the user's city.
self.gameid = None #If the user is in game this will be set to it's app ID as a string.
self.gameextrainfo = None #The title of the game.
self.gameserverip = None #The server URL given as an IP address and port number separated by a colon, this will not be present or set to "0.0.0.0:0" if none is available.
self.profileurlname = None
self.steamlevel = None
self.recentlyplayedgames = None
@property
@communityvisibilitystate.setter
def communityvisibilitystate(self, value):
self._communityvisibilitystate = value
@property
def personastate(self):
"""Return the Persona State of the Users Profile"""
if self._personastate == None:
return None
elif self._personastate in self.PersonaState:
return self.PersonaState[self._personastate]
else:
#Invalid State
return None
@personastate.setter
def personastate(self, value):
self._personastate = value
|
shawnsilva/steamwebapi | steamwebapi/profiles.py | User.personastate | python | def personastate(self):
if self._personastate == None:
return None
elif self._personastate in self.PersonaState:
return self.PersonaState[self._personastate]
else:
#Invalid State
return None | Return the Persona State of the Users Profile | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/profiles.py#L76-L84 | null | class User(object):
VisibilityState = {1 : "Private", 2 : "Friends Only", 3 : "Friends of Friends", 4 : "Users Only", 5 : "Public"}
PersonaState = {0 : "Offline", 1 : "Online", 2 : "Busy", 3 : "Away", 4 : "Snooze", 5 : "Looking to Trade", 6 : "Looking to Play"}
def __init__(self):
self.steamid = None #The user's 64 bit ID
self._communityvisibilitystate = None #An integer that describes the access setting of the profile
self.profilestate = None #If set to 1 the user has configured the profile.
self.personaname = None #User's display name.
self.lastlogoff = None #A unix timestamp of when the user was last online.
self.profileurl = None #The URL to the user's Steam Community profile.
self.avatar = None #A 32x32 image
self.avatarmedium = None #A 64x64 image
self.avatarfull = None #A 184x184 image
self._personastate = None #The user's status
#The Following may not be present
self.commentpermission = None #If present the profile allows public comments.
self.realname = None #The user's real name.
self.primaryclanid = None #The 64 bit ID of the user's primary group.
self.timecreated = None #A unix timestamp of the date the profile was created.
self.loccountrycode = None #ISO 3166 code of where the user is located.
self.locstatecode = None #Variable length code representing the state the user is located in.
self.loccityid = None #An integer ID internal to Steam representing the user's city.
self.gameid = None #If the user is in game this will be set to it's app ID as a string.
self.gameextrainfo = None #The title of the game.
self.gameserverip = None #The server URL given as an IP address and port number separated by a colon, this will not be present or set to "0.0.0.0:0" if none is available.
self.profileurlname = None
self.steamlevel = None
self.recentlyplayedgames = None
@property
def communityvisibilitystate(self):
"""Return the Visibility State of the Users Profile"""
if self._communityvisibilitystate == None:
return None
elif self._communityvisibilitystate in self.VisibilityState:
return self.VisibilityState[self._communityvisibilitystate]
else:
#Invalid State
return None
@communityvisibilitystate.setter
def communityvisibilitystate(self, value):
self._communityvisibilitystate = value
@property
@personastate.setter
def personastate(self, value):
self._personastate = value
|
yougov/vr.common | vr/common/balancer/nginx.py | str_to_pool | python | def str_to_pool(upstream):
name = re.search('upstream +(.*?) +{', upstream).group(1)
nodes = re.findall('server +(.*?);', upstream)
return name, nodes | Given a string containing an nginx upstream section, return the pool name
and list of nodes. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/balancer/nginx.py#L26-L33 | null | import errno
import posixpath
import re
from . import base
# Template for 'upstream' directive in nginx config.
# Template for 'upstream' directive in nginx config.
UPSTREAM_TPL = """
upstream %(name)s {
%(lines)s
}
"""


def pool_to_str(name, nodes):
    """Render an nginx ``upstream`` config block for *name* listing *nodes*."""
    server_lines = '\n'.join(' server %s;' % node for node in nodes)
    return UPSTREAM_TPL % {'name': name, 'lines': server_lines}
class NginxBalancer(base.SshBasedBalancer):
    """
    A Velociraptor balancer backend for writing nginx config on remote servers.
    Uses sftp and ssh for writing files and running reload cmds.
    """
    def __init__(self, config):
        # Remote directory holding one <pool>.conf file per pool.
        self.include_dir = config.get('include_dir',
                                      '/etc/nginx/sites-enabled/')
        # Command run on the remote host to apply config changes.
        self.reload_cmd = config.get('reload_cmd', '/etc/init.d/nginx reload')
        super(NginxBalancer, self).__init__(config)
    def _get_host_nodes(self, host, pool):
        # Return set of nodes currently configured in a given host and pool
        path = posixpath.join(self.include_dir, pool + '.conf')
        try:
            contents = self._read_file(host, path)
            poolname, nodes = str_to_pool(contents)
        except IOError as e:
            # It's OK if the file doesn't exist. But other IOErrors should be
            # raised normally.
            if e.errno == errno.ENOENT:
                nodes = []
            else:
                raise
        return set(nodes)
    def _set_host_nodes(self, host, pool, nodes):
        # Serialize the node list into an 'upstream' block and write it to
        # the pool's config file via the base class's sftp helper.
        # NOTE(review): reload_cmd is presumably invoked by the base class
        # after writes — confirm in SshBasedBalancer.
        path = posixpath.join(self.include_dir, pool + '.conf')
        contents = pool_to_str(pool, nodes)
        self._write_file(host, path, contents)
yougov/vr.common | vr/common/paths.py | get_buildfile_path | python | def get_buildfile_path(settings):
base = os.path.basename(settings.build_url)
return os.path.join(BUILDS_ROOT, base) | Path to which a build tarball should be downloaded. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/paths.py#L52-L57 | null | """
Constants and functions for determining where Velociraptor puts things at
deploy time. All of the get_* functions accept a ProcData object, which is is
built from a dict parsed out of a proc.yaml file.
"""
import os
VR_ROOT = '/apps'
BUILDS_ROOT = VR_ROOT + '/builds'
PROCS_ROOT = VR_ROOT + '/procs'
RELEASES_ROOT = VR_ROOT + '/releases'
IMAGES_ROOT = VR_ROOT + '/images'
def get_container_path(settings):
    """Return the container's root filesystem directory for this proc."""
    proc_dir = get_proc_path(settings)
    return os.path.join(proc_dir, 'rootfs')
def get_lxc_work_path(settings):
    """Return the LXC 'work' directory inside this proc's directory."""
    proc_dir = get_proc_path(settings)
    return os.path.join(proc_dir, 'work')
def get_container_name(settings):
    """
    Build the canonical container name for a proc.

    The name is the dash-joined app name, version, config name, release
    hash, proc name, and port from the given ProcData-like settings object.
    """
    parts = (
        settings.app_name,
        settings.version,
        settings.config_name,
        settings.release_hash,
        settings.proc_name,
        str(settings.port),
    )
    return '-'.join(parts)
def get_proc_path(settings):
    """Return this proc's own directory under PROCS_ROOT."""
    name = get_container_name(settings)
    return os.path.join(PROCS_ROOT, name)
def get_app_path(settings):
    """
    Path to which a build should be unpacked.

    Each proc on a host gets its own unpacked copy of the build.  That is
    the easiest way around instances possibly running as different users
    while still being able to write inside the tree (e.g. .pyc files), and
    little would be saved by sharing: Velociraptor tries hard to spread
    instances of one app across different hosts anyway.
    """
    return os.path.join(get_container_path(settings), 'app')
|
yougov/vr.common | vr/common/models.py | _retry | python | def _retry(n, f, *args, **kwargs):
'''Try to call f(*args, **kwargs) "n" times before giving up. Wait
2**n seconds before retries.'''
for i in range(n):
try:
return f(*args, **kwargs)
except Exception as exc:
if i == n - 1:
log.error(
'%s permanently failed with %r', f.__name__, exc)
raise
else:
log.warning(
'%s attempt #%d failed with %r', f.__name__, i, exc)
time.sleep(2 ** i)
raise RuntimeError('Should never get here!') | Try to call f(*args, **kwargs) "n" times before giving up. Wait
2**n seconds before retries. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/models.py#L61-L77 | null | from datetime import datetime
import collections
import copy
import functools
import getpass
import json
import logging
import operator
import os
import re
import socket
import time
try:
from collections import abc
except ImportError:
import collections as abc
from six.moves import urllib, xmlrpc_client, range
import six
import yaml
import requests
import sseclient
import utc
import contextlib2
try:
import redis
except ImportError:
# optional dependency
pass
try:
import keyring
except ImportError:
# stub out keyring
class keyring(object):
@staticmethod
def get_password(*args, **kwargs):
return None
log = logging.getLogger(__name__)
SUPERVISOR_RPC_TIMEOUT_SECS = 10
SUPERVISOR_RPC_N_RETRIES = 2
class TimeoutTransport(xmlrpc_client.Transport):
    """An xmlrpc Transport that applies a socket timeout to each connection."""
    def __init__(self, timeout=SUPERVISOR_RPC_TIMEOUT_SECS, *l, **kw):
        xmlrpc_client.Transport.__init__(self, *l, **kw)
        # Seconds allowed per connection before socket operations time out.
        self.timeout = timeout
    def make_connection(self, *args, **kwargs):
        # Let the base class build the connection, then cap its timeout.
        conn = xmlrpc_client.Transport.make_connection(self, *args, **kwargs)
        conn.timeout = self.timeout
        return conn
class Host(object):
"""
Abstraction over the per-host Supervisor xmlrpc interface. This is used in
the Velociraptor web procs, command line clients, and Supervisor event
listeners.
Should be initialized with a hostname and either an existing xmlrpc Server
connection or a port number to where the RPC service is listening.
If also initialized with a Redis connection or URL, proc info will be
cached in Redis.
Call host.get_procs() to get a list of Proc objects for all
Supervisor-managed processes on the host. Call it with check_cache=True to
allow fetching proc info from the Redis cache.
Call host.get_proc('name') to get a Proc object for the process named
'name'. Call it with check_cache=True to allow fetching proc info from the
Redis cache. If the host has no proc with that name, ProcError will be
raised.
"""
def __init__(self, name, rpc_or_port=9001, supervisor_username=None,
supervisor_password=None, redis_or_url=None,
redis_cache_prefix='host_procs', redis_cache_lifetime=600):
self.name = name
self.username = supervisor_username
self.password = supervisor_password
self._init_supervisor_rpc(rpc_or_port)
self.redis = self._init_redis(redis_or_url)
self.cache_key = ':'.join([redis_cache_prefix, name])
self.cache_lifetime = redis_cache_lifetime
def _init_supervisor_rpc(self, rpc_or_port):
'''Initialize supervisor RPC.
Allow passing in an RPC connection, or a port number for
making one.
'''
if isinstance(rpc_or_port, int):
if self.username:
leader = 'http://{self.username}:{self.password}@'
else:
leader = 'http://'
tmpl = leader + '{self.name}:{port}'
url = tmpl.format(self=self, port=rpc_or_port)
self.rpc = xmlrpc_client.ServerProxy(
url, transport=TimeoutTransport())
else:
self.rpc = rpc_or_port
self.supervisor = self.rpc.supervisor
@staticmethod
def _init_redis(redis_spec):
"""
Return a StrictRedis instance or None based on redis_spec.
redis_spec may be None, a Redis URL, or a StrictRedis instance
"""
if not redis_spec:
return
if isinstance(redis_spec, six.string_types):
return redis.StrictRedis.from_url(redis_spec)
# assume any other value is a valid instance
return redis_spec
def get_proc(self, name, check_cache=False):
if check_cache:
# Note that if self.redis is None and check_cache is True, an
# AttributeError will be raised.
cached_json = self.redis.hget(self.cache_key, name)
if cached_json:
return Proc(self, json.loads(cached_json))
else:
procs_dict = self._get_and_cache_procs()
else:
procs_dict = self._get_and_cache_procs()
if name in procs_dict:
return Proc(self, procs_dict[name])
else:
raise ProcError('host %s has no proc named %s' % (self.name, name))
def _get_and_cache_procs(self):
try:
# Retry few times before giving up
proc_list = _retry(
SUPERVISOR_RPC_N_RETRIES, self.supervisor.getAllProcessInfo)
except Exception:
log.exception("Failed to connect to %s", self)
return {}
# getAllProcessInfo returns a list of dicts. Reshape that into a dict
# of dicts, keyed by proc name.
proc_dict = {d['name']: d for d in proc_list}
if self.redis:
# Use pipeline to do hash clear, set, and expiration in
# same redis call
with self.redis.pipeline() as pipe:
# First clear all existing data in the hash
pipe.delete(self.cache_key)
# Now set all the hash values, dumped to json.
dumped = {d: json.dumps(proc_dict[d]) for d in proc_dict}
pipe.hmset(self.cache_key, dumped)
pipe.expire(self.cache_key, self.cache_lifetime)
pipe.execute()
return proc_dict
def get_procs(self, check_cache=False):
if check_cache:
unparsed = self.redis.hgetall(self.cache_key)
if unparsed:
all_data = {v: json.loads(v) for v in unparsed.values()}
else:
all_data = self._get_and_cache_procs()
else:
all_data = self._get_and_cache_procs()
return [Proc(self, all_data[d]) for d in all_data]
def shortname(self):
return self.name.split(".")[0]
def __repr__(self):
info = {
'cls': self.__class__.__name__,
'name': self.name,
}
return "<%(cls)s %(name)s>" % info
class Proc(object):
"""
A representation of a proc running on a host. Must be initted with the
hostname and a dict of data structured like the one you get back from
Supervisor's XML RPC interface.
"""
# FIXME: I'm kind of an ugly grab bag of information about a proc,
# some of it used for initial setup, and some of it the details
# returned by supervisor at runtime. In the future, I'd like to
# have just 3 main attributes:
# 1. A 'ProcData' instance holding all the info used to create the
# proc.
# 2. A 'supervisor' thing that just holds exactly what supervisor
# returns.
# 3. A 'resources' thing showing how much RAM and CPU this proc is
# using.
# The Supervisor RPC plugin in vr.agent supports returning all of this info
# in one RPC call. We should refactor this class to use that, and the
# cache to use that, and the JS frontend to use that structure too. Not a
# small job :(
def __init__(self, host, data):
self.host = host
self._data = data
# Be explicit, not magical, about which keys we expect from the data
# and which attributes we set on the object.
self.description = data['description']
self.exitstatus = data['exitstatus']
self.group = data['group']
self.logfile = data['logfile']
self.name = data['name']
# When a timestamp field is inapplicable, Supevisor will put a 0 there
# instead of a real unix timestamp.
self.now = utc.fromtimestamp(data['now']) if data['now'] else None
self.pid = data['pid']
self.spawnerr = data['spawnerr']
self.start_time = utc.fromtimestamp(data['start']) \
if data['start'] else None
self.state = data['state']
self.statename = data['statename']
self.stderr_logfile = data['stderr_logfile']
self.stdout_logfile = data['stdout_logfile']
self.stop_time = utc.fromtimestamp(data['stop']) \
if data['stop'] else None
# The names returned from Supervisor have a bunch of metadata encoded
# in them (at least until we can get a Supervisor RPC plugin to return
# it). Parse that out and set attributes.
for k, v in self.parse_name(self.name).items():
setattr(self, k, v)
# We also set some convenience attributes for JS/CSS. It would be nice
# to set those in the JS layer, but that takes some hacking on
# Backbone.
self.jsname = self.name.replace('.', 'dot')
self.id = '%s-%s' % (self.host.name, self.name)
@property
def hostname(self):
return self.host.name
@property
def settings(self):
settings = self.host.rpc.vr.get_velociraptor_info(self.name)
if not settings:
return None
return ProcData(settings)
@staticmethod
def parse_name(name):
try:
app_name, version, config_name, rel_hash, proc_name, port = \
name.split('-')
return {
'app_name': app_name,
'version': version,
'config_name': config_name,
'hash': rel_hash,
'proc_name': proc_name,
'port': int(port)
}
except ValueError:
return {
'app_name': name,
'version': 'UNKNOWN',
'config_name': 'UNKNOWN',
'hash': 'UNKNOWN',
'proc_name': name,
'port': 0
}
@classmethod
def name_to_shortname(cls, name):
"""
In Celery tasks you often have a proc name, and want to send events
including the proc's shortname, but you don't want to do a XML RPC call
to get a full dict of data just for that.
"""
return '%(app_name)s-%(version)s-%(proc_name)s' % Proc.parse_name(name)
def __repr__(self):
return "<Proc %s>" % self.name
def shortname(self):
return '%s-%s-%s' % (self.app_name, self.version, self.proc_name)
def as_node(self):
"""
Return host:port, as needed by the balancer interface.
"""
return '%s:%s' % (self.host.name, self.port)
def as_dict(self):
data = {}
for k, v in self.__dict__.items():
if isinstance(v, six.string_types + (int,)):
data[k] = v
elif isinstance(v, datetime):
data[k] = v.isoformat()
elif v is None:
data[k] = v
data['host'] = self.host.name
return data
def as_json(self):
return json.dumps(self.as_dict())
def start(self):
try:
self.host.supervisor.startProcess(self.name)
except xmlrpc_client.Fault as f:
if f.faultString == 'ALREADY_STARTED':
log.warning("Process %s already started", self.name)
else:
log.exception("Failed to start %s", self.name)
raise
except Exception:
log.exception("Failed to start %s", self.name)
raise
def stop(self):
try:
self.host.supervisor.stopProcess(self.name)
except xmlrpc_client.Fault as f:
if f.faultString == 'NOT_RUNNING':
log.warning("Process %s not running", self.name)
else:
log.exception("Failed to stop %s", self.name)
raise
except Exception:
log.exception("Failed to stop %s", self.name)
raise
def restart(self):
self.stop()
self.start()
class ProcError(Exception):
    """Raised when a requested proc does not exist on a host."""
class ConfigData(object):
    """
    Base class for objects whose attributes are populated from a dict.

    Subclasses declare ``_required`` and ``_optional`` lists of attribute
    names.  Required keys must be present in the dict passed to ``__init__``
    (KeyError otherwise); optional keys default to None when absent.
    """

    def __init__(self, dct):
        # Missing required keys raise KeyError, by design.
        for name in self._required:
            setattr(self, name, dct[name])
        # Missing optional keys become None.
        for name in self._optional:
            setattr(self, name, dct.get(name))

    def as_yaml(self):
        """Return the attributes serialized as block-style YAML."""
        return yaml.safe_dump(self.as_dict(), default_flow_style=False)

    def as_dict(self):
        """Return a plain dict of all declared attributes."""
        return {name: getattr(self, name)
                for name in self._required + self._optional}
class ProcData(ConfigData):
"""
An object with all the attributes you need to set up a proc on a host.
"""
_required = [
'app_name',
'app_repo_url',
'app_repo_type',
'buildpack_url',
'buildpack_version',
'config_name',
'env',
'host',
'port',
'version',
'release_hash',
'settings',
'user',
'proc_name',
]
_optional = [
'app_folder',
'build_url',
'group',
'cmd',
'image_url',
'image_name',
'image_md5',
'build_md5',
'volumes',
'mem_limit',
'memsw_limit',
]
# for compatibility, don't require any config yet
_optional += _required
_optional.sort()
del _required[:]
def __init__(self, dct):
super(ProcData, self).__init__(dct)
if self.proc_name is None and 'proc' in dct:
# Work around earlier versions of proc.yaml that used a different
# key for proc_name
setattr(self, 'proc_name', dct['proc'])
# One of proc_name or cmd must be provided.
if self.proc_name is None and self.cmd is None:
raise ValueError('Must provide either proc_name or cmd')
Credential = collections.namedtuple('Credential', 'username password')
class HashableDict(dict):
    """A dict that can live in sets / be a dict key; hashes its sorted items."""

    def __hash__(self):
        pairs = sorted(self.items())
        return hash(tuple(pairs))
class Filter(six.text_type):
    """
    A regular expression (as a text subclass) indicating which items to
    include.  Items whose key matches any pattern in ``exclusions`` are
    rejected even if they match this pattern.

    Subclasses override ``getter`` (e.g. with ``operator.attrgetter``) to
    extract the string to match from each item.
    """

    exclusions = []
    "additional patterns to exclude"

    @staticmethod
    def getter(item):
        # Default: the item itself is the string to match.  Must be a
        # staticmethod: as a plain function, ``self.getter(item)`` bound
        # ``self`` as the first argument and raised TypeError whenever the
        # base class was used directly.  (Subclasses were unaffected only
        # because attrgetter/itemgetter instances are not descriptors.)
        return item

    def matches(self, items):
        """Return an iterator over the items this filter accepts."""
        return filter(self.match, items)

    def match(self, item):
        """Truthy if the item's key matches this pattern and no exclusion."""
        value = self.getter(item)
        return (
            not any(
                re.search(exclude, value, re.I)
                for exclude in self.exclusions
            )
            and re.match(self, value)
        )
class SwarmFilter(Filter):
    # Filter swarms by matching against each swarm's ``name`` attribute.
    getter = operator.attrgetter('name')
class ProcHostFilter(Filter):
    # Filter proc dicts by matching against each dict's 'host' entry.
    getter = operator.itemgetter('host')
class QueryResult(abc.Iterable):
def __init__(self, vr, url, params):
self.vr = vr
self.sess = vr.session
self.url = url
self.params = params
self._doc = None
self._index = 0
def __iter__(self):
return self
def load(self, next=None):
url = self.url
params = self.params or {}
if next:
next_url = urllib.parse.urlparse(next)
# See what query string args we have and update our
# current params
if next_url.query:
params.update(dict(urllib.parse.parse_qs(next_url.query)))
# Be sure we have a trailing slash to avoid redirects
if not next.endswith('/'):
next += '/'
url = self.vr._build_url(next_url.path)
resp = self.sess.get(url, params=params)
resp.raise_for_status()
return resp.json()
def __next__(self):
if not self._doc:
self._doc = self.load()
objects = self._doc['objects']
meta = self._doc['meta']
if self._index >= len(objects):
# We reached the end of the objects in the list. Let's see
# if there are more.
if meta.get('next'):
self._doc = self.load(meta['next'])
self._index = 0
return next(self)
raise StopIteration()
result = objects[self._index]
self._index += 1
return result
if six.PY2:
next = __next__
class Velociraptor(object):
"""
A Velociraptor 2 HTTP API service
"""
def __init__(self, base=None, username=None):
self.base = base or self._get_base()
self.username = username
self.session.auth = self.get_credentials()
@staticmethod
def _get_base():
"""
if 'deploy' resolves in this environment, use the hostname for which
that name resolves.
Override with 'VELOCIRAPTOR_URL'
"""
try:
name, _aliaslist, _addresslist = socket.gethostbyname_ex('deploy')
except socket.gaierror:
name = 'deploy'
fallback = 'https://{name}/'.format(name=name)
return os.environ.get('VELOCIRAPTOR_URL', fallback)
def hostname(self):
return urllib.parse.urlparse(self.base).hostname
session = requests.session()
session.headers = {
'Content-Type': 'application/json',
}
def get_credentials(self):
return self._get_credentials_env() or self._get_credentials_local()
def _get_credentials_local(self):
username = self.username or getpass.getuser()
hostname = self.hostname()
_, _, default_domain = hostname.partition('.')
auth_domain = os.environ.get(
'VELOCIRAPTOR_AUTH_DOMAIN',
default_domain
)
password = keyring.get_password(auth_domain, username)
if password is None:
prompt_tmpl = "{username}@{hostname}'s password: "
prompt = prompt_tmpl.format(**vars())
password = getpass.getpass(prompt)
return Credential(username, password)
def _get_credentials_env(self):
with contextlib2.suppress(KeyError):
return Credential(
os.environ['VELOCIRAPTOR_USERNAME'],
os.environ['VELOCIRAPTOR_PASSWORD'],
)
def load(self, path):
url = self._build_url(path)
url += '?format=json&limit=9999'
resp = self.session.get(url)
resp.raise_for_status()
return resp.json()
def query(self, path, query):
url = self._build_url(path)
return QueryResult(self, url, params=query)
def cut(self, build, **kwargs):
"""
Cut a release
"""
raise NotImplementedError("Can't cut releases (config?)")
def _build_url(self, *parts):
joiner = urllib.parse.urljoin
return functools.reduce(joiner, parts, self.base)
def events(self):
url = self._build_url('api/streams/events/')
messages = sseclient.SSEClient(url, auth=self.session.auth)
for msg in messages:
yield json.loads(msg.data)
class BaseResource(object):
def __init__(self, vr, obj=None):
self._vr = vr
self.__dict__.update(obj or {})
def create(self):
doc = copy.deepcopy(self.__dict__)
doc.pop('_vr')
url = self._vr._build_url(self.base)
resp = self._vr.session.post(url, json.dumps(doc))
if not resp.ok:
print(resp.headers)
try:
doc = resp.json()
if 'traceback' in doc:
print(doc['traceback'])
else:
print(doc)
except Exception:
print(resp.content)
resp.raise_for_status()
self.load(resp.headers['location'])
return resp.headers['location']
def load(self, url):
url = self._vr._build_url(self.base, url)
resp = self._vr.session.get(url)
resp.raise_for_status()
self.__dict__.update(resp.json())
def save(self):
url = self._vr._build_url(self.resource_uri)
content = copy.deepcopy(self.__dict__)
content.pop('_vr')
resp = self._vr.session.put(url, json.dumps(content))
resp.raise_for_status()
return resp
@classmethod
def load_all(cls, vr, params=None):
"""
Create instances of all objects found
"""
ob_docs = vr.query(cls.base, params)
return [cls(vr, ob) for ob in ob_docs]
@classmethod
def by_id(cls, vr, id):
resp = vr.session.get(vr._build_url(cls.base,
'{}/'.format(id)))
resp.raise_for_status()
return cls(vr, resp.json())
class Swarm(BaseResource):
"""
A VR Swarm
"""
base = '/api/v1/swarms/'
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
@property
def name(self):
return '-'.join([self.app_name, self.config_name, self.proc_name])
def __repr__(self):
return self.name
@classmethod
def by_name(cls, vr, swarm_name):
app_name, config_name, proc_name = swarm_name.split('-')
docs = list(vr.query(cls.base, {
'app__name': app_name,
'config_name': config_name,
'proc_name': proc_name,
}))
assert len(docs) == 1, 'Found too many swarms: {}'.format(len(docs))
return cls(vr, docs[0])
def dispatch(self, **changes):
"""
Patch the swarm with changes and then trigger the swarm.
"""
self.patch(**changes)
trigger_url = self._vr._build_url(self.resource_uri, 'swarm/')
resp = self._vr.session.post(trigger_url)
resp.raise_for_status()
try:
return resp.json()
except ValueError:
return None
def patch(self, **changes):
if not changes:
return
url = self._vr._build_url(self.resource_uri)
resp = self._vr.session.patch(url, json.dumps(changes))
resp.raise_for_status()
self.__dict__.update(changes)
@property
def app(self):
return self.app_name
@property
def recipe(self):
return self.config_name
def new_build(self):
return Build._for_app_and_tag(
self._vr,
self.app,
self.version,
)
class Build(BaseResource):
base = '/api/v1/builds/'
@property
def created(self):
return 'id' in vars(self)
def assemble(self):
"""
Assemble a build
"""
if not self.created:
self.create()
# trigger the build
url = self._vr._build_url(self.resource_uri, 'build/')
resp = self._vr.session.post(url)
resp.raise_for_status()
@classmethod
def _for_app_and_tag(cls, vr, app, tag):
obj = dict(app=App.base + app + '/', tag=tag)
return cls(vr, obj)
def __hash__(self):
hd = HashableDict(self.__dict__)
hd.pop('_vr')
return hash(hd)
def __eq__(self, other):
return vars(self) == vars(other)
class App(BaseResource):
base = '/api/v1/apps/'
class Buildpack(BaseResource):
base = '/api/v1/buildpacks/'
class Squad(BaseResource):
base = '/api/v1/squads/'
class Release(BaseResource):
base = '/api/v1/releases/'
def deploy(self, host, port, proc, config_name):
url = self._vr._build_url(self.resource_uri, 'deploy/')
data = dict(host=host, port=port, proc=proc, config_name=config_name)
resp = self._vr.session.post(url, data=json.dumps(data))
resp.raise_for_status()
def parsed_config(self):
return yaml.safe_load(self.config_yaml)
class Ingredient(BaseResource):
base = '/api/v1/ingredients/'
@property
def friendly_name(self):
return '{} ({})'.format(self.name, self.id)
def __repr__(self):
return self.friendly_name
@classmethod
def by_name(cls, vr, ingredient_name):
docs = list(vr.query(cls.base, {
'name': ingredient_name,
}))
assert len(docs) == 1, 'Found wrong number of ingredients: {}'.format(
len(docs))
return cls(vr, docs[0])
|
yougov/vr.common | vr/common/models.py | Host._init_supervisor_rpc | python | def _init_supervisor_rpc(self, rpc_or_port):
'''Initialize supervisor RPC.
Allow passing in an RPC connection, or a port number for
making one.
'''
if isinstance(rpc_or_port, int):
if self.username:
leader = 'http://{self.username}:{self.password}@'
else:
leader = 'http://'
tmpl = leader + '{self.name}:{port}'
url = tmpl.format(self=self, port=rpc_or_port)
self.rpc = xmlrpc_client.ServerProxy(
url, transport=TimeoutTransport())
else:
self.rpc = rpc_or_port
self.supervisor = self.rpc.supervisor | Initialize supervisor RPC.
Allow passing in an RPC connection, or a port number for
making one. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/models.py#L113-L131 | null | class Host(object):
"""
Abstraction over the per-host Supervisor xmlrpc interface. This is used in
the Velociraptor web procs, command line clients, and Supervisor event
listeners.
Should be initialized with a hostname and either an existing xmlrpc Server
connection or a port number to where the RPC service is listening.
If also initialized with a Redis connection or URL, proc info will be
cached in Redis.
Call host.get_procs() to get a list of Proc objects for all
Supervisor-managed processes on the host. Call it with check_cache=True to
allow fetching proc info from the Redis cache.
Call host.get_proc('name') to get a Proc object for the process named
'name'. Call it with check_cache=True to allow fetching proc info from the
Redis cache. If the host has no proc with that name, ProcError will be
raised.
"""
def __init__(self, name, rpc_or_port=9001, supervisor_username=None,
supervisor_password=None, redis_or_url=None,
redis_cache_prefix='host_procs', redis_cache_lifetime=600):
self.name = name
self.username = supervisor_username
self.password = supervisor_password
self._init_supervisor_rpc(rpc_or_port)
self.redis = self._init_redis(redis_or_url)
self.cache_key = ':'.join([redis_cache_prefix, name])
self.cache_lifetime = redis_cache_lifetime
@staticmethod
def _init_redis(redis_spec):
"""
Return a StrictRedis instance or None based on redis_spec.
redis_spec may be None, a Redis URL, or a StrictRedis instance
"""
if not redis_spec:
return
if isinstance(redis_spec, six.string_types):
return redis.StrictRedis.from_url(redis_spec)
# assume any other value is a valid instance
return redis_spec
def get_proc(self, name, check_cache=False):
if check_cache:
# Note that if self.redis is None and check_cache is True, an
# AttributeError will be raised.
cached_json = self.redis.hget(self.cache_key, name)
if cached_json:
return Proc(self, json.loads(cached_json))
else:
procs_dict = self._get_and_cache_procs()
else:
procs_dict = self._get_and_cache_procs()
if name in procs_dict:
return Proc(self, procs_dict[name])
else:
raise ProcError('host %s has no proc named %s' % (self.name, name))
def _get_and_cache_procs(self):
try:
# Retry few times before giving up
proc_list = _retry(
SUPERVISOR_RPC_N_RETRIES, self.supervisor.getAllProcessInfo)
except Exception:
log.exception("Failed to connect to %s", self)
return {}
# getAllProcessInfo returns a list of dicts. Reshape that into a dict
# of dicts, keyed by proc name.
proc_dict = {d['name']: d for d in proc_list}
if self.redis:
# Use pipeline to do hash clear, set, and expiration in
# same redis call
with self.redis.pipeline() as pipe:
# First clear all existing data in the hash
pipe.delete(self.cache_key)
# Now set all the hash values, dumped to json.
dumped = {d: json.dumps(proc_dict[d]) for d in proc_dict}
pipe.hmset(self.cache_key, dumped)
pipe.expire(self.cache_key, self.cache_lifetime)
pipe.execute()
return proc_dict
def get_procs(self, check_cache=False):
if check_cache:
unparsed = self.redis.hgetall(self.cache_key)
if unparsed:
all_data = {v: json.loads(v) for v in unparsed.values()}
else:
all_data = self._get_and_cache_procs()
else:
all_data = self._get_and_cache_procs()
return [Proc(self, all_data[d]) for d in all_data]
def shortname(self):
return self.name.split(".")[0]
def __repr__(self):
info = {
'cls': self.__class__.__name__,
'name': self.name,
}
return "<%(cls)s %(name)s>" % info
|
yougov/vr.common | vr/common/models.py | Host._init_redis | python | def _init_redis(redis_spec):
if not redis_spec:
return
if isinstance(redis_spec, six.string_types):
return redis.StrictRedis.from_url(redis_spec)
# assume any other value is a valid instance
return redis_spec | Return a StrictRedis instance or None based on redis_spec.
redis_spec may be None, a Redis URL, or a StrictRedis instance | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/models.py#L134-L145 | null | class Host(object):
"""
Abstraction over the per-host Supervisor xmlrpc interface. This is used in
the Velociraptor web procs, command line clients, and Supervisor event
listeners.
Should be initialized with a hostname and either an existing xmlrpc Server
connection or a port number to where the RPC service is listening.
If also initialized with a Redis connection or URL, proc info will be
cached in Redis.
Call host.get_procs() to get a list of Proc objects for all
Supervisor-managed processes on the host. Call it with check_cache=True to
allow fetching proc info from the Redis cache.
Call host.get_proc('name') to get a Proc object for the process named
'name'. Call it with check_cache=True to allow fetching proc info from the
Redis cache. If the host has no proc with that name, ProcError will be
raised.
"""
def __init__(self, name, rpc_or_port=9001, supervisor_username=None,
supervisor_password=None, redis_or_url=None,
redis_cache_prefix='host_procs', redis_cache_lifetime=600):
self.name = name
self.username = supervisor_username
self.password = supervisor_password
self._init_supervisor_rpc(rpc_or_port)
self.redis = self._init_redis(redis_or_url)
self.cache_key = ':'.join([redis_cache_prefix, name])
self.cache_lifetime = redis_cache_lifetime
def _init_supervisor_rpc(self, rpc_or_port):
'''Initialize supervisor RPC.
Allow passing in an RPC connection, or a port number for
making one.
'''
if isinstance(rpc_or_port, int):
if self.username:
leader = 'http://{self.username}:{self.password}@'
else:
leader = 'http://'
tmpl = leader + '{self.name}:{port}'
url = tmpl.format(self=self, port=rpc_or_port)
self.rpc = xmlrpc_client.ServerProxy(
url, transport=TimeoutTransport())
else:
self.rpc = rpc_or_port
self.supervisor = self.rpc.supervisor
@staticmethod
def get_proc(self, name, check_cache=False):
if check_cache:
# Note that if self.redis is None and check_cache is True, an
# AttributeError will be raised.
cached_json = self.redis.hget(self.cache_key, name)
if cached_json:
return Proc(self, json.loads(cached_json))
else:
procs_dict = self._get_and_cache_procs()
else:
procs_dict = self._get_and_cache_procs()
if name in procs_dict:
return Proc(self, procs_dict[name])
else:
raise ProcError('host %s has no proc named %s' % (self.name, name))
def _get_and_cache_procs(self):
try:
# Retry few times before giving up
proc_list = _retry(
SUPERVISOR_RPC_N_RETRIES, self.supervisor.getAllProcessInfo)
except Exception:
log.exception("Failed to connect to %s", self)
return {}
# getAllProcessInfo returns a list of dicts. Reshape that into a dict
# of dicts, keyed by proc name.
proc_dict = {d['name']: d for d in proc_list}
if self.redis:
# Use pipeline to do hash clear, set, and expiration in
# same redis call
with self.redis.pipeline() as pipe:
# First clear all existing data in the hash
pipe.delete(self.cache_key)
# Now set all the hash values, dumped to json.
dumped = {d: json.dumps(proc_dict[d]) for d in proc_dict}
pipe.hmset(self.cache_key, dumped)
pipe.expire(self.cache_key, self.cache_lifetime)
pipe.execute()
return proc_dict
def get_procs(self, check_cache=False):
if check_cache:
unparsed = self.redis.hgetall(self.cache_key)
if unparsed:
all_data = {v: json.loads(v) for v in unparsed.values()}
else:
all_data = self._get_and_cache_procs()
else:
all_data = self._get_and_cache_procs()
return [Proc(self, all_data[d]) for d in all_data]
def shortname(self):
return self.name.split(".")[0]
def __repr__(self):
info = {
'cls': self.__class__.__name__,
'name': self.name,
}
return "<%(cls)s %(name)s>" % info
|
yougov/vr.common | vr/common/models.py | Velociraptor._get_base | python | def _get_base():
try:
name, _aliaslist, _addresslist = socket.gethostbyname_ex('deploy')
except socket.gaierror:
name = 'deploy'
fallback = 'https://{name}/'.format(name=name)
return os.environ.get('VELOCIRAPTOR_URL', fallback) | if 'deploy' resolves in this environment, use the hostname for which
that name resolves.
Override with 'VELOCIRAPTOR_URL' | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/models.py#L574-L585 | null | class Velociraptor(object):
"""
A Velociraptor 2 HTTP API service
"""
def __init__(self, base=None, username=None):
self.base = base or self._get_base()
self.username = username
self.session.auth = self.get_credentials()
@staticmethod
def hostname(self):
return urllib.parse.urlparse(self.base).hostname
session = requests.session()
session.headers = {
'Content-Type': 'application/json',
}
def get_credentials(self):
return self._get_credentials_env() or self._get_credentials_local()
def _get_credentials_local(self):
username = self.username or getpass.getuser()
hostname = self.hostname()
_, _, default_domain = hostname.partition('.')
auth_domain = os.environ.get(
'VELOCIRAPTOR_AUTH_DOMAIN',
default_domain
)
password = keyring.get_password(auth_domain, username)
if password is None:
prompt_tmpl = "{username}@{hostname}'s password: "
prompt = prompt_tmpl.format(**vars())
password = getpass.getpass(prompt)
return Credential(username, password)
def _get_credentials_env(self):
with contextlib2.suppress(KeyError):
return Credential(
os.environ['VELOCIRAPTOR_USERNAME'],
os.environ['VELOCIRAPTOR_PASSWORD'],
)
def load(self, path):
url = self._build_url(path)
url += '?format=json&limit=9999'
resp = self.session.get(url)
resp.raise_for_status()
return resp.json()
def query(self, path, query):
url = self._build_url(path)
return QueryResult(self, url, params=query)
def cut(self, build, **kwargs):
"""
Cut a release
"""
raise NotImplementedError("Can't cut releases (config?)")
def _build_url(self, *parts):
joiner = urllib.parse.urljoin
return functools.reduce(joiner, parts, self.base)
def events(self):
url = self._build_url('api/streams/events/')
messages = sseclient.SSEClient(url, auth=self.session.auth)
for msg in messages:
yield json.loads(msg.data)
|
yougov/vr.common | vr/common/models.py | BaseResource.load_all | python | def load_all(cls, vr, params=None):
ob_docs = vr.query(cls.base, params)
return [cls(vr, ob) for ob in ob_docs] | Create instances of all objects found | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/models.py#L689-L694 | null | class BaseResource(object):
def __init__(self, vr, obj=None):
self._vr = vr
self.__dict__.update(obj or {})
def create(self):
doc = copy.deepcopy(self.__dict__)
doc.pop('_vr')
url = self._vr._build_url(self.base)
resp = self._vr.session.post(url, json.dumps(doc))
if not resp.ok:
print(resp.headers)
try:
doc = resp.json()
if 'traceback' in doc:
print(doc['traceback'])
else:
print(doc)
except Exception:
print(resp.content)
resp.raise_for_status()
self.load(resp.headers['location'])
return resp.headers['location']
def load(self, url):
url = self._vr._build_url(self.base, url)
resp = self._vr.session.get(url)
resp.raise_for_status()
self.__dict__.update(resp.json())
def save(self):
url = self._vr._build_url(self.resource_uri)
content = copy.deepcopy(self.__dict__)
content.pop('_vr')
resp = self._vr.session.put(url, json.dumps(content))
resp.raise_for_status()
return resp
@classmethod
@classmethod
def by_id(cls, vr, id):
resp = vr.session.get(vr._build_url(cls.base,
'{}/'.format(id)))
resp.raise_for_status()
return cls(vr, resp.json())
|
yougov/vr.common | vr/common/models.py | Swarm.dispatch | python | def dispatch(self, **changes):
self.patch(**changes)
trigger_url = self._vr._build_url(self.resource_uri, 'swarm/')
resp = self._vr.session.post(trigger_url)
resp.raise_for_status()
try:
return resp.json()
except ValueError:
return None | Patch the swarm with changes and then trigger the swarm. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/models.py#L738-L749 | [
"def patch(self, **changes):\n if not changes:\n return\n url = self._vr._build_url(self.resource_uri)\n resp = self._vr.session.patch(url, json.dumps(changes))\n resp.raise_for_status()\n self.__dict__.update(changes)\n"
] | class Swarm(BaseResource):
"""
A VR Swarm
"""
base = '/api/v1/swarms/'
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
@property
def name(self):
return '-'.join([self.app_name, self.config_name, self.proc_name])
def __repr__(self):
return self.name
@classmethod
def by_name(cls, vr, swarm_name):
app_name, config_name, proc_name = swarm_name.split('-')
docs = list(vr.query(cls.base, {
'app__name': app_name,
'config_name': config_name,
'proc_name': proc_name,
}))
assert len(docs) == 1, 'Found too many swarms: {}'.format(len(docs))
return cls(vr, docs[0])
def patch(self, **changes):
if not changes:
return
url = self._vr._build_url(self.resource_uri)
resp = self._vr.session.patch(url, json.dumps(changes))
resp.raise_for_status()
self.__dict__.update(changes)
@property
def app(self):
return self.app_name
@property
def recipe(self):
return self.config_name
def new_build(self):
return Build._for_app_and_tag(
self._vr,
self.app,
self.version,
)
|
yougov/vr.common | vr/common/models.py | Build.assemble | python | def assemble(self):
if not self.created:
self.create()
# trigger the build
url = self._vr._build_url(self.resource_uri, 'build/')
resp = self._vr.session.post(url)
resp.raise_for_status() | Assemble a build | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/models.py#L782-L791 | [
"def create(self):\n doc = copy.deepcopy(self.__dict__)\n doc.pop('_vr')\n url = self._vr._build_url(self.base)\n resp = self._vr.session.post(url, json.dumps(doc))\n if not resp.ok:\n print(resp.headers)\n try:\n doc = resp.json()\n if 'traceback' in doc:\n ... | class Build(BaseResource):
base = '/api/v1/builds/'
@property
def created(self):
return 'id' in vars(self)
@classmethod
def _for_app_and_tag(cls, vr, app, tag):
obj = dict(app=App.base + app + '/', tag=tag)
return cls(vr, obj)
def __hash__(self):
hd = HashableDict(self.__dict__)
hd.pop('_vr')
return hash(hd)
def __eq__(self, other):
return vars(self) == vars(other)
|
yougov/vr.common | vr/common/utils.py | tmpdir | python | def tmpdir():
target = None
try:
with _tmpdir_extant() as target:
yield target
finally:
if target is not None:
shutil.rmtree(target, ignore_errors=True) | Create a tempdir context for the cwd and remove it after. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/utils.py#L31-L41 | null | from __future__ import print_function, unicode_literals
import os
import subprocess
import shutil
import tempfile
import random
import string
import hashlib
import errno
import textwrap
import contextlib
import functools
import warnings
from pkg_resources import parse_version
try:
import pwd
import grp
import fcntl
except ImportError:
# bypass import failure on Windows
pass
from six.moves import urllib
import six
@contextlib.contextmanager
@contextlib.contextmanager
def _tmpdir_extant():
"""
Create a tempdir context for the cwd, but allow the target to remain after
exiting the context.
"""
target = tempfile.mkdtemp()
with chdir(target):
yield target
@contextlib.contextmanager
def chdir(folder):
orig_path = os.getcwd()
os.chdir(folder)
try:
yield
finally:
os.chdir(orig_path)
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
class CommandException(Exception):
"""
Custom exception class for displaying nice input from failed commands.
Accepts an CommandResult object on init.
"""
def __init__(self, result):
template = six.text_type(
"Command '{result.command}' failed with status code "
"{result.status_code}.\noutput: {result.output}\n"
)
message = template.format(result=result)
super(CommandException, self).__init__(message)
class CommandResult(object):
def __init__(self, command, output, status_code):
self.command = command
if not isinstance(output, six.text_type):
output = six.text_type(output, 'ascii', 'replace')
self.output = output
self.status_code = status_code
def __repr__(self):
return '<CommandResult: %s,%s>' % (self.status_code, self.command)
def raise_for_status(self):
if self.status_code != 0:
raise CommandException(self)
def run(command, verbose=False):
"""
Run a shell command. Capture the stdout and stderr as a single stream.
Capture the status code.
If verbose=True, then print command and the output to the terminal as it
comes in.
"""
def do_nothing(*args, **kwargs):
return None
v_print = print if verbose else do_nothing
p = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
v_print("run:", command)
def log_and_yield(line):
if six.PY2:
# If not unicode, try to decode it first
if isinstance(line, str):
line = line.decode('utf8', 'replace')
v_print(line)
return line
output = ''.join(map(log_and_yield, p.stdout))
status_code = p.wait()
return CommandResult(command, output, status_code)
def parse_redis_url(url):
"""
Given a url like redis://localhost:6379/0, return a dict with host, port,
and db members.
"""
warnings.warn(
"Use redis.StrictRedis.from_url instead", DeprecationWarning,
stacklevel=2)
parsed = urllib.parse.urlsplit(url)
return {
'host': parsed.hostname,
'port': parsed.port,
'db': int(parsed.path.replace('/', '')),
}
def randchars(num=8):
return ''.join(random.choice(string.ascii_lowercase) for x in range(num))
def lock_file(f, block=False):
"""
If block=False (the default), die hard and fast if another process has
already grabbed the lock for this file.
If block=True, wait for the lock to be released, then continue.
"""
try:
flags = fcntl.LOCK_EX
if not block:
flags |= fcntl.LOCK_NB
fcntl.flock(f.fileno(), flags)
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
raise SystemExit("ERROR: %s is locked by another process." %
f.name)
raise
def file_md5(filename):
"""
Given a path to a file, read it chunk-wise and feed each chunk into
an MD5 file hash. Avoids having to hold the whole file in memory.
"""
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def which(name, flags=os.X_OK):
"""
Search PATH for executable files with the given name.
Taken from Twisted.
"""
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result
def chowntree(path, username=None, groupname=None):
if username is None and groupname is None:
raise ValueError("Must provide username and/or groupname")
# os.chown will let you pass -1 to leave user or group unchanged.
uid = -1
gid = -1
if username:
uid = pwd.getpwnam(username).pw_uid
if groupname:
gid = grp.getgrnam(groupname).gr_gid
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
dpath = os.path.join(root, d)
os.chown(dpath, uid, gid)
for f in files:
fpath = os.path.join(root, f)
if not os.path.islink(fpath):
os.chown(fpath, uid, gid)
def get_lxc_version():
""" Asks the current host what version of LXC it has. Returns it as a
string. If LXC is not installed, raises subprocess.CalledProcessError"""
runner = functools.partial(
subprocess.check_output,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
# Old LXC had an lxc-version executable, and prefixed its result with
# "lxc version: "
try:
result = runner(['lxc-version']).rstrip()
return parse_version(result.replace("lxc version: ", ""))
except (OSError, subprocess.CalledProcessError):
pass
# New LXC instead has a --version option on most installed executables.
return parse_version(runner(['lxc-start', '--version']).rstrip())
def get_lxc_network_config(version):
if version < parse_version('1.0.0'):
return ''
return textwrap.dedent(
"""
# Share the host's networking interface. This is unsafe!
# TODO: make separate virtual interfaces per container.
lxc.network.type = none""")
def get_lxc_overlayfs_config_fmt(version):
if version < parse_version('2.0.0'):
# Old LXC
return (
"lxc.mount.entry = overlayfs %(proc_path)s overlayfs "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s "
"0 0"
)
# On newer LXC, fstype is called 'overlay' and we need a 'workdir'
return (
"lxc.mount.entry = overlay %(proc_path)s overlay "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s,workdir=%(work_path)s "
"0 0"
)
|
yougov/vr.common | vr/common/utils.py | run | python | def run(command, verbose=False):
def do_nothing(*args, **kwargs):
return None
v_print = print if verbose else do_nothing
p = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
v_print("run:", command)
def log_and_yield(line):
if six.PY2:
# If not unicode, try to decode it first
if isinstance(line, str):
line = line.decode('utf8', 'replace')
v_print(line)
return line
output = ''.join(map(log_and_yield, p.stdout))
status_code = p.wait()
return CommandResult(command, output, status_code) | Run a shell command. Capture the stdout and stderr as a single stream.
Capture the status code.
If verbose=True, then print command and the output to the terminal as it
comes in. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/utils.py#L101-L136 | [
"def do_nothing(*args, **kwargs):\n return None\n"
] | from __future__ import print_function, unicode_literals
import os
import subprocess
import shutil
import tempfile
import random
import string
import hashlib
import errno
import textwrap
import contextlib
import functools
import warnings
from pkg_resources import parse_version
try:
import pwd
import grp
import fcntl
except ImportError:
# bypass import failure on Windows
pass
from six.moves import urllib
import six
@contextlib.contextmanager
def tmpdir():
"""
Create a tempdir context for the cwd and remove it after.
"""
target = None
try:
with _tmpdir_extant() as target:
yield target
finally:
if target is not None:
shutil.rmtree(target, ignore_errors=True)
@contextlib.contextmanager
def _tmpdir_extant():
"""
Create a tempdir context for the cwd, but allow the target to remain after
exiting the context.
"""
target = tempfile.mkdtemp()
with chdir(target):
yield target
@contextlib.contextmanager
def chdir(folder):
orig_path = os.getcwd()
os.chdir(folder)
try:
yield
finally:
os.chdir(orig_path)
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
class CommandException(Exception):
"""
Custom exception class for displaying nice input from failed commands.
Accepts an CommandResult object on init.
"""
def __init__(self, result):
template = six.text_type(
"Command '{result.command}' failed with status code "
"{result.status_code}.\noutput: {result.output}\n"
)
message = template.format(result=result)
super(CommandException, self).__init__(message)
class CommandResult(object):
def __init__(self, command, output, status_code):
self.command = command
if not isinstance(output, six.text_type):
output = six.text_type(output, 'ascii', 'replace')
self.output = output
self.status_code = status_code
def __repr__(self):
return '<CommandResult: %s,%s>' % (self.status_code, self.command)
def raise_for_status(self):
if self.status_code != 0:
raise CommandException(self)
def parse_redis_url(url):
"""
Given a url like redis://localhost:6379/0, return a dict with host, port,
and db members.
"""
warnings.warn(
"Use redis.StrictRedis.from_url instead", DeprecationWarning,
stacklevel=2)
parsed = urllib.parse.urlsplit(url)
return {
'host': parsed.hostname,
'port': parsed.port,
'db': int(parsed.path.replace('/', '')),
}
def randchars(num=8):
return ''.join(random.choice(string.ascii_lowercase) for x in range(num))
def lock_file(f, block=False):
"""
If block=False (the default), die hard and fast if another process has
already grabbed the lock for this file.
If block=True, wait for the lock to be released, then continue.
"""
try:
flags = fcntl.LOCK_EX
if not block:
flags |= fcntl.LOCK_NB
fcntl.flock(f.fileno(), flags)
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
raise SystemExit("ERROR: %s is locked by another process." %
f.name)
raise
def file_md5(filename):
"""
Given a path to a file, read it chunk-wise and feed each chunk into
an MD5 file hash. Avoids having to hold the whole file in memory.
"""
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def which(name, flags=os.X_OK):
"""
Search PATH for executable files with the given name.
Taken from Twisted.
"""
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result
def chowntree(path, username=None, groupname=None):
if username is None and groupname is None:
raise ValueError("Must provide username and/or groupname")
# os.chown will let you pass -1 to leave user or group unchanged.
uid = -1
gid = -1
if username:
uid = pwd.getpwnam(username).pw_uid
if groupname:
gid = grp.getgrnam(groupname).gr_gid
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
dpath = os.path.join(root, d)
os.chown(dpath, uid, gid)
for f in files:
fpath = os.path.join(root, f)
if not os.path.islink(fpath):
os.chown(fpath, uid, gid)
def get_lxc_version():
""" Asks the current host what version of LXC it has. Returns it as a
string. If LXC is not installed, raises subprocess.CalledProcessError"""
runner = functools.partial(
subprocess.check_output,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
# Old LXC had an lxc-version executable, and prefixed its result with
# "lxc version: "
try:
result = runner(['lxc-version']).rstrip()
return parse_version(result.replace("lxc version: ", ""))
except (OSError, subprocess.CalledProcessError):
pass
# New LXC instead has a --version option on most installed executables.
return parse_version(runner(['lxc-start', '--version']).rstrip())
def get_lxc_network_config(version):
if version < parse_version('1.0.0'):
return ''
return textwrap.dedent(
"""
# Share the host's networking interface. This is unsafe!
# TODO: make separate virtual interfaces per container.
lxc.network.type = none""")
def get_lxc_overlayfs_config_fmt(version):
if version < parse_version('2.0.0'):
# Old LXC
return (
"lxc.mount.entry = overlayfs %(proc_path)s overlayfs "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s "
"0 0"
)
# On newer LXC, fstype is called 'overlay' and we need a 'workdir'
return (
"lxc.mount.entry = overlay %(proc_path)s overlay "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s,workdir=%(work_path)s "
"0 0"
)
|
yougov/vr.common | vr/common/utils.py | parse_redis_url | python | def parse_redis_url(url):
warnings.warn(
"Use redis.StrictRedis.from_url instead", DeprecationWarning,
stacklevel=2)
parsed = urllib.parse.urlsplit(url)
return {
'host': parsed.hostname,
'port': parsed.port,
'db': int(parsed.path.replace('/', '')),
} | Given a url like redis://localhost:6379/0, return a dict with host, port,
and db members. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/utils.py#L139-L152 | null | from __future__ import print_function, unicode_literals
import os
import subprocess
import shutil
import tempfile
import random
import string
import hashlib
import errno
import textwrap
import contextlib
import functools
import warnings
from pkg_resources import parse_version
try:
import pwd
import grp
import fcntl
except ImportError:
# bypass import failure on Windows
pass
from six.moves import urllib
import six
@contextlib.contextmanager
def tmpdir():
"""
Create a tempdir context for the cwd and remove it after.
"""
target = None
try:
with _tmpdir_extant() as target:
yield target
finally:
if target is not None:
shutil.rmtree(target, ignore_errors=True)
@contextlib.contextmanager
def _tmpdir_extant():
"""
Create a tempdir context for the cwd, but allow the target to remain after
exiting the context.
"""
target = tempfile.mkdtemp()
with chdir(target):
yield target
@contextlib.contextmanager
def chdir(folder):
orig_path = os.getcwd()
os.chdir(folder)
try:
yield
finally:
os.chdir(orig_path)
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
class CommandException(Exception):
"""
Custom exception class for displaying nice input from failed commands.
Accepts an CommandResult object on init.
"""
def __init__(self, result):
template = six.text_type(
"Command '{result.command}' failed with status code "
"{result.status_code}.\noutput: {result.output}\n"
)
message = template.format(result=result)
super(CommandException, self).__init__(message)
class CommandResult(object):
def __init__(self, command, output, status_code):
self.command = command
if not isinstance(output, six.text_type):
output = six.text_type(output, 'ascii', 'replace')
self.output = output
self.status_code = status_code
def __repr__(self):
return '<CommandResult: %s,%s>' % (self.status_code, self.command)
def raise_for_status(self):
if self.status_code != 0:
raise CommandException(self)
def run(command, verbose=False):
"""
Run a shell command. Capture the stdout and stderr as a single stream.
Capture the status code.
If verbose=True, then print command and the output to the terminal as it
comes in.
"""
def do_nothing(*args, **kwargs):
return None
v_print = print if verbose else do_nothing
p = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
v_print("run:", command)
def log_and_yield(line):
if six.PY2:
# If not unicode, try to decode it first
if isinstance(line, str):
line = line.decode('utf8', 'replace')
v_print(line)
return line
output = ''.join(map(log_and_yield, p.stdout))
status_code = p.wait()
return CommandResult(command, output, status_code)
def randchars(num=8):
return ''.join(random.choice(string.ascii_lowercase) for x in range(num))
def lock_file(f, block=False):
"""
If block=False (the default), die hard and fast if another process has
already grabbed the lock for this file.
If block=True, wait for the lock to be released, then continue.
"""
try:
flags = fcntl.LOCK_EX
if not block:
flags |= fcntl.LOCK_NB
fcntl.flock(f.fileno(), flags)
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
raise SystemExit("ERROR: %s is locked by another process." %
f.name)
raise
def file_md5(filename):
"""
Given a path to a file, read it chunk-wise and feed each chunk into
an MD5 file hash. Avoids having to hold the whole file in memory.
"""
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def which(name, flags=os.X_OK):
"""
Search PATH for executable files with the given name.
Taken from Twisted.
"""
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result
def chowntree(path, username=None, groupname=None):
if username is None and groupname is None:
raise ValueError("Must provide username and/or groupname")
# os.chown will let you pass -1 to leave user or group unchanged.
uid = -1
gid = -1
if username:
uid = pwd.getpwnam(username).pw_uid
if groupname:
gid = grp.getgrnam(groupname).gr_gid
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
dpath = os.path.join(root, d)
os.chown(dpath, uid, gid)
for f in files:
fpath = os.path.join(root, f)
if not os.path.islink(fpath):
os.chown(fpath, uid, gid)
def get_lxc_version():
""" Asks the current host what version of LXC it has. Returns it as a
string. If LXC is not installed, raises subprocess.CalledProcessError"""
runner = functools.partial(
subprocess.check_output,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
# Old LXC had an lxc-version executable, and prefixed its result with
# "lxc version: "
try:
result = runner(['lxc-version']).rstrip()
return parse_version(result.replace("lxc version: ", ""))
except (OSError, subprocess.CalledProcessError):
pass
# New LXC instead has a --version option on most installed executables.
return parse_version(runner(['lxc-start', '--version']).rstrip())
def get_lxc_network_config(version):
if version < parse_version('1.0.0'):
return ''
return textwrap.dedent(
"""
# Share the host's networking interface. This is unsafe!
# TODO: make separate virtual interfaces per container.
lxc.network.type = none""")
def get_lxc_overlayfs_config_fmt(version):
if version < parse_version('2.0.0'):
# Old LXC
return (
"lxc.mount.entry = overlayfs %(proc_path)s overlayfs "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s "
"0 0"
)
# On newer LXC, fstype is called 'overlay' and we need a 'workdir'
return (
"lxc.mount.entry = overlay %(proc_path)s overlay "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s,workdir=%(work_path)s "
"0 0"
)
|
yougov/vr.common | vr/common/utils.py | lock_file | python | def lock_file(f, block=False):
try:
flags = fcntl.LOCK_EX
if not block:
flags |= fcntl.LOCK_NB
fcntl.flock(f.fileno(), flags)
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
raise SystemExit("ERROR: %s is locked by another process." %
f.name)
raise | If block=False (the default), die hard and fast if another process has
already grabbed the lock for this file.
If block=True, wait for the lock to be released, then continue. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/utils.py#L159-L175 | null | from __future__ import print_function, unicode_literals
import os
import subprocess
import shutil
import tempfile
import random
import string
import hashlib
import errno
import textwrap
import contextlib
import functools
import warnings
from pkg_resources import parse_version
try:
import pwd
import grp
import fcntl
except ImportError:
# bypass import failure on Windows
pass
from six.moves import urllib
import six
@contextlib.contextmanager
def tmpdir():
"""
Create a tempdir context for the cwd and remove it after.
"""
target = None
try:
with _tmpdir_extant() as target:
yield target
finally:
if target is not None:
shutil.rmtree(target, ignore_errors=True)
@contextlib.contextmanager
def _tmpdir_extant():
"""
Create a tempdir context for the cwd, but allow the target to remain after
exiting the context.
"""
target = tempfile.mkdtemp()
with chdir(target):
yield target
@contextlib.contextmanager
def chdir(folder):
orig_path = os.getcwd()
os.chdir(folder)
try:
yield
finally:
os.chdir(orig_path)
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
class CommandException(Exception):
"""
Custom exception class for displaying nice input from failed commands.
Accepts an CommandResult object on init.
"""
def __init__(self, result):
template = six.text_type(
"Command '{result.command}' failed with status code "
"{result.status_code}.\noutput: {result.output}\n"
)
message = template.format(result=result)
super(CommandException, self).__init__(message)
class CommandResult(object):
def __init__(self, command, output, status_code):
self.command = command
if not isinstance(output, six.text_type):
output = six.text_type(output, 'ascii', 'replace')
self.output = output
self.status_code = status_code
def __repr__(self):
return '<CommandResult: %s,%s>' % (self.status_code, self.command)
def raise_for_status(self):
if self.status_code != 0:
raise CommandException(self)
def run(command, verbose=False):
"""
Run a shell command. Capture the stdout and stderr as a single stream.
Capture the status code.
If verbose=True, then print command and the output to the terminal as it
comes in.
"""
def do_nothing(*args, **kwargs):
return None
v_print = print if verbose else do_nothing
p = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
v_print("run:", command)
def log_and_yield(line):
if six.PY2:
# If not unicode, try to decode it first
if isinstance(line, str):
line = line.decode('utf8', 'replace')
v_print(line)
return line
output = ''.join(map(log_and_yield, p.stdout))
status_code = p.wait()
return CommandResult(command, output, status_code)
def parse_redis_url(url):
"""
Given a url like redis://localhost:6379/0, return a dict with host, port,
and db members.
"""
warnings.warn(
"Use redis.StrictRedis.from_url instead", DeprecationWarning,
stacklevel=2)
parsed = urllib.parse.urlsplit(url)
return {
'host': parsed.hostname,
'port': parsed.port,
'db': int(parsed.path.replace('/', '')),
}
def randchars(num=8):
return ''.join(random.choice(string.ascii_lowercase) for x in range(num))
def file_md5(filename):
"""
Given a path to a file, read it chunk-wise and feed each chunk into
an MD5 file hash. Avoids having to hold the whole file in memory.
"""
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def which(name, flags=os.X_OK):
"""
Search PATH for executable files with the given name.
Taken from Twisted.
"""
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result
def chowntree(path, username=None, groupname=None):
if username is None and groupname is None:
raise ValueError("Must provide username and/or groupname")
# os.chown will let you pass -1 to leave user or group unchanged.
uid = -1
gid = -1
if username:
uid = pwd.getpwnam(username).pw_uid
if groupname:
gid = grp.getgrnam(groupname).gr_gid
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
dpath = os.path.join(root, d)
os.chown(dpath, uid, gid)
for f in files:
fpath = os.path.join(root, f)
if not os.path.islink(fpath):
os.chown(fpath, uid, gid)
def get_lxc_version():
    """Return the host's installed LXC version, parsed for comparison.

    Raises subprocess.CalledProcessError (or OSError) when LXC is not
    installed at all.
    """
    check = functools.partial(
        subprocess.check_output,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    try:
        # Legacy LXC shipped an ``lxc-version`` binary whose output is
        # prefixed with "lxc version: ".
        legacy = check(['lxc-version']).rstrip()
        return parse_version(legacy.replace("lxc version: ", ""))
    except (OSError, subprocess.CalledProcessError):
        pass
    # Modern LXC exposes --version on its executables instead.
    return parse_version(check(['lxc-start', '--version']).rstrip())
def get_lxc_network_config(version):
    """Return the lxc.network config stanza for the given LXC *version*.

    LXC releases before 1.0.0 need no explicit network section and get
    an empty string; newer ones share the host's network interface.
    """
    stanza = textwrap.dedent(
        """
        # Share the host's networking interface. This is unsafe!
        # TODO: make separate virtual interfaces per container.
        lxc.network.type = none""")
    return stanza if version >= parse_version('1.0.0') else ''
def get_lxc_overlayfs_config_fmt(version):
    """Return the lxc.mount.entry format string for overlay mounts.

    LXC < 2.0.0 used the ``overlayfs`` fstype without a workdir; newer
    releases call it ``overlay`` and require a ``workdir=`` option.
    """
    uses_old_fstype = version < parse_version('2.0.0')
    if uses_old_fstype:
        return (
            "lxc.mount.entry = overlayfs %(proc_path)s overlayfs "
            "lowerdir=%(image_path)s,upperdir=%(proc_path)s "
            "0 0"
        )
    return (
        "lxc.mount.entry = overlay %(proc_path)s overlay "
        "lowerdir=%(image_path)s,upperdir=%(proc_path)s,workdir=%(work_path)s "
        "0 0"
    )
|
yougov/vr.common | vr/common/utils.py | which | python | def which(name, flags=os.X_OK):
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result | Search PATH for executable files with the given name.
Taken from Twisted. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/utils.py#L190-L209 | null | from __future__ import print_function, unicode_literals
import os
import subprocess
import shutil
import tempfile
import random
import string
import hashlib
import errno
import textwrap
import contextlib
import functools
import warnings
from pkg_resources import parse_version
try:
import pwd
import grp
import fcntl
except ImportError:
# bypass import failure on Windows
pass
from six.moves import urllib
import six
@contextlib.contextmanager
def tmpdir():
    """
    Create a temp dir, make it the cwd for the duration of the context,
    and delete it afterwards.
    """
    created = None
    try:
        with _tmpdir_extant() as created:
            yield created
    finally:
        if created is not None:
            shutil.rmtree(created, ignore_errors=True)
@contextlib.contextmanager
def _tmpdir_extant():
    """
    Like ``tmpdir``, but leave the directory on disk after the context
    exits; callers are responsible for cleanup.
    """
    path = tempfile.mkdtemp()
    with chdir(path):
        yield path
@contextlib.contextmanager
def chdir(folder):
    """Temporarily make *folder* the working directory."""
    previous = os.getcwd()
    os.chdir(folder)
    try:
        yield
    finally:
        # Always restore the original cwd, even on error.
        os.chdir(previous)
def mkdir(path):
    """Create directory *path* (including parents) unless it exists."""
    if os.path.isdir(path):
        return
    os.makedirs(path)
class CommandException(Exception):
    """
    Exception carrying a nicely formatted message for a failed shell
    command.  Built from a CommandResult instance.
    """

    def __init__(self, result):
        fmt = six.text_type(
            "Command '{result.command}' failed with status code "
            "{result.status_code}.\noutput: {result.output}\n"
        )
        super(CommandException, self).__init__(fmt.format(result=result))
class CommandResult(object):
    """Outcome of a shell command: command line, merged output, status."""

    def __init__(self, command, output, status_code):
        self.command = command
        # Coerce byte output to text so downstream formatting is safe.
        if not isinstance(output, six.text_type):
            output = six.text_type(output, 'ascii', 'replace')
        self.output = output
        self.status_code = status_code

    def __repr__(self):
        return '<CommandResult: %s,%s>' % (self.status_code, self.command)

    def raise_for_status(self):
        """Raise CommandException when the command exited non-zero."""
        if self.status_code != 0:
            raise CommandException(self)
def run(command, verbose=False):
    """
    Execute *command* in a shell, merging stderr into stdout.

    Returns a CommandResult holding the command line, its combined
    output, and the exit status.  When *verbose* is true the command
    and each output line are echoed as they arrive.
    """
    v_print = print if verbose else (lambda *a, **kw: None)
    proc = subprocess.Popen(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    v_print("run:", command)

    def echo(line):
        # Python 2 hands us bytes here; decode before printing/collecting.
        if six.PY2 and isinstance(line, str):
            line = line.decode('utf8', 'replace')
        v_print(line)
        return line

    output = ''.join(map(echo, proc.stdout))
    return CommandResult(command, output, proc.wait())
def parse_redis_url(url):
"""
Given a url like redis://localhost:6379/0, return a dict with host, port,
and db members.
"""
warnings.warn(
"Use redis.StrictRedis.from_url instead", DeprecationWarning,
stacklevel=2)
parsed = urllib.parse.urlsplit(url)
return {
'host': parsed.hostname,
'port': parsed.port,
'db': int(parsed.path.replace('/', '')),
}
def randchars(num=8):
return ''.join(random.choice(string.ascii_lowercase) for x in range(num))
def lock_file(f, block=False):
    """
    Acquire an exclusive flock on file object *f*.

    With block=False (the default), exit the process immediately if
    another process already holds the lock; with block=True, wait for
    the lock to be released before continuing.
    """
    flags = fcntl.LOCK_EX if block else fcntl.LOCK_EX | fcntl.LOCK_NB
    try:
        fcntl.flock(f.fileno(), flags)
    except IOError as e:
        if e.errno not in (errno.EACCES, errno.EAGAIN):
            raise
        raise SystemExit("ERROR: %s is locked by another process." %
                         f.name)
def file_md5(filename):
"""
Given a path to a file, read it chunk-wise and feed each chunk into
an MD5 file hash. Avoids having to hold the whole file in memory.
"""
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def chowntree(path, username=None, groupname=None):
if username is None and groupname is None:
raise ValueError("Must provide username and/or groupname")
# os.chown will let you pass -1 to leave user or group unchanged.
uid = -1
gid = -1
if username:
uid = pwd.getpwnam(username).pw_uid
if groupname:
gid = grp.getgrnam(groupname).gr_gid
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
dpath = os.path.join(root, d)
os.chown(dpath, uid, gid)
for f in files:
fpath = os.path.join(root, f)
if not os.path.islink(fpath):
os.chown(fpath, uid, gid)
def get_lxc_version():
""" Asks the current host what version of LXC it has. Returns it as a
string. If LXC is not installed, raises subprocess.CalledProcessError"""
runner = functools.partial(
subprocess.check_output,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
# Old LXC had an lxc-version executable, and prefixed its result with
# "lxc version: "
try:
result = runner(['lxc-version']).rstrip()
return parse_version(result.replace("lxc version: ", ""))
except (OSError, subprocess.CalledProcessError):
pass
# New LXC instead has a --version option on most installed executables.
return parse_version(runner(['lxc-start', '--version']).rstrip())
def get_lxc_network_config(version):
if version < parse_version('1.0.0'):
return ''
return textwrap.dedent(
"""
# Share the host's networking interface. This is unsafe!
# TODO: make separate virtual interfaces per container.
lxc.network.type = none""")
def get_lxc_overlayfs_config_fmt(version):
if version < parse_version('2.0.0'):
# Old LXC
return (
"lxc.mount.entry = overlayfs %(proc_path)s overlayfs "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s "
"0 0"
)
# On newer LXC, fstype is called 'overlay' and we need a 'workdir'
return (
"lxc.mount.entry = overlay %(proc_path)s overlay "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s,workdir=%(work_path)s "
"0 0"
)
|
yougov/vr.common | vr/common/utils.py | get_lxc_version | python | def get_lxc_version():
runner = functools.partial(
subprocess.check_output,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
# Old LXC had an lxc-version executable, and prefixed its result with
# "lxc version: "
try:
result = runner(['lxc-version']).rstrip()
return parse_version(result.replace("lxc version: ", ""))
except (OSError, subprocess.CalledProcessError):
pass
# New LXC instead has a --version option on most installed executables.
return parse_version(runner(['lxc-start', '--version']).rstrip()) | Asks the current host what version of LXC it has. Returns it as a
string. If LXC is not installed, raises subprocess.CalledProcessError | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/utils.py#L238-L257 | null | from __future__ import print_function, unicode_literals
import os
import subprocess
import shutil
import tempfile
import random
import string
import hashlib
import errno
import textwrap
import contextlib
import functools
import warnings
from pkg_resources import parse_version
try:
import pwd
import grp
import fcntl
except ImportError:
# bypass import failure on Windows
pass
from six.moves import urllib
import six
@contextlib.contextmanager
def tmpdir():
"""
Create a tempdir context for the cwd and remove it after.
"""
target = None
try:
with _tmpdir_extant() as target:
yield target
finally:
if target is not None:
shutil.rmtree(target, ignore_errors=True)
@contextlib.contextmanager
def _tmpdir_extant():
"""
Create a tempdir context for the cwd, but allow the target to remain after
exiting the context.
"""
target = tempfile.mkdtemp()
with chdir(target):
yield target
@contextlib.contextmanager
def chdir(folder):
orig_path = os.getcwd()
os.chdir(folder)
try:
yield
finally:
os.chdir(orig_path)
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
class CommandException(Exception):
"""
Custom exception class for displaying nice input from failed commands.
Accepts an CommandResult object on init.
"""
def __init__(self, result):
template = six.text_type(
"Command '{result.command}' failed with status code "
"{result.status_code}.\noutput: {result.output}\n"
)
message = template.format(result=result)
super(CommandException, self).__init__(message)
class CommandResult(object):
def __init__(self, command, output, status_code):
self.command = command
if not isinstance(output, six.text_type):
output = six.text_type(output, 'ascii', 'replace')
self.output = output
self.status_code = status_code
def __repr__(self):
return '<CommandResult: %s,%s>' % (self.status_code, self.command)
def raise_for_status(self):
if self.status_code != 0:
raise CommandException(self)
def run(command, verbose=False):
"""
Run a shell command. Capture the stdout and stderr as a single stream.
Capture the status code.
If verbose=True, then print command and the output to the terminal as it
comes in.
"""
def do_nothing(*args, **kwargs):
return None
v_print = print if verbose else do_nothing
p = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
v_print("run:", command)
def log_and_yield(line):
if six.PY2:
# If not unicode, try to decode it first
if isinstance(line, str):
line = line.decode('utf8', 'replace')
v_print(line)
return line
output = ''.join(map(log_and_yield, p.stdout))
status_code = p.wait()
return CommandResult(command, output, status_code)
def parse_redis_url(url):
"""
Given a url like redis://localhost:6379/0, return a dict with host, port,
and db members.
"""
warnings.warn(
"Use redis.StrictRedis.from_url instead", DeprecationWarning,
stacklevel=2)
parsed = urllib.parse.urlsplit(url)
return {
'host': parsed.hostname,
'port': parsed.port,
'db': int(parsed.path.replace('/', '')),
}
def randchars(num=8):
return ''.join(random.choice(string.ascii_lowercase) for x in range(num))
def lock_file(f, block=False):
"""
If block=False (the default), die hard and fast if another process has
already grabbed the lock for this file.
If block=True, wait for the lock to be released, then continue.
"""
try:
flags = fcntl.LOCK_EX
if not block:
flags |= fcntl.LOCK_NB
fcntl.flock(f.fileno(), flags)
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
raise SystemExit("ERROR: %s is locked by another process." %
f.name)
raise
def file_md5(filename):
"""
Given a path to a file, read it chunk-wise and feed each chunk into
an MD5 file hash. Avoids having to hold the whole file in memory.
"""
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def which(name, flags=os.X_OK):
"""
Search PATH for executable files with the given name.
Taken from Twisted.
"""
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result
def chowntree(path, username=None, groupname=None):
if username is None and groupname is None:
raise ValueError("Must provide username and/or groupname")
# os.chown will let you pass -1 to leave user or group unchanged.
uid = -1
gid = -1
if username:
uid = pwd.getpwnam(username).pw_uid
if groupname:
gid = grp.getgrnam(groupname).gr_gid
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
dpath = os.path.join(root, d)
os.chown(dpath, uid, gid)
for f in files:
fpath = os.path.join(root, f)
if not os.path.islink(fpath):
os.chown(fpath, uid, gid)
def get_lxc_network_config(version):
if version < parse_version('1.0.0'):
return ''
return textwrap.dedent(
"""
# Share the host's networking interface. This is unsafe!
# TODO: make separate virtual interfaces per container.
lxc.network.type = none""")
def get_lxc_overlayfs_config_fmt(version):
if version < parse_version('2.0.0'):
# Old LXC
return (
"lxc.mount.entry = overlayfs %(proc_path)s overlayfs "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s "
"0 0"
)
# On newer LXC, fstype is called 'overlay' and we need a 'workdir'
return (
"lxc.mount.entry = overlay %(proc_path)s overlay "
"lowerdir=%(image_path)s,upperdir=%(proc_path)s,workdir=%(work_path)s "
"0 0"
)
|
yougov/vr.common | vr/common/slugignore.py | remove | python | def remove(item):
if os.path.isdir(item):
shutil.rmtree(item)
else:
# Assume it's a file. error if not.
os.remove(item) | Delete item, whether it's a file, a folder, or a folder
full of other files and folders. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/slugignore.py#L32-L41 | null | """
Functions to support the .slugignore feature. clean_slug_dir(path) is the
main API.
There are two notable differences from the Heroku implementation:
- Velociraptor will not automatically delete repo folders like
.git. It will only delete things specified in .slugignore.
- Velociraptor will delete patterns specified in .slugignore *after*
compilation is finished. (Heroku deletes before compiling.)
"""
from __future__ import print_function
import os
import shutil
import glob
def is_inside(root, item):
    """
    Return True if *item* lies at or below *root* on the filesystem.

    Both paths are resolved through symlinks first.  The previous
    implementation computed ``relpath(root, item)`` (arguments
    reversed), which reported items nested more than one level deep as
    outside the root, and — worse — reported ancestors of the root
    (e.g. '/') as inside it, which could let remove_pattern delete
    above the root.
    """
    root = os.path.realpath(root)
    item = os.path.realpath(item)
    relative = os.path.relpath(item, root)
    # Anything outside the root begins with a ".." path component.
    return relative != os.pardir and not relative.startswith(
        os.pardir + os.sep)
def remove_pattern(root, pat, verbose=True):
    """
    Delete everything under *root* matching the glob pattern *pat*.

    Matches that resolve outside of *root* are skipped (and reported
    when *verbose* is true) as a safety net against deleting files
    elsewhere on the system.
    """
    print("removing pattern", root, pat)
    combined = root + pat
    print('combined', combined)
    items = glob.glob(combined)
    print('items', items)
    for item in items:
        print('item', item)
        if not is_inside(root, item):
            if verbose:
                print("{item} is not inside {root}! Skipping.".format(
                    **vars()))
            continue
        remove(item)
def get_slugignores(root, fname='.slugignore'):
    """
    Return the list of patterns from the .slugignore file under *root*,
    one per line with trailing newlines stripped.

    A missing or unreadable file yields an empty list.
    """
    path = os.path.join(root, fname)
    try:
        with open(path) as fh:
            return [line.rstrip('\n') for line in fh]
    except IOError:
        return []
def clean_slug_dir(root):
    """Remove everything listed in *root*'s .slugignore from *root*."""
    # remove_pattern concatenates root+pattern, so root must end in '/'.
    root = root if root.endswith('/') else root + '/'
    for pat in get_slugignores(root):
        print("pattern", pat)
        remove_pattern(root, pat)
|
yougov/vr.common | vr/common/slugignore.py | remove_pattern | python | def remove_pattern(root, pat, verbose=True):
print("removing pattern", root, pat)
combined = root + pat
print('combined', combined)
items = glob.glob(combined)
print('items', items)
for item in items:
print('item', item)
if is_inside(root, item):
remove(item)
elif verbose:
print("{item} is not inside {root}! Skipping.".format(**vars())) | Given a directory, and a pattern of files like "garbage.txt" or
"*pyc" inside it, remove them.
Try not to delete the whole OS while you're at it. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/slugignore.py#L44-L61 | [
"def remove(item):\n \"\"\"\n Delete item, whether it's a file, a folder, or a folder\n full of other files and folders.\n \"\"\"\n if os.path.isdir(item):\n shutil.rmtree(item)\n else:\n # Assume it's a file. error if not.\n os.remove(item)\n",
"def is_inside(root, item):\n... | """
Functions to support the .slugignore feature. clean_slug_dir(path) is the
main API.
There are two notable differences from the Heroku implementation:
- Velociraptor will not automatically delete repo folders like
.git. It will only delete things specified in .slugignore.
- Velociraptor will delete patterns specified in .slugignore *after*
compilation is finished. (Heroku deletes before compiling.)
"""
from __future__ import print_function
import os
import shutil
import glob
def is_inside(root, item):
root = os.path.realpath(root)
item = os.path.realpath(item)
relative = os.path.relpath(root, item)
if relative.startswith(os.pardir + os.sep):
return False
else:
return True
def remove(item):
    """
    Delete *item* from disk, recursing when it is a directory.

    Anything that is not a directory is assumed to be a file, and
    ``os.remove`` will raise if that assumption is wrong.
    """
    deleter = shutil.rmtree if os.path.isdir(item) else os.remove
    deleter(item)
def get_slugignores(root, fname='.slugignore'):
"""
Given a root path, read any .slugignore file inside and return a list of
patterns that should be removed prior to slug compilation.
Return empty list if file does not exist.
"""
try:
with open(os.path.join(root, fname)) as f:
return [l.rstrip('\n') for l in f]
except IOError:
return []
def clean_slug_dir(root):
"""
Given a path, delete anything specified in .slugignore.
"""
if not root.endswith('/'):
root += '/'
for pattern in get_slugignores(root):
print("pattern", pattern)
remove_pattern(root, pattern)
|
yougov/vr.common | vr/common/slugignore.py | get_slugignores | python | def get_slugignores(root, fname='.slugignore'):
try:
with open(os.path.join(root, fname)) as f:
return [l.rstrip('\n') for l in f]
except IOError:
return [] | Given a root path, read any .slugignore file inside and return a list of
patterns that should be removed prior to slug compilation.
Return empty list if file does not exist. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/slugignore.py#L64-L75 | null | """
Functions to support the .slugignore feature. clean_slug_dir(path) is the
main API.
There are two notable differences from the Heroku implementation:
- Velociraptor will not automatically delete repo folders like
.git. It will only delete things specified in .slugignore.
- Velociraptor will delete patterns specified in .slugignore *after*
compilation is finished. (Heroku deletes before compiling.)
"""
from __future__ import print_function
import os
import shutil
import glob
def is_inside(root, item):
root = os.path.realpath(root)
item = os.path.realpath(item)
relative = os.path.relpath(root, item)
if relative.startswith(os.pardir + os.sep):
return False
else:
return True
def remove(item):
"""
Delete item, whether it's a file, a folder, or a folder
full of other files and folders.
"""
if os.path.isdir(item):
shutil.rmtree(item)
else:
# Assume it's a file. error if not.
os.remove(item)
def remove_pattern(root, pat, verbose=True):
"""
Given a directory, and a pattern of files like "garbage.txt" or
"*pyc" inside it, remove them.
Try not to delete the whole OS while you're at it.
"""
print("removing pattern", root, pat)
combined = root + pat
print('combined', combined)
items = glob.glob(combined)
print('items', items)
for item in items:
print('item', item)
if is_inside(root, item):
remove(item)
elif verbose:
print("{item} is not inside {root}! Skipping.".format(**vars()))
def clean_slug_dir(root):
"""
Given a path, delete anything specified in .slugignore.
"""
if not root.endswith('/'):
root += '/'
for pattern in get_slugignores(root):
print("pattern", pattern)
remove_pattern(root, pattern)
|
yougov/vr.common | vr/common/slugignore.py | clean_slug_dir | python | def clean_slug_dir(root):
if not root.endswith('/'):
root += '/'
for pattern in get_slugignores(root):
print("pattern", pattern)
remove_pattern(root, pattern) | Given a path, delete anything specified in .slugignore. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/slugignore.py#L78-L86 | [
"def remove_pattern(root, pat, verbose=True):\n \"\"\"\n Given a directory, and a pattern of files like \"garbage.txt\" or\n \"*pyc\" inside it, remove them.\n\n Try not to delete the whole OS while you're at it.\n \"\"\"\n print(\"removing pattern\", root, pat)\n combined = root + pat\n pri... | """
Functions to support the .slugignore feature. clean_slug_dir(path) is the
main API.
There are two notable differences from the Heroku implementation:
- Velociraptor will not automatically delete repo folders like
.git. It will only delete things specified in .slugignore.
- Velociraptor will delete patterns specified in .slugignore *after*
compilation is finished. (Heroku deletes before compiling.)
"""
from __future__ import print_function
import os
import shutil
import glob
def is_inside(root, item):
root = os.path.realpath(root)
item = os.path.realpath(item)
relative = os.path.relpath(root, item)
if relative.startswith(os.pardir + os.sep):
return False
else:
return True
def remove(item):
"""
Delete item, whether it's a file, a folder, or a folder
full of other files and folders.
"""
if os.path.isdir(item):
shutil.rmtree(item)
else:
# Assume it's a file. error if not.
os.remove(item)
def remove_pattern(root, pat, verbose=True):
"""
Given a directory, and a pattern of files like "garbage.txt" or
"*pyc" inside it, remove them.
Try not to delete the whole OS while you're at it.
"""
print("removing pattern", root, pat)
combined = root + pat
print('combined', combined)
items = glob.glob(combined)
print('items', items)
for item in items:
print('item', item)
if is_inside(root, item):
remove(item)
elif verbose:
print("{item} is not inside {root}! Skipping.".format(**vars()))
def get_slugignores(root, fname='.slugignore'):
"""
Given a root path, read any .slugignore file inside and return a list of
patterns that should be removed prior to slug compilation.
Return empty list if file does not exist.
"""
try:
with open(os.path.join(root, fname)) as f:
return [l.rstrip('\n') for l in f]
except IOError:
return []
|
yougov/vr.common | vr/common/repo.py | guess_url_vcs | python | def guess_url_vcs(url):
parsed = urllib.parse.urlsplit(url)
if parsed.scheme in ('git', 'svn'):
return parsed.scheme
elif parsed.path.endswith('.git'):
return 'git'
elif parsed.hostname == 'github.com':
return 'git'
# If it's an http url, we can try requesting it and guessing from the
# contents.
if parsed.scheme in ('http', 'https'):
resp = requests.get(url)
if re.match('basehttp.*python.*', resp.headers.get('server').lower()):
# It's the mercurial http server
return 'hg'
return None | Given a url, try to guess what kind of VCS it's for. Return None if we
can't make a good guess. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/repo.py#L18-L39 | null | # Tools for doing some simple (clone and pull) operations with repositories.
import os
import re
import logging
from six.moves import urllib
import requests
import contextlib2
from vr.common import utils
from vr.common.utils import chdir, run
log = logging.getLogger(__name__)
def guess_folder_vcs(folder):
    """
    Inspect *folder* for a VCS metadata directory and return 'git',
    'hg', or 'svn' accordingly.  Returns None when nothing matches or
    the folder cannot be listed.
    """
    try:
        entries = os.listdir(folder)
    except OSError:
        return None
    for marker in ('.git', '.hg', '.svn'):
        if marker in entries:
            # Drop the leading dot to get the vcs name.
            return marker[1:]
    return None
# Thin wrapper around a local hg/git working copy: knows how to clone,
# pull/update to a revision, and report the current revision hash.
# NOTE(review): the lines below preserve the whitespace-stripped layout
# of this dump; original indentation was lost upstream.
class Repo(object):
# Represents one repository checkout on the local filesystem.
def __init__(self, folder, url=None, vcs_type=None):
# :param folder: local checkout path (trailing slash tolerated)
# :param url: upstream URL; may carry a '#fragment' naming a revision
# :param vcs_type: 'hg' or 'git'; guessed from folder/url when omitted
# strip trailing slash from folder if present
if folder.endswith('/'):
folder = folder[:-1]
self.folder = folder
vcs_type = vcs_type or guess_folder_vcs(folder) or guess_url_vcs(url)
if vcs_type is None:
raise ValueError('vcs type not guessable from folder (%s) or URL '
'(%s) ' % (folder, url))
self.vcs_type = vcs_type
if url is None and not os.path.isdir(folder):
raise ValueError('Must provide repo url if folder does not exist')
url = url or self.get_url()
# Strip off fragment
url, _, self.fragment = url.partition('#')
# Strip off trailing slash
if url.endswith('/'):
url = url[:-1]
self.url = url
# Run a shell command (module-level run) and raise CommandException
# on a non-zero exit status.
@staticmethod
def run(command):
r = run(command, verbose=True)
r.raise_for_status()
return r
def get_url(self):
"""
Assuming that the repo has been cloned locally, get its default
upstream URL.
"""
cmd = {
'hg': 'hg paths default',
'git': 'git config --local --get remote.origin.url',
}[self.vcs_type]
with chdir(self.folder):
r = self.run(cmd)
return r.output.replace('\n', '')
# Clone self.url into self.folder using the detected VCS.
def clone(self):
log.info('Cloning %s to %s', self.url, self.folder)
cmd = {
'hg': 'hg clone %s %s' % (self.url, self.folder),
'git': 'git clone %s %s' % (self.url, self.folder),
}[self.vcs_type]
self.run(cmd)
# Bring the checkout to `rev` (falling back to the URL fragment, then
# the VCS default), cloning first when the folder is missing.
def update(self, rev=None):
# If folder doesn't exist, do a clone. Else pull and update.
if not os.path.exists(self.folder):
self.clone()
log.info('Updating %s from %s', self.folder, self.url)
# account for self.fragment=='' case
rev = rev or self.fragment or None
# Dispatch to _update_hg/_update_git: format(**locals()) resolves
# '{self.vcs_type}' against the local name 'self'.
update = getattr(self, '_update_{self.vcs_type}'.format(**locals()))
with chdir(self.folder):
update(rev)
def _update_hg(self, rev):
# Mercurial: pull, then clean-update to the revision (default tip).
rev = rev or 'tip'
self.run('hg pull {}'.format(self.url))
self.run('hg up --clean {}'.format(rev))
def _update_git(self, rev):
# Default to master
rev = rev or 'master'
# Assume origin is called 'origin'.
remote = 'origin'
# Get all refs first
self.run('git fetch --tags')
# Checkout the rev we want
self.run('git checkout {}'.format(rev))
# reset working state to the origin (only relevant to
# branches, so suppress errors).
with contextlib2.suppress(utils.CommandException):
self.run('git reset --hard {remote}/{rev}'.format(**locals()))
# Folder name a clone of self.url would create (module-level helper).
@property
def basename(self):
return basename(self.url)
# Current checked-out revision id, via _version_hg/_version_git.
@property
def version(self):
method = getattr(self, '_version_' + self.vcs_type)
return method()
def _version_hg(self):
# '+' suffix marks local modifications; strip it with the newline.
r = self.run('hg identify -i %s' % self.folder)
return r.output.rstrip('+\n')
def _version_git(self):
with chdir(self.folder):
r = self.run('git rev-parse HEAD')
return r.output.rstrip()
def __repr__(self):
values = {'classname': self.__class__.__name__,
'folder': os.path.basename(self.folder)}
return "%(classname)s <%(folder)s>" % values
def basename(url):
    """
    Return the folder name that cloning *url* into the current working
    directory would create.
    """
    # Whitespace sneaks onto copied URLs easily; drop it first.
    url = url.strip()
    # Ignore any #fragment (used elsewhere to pin a revision).
    url, _sep, _fragment = url.partition('#')
    if url.endswith('/'):
        url = url[:-1]
    tail = url.split('/')[-1]
    # A trailing .git is not part of the folder name.
    return re.sub(r'\.git$', '', tail)
|
yougov/vr.common | vr/common/repo.py | guess_folder_vcs | python | def guess_folder_vcs(folder):
try:
contents = os.listdir(folder)
vcs_folders = ['.git', '.hg', '.svn']
found = next((x for x in vcs_folders if x in contents), None)
# Chop off the dot if we got a string back
return found[1:] if found else None
except OSError:
return None | Given a path for a folder on the local filesystem, see what kind of vcs
repo it is, if any. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/repo.py#L42-L54 | null | # Tools for doing some simple (clone and pull) operations with repositories.
import os
import re
import logging
from six.moves import urllib
import requests
import contextlib2
from vr.common import utils
from vr.common.utils import chdir, run
log = logging.getLogger(__name__)
def guess_url_vcs(url):
    """
    Given a url, try to guess what kind of VCS it's for ('git', 'svn',
    or 'hg').  Return None if we can't make a good guess.

    As a last resort, http(s) URLs are probed with a GET and the Server
    header is sniffed for mercurial's built-in web server.
    """
    parsed = urllib.parse.urlsplit(url)
    if parsed.scheme in ('git', 'svn'):
        return parsed.scheme
    elif parsed.path.endswith('.git'):
        return 'git'
    elif parsed.hostname == 'github.com':
        return 'git'
    # If it's an http url, we can try requesting it and guessing from the
    # contents.
    if parsed.scheme in ('http', 'https'):
        resp = requests.get(url)
        # Default to '' so a missing Server header cannot crash the
        # .lower() call below.
        server = resp.headers.get('server', '')
        if re.match('basehttp.*python.*', server.lower()):
            # It's the mercurial http server
            return 'hg'
    return None
class Repo(object):
def __init__(self, folder, url=None, vcs_type=None):
# strip trailing slash from folder if present
if folder.endswith('/'):
folder = folder[:-1]
self.folder = folder
vcs_type = vcs_type or guess_folder_vcs(folder) or guess_url_vcs(url)
if vcs_type is None:
raise ValueError('vcs type not guessable from folder (%s) or URL '
'(%s) ' % (folder, url))
self.vcs_type = vcs_type
if url is None and not os.path.isdir(folder):
raise ValueError('Must provide repo url if folder does not exist')
url = url or self.get_url()
# Strip off fragment
url, _, self.fragment = url.partition('#')
# Strip off trailing slash
if url.endswith('/'):
url = url[:-1]
self.url = url
@staticmethod
def run(command):
r = run(command, verbose=True)
r.raise_for_status()
return r
def get_url(self):
"""
Assuming that the repo has been cloned locally, get its default
upstream URL.
"""
cmd = {
'hg': 'hg paths default',
'git': 'git config --local --get remote.origin.url',
}[self.vcs_type]
with chdir(self.folder):
r = self.run(cmd)
return r.output.replace('\n', '')
def clone(self):
log.info('Cloning %s to %s', self.url, self.folder)
cmd = {
'hg': 'hg clone %s %s' % (self.url, self.folder),
'git': 'git clone %s %s' % (self.url, self.folder),
}[self.vcs_type]
self.run(cmd)
def update(self, rev=None):
# If folder doesn't exist, do a clone. Else pull and update.
if not os.path.exists(self.folder):
self.clone()
log.info('Updating %s from %s', self.folder, self.url)
# account for self.fragment=='' case
rev = rev or self.fragment or None
update = getattr(self, '_update_{self.vcs_type}'.format(**locals()))
with chdir(self.folder):
update(rev)
def _update_hg(self, rev):
rev = rev or 'tip'
self.run('hg pull {}'.format(self.url))
self.run('hg up --clean {}'.format(rev))
def _update_git(self, rev):
# Default to master
rev = rev or 'master'
# Assume origin is called 'origin'.
remote = 'origin'
# Get all refs first
self.run('git fetch --tags')
# Checkout the rev we want
self.run('git checkout {}'.format(rev))
# reset working state to the origin (only relevant to
# branches, so suppress errors).
with contextlib2.suppress(utils.CommandException):
self.run('git reset --hard {remote}/{rev}'.format(**locals()))
@property
def basename(self):
return basename(self.url)
@property
def version(self):
method = getattr(self, '_version_' + self.vcs_type)
return method()
def _version_hg(self):
r = self.run('hg identify -i %s' % self.folder)
return r.output.rstrip('+\n')
def _version_git(self):
with chdir(self.folder):
r = self.run('git rev-parse HEAD')
return r.output.rstrip()
def __repr__(self):
values = {'classname': self.__class__.__name__,
'folder': os.path.basename(self.folder)}
return "%(classname)s <%(folder)s>" % values
def basename(url):
"""
Return the name of the folder that you'd get if you cloned 'url' into the
current working directory.
"""
# It's easy to accidentally have whitespace on the beginning or end of the
# url.
url = url.strip()
url, _sep, _fragment = url.partition('#')
# Remove trailing slash from url if present
if url.endswith('/'):
url = url[:-1]
# Also strip .git from url if it ends in that.
return re.sub(r'\.git$', '', url.split('/')[-1])
|
yougov/vr.common | vr/common/repo.py | basename | python | def basename(url):
# It's easy to accidentally have whitespace on the beginning or end of the
# url.
url = url.strip()
url, _sep, _fragment = url.partition('#')
# Remove trailing slash from url if present
if url.endswith('/'):
url = url[:-1]
# Also strip .git from url if it ends in that.
return re.sub(r'\.git$', '', url.split('/')[-1]) | Return the name of the folder that you'd get if you cloned 'url' into the
current working directory. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/repo.py#L169-L183 | null | # Tools for doing some simple (clone and pull) operations with repositories.
import os
import re
import logging
from six.moves import urllib
import requests
import contextlib2
from vr.common import utils
from vr.common.utils import chdir, run
log = logging.getLogger(__name__)
def guess_url_vcs(url):
"""
Given a url, try to guess what kind of VCS it's for. Return None if we
can't make a good guess.
"""
parsed = urllib.parse.urlsplit(url)
if parsed.scheme in ('git', 'svn'):
return parsed.scheme
elif parsed.path.endswith('.git'):
return 'git'
elif parsed.hostname == 'github.com':
return 'git'
# If it's an http url, we can try requesting it and guessing from the
# contents.
if parsed.scheme in ('http', 'https'):
resp = requests.get(url)
if re.match('basehttp.*python.*', resp.headers.get('server').lower()):
# It's the mercurial http server
return 'hg'
return None
def guess_folder_vcs(folder):
"""
Given a path for a folder on the local filesystem, see what kind of vcs
repo it is, if any.
"""
try:
contents = os.listdir(folder)
vcs_folders = ['.git', '.hg', '.svn']
found = next((x for x in vcs_folders if x in contents), None)
# Chop off the dot if we got a string back
return found[1:] if found else None
except OSError:
return None
class Repo(object):
def __init__(self, folder, url=None, vcs_type=None):
# strip trailing slash from folder if present
if folder.endswith('/'):
folder = folder[:-1]
self.folder = folder
vcs_type = vcs_type or guess_folder_vcs(folder) or guess_url_vcs(url)
if vcs_type is None:
raise ValueError('vcs type not guessable from folder (%s) or URL '
'(%s) ' % (folder, url))
self.vcs_type = vcs_type
if url is None and not os.path.isdir(folder):
raise ValueError('Must provide repo url if folder does not exist')
url = url or self.get_url()
# Strip off fragment
url, _, self.fragment = url.partition('#')
# Strip off trailing slash
if url.endswith('/'):
url = url[:-1]
self.url = url
@staticmethod
def run(command):
r = run(command, verbose=True)
r.raise_for_status()
return r
def get_url(self):
"""
Assuming that the repo has been cloned locally, get its default
upstream URL.
"""
cmd = {
'hg': 'hg paths default',
'git': 'git config --local --get remote.origin.url',
}[self.vcs_type]
with chdir(self.folder):
r = self.run(cmd)
return r.output.replace('\n', '')
def clone(self):
log.info('Cloning %s to %s', self.url, self.folder)
cmd = {
'hg': 'hg clone %s %s' % (self.url, self.folder),
'git': 'git clone %s %s' % (self.url, self.folder),
}[self.vcs_type]
self.run(cmd)
def update(self, rev=None):
# If folder doesn't exist, do a clone. Else pull and update.
if not os.path.exists(self.folder):
self.clone()
log.info('Updating %s from %s', self.folder, self.url)
# account for self.fragment=='' case
rev = rev or self.fragment or None
update = getattr(self, '_update_{self.vcs_type}'.format(**locals()))
with chdir(self.folder):
update(rev)
def _update_hg(self, rev):
rev = rev or 'tip'
self.run('hg pull {}'.format(self.url))
self.run('hg up --clean {}'.format(rev))
def _update_git(self, rev):
# Default to master
rev = rev or 'master'
# Assume origin is called 'origin'.
remote = 'origin'
# Get all refs first
self.run('git fetch --tags')
# Checkout the rev we want
self.run('git checkout {}'.format(rev))
# reset working state to the origin (only relevant to
# branches, so suppress errors).
with contextlib2.suppress(utils.CommandException):
self.run('git reset --hard {remote}/{rev}'.format(**locals()))
@property
def basename(self):
return basename(self.url)
@property
def version(self):
method = getattr(self, '_version_' + self.vcs_type)
return method()
def _version_hg(self):
r = self.run('hg identify -i %s' % self.folder)
return r.output.rstrip('+\n')
def _version_git(self):
with chdir(self.folder):
r = self.run('git rev-parse HEAD')
return r.output.rstrip()
def __repr__(self):
values = {'classname': self.__class__.__name__,
'folder': os.path.basename(self.folder)}
return "%(classname)s <%(folder)s>" % values
|
yougov/vr.common | vr/common/repo.py | Repo.get_url | python | def get_url(self):
cmd = {
'hg': 'hg paths default',
'git': 'git config --local --get remote.origin.url',
}[self.vcs_type]
with chdir(self.folder):
r = self.run(cmd)
return r.output.replace('\n', '') | Assuming that the repo has been cloned locally, get its default
upstream URL. | train | https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/repo.py#L90-L101 | [
"def run(command):\n r = run(command, verbose=True)\n r.raise_for_status()\n return r\n"
] | class Repo(object):
def __init__(self, folder, url=None, vcs_type=None):
# strip trailing slash from folder if present
if folder.endswith('/'):
folder = folder[:-1]
self.folder = folder
vcs_type = vcs_type or guess_folder_vcs(folder) or guess_url_vcs(url)
if vcs_type is None:
raise ValueError('vcs type not guessable from folder (%s) or URL '
'(%s) ' % (folder, url))
self.vcs_type = vcs_type
if url is None and not os.path.isdir(folder):
raise ValueError('Must provide repo url if folder does not exist')
url = url or self.get_url()
# Strip off fragment
url, _, self.fragment = url.partition('#')
# Strip off trailing slash
if url.endswith('/'):
url = url[:-1]
self.url = url
@staticmethod
def run(command):
r = run(command, verbose=True)
r.raise_for_status()
return r
def clone(self):
log.info('Cloning %s to %s', self.url, self.folder)
cmd = {
'hg': 'hg clone %s %s' % (self.url, self.folder),
'git': 'git clone %s %s' % (self.url, self.folder),
}[self.vcs_type]
self.run(cmd)
def update(self, rev=None):
# If folder doesn't exist, do a clone. Else pull and update.
if not os.path.exists(self.folder):
self.clone()
log.info('Updating %s from %s', self.folder, self.url)
# account for self.fragment=='' case
rev = rev or self.fragment or None
update = getattr(self, '_update_{self.vcs_type}'.format(**locals()))
with chdir(self.folder):
update(rev)
def _update_hg(self, rev):
rev = rev or 'tip'
self.run('hg pull {}'.format(self.url))
self.run('hg up --clean {}'.format(rev))
def _update_git(self, rev):
# Default to master
rev = rev or 'master'
# Assume origin is called 'origin'.
remote = 'origin'
# Get all refs first
self.run('git fetch --tags')
# Checkout the rev we want
self.run('git checkout {}'.format(rev))
# reset working state to the origin (only relevant to
# branches, so suppress errors).
with contextlib2.suppress(utils.CommandException):
self.run('git reset --hard {remote}/{rev}'.format(**locals()))
@property
def basename(self):
return basename(self.url)
@property
def version(self):
method = getattr(self, '_version_' + self.vcs_type)
return method()
def _version_hg(self):
r = self.run('hg identify -i %s' % self.folder)
return r.output.rstrip('+\n')
def _version_git(self):
with chdir(self.folder):
r = self.run('git rev-parse HEAD')
return r.output.rstrip()
def __repr__(self):
values = {'classname': self.__class__.__name__,
'folder': os.path.basename(self.folder)}
return "%(classname)s <%(folder)s>" % values
|
RetailMeNotSandbox/acky | acky/ec2.py | ElasticIPCollection.destroy | python | def destroy(self, eip_or_aid, disassociate=False):
if "." in eip_or_aid: # If an IP is given (Classic)
# NOTE: EIPs are automatically disassociated for Classic instances.
return "true" == self.call("ReleaseAddress",
response_data_key="return",
PublicIp=eip_or_aid)
else: # If an AID is given (VPC)
if disassociate:
self.disassociate(eip_or_aid)
return "true" == self.call("ReleaseAddress",
response_data_key="return",
AllocationId=eip_or_aid) | Release an EIP. If the EIP was allocated for a VPC instance, an
AllocationId(aid) must be provided instead of a PublicIp. Setting
disassociate to True will attempt to disassociate the IP before
releasing it (required for associated nondefault VPC instances). | train | https://github.com/RetailMeNotSandbox/acky/blob/fcd4d092c42892ede7c924cafc41e9cf4be3fb9f/acky/ec2.py#L151-L167 | [
"def call(self, operation, response_data_key=None, *args, **kwargs):\n log = logging.getLogger(__name__)\n op = self._service.get_operation(operation)\n log.debug(\"Calling {} action '{}'\".format(self._service, operation))\n resp, data = op.call(self._endpoint, *args, **kwargs)\n if not resp.ok:\n ... | class ElasticIPCollection(AwsCollection, EC2ApiClient):
"""Interface to get, create, destroy, associate, and disassociate EIPs for
classic EC2 domains and VPCs. (Amazon EC2 API Version 2014-06-15)
"""
def get(self, filters=None):
"""List EIPs and associated information."""
params = {}
if filters:
params["filters"] = make_filters(filters)
return self.call("DescribeAddresses",
response_data_key="Addresses",
**params)
def create(self, vpc=False):
"""Set vpc=True to allocate an EIP for a EC2-Classic instance.
Set vpc=False to allocate an EIP for a VPC instance.
"""
return self.call("AllocateAddress",
Domain="vpc" if vpc else "standard")
def associate(self, eip_or_aid,
instance_id='', network_interface_id='', private_ip=''):
"""Associate an EIP with a given instance or network interface. If
the EIP was allocated for a VPC instance, an AllocationId(aid) must
be provided instead of a PublicIp.
"""
if "." in eip_or_aid: # If an IP is given (Classic)
return self.call("AssociateAddress",
PublicIp=eip_or_aid,
InstanceId=instance_id,
NetworkInterfaceId=network_interface_id,
PrivateIpAddress=private_ip)
else: # If an AID is given (VPC)
return self.call("AssociateAddress",
AllocationId=eip_or_aid,
InstanceId=instance_id,
NetworkInterfaceId=network_interface_id,
PrivateIpAddress=private_ip)
def disassociate(self, eip_or_aid):
"""Disassociates an EIP. If the EIP was allocated for a VPC instance,
an AllocationId(aid) must be provided instead of a PublicIp.
"""
if "." in eip_or_aid: # If an IP is given (Classic)
return "true" == self.call("DisassociateAddress",
response_data_key="return",
PublicIp=eip_or_aid)
else: # If an AID is given (VPC)
return "true" == self.call("DisassociateAddress",
response_data_key="return",
AllocationId=eip_or_aid)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.