repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
mozilla-services/python-dockerflow
|
src/dockerflow/flask/app.py
|
Dockerflow._got_request_exception
|
python
|
def _got_request_exception(self, sender, exception, **extra):
extra = self.summary_extra()
extra['errno'] = 500
self.summary_logger.error(str(exception), extra=extra)
g._has_exception = True
|
The signal handler for the got_request_exception signal.
|
train
|
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L194-L201
| null |
class Dockerflow(object):
"""
The Dockerflow Flask extension. Set it up like this:
.. code-block:: python
:caption: ``myproject.py``
from flask import Flask
from dockerflow.flask import Dockerflow
app = Flask(__name__)
dockerflow = Dockerflow(app)
Or if you use the Flask application factory pattern, in
an own module set up Dockerflow first:
.. code-block:: python
:caption: ``myproject/deployment.py``
from dockerflow.flask import Dockerflow
dockerflow = Dockerflow()
and then import and initialize it with the Flask application
object when you create the application:
.. code-block:: python
:caption: ``myproject/app.py``
def create_app(config_filename):
app = Flask(__name__)
app.config.from_pyfile(config_filename)
from myproject.deployment import dockerflow
dockerflow.init_app(app)
from myproject.views.admin import admin
from myproject.views.frontend import frontend
app.register_blueprint(admin)
app.register_blueprint(frontend)
return app
See the parameters for a more detailed list of optional features when
initializing the extension.
:param app: The Flask app that this Dockerflow extension should be
initialized with.
:type root: ~flask.Flask or None
:param db: A Flask-SQLAlchemy extension instance to be used by the
built-in Dockerflow check for the database connection.
:param redis: A Redis connection to be used by the built-in Dockerflow
check for the Redis connection.
:param migrate: A Flask-Migrate extension instance to be used by the
built-in Dockerflow check for Alembic migrations.
:param silenced_checks: Dockerflow check IDs to ignore when running
through the list of configured checks.
:type silenced_checks: list
:param version_path: The filesystem path where the ``version.json`` can
be found. Defaults to the parent directory of the
Flask app's root path.
"""
def __init__(self, app=None, db=None, redis=None, migrate=None,
silenced_checks=None, version_path=None, *args, **kwargs):
# The Flask blueprint to add the Dockerflow signal callbacks and views
self._blueprint = Blueprint('dockerflow', 'dockerflow.flask.app')
# The Dockerflow specific logger to be used by internals of this
# extension.
self.logger = logging.getLogger('dockerflow.flask')
self.logger.addHandler(logging.NullHandler())
self.logger.setLevel(logging.INFO)
# The request summary logger to be used by this extension
# without pre-configuration. See docs for how to set it up.
self.summary_logger = logging.getLogger('request.summary')
# An ordered dictionary for storing custom Dockerflow checks in.
self.checks = OrderedDict()
# A list of IDs of custom Dockerflow checks to ignore in case they
# show up.
self.silenced_checks = silenced_checks or []
# The path where to find the version JSON file. Defaults to the
# parent directory of the app root path.
self.version_path = version_path
self._version_callback = version.get_version
# Initialize the app if given.
if app:
self.init_app(app)
# Initialize the built-in checks.
if db:
self.init_check(checks.check_database_connected, db)
if redis:
self.init_check(checks.check_redis_connected, redis)
if migrate:
self.init_check(checks.check_migrations_applied, migrate)
def init_check(self, check, obj):
"""
Adds a given check callback with the provided object to the list
of checks. Useful for built-ins but also advanced custom checks.
"""
self.logger.info('Adding extension check %s' % check.__name__)
check = functools.wraps(check)(functools.partial(check, obj))
self.check(func=check)
def init_app(self, app):
"""
Initializes the extension with the given app, registers the
built-in views with an own blueprint and hooks up our signal
callbacks.
"""
# If no version path was provided in the init of the Dockerflow
# class we'll use the parent directory of the app root path.
if self.version_path is None:
self.version_path = os.path.dirname(app.root_path)
for view in (
('/__version__', 'version', self._version_view),
('/__heartbeat__', 'heartbeat', self._heartbeat_view),
('/__lbheartbeat__', 'lbheartbeat', self._lbheartbeat_view),
):
self._blueprint.add_url_rule(*view)
self._blueprint.before_app_request(self._before_request)
self._blueprint.after_app_request(self._after_request)
self._blueprint.app_errorhandler(HeartbeatFailure)(self._heartbeat_exception_handler)
app.register_blueprint(self._blueprint)
got_request_exception.connect(self._got_request_exception, sender=app)
if not hasattr(app, 'extensions'): # pragma: nocover
app.extensions = {}
app.extensions['dockerflow'] = self
def _heartbeat_exception_handler(self, error):
"""
An exception handler to act as a middleman to return
a heartbeat view response with a 500 error code.
"""
return error.get_response()
def _before_request(self):
"""
The before_request callback.
"""
g._request_id = str(uuid.uuid4())
g._start_timestamp = time.time()
def _after_request(self, response):
"""
The signal handler for the request_finished signal.
"""
if not getattr(g, '_has_exception', False):
extra = self.summary_extra()
self.summary_logger.info('', extra=extra)
return response
def user_id(self):
"""
Return the ID of the current request's user
"""
# This needs flask-login to be installed
if not has_flask_login:
return
# and the actual login manager installed
if not hasattr(current_app, 'login_manager'):
return
# fail if no current_user was attached to the request context
try:
is_authenticated = current_user.is_authenticated
except AttributeError:
return
# because is_authenticated could be a callable, call it
if callable(is_authenticated):
is_authenticated = is_authenticated()
# and fail if the user isn't authenticated
if not is_authenticated:
return
# finally return the user id
return current_user.get_id()
def summary_extra(self):
"""
Build the extra data for the summary logger.
"""
out = {
'errno': 0,
'agent': request.headers.get('User-Agent', ''),
'lang': request.headers.get('Accept-Language', ''),
'method': request.method,
'path': request.path,
}
# set the uid value to the current user ID
user_id = self.user_id()
if user_id is None:
user_id = ''
out['uid'] = user_id
# the rid value to the current request ID
request_id = g.get('_request_id', None)
if request_id is not None:
out['rid'] = request_id
# and the t value to the time it took to render
start_timestamp = g.get('_start_timestamp', None)
if start_timestamp is not None:
# Duration of request, in milliseconds.
out['t'] = int(1000 * (time.time() - start_timestamp))
return out
def _version_view(self):
"""
View that returns the contents of version.json or a 404.
"""
version_json = self._version_callback(self.version_path)
if version_json is None:
return 'version.json not found', 404
else:
return jsonify(version_json)
def _lbheartbeat_view(self):
"""
Lets the load balancer know the application is running and available.
Must return 200 (not 204) for ELB
http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html
"""
return '', 200
def _heartbeat_check_detail(self, check):
errors = list(filter(lambda e: e.id not in self.silenced_checks, check()))
level = max([0] + [e.level for e in errors])
return {
'status': checks.level_to_text(level),
'level': level,
'messages': {e.id: e.msg for e in errors},
}
def _heartbeat_view(self):
"""
Runs all the registered checks and returns a JSON response with either
a status code of 200 or 500 depending on the results of the checks.
Any check that returns a warning or worse (error, critical) will
return a 500 response.
"""
details = {}
statuses = {}
level = 0
for name, check in self.checks.items():
detail = self._heartbeat_check_detail(check)
statuses[name] = detail['status']
level = max(level, detail['level'])
if detail['level'] > 0:
details[name] = detail
payload = {
'status': checks.level_to_text(level),
'checks': statuses,
'details': details,
}
def render(status_code):
return make_response(jsonify(payload), status_code)
if level < checks.WARNING:
status_code = 200
heartbeat_passed.send(self, level=level)
return render(status_code)
else:
status_code = 500
heartbeat_failed.send(self, level=level)
raise HeartbeatFailure(response=render(status_code))
def version_callback(self, func):
"""
A decorator to optionally register a new Dockerflow version callback
and use that instead of the default of
:func:`dockerflow.version.get_version`.
The callback will be passed the value of the
``version_path`` parameter to the Dockerflow extension object,
which defaults to the parent directory of the Flask app's root path.
The callback should return a dictionary with the
version information as defined in the Dockerflow spec,
or None if no version information could be loaded.
E.g.::
app = Flask(__name__)
dockerflow = Dockerflow(app)
@dockerflow.version_callback
def my_version(root):
return json.loads(os.path.join(root, 'acme_version.json'))
"""
self._version_callback = func
def check(self, func=None, name=None):
"""
A decorator to register a new Dockerflow check to be run
when the /__heartbeat__ endpoint is called., e.g.::
from dockerflow.flask import checks
@dockerflow.check
def storage_reachable():
try:
acme.storage.ping()
except SlowConnectionException as exc:
return [checks.Warning(exc.msg, id='acme.health.0002')]
except StorageException as exc:
return [checks.Error(exc.msg, id='acme.health.0001')]
or using a custom name::
@dockerflow.check(name='acme-storage-check)
def storage_reachable():
# ...
"""
if func is None:
return functools.partial(self.check, name=name)
if name is None:
name = func.__name__
self.logger.info('Registered Dockerflow check %s', name)
@functools.wraps(func)
def decorated_function(*args, **kwargs):
self.logger.info('Called Dockerflow check %s', name)
return func(*args, **kwargs)
self.checks[name] = decorated_function
return decorated_function
|
mozilla-services/python-dockerflow
|
src/dockerflow/flask/app.py
|
Dockerflow.user_id
|
python
|
def user_id(self):
# This needs flask-login to be installed
if not has_flask_login:
return
# and the actual login manager installed
if not hasattr(current_app, 'login_manager'):
return
# fail if no current_user was attached to the request context
try:
is_authenticated = current_user.is_authenticated
except AttributeError:
return
# because is_authenticated could be a callable, call it
if callable(is_authenticated):
is_authenticated = is_authenticated()
# and fail if the user isn't authenticated
if not is_authenticated:
return
# finally return the user id
return current_user.get_id()
|
Return the ID of the current request's user
|
train
|
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L203-L230
| null |
class Dockerflow(object):
"""
The Dockerflow Flask extension. Set it up like this:
.. code-block:: python
:caption: ``myproject.py``
from flask import Flask
from dockerflow.flask import Dockerflow
app = Flask(__name__)
dockerflow = Dockerflow(app)
Or if you use the Flask application factory pattern, in
an own module set up Dockerflow first:
.. code-block:: python
:caption: ``myproject/deployment.py``
from dockerflow.flask import Dockerflow
dockerflow = Dockerflow()
and then import and initialize it with the Flask application
object when you create the application:
.. code-block:: python
:caption: ``myproject/app.py``
def create_app(config_filename):
app = Flask(__name__)
app.config.from_pyfile(config_filename)
from myproject.deployment import dockerflow
dockerflow.init_app(app)
from myproject.views.admin import admin
from myproject.views.frontend import frontend
app.register_blueprint(admin)
app.register_blueprint(frontend)
return app
See the parameters for a more detailed list of optional features when
initializing the extension.
:param app: The Flask app that this Dockerflow extension should be
initialized with.
:type root: ~flask.Flask or None
:param db: A Flask-SQLAlchemy extension instance to be used by the
built-in Dockerflow check for the database connection.
:param redis: A Redis connection to be used by the built-in Dockerflow
check for the Redis connection.
:param migrate: A Flask-Migrate extension instance to be used by the
built-in Dockerflow check for Alembic migrations.
:param silenced_checks: Dockerflow check IDs to ignore when running
through the list of configured checks.
:type silenced_checks: list
:param version_path: The filesystem path where the ``version.json`` can
be found. Defaults to the parent directory of the
Flask app's root path.
"""
def __init__(self, app=None, db=None, redis=None, migrate=None,
silenced_checks=None, version_path=None, *args, **kwargs):
# The Flask blueprint to add the Dockerflow signal callbacks and views
self._blueprint = Blueprint('dockerflow', 'dockerflow.flask.app')
# The Dockerflow specific logger to be used by internals of this
# extension.
self.logger = logging.getLogger('dockerflow.flask')
self.logger.addHandler(logging.NullHandler())
self.logger.setLevel(logging.INFO)
# The request summary logger to be used by this extension
# without pre-configuration. See docs for how to set it up.
self.summary_logger = logging.getLogger('request.summary')
# An ordered dictionary for storing custom Dockerflow checks in.
self.checks = OrderedDict()
# A list of IDs of custom Dockerflow checks to ignore in case they
# show up.
self.silenced_checks = silenced_checks or []
# The path where to find the version JSON file. Defaults to the
# parent directory of the app root path.
self.version_path = version_path
self._version_callback = version.get_version
# Initialize the app if given.
if app:
self.init_app(app)
# Initialize the built-in checks.
if db:
self.init_check(checks.check_database_connected, db)
if redis:
self.init_check(checks.check_redis_connected, redis)
if migrate:
self.init_check(checks.check_migrations_applied, migrate)
def init_check(self, check, obj):
"""
Adds a given check callback with the provided object to the list
of checks. Useful for built-ins but also advanced custom checks.
"""
self.logger.info('Adding extension check %s' % check.__name__)
check = functools.wraps(check)(functools.partial(check, obj))
self.check(func=check)
def init_app(self, app):
"""
Initializes the extension with the given app, registers the
built-in views with an own blueprint and hooks up our signal
callbacks.
"""
# If no version path was provided in the init of the Dockerflow
# class we'll use the parent directory of the app root path.
if self.version_path is None:
self.version_path = os.path.dirname(app.root_path)
for view in (
('/__version__', 'version', self._version_view),
('/__heartbeat__', 'heartbeat', self._heartbeat_view),
('/__lbheartbeat__', 'lbheartbeat', self._lbheartbeat_view),
):
self._blueprint.add_url_rule(*view)
self._blueprint.before_app_request(self._before_request)
self._blueprint.after_app_request(self._after_request)
self._blueprint.app_errorhandler(HeartbeatFailure)(self._heartbeat_exception_handler)
app.register_blueprint(self._blueprint)
got_request_exception.connect(self._got_request_exception, sender=app)
if not hasattr(app, 'extensions'): # pragma: nocover
app.extensions = {}
app.extensions['dockerflow'] = self
def _heartbeat_exception_handler(self, error):
"""
An exception handler to act as a middleman to return
a heartbeat view response with a 500 error code.
"""
return error.get_response()
def _before_request(self):
"""
The before_request callback.
"""
g._request_id = str(uuid.uuid4())
g._start_timestamp = time.time()
def _after_request(self, response):
"""
The signal handler for the request_finished signal.
"""
if not getattr(g, '_has_exception', False):
extra = self.summary_extra()
self.summary_logger.info('', extra=extra)
return response
def _got_request_exception(self, sender, exception, **extra):
"""
The signal handler for the got_request_exception signal.
"""
extra = self.summary_extra()
extra['errno'] = 500
self.summary_logger.error(str(exception), extra=extra)
g._has_exception = True
def summary_extra(self):
"""
Build the extra data for the summary logger.
"""
out = {
'errno': 0,
'agent': request.headers.get('User-Agent', ''),
'lang': request.headers.get('Accept-Language', ''),
'method': request.method,
'path': request.path,
}
# set the uid value to the current user ID
user_id = self.user_id()
if user_id is None:
user_id = ''
out['uid'] = user_id
# the rid value to the current request ID
request_id = g.get('_request_id', None)
if request_id is not None:
out['rid'] = request_id
# and the t value to the time it took to render
start_timestamp = g.get('_start_timestamp', None)
if start_timestamp is not None:
# Duration of request, in milliseconds.
out['t'] = int(1000 * (time.time() - start_timestamp))
return out
def _version_view(self):
"""
View that returns the contents of version.json or a 404.
"""
version_json = self._version_callback(self.version_path)
if version_json is None:
return 'version.json not found', 404
else:
return jsonify(version_json)
def _lbheartbeat_view(self):
"""
Lets the load balancer know the application is running and available.
Must return 200 (not 204) for ELB
http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html
"""
return '', 200
def _heartbeat_check_detail(self, check):
errors = list(filter(lambda e: e.id not in self.silenced_checks, check()))
level = max([0] + [e.level for e in errors])
return {
'status': checks.level_to_text(level),
'level': level,
'messages': {e.id: e.msg for e in errors},
}
def _heartbeat_view(self):
"""
Runs all the registered checks and returns a JSON response with either
a status code of 200 or 500 depending on the results of the checks.
Any check that returns a warning or worse (error, critical) will
return a 500 response.
"""
details = {}
statuses = {}
level = 0
for name, check in self.checks.items():
detail = self._heartbeat_check_detail(check)
statuses[name] = detail['status']
level = max(level, detail['level'])
if detail['level'] > 0:
details[name] = detail
payload = {
'status': checks.level_to_text(level),
'checks': statuses,
'details': details,
}
def render(status_code):
return make_response(jsonify(payload), status_code)
if level < checks.WARNING:
status_code = 200
heartbeat_passed.send(self, level=level)
return render(status_code)
else:
status_code = 500
heartbeat_failed.send(self, level=level)
raise HeartbeatFailure(response=render(status_code))
def version_callback(self, func):
"""
A decorator to optionally register a new Dockerflow version callback
and use that instead of the default of
:func:`dockerflow.version.get_version`.
The callback will be passed the value of the
``version_path`` parameter to the Dockerflow extension object,
which defaults to the parent directory of the Flask app's root path.
The callback should return a dictionary with the
version information as defined in the Dockerflow spec,
or None if no version information could be loaded.
E.g.::
app = Flask(__name__)
dockerflow = Dockerflow(app)
@dockerflow.version_callback
def my_version(root):
return json.loads(os.path.join(root, 'acme_version.json'))
"""
self._version_callback = func
def check(self, func=None, name=None):
"""
A decorator to register a new Dockerflow check to be run
when the /__heartbeat__ endpoint is called., e.g.::
from dockerflow.flask import checks
@dockerflow.check
def storage_reachable():
try:
acme.storage.ping()
except SlowConnectionException as exc:
return [checks.Warning(exc.msg, id='acme.health.0002')]
except StorageException as exc:
return [checks.Error(exc.msg, id='acme.health.0001')]
or using a custom name::
@dockerflow.check(name='acme-storage-check)
def storage_reachable():
# ...
"""
if func is None:
return functools.partial(self.check, name=name)
if name is None:
name = func.__name__
self.logger.info('Registered Dockerflow check %s', name)
@functools.wraps(func)
def decorated_function(*args, **kwargs):
self.logger.info('Called Dockerflow check %s', name)
return func(*args, **kwargs)
self.checks[name] = decorated_function
return decorated_function
|
mozilla-services/python-dockerflow
|
src/dockerflow/flask/app.py
|
Dockerflow.summary_extra
|
python
|
def summary_extra(self):
out = {
'errno': 0,
'agent': request.headers.get('User-Agent', ''),
'lang': request.headers.get('Accept-Language', ''),
'method': request.method,
'path': request.path,
}
# set the uid value to the current user ID
user_id = self.user_id()
if user_id is None:
user_id = ''
out['uid'] = user_id
# the rid value to the current request ID
request_id = g.get('_request_id', None)
if request_id is not None:
out['rid'] = request_id
# and the t value to the time it took to render
start_timestamp = g.get('_start_timestamp', None)
if start_timestamp is not None:
# Duration of request, in milliseconds.
out['t'] = int(1000 * (time.time() - start_timestamp))
return out
|
Build the extra data for the summary logger.
|
train
|
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L232-L261
|
[
"def user_id(self):\n \"\"\"\n Return the ID of the current request's user\n \"\"\"\n # This needs flask-login to be installed\n if not has_flask_login:\n return\n\n # and the actual login manager installed\n if not hasattr(current_app, 'login_manager'):\n return\n\n # fail if no current_user was attached to the request context\n try:\n is_authenticated = current_user.is_authenticated\n except AttributeError:\n return\n\n # because is_authenticated could be a callable, call it\n if callable(is_authenticated):\n is_authenticated = is_authenticated()\n\n # and fail if the user isn't authenticated\n if not is_authenticated:\n return\n\n # finally return the user id\n return current_user.get_id()\n"
] |
class Dockerflow(object):
"""
The Dockerflow Flask extension. Set it up like this:
.. code-block:: python
:caption: ``myproject.py``
from flask import Flask
from dockerflow.flask import Dockerflow
app = Flask(__name__)
dockerflow = Dockerflow(app)
Or if you use the Flask application factory pattern, in
an own module set up Dockerflow first:
.. code-block:: python
:caption: ``myproject/deployment.py``
from dockerflow.flask import Dockerflow
dockerflow = Dockerflow()
and then import and initialize it with the Flask application
object when you create the application:
.. code-block:: python
:caption: ``myproject/app.py``
def create_app(config_filename):
app = Flask(__name__)
app.config.from_pyfile(config_filename)
from myproject.deployment import dockerflow
dockerflow.init_app(app)
from myproject.views.admin import admin
from myproject.views.frontend import frontend
app.register_blueprint(admin)
app.register_blueprint(frontend)
return app
See the parameters for a more detailed list of optional features when
initializing the extension.
:param app: The Flask app that this Dockerflow extension should be
initialized with.
:type root: ~flask.Flask or None
:param db: A Flask-SQLAlchemy extension instance to be used by the
built-in Dockerflow check for the database connection.
:param redis: A Redis connection to be used by the built-in Dockerflow
check for the Redis connection.
:param migrate: A Flask-Migrate extension instance to be used by the
built-in Dockerflow check for Alembic migrations.
:param silenced_checks: Dockerflow check IDs to ignore when running
through the list of configured checks.
:type silenced_checks: list
:param version_path: The filesystem path where the ``version.json`` can
be found. Defaults to the parent directory of the
Flask app's root path.
"""
def __init__(self, app=None, db=None, redis=None, migrate=None,
silenced_checks=None, version_path=None, *args, **kwargs):
# The Flask blueprint to add the Dockerflow signal callbacks and views
self._blueprint = Blueprint('dockerflow', 'dockerflow.flask.app')
# The Dockerflow specific logger to be used by internals of this
# extension.
self.logger = logging.getLogger('dockerflow.flask')
self.logger.addHandler(logging.NullHandler())
self.logger.setLevel(logging.INFO)
# The request summary logger to be used by this extension
# without pre-configuration. See docs for how to set it up.
self.summary_logger = logging.getLogger('request.summary')
# An ordered dictionary for storing custom Dockerflow checks in.
self.checks = OrderedDict()
# A list of IDs of custom Dockerflow checks to ignore in case they
# show up.
self.silenced_checks = silenced_checks or []
# The path where to find the version JSON file. Defaults to the
# parent directory of the app root path.
self.version_path = version_path
self._version_callback = version.get_version
# Initialize the app if given.
if app:
self.init_app(app)
# Initialize the built-in checks.
if db:
self.init_check(checks.check_database_connected, db)
if redis:
self.init_check(checks.check_redis_connected, redis)
if migrate:
self.init_check(checks.check_migrations_applied, migrate)
def init_check(self, check, obj):
"""
Adds a given check callback with the provided object to the list
of checks. Useful for built-ins but also advanced custom checks.
"""
self.logger.info('Adding extension check %s' % check.__name__)
check = functools.wraps(check)(functools.partial(check, obj))
self.check(func=check)
def init_app(self, app):
"""
Initializes the extension with the given app, registers the
built-in views with an own blueprint and hooks up our signal
callbacks.
"""
# If no version path was provided in the init of the Dockerflow
# class we'll use the parent directory of the app root path.
if self.version_path is None:
self.version_path = os.path.dirname(app.root_path)
for view in (
('/__version__', 'version', self._version_view),
('/__heartbeat__', 'heartbeat', self._heartbeat_view),
('/__lbheartbeat__', 'lbheartbeat', self._lbheartbeat_view),
):
self._blueprint.add_url_rule(*view)
self._blueprint.before_app_request(self._before_request)
self._blueprint.after_app_request(self._after_request)
self._blueprint.app_errorhandler(HeartbeatFailure)(self._heartbeat_exception_handler)
app.register_blueprint(self._blueprint)
got_request_exception.connect(self._got_request_exception, sender=app)
if not hasattr(app, 'extensions'): # pragma: nocover
app.extensions = {}
app.extensions['dockerflow'] = self
def _heartbeat_exception_handler(self, error):
"""
An exception handler to act as a middleman to return
a heartbeat view response with a 500 error code.
"""
return error.get_response()
def _before_request(self):
"""
The before_request callback.
"""
g._request_id = str(uuid.uuid4())
g._start_timestamp = time.time()
def _after_request(self, response):
"""
The signal handler for the request_finished signal.
"""
if not getattr(g, '_has_exception', False):
extra = self.summary_extra()
self.summary_logger.info('', extra=extra)
return response
def _got_request_exception(self, sender, exception, **extra):
"""
The signal handler for the got_request_exception signal.
"""
extra = self.summary_extra()
extra['errno'] = 500
self.summary_logger.error(str(exception), extra=extra)
g._has_exception = True
def user_id(self):
"""
Return the ID of the current request's user
"""
# This needs flask-login to be installed
if not has_flask_login:
return
# and the actual login manager installed
if not hasattr(current_app, 'login_manager'):
return
# fail if no current_user was attached to the request context
try:
is_authenticated = current_user.is_authenticated
except AttributeError:
return
# because is_authenticated could be a callable, call it
if callable(is_authenticated):
is_authenticated = is_authenticated()
# and fail if the user isn't authenticated
if not is_authenticated:
return
# finally return the user id
return current_user.get_id()
def _version_view(self):
"""
View that returns the contents of version.json or a 404.
"""
version_json = self._version_callback(self.version_path)
if version_json is None:
return 'version.json not found', 404
else:
return jsonify(version_json)
def _lbheartbeat_view(self):
"""
Lets the load balancer know the application is running and available.
Must return 200 (not 204) for ELB
http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html
"""
return '', 200
def _heartbeat_check_detail(self, check):
errors = list(filter(lambda e: e.id not in self.silenced_checks, check()))
level = max([0] + [e.level for e in errors])
return {
'status': checks.level_to_text(level),
'level': level,
'messages': {e.id: e.msg for e in errors},
}
def _heartbeat_view(self):
"""
Runs all the registered checks and returns a JSON response with either
a status code of 200 or 500 depending on the results of the checks.
Any check that returns a warning or worse (error, critical) will
return a 500 response.
"""
details = {}
statuses = {}
level = 0
for name, check in self.checks.items():
detail = self._heartbeat_check_detail(check)
statuses[name] = detail['status']
level = max(level, detail['level'])
if detail['level'] > 0:
details[name] = detail
payload = {
'status': checks.level_to_text(level),
'checks': statuses,
'details': details,
}
def render(status_code):
return make_response(jsonify(payload), status_code)
if level < checks.WARNING:
status_code = 200
heartbeat_passed.send(self, level=level)
return render(status_code)
else:
status_code = 500
heartbeat_failed.send(self, level=level)
raise HeartbeatFailure(response=render(status_code))
def version_callback(self, func):
"""
A decorator to optionally register a new Dockerflow version callback
and use that instead of the default of
:func:`dockerflow.version.get_version`.
The callback will be passed the value of the
``version_path`` parameter to the Dockerflow extension object,
which defaults to the parent directory of the Flask app's root path.
The callback should return a dictionary with the
version information as defined in the Dockerflow spec,
or None if no version information could be loaded.
E.g.::
app = Flask(__name__)
dockerflow = Dockerflow(app)
@dockerflow.version_callback
def my_version(root):
return json.loads(os.path.join(root, 'acme_version.json'))
"""
self._version_callback = func
def check(self, func=None, name=None):
"""
A decorator to register a new Dockerflow check to be run
when the /__heartbeat__ endpoint is called., e.g.::
from dockerflow.flask import checks
@dockerflow.check
def storage_reachable():
try:
acme.storage.ping()
except SlowConnectionException as exc:
return [checks.Warning(exc.msg, id='acme.health.0002')]
except StorageException as exc:
return [checks.Error(exc.msg, id='acme.health.0001')]
or using a custom name::
@dockerflow.check(name='acme-storage-check)
def storage_reachable():
# ...
"""
if func is None:
return functools.partial(self.check, name=name)
if name is None:
name = func.__name__
self.logger.info('Registered Dockerflow check %s', name)
@functools.wraps(func)
def decorated_function(*args, **kwargs):
self.logger.info('Called Dockerflow check %s', name)
return func(*args, **kwargs)
self.checks[name] = decorated_function
return decorated_function
|
mozilla-services/python-dockerflow
|
src/dockerflow/flask/app.py
|
Dockerflow._version_view
|
python
|
def _version_view(self):
version_json = self._version_callback(self.version_path)
if version_json is None:
return 'version.json not found', 404
else:
return jsonify(version_json)
|
View that returns the contents of version.json or a 404.
|
train
|
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L263-L271
| null |
class Dockerflow(object):
"""
The Dockerflow Flask extension. Set it up like this:
.. code-block:: python
:caption: ``myproject.py``
from flask import Flask
from dockerflow.flask import Dockerflow
app = Flask(__name__)
dockerflow = Dockerflow(app)
Or if you use the Flask application factory pattern, in
an own module set up Dockerflow first:
.. code-block:: python
:caption: ``myproject/deployment.py``
from dockerflow.flask import Dockerflow
dockerflow = Dockerflow()
and then import and initialize it with the Flask application
object when you create the application:
.. code-block:: python
:caption: ``myproject/app.py``
def create_app(config_filename):
app = Flask(__name__)
app.config.from_pyfile(config_filename)
from myproject.deployment import dockerflow
dockerflow.init_app(app)
from myproject.views.admin import admin
from myproject.views.frontend import frontend
app.register_blueprint(admin)
app.register_blueprint(frontend)
return app
See the parameters for a more detailed list of optional features when
initializing the extension.
:param app: The Flask app that this Dockerflow extension should be
initialized with.
:type root: ~flask.Flask or None
:param db: A Flask-SQLAlchemy extension instance to be used by the
built-in Dockerflow check for the database connection.
:param redis: A Redis connection to be used by the built-in Dockerflow
check for the Redis connection.
:param migrate: A Flask-Migrate extension instance to be used by the
built-in Dockerflow check for Alembic migrations.
:param silenced_checks: Dockerflow check IDs to ignore when running
through the list of configured checks.
:type silenced_checks: list
:param version_path: The filesystem path where the ``version.json`` can
be found. Defaults to the parent directory of the
Flask app's root path.
"""
def __init__(self, app=None, db=None, redis=None, migrate=None,
silenced_checks=None, version_path=None, *args, **kwargs):
# The Flask blueprint to add the Dockerflow signal callbacks and views
self._blueprint = Blueprint('dockerflow', 'dockerflow.flask.app')
# The Dockerflow specific logger to be used by internals of this
# extension.
self.logger = logging.getLogger('dockerflow.flask')
self.logger.addHandler(logging.NullHandler())
self.logger.setLevel(logging.INFO)
# The request summary logger to be used by this extension
# without pre-configuration. See docs for how to set it up.
self.summary_logger = logging.getLogger('request.summary')
# An ordered dictionary for storing custom Dockerflow checks in.
self.checks = OrderedDict()
# A list of IDs of custom Dockerflow checks to ignore in case they
# show up.
self.silenced_checks = silenced_checks or []
# The path where to find the version JSON file. Defaults to the
# parent directory of the app root path.
self.version_path = version_path
self._version_callback = version.get_version
# Initialize the app if given.
if app:
self.init_app(app)
# Initialize the built-in checks.
if db:
self.init_check(checks.check_database_connected, db)
if redis:
self.init_check(checks.check_redis_connected, redis)
if migrate:
self.init_check(checks.check_migrations_applied, migrate)
def init_check(self, check, obj):
"""
Adds a given check callback with the provided object to the list
of checks. Useful for built-ins but also advanced custom checks.
"""
self.logger.info('Adding extension check %s' % check.__name__)
check = functools.wraps(check)(functools.partial(check, obj))
self.check(func=check)
def init_app(self, app):
"""
Initializes the extension with the given app, registers the
built-in views with an own blueprint and hooks up our signal
callbacks.
"""
# If no version path was provided in the init of the Dockerflow
# class we'll use the parent directory of the app root path.
if self.version_path is None:
self.version_path = os.path.dirname(app.root_path)
for view in (
('/__version__', 'version', self._version_view),
('/__heartbeat__', 'heartbeat', self._heartbeat_view),
('/__lbheartbeat__', 'lbheartbeat', self._lbheartbeat_view),
):
self._blueprint.add_url_rule(*view)
self._blueprint.before_app_request(self._before_request)
self._blueprint.after_app_request(self._after_request)
self._blueprint.app_errorhandler(HeartbeatFailure)(self._heartbeat_exception_handler)
app.register_blueprint(self._blueprint)
got_request_exception.connect(self._got_request_exception, sender=app)
if not hasattr(app, 'extensions'): # pragma: nocover
app.extensions = {}
app.extensions['dockerflow'] = self
def _heartbeat_exception_handler(self, error):
"""
An exception handler to act as a middleman to return
a heartbeat view response with a 500 error code.
"""
return error.get_response()
def _before_request(self):
"""
The before_request callback.
"""
g._request_id = str(uuid.uuid4())
g._start_timestamp = time.time()
def _after_request(self, response):
"""
The signal handler for the request_finished signal.
"""
if not getattr(g, '_has_exception', False):
extra = self.summary_extra()
self.summary_logger.info('', extra=extra)
return response
def _got_request_exception(self, sender, exception, **extra):
"""
The signal handler for the got_request_exception signal.
"""
extra = self.summary_extra()
extra['errno'] = 500
self.summary_logger.error(str(exception), extra=extra)
g._has_exception = True
def user_id(self):
"""
Return the ID of the current request's user
"""
# This needs flask-login to be installed
if not has_flask_login:
return
# and the actual login manager installed
if not hasattr(current_app, 'login_manager'):
return
# fail if no current_user was attached to the request context
try:
is_authenticated = current_user.is_authenticated
except AttributeError:
return
# because is_authenticated could be a callable, call it
if callable(is_authenticated):
is_authenticated = is_authenticated()
# and fail if the user isn't authenticated
if not is_authenticated:
return
# finally return the user id
return current_user.get_id()
def summary_extra(self):
"""
Build the extra data for the summary logger.
"""
out = {
'errno': 0,
'agent': request.headers.get('User-Agent', ''),
'lang': request.headers.get('Accept-Language', ''),
'method': request.method,
'path': request.path,
}
# set the uid value to the current user ID
user_id = self.user_id()
if user_id is None:
user_id = ''
out['uid'] = user_id
# the rid value to the current request ID
request_id = g.get('_request_id', None)
if request_id is not None:
out['rid'] = request_id
# and the t value to the time it took to render
start_timestamp = g.get('_start_timestamp', None)
if start_timestamp is not None:
# Duration of request, in milliseconds.
out['t'] = int(1000 * (time.time() - start_timestamp))
return out
def _lbheartbeat_view(self):
"""
Lets the load balancer know the application is running and available.
Must return 200 (not 204) for ELB
http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html
"""
return '', 200
def _heartbeat_check_detail(self, check):
errors = list(filter(lambda e: e.id not in self.silenced_checks, check()))
level = max([0] + [e.level for e in errors])
return {
'status': checks.level_to_text(level),
'level': level,
'messages': {e.id: e.msg for e in errors},
}
def _heartbeat_view(self):
"""
Runs all the registered checks and returns a JSON response with either
a status code of 200 or 500 depending on the results of the checks.
Any check that returns a warning or worse (error, critical) will
return a 500 response.
"""
details = {}
statuses = {}
level = 0
for name, check in self.checks.items():
detail = self._heartbeat_check_detail(check)
statuses[name] = detail['status']
level = max(level, detail['level'])
if detail['level'] > 0:
details[name] = detail
payload = {
'status': checks.level_to_text(level),
'checks': statuses,
'details': details,
}
def render(status_code):
return make_response(jsonify(payload), status_code)
if level < checks.WARNING:
status_code = 200
heartbeat_passed.send(self, level=level)
return render(status_code)
else:
status_code = 500
heartbeat_failed.send(self, level=level)
raise HeartbeatFailure(response=render(status_code))
def version_callback(self, func):
"""
A decorator to optionally register a new Dockerflow version callback
and use that instead of the default of
:func:`dockerflow.version.get_version`.
The callback will be passed the value of the
``version_path`` parameter to the Dockerflow extension object,
which defaults to the parent directory of the Flask app's root path.
The callback should return a dictionary with the
version information as defined in the Dockerflow spec,
or None if no version information could be loaded.
E.g.::
app = Flask(__name__)
dockerflow = Dockerflow(app)
@dockerflow.version_callback
def my_version(root):
return json.loads(os.path.join(root, 'acme_version.json'))
"""
self._version_callback = func
def check(self, func=None, name=None):
"""
A decorator to register a new Dockerflow check to be run
when the /__heartbeat__ endpoint is called., e.g.::
from dockerflow.flask import checks
@dockerflow.check
def storage_reachable():
try:
acme.storage.ping()
except SlowConnectionException as exc:
return [checks.Warning(exc.msg, id='acme.health.0002')]
except StorageException as exc:
return [checks.Error(exc.msg, id='acme.health.0001')]
or using a custom name::
@dockerflow.check(name='acme-storage-check)
def storage_reachable():
# ...
"""
if func is None:
return functools.partial(self.check, name=name)
if name is None:
name = func.__name__
self.logger.info('Registered Dockerflow check %s', name)
@functools.wraps(func)
def decorated_function(*args, **kwargs):
self.logger.info('Called Dockerflow check %s', name)
return func(*args, **kwargs)
self.checks[name] = decorated_function
return decorated_function
|
mozilla-services/python-dockerflow
|
src/dockerflow/flask/app.py
|
Dockerflow._heartbeat_view
|
python
|
def _heartbeat_view(self):
details = {}
statuses = {}
level = 0
for name, check in self.checks.items():
detail = self._heartbeat_check_detail(check)
statuses[name] = detail['status']
level = max(level, detail['level'])
if detail['level'] > 0:
details[name] = detail
payload = {
'status': checks.level_to_text(level),
'checks': statuses,
'details': details,
}
def render(status_code):
return make_response(jsonify(payload), status_code)
if level < checks.WARNING:
status_code = 200
heartbeat_passed.send(self, level=level)
return render(status_code)
else:
status_code = 500
heartbeat_failed.send(self, level=level)
raise HeartbeatFailure(response=render(status_code))
|
Runs all the registered checks and returns a JSON response with either
a status code of 200 or 500 depending on the results of the checks.
Any check that returns a warning or worse (error, critical) will
return a 500 response.
|
train
|
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L291-L326
| null |
class Dockerflow(object):
"""
The Dockerflow Flask extension. Set it up like this:
.. code-block:: python
:caption: ``myproject.py``
from flask import Flask
from dockerflow.flask import Dockerflow
app = Flask(__name__)
dockerflow = Dockerflow(app)
Or if you use the Flask application factory pattern, in
an own module set up Dockerflow first:
.. code-block:: python
:caption: ``myproject/deployment.py``
from dockerflow.flask import Dockerflow
dockerflow = Dockerflow()
and then import and initialize it with the Flask application
object when you create the application:
.. code-block:: python
:caption: ``myproject/app.py``
def create_app(config_filename):
app = Flask(__name__)
app.config.from_pyfile(config_filename)
from myproject.deployment import dockerflow
dockerflow.init_app(app)
from myproject.views.admin import admin
from myproject.views.frontend import frontend
app.register_blueprint(admin)
app.register_blueprint(frontend)
return app
See the parameters for a more detailed list of optional features when
initializing the extension.
:param app: The Flask app that this Dockerflow extension should be
initialized with.
:type root: ~flask.Flask or None
:param db: A Flask-SQLAlchemy extension instance to be used by the
built-in Dockerflow check for the database connection.
:param redis: A Redis connection to be used by the built-in Dockerflow
check for the Redis connection.
:param migrate: A Flask-Migrate extension instance to be used by the
built-in Dockerflow check for Alembic migrations.
:param silenced_checks: Dockerflow check IDs to ignore when running
through the list of configured checks.
:type silenced_checks: list
:param version_path: The filesystem path where the ``version.json`` can
be found. Defaults to the parent directory of the
Flask app's root path.
"""
def __init__(self, app=None, db=None, redis=None, migrate=None,
silenced_checks=None, version_path=None, *args, **kwargs):
# The Flask blueprint to add the Dockerflow signal callbacks and views
self._blueprint = Blueprint('dockerflow', 'dockerflow.flask.app')
# The Dockerflow specific logger to be used by internals of this
# extension.
self.logger = logging.getLogger('dockerflow.flask')
self.logger.addHandler(logging.NullHandler())
self.logger.setLevel(logging.INFO)
# The request summary logger to be used by this extension
# without pre-configuration. See docs for how to set it up.
self.summary_logger = logging.getLogger('request.summary')
# An ordered dictionary for storing custom Dockerflow checks in.
self.checks = OrderedDict()
# A list of IDs of custom Dockerflow checks to ignore in case they
# show up.
self.silenced_checks = silenced_checks or []
# The path where to find the version JSON file. Defaults to the
# parent directory of the app root path.
self.version_path = version_path
self._version_callback = version.get_version
# Initialize the app if given.
if app:
self.init_app(app)
# Initialize the built-in checks.
if db:
self.init_check(checks.check_database_connected, db)
if redis:
self.init_check(checks.check_redis_connected, redis)
if migrate:
self.init_check(checks.check_migrations_applied, migrate)
def init_check(self, check, obj):
"""
Adds a given check callback with the provided object to the list
of checks. Useful for built-ins but also advanced custom checks.
"""
self.logger.info('Adding extension check %s' % check.__name__)
check = functools.wraps(check)(functools.partial(check, obj))
self.check(func=check)
def init_app(self, app):
"""
Initializes the extension with the given app, registers the
built-in views with an own blueprint and hooks up our signal
callbacks.
"""
# If no version path was provided in the init of the Dockerflow
# class we'll use the parent directory of the app root path.
if self.version_path is None:
self.version_path = os.path.dirname(app.root_path)
for view in (
('/__version__', 'version', self._version_view),
('/__heartbeat__', 'heartbeat', self._heartbeat_view),
('/__lbheartbeat__', 'lbheartbeat', self._lbheartbeat_view),
):
self._blueprint.add_url_rule(*view)
self._blueprint.before_app_request(self._before_request)
self._blueprint.after_app_request(self._after_request)
self._blueprint.app_errorhandler(HeartbeatFailure)(self._heartbeat_exception_handler)
app.register_blueprint(self._blueprint)
got_request_exception.connect(self._got_request_exception, sender=app)
if not hasattr(app, 'extensions'): # pragma: nocover
app.extensions = {}
app.extensions['dockerflow'] = self
def _heartbeat_exception_handler(self, error):
"""
An exception handler to act as a middleman to return
a heartbeat view response with a 500 error code.
"""
return error.get_response()
def _before_request(self):
"""
The before_request callback.
"""
g._request_id = str(uuid.uuid4())
g._start_timestamp = time.time()
def _after_request(self, response):
"""
The signal handler for the request_finished signal.
"""
if not getattr(g, '_has_exception', False):
extra = self.summary_extra()
self.summary_logger.info('', extra=extra)
return response
def _got_request_exception(self, sender, exception, **extra):
"""
The signal handler for the got_request_exception signal.
"""
extra = self.summary_extra()
extra['errno'] = 500
self.summary_logger.error(str(exception), extra=extra)
g._has_exception = True
def user_id(self):
"""
Return the ID of the current request's user
"""
# This needs flask-login to be installed
if not has_flask_login:
return
# and the actual login manager installed
if not hasattr(current_app, 'login_manager'):
return
# fail if no current_user was attached to the request context
try:
is_authenticated = current_user.is_authenticated
except AttributeError:
return
# because is_authenticated could be a callable, call it
if callable(is_authenticated):
is_authenticated = is_authenticated()
# and fail if the user isn't authenticated
if not is_authenticated:
return
# finally return the user id
return current_user.get_id()
def summary_extra(self):
"""
Build the extra data for the summary logger.
"""
out = {
'errno': 0,
'agent': request.headers.get('User-Agent', ''),
'lang': request.headers.get('Accept-Language', ''),
'method': request.method,
'path': request.path,
}
# set the uid value to the current user ID
user_id = self.user_id()
if user_id is None:
user_id = ''
out['uid'] = user_id
# the rid value to the current request ID
request_id = g.get('_request_id', None)
if request_id is not None:
out['rid'] = request_id
# and the t value to the time it took to render
start_timestamp = g.get('_start_timestamp', None)
if start_timestamp is not None:
# Duration of request, in milliseconds.
out['t'] = int(1000 * (time.time() - start_timestamp))
return out
def _version_view(self):
"""
View that returns the contents of version.json or a 404.
"""
version_json = self._version_callback(self.version_path)
if version_json is None:
return 'version.json not found', 404
else:
return jsonify(version_json)
def _lbheartbeat_view(self):
"""
Lets the load balancer know the application is running and available.
Must return 200 (not 204) for ELB
http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html
"""
return '', 200
def _heartbeat_check_detail(self, check):
errors = list(filter(lambda e: e.id not in self.silenced_checks, check()))
level = max([0] + [e.level for e in errors])
return {
'status': checks.level_to_text(level),
'level': level,
'messages': {e.id: e.msg for e in errors},
}
def version_callback(self, func):
"""
A decorator to optionally register a new Dockerflow version callback
and use that instead of the default of
:func:`dockerflow.version.get_version`.
The callback will be passed the value of the
``version_path`` parameter to the Dockerflow extension object,
which defaults to the parent directory of the Flask app's root path.
The callback should return a dictionary with the
version information as defined in the Dockerflow spec,
or None if no version information could be loaded.
E.g.::
app = Flask(__name__)
dockerflow = Dockerflow(app)
@dockerflow.version_callback
def my_version(root):
return json.loads(os.path.join(root, 'acme_version.json'))
"""
self._version_callback = func
def check(self, func=None, name=None):
"""
A decorator to register a new Dockerflow check to be run
when the /__heartbeat__ endpoint is called., e.g.::
from dockerflow.flask import checks
@dockerflow.check
def storage_reachable():
try:
acme.storage.ping()
except SlowConnectionException as exc:
return [checks.Warning(exc.msg, id='acme.health.0002')]
except StorageException as exc:
return [checks.Error(exc.msg, id='acme.health.0001')]
or using a custom name::
@dockerflow.check(name='acme-storage-check)
def storage_reachable():
# ...
"""
if func is None:
return functools.partial(self.check, name=name)
if name is None:
name = func.__name__
self.logger.info('Registered Dockerflow check %s', name)
@functools.wraps(func)
def decorated_function(*args, **kwargs):
self.logger.info('Called Dockerflow check %s', name)
return func(*args, **kwargs)
self.checks[name] = decorated_function
return decorated_function
|
mozilla-services/python-dockerflow
|
src/dockerflow/flask/app.py
|
Dockerflow.check
|
python
|
def check(self, func=None, name=None):
if func is None:
return functools.partial(self.check, name=name)
if name is None:
name = func.__name__
self.logger.info('Registered Dockerflow check %s', name)
@functools.wraps(func)
def decorated_function(*args, **kwargs):
self.logger.info('Called Dockerflow check %s', name)
return func(*args, **kwargs)
self.checks[name] = decorated_function
return decorated_function
|
A decorator to register a new Dockerflow check to be run
when the /__heartbeat__ endpoint is called., e.g.::
from dockerflow.flask import checks
@dockerflow.check
def storage_reachable():
try:
acme.storage.ping()
except SlowConnectionException as exc:
return [checks.Warning(exc.msg, id='acme.health.0002')]
except StorageException as exc:
return [checks.Error(exc.msg, id='acme.health.0001')]
or using a custom name::
@dockerflow.check(name='acme-storage-check)
def storage_reachable():
# ...
|
train
|
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L354-L391
| null |
class Dockerflow(object):
"""
The Dockerflow Flask extension. Set it up like this:
.. code-block:: python
:caption: ``myproject.py``
from flask import Flask
from dockerflow.flask import Dockerflow
app = Flask(__name__)
dockerflow = Dockerflow(app)
Or if you use the Flask application factory pattern, in
an own module set up Dockerflow first:
.. code-block:: python
:caption: ``myproject/deployment.py``
from dockerflow.flask import Dockerflow
dockerflow = Dockerflow()
and then import and initialize it with the Flask application
object when you create the application:
.. code-block:: python
:caption: ``myproject/app.py``
def create_app(config_filename):
app = Flask(__name__)
app.config.from_pyfile(config_filename)
from myproject.deployment import dockerflow
dockerflow.init_app(app)
from myproject.views.admin import admin
from myproject.views.frontend import frontend
app.register_blueprint(admin)
app.register_blueprint(frontend)
return app
See the parameters for a more detailed list of optional features when
initializing the extension.
:param app: The Flask app that this Dockerflow extension should be
initialized with.
:type root: ~flask.Flask or None
:param db: A Flask-SQLAlchemy extension instance to be used by the
built-in Dockerflow check for the database connection.
:param redis: A Redis connection to be used by the built-in Dockerflow
check for the Redis connection.
:param migrate: A Flask-Migrate extension instance to be used by the
built-in Dockerflow check for Alembic migrations.
:param silenced_checks: Dockerflow check IDs to ignore when running
through the list of configured checks.
:type silenced_checks: list
:param version_path: The filesystem path where the ``version.json`` can
be found. Defaults to the parent directory of the
Flask app's root path.
"""
def __init__(self, app=None, db=None, redis=None, migrate=None,
silenced_checks=None, version_path=None, *args, **kwargs):
# The Flask blueprint to add the Dockerflow signal callbacks and views
self._blueprint = Blueprint('dockerflow', 'dockerflow.flask.app')
# The Dockerflow specific logger to be used by internals of this
# extension.
self.logger = logging.getLogger('dockerflow.flask')
self.logger.addHandler(logging.NullHandler())
self.logger.setLevel(logging.INFO)
# The request summary logger to be used by this extension
# without pre-configuration. See docs for how to set it up.
self.summary_logger = logging.getLogger('request.summary')
# An ordered dictionary for storing custom Dockerflow checks in.
self.checks = OrderedDict()
# A list of IDs of custom Dockerflow checks to ignore in case they
# show up.
self.silenced_checks = silenced_checks or []
# The path where to find the version JSON file. Defaults to the
# parent directory of the app root path.
self.version_path = version_path
self._version_callback = version.get_version
# Initialize the app if given.
if app:
self.init_app(app)
# Initialize the built-in checks.
if db:
self.init_check(checks.check_database_connected, db)
if redis:
self.init_check(checks.check_redis_connected, redis)
if migrate:
self.init_check(checks.check_migrations_applied, migrate)
def init_check(self, check, obj):
"""
Adds a given check callback with the provided object to the list
of checks. Useful for built-ins but also advanced custom checks.
"""
self.logger.info('Adding extension check %s' % check.__name__)
check = functools.wraps(check)(functools.partial(check, obj))
self.check(func=check)
def init_app(self, app):
"""
Initializes the extension with the given app, registers the
built-in views with an own blueprint and hooks up our signal
callbacks.
"""
# If no version path was provided in the init of the Dockerflow
# class we'll use the parent directory of the app root path.
if self.version_path is None:
self.version_path = os.path.dirname(app.root_path)
for view in (
('/__version__', 'version', self._version_view),
('/__heartbeat__', 'heartbeat', self._heartbeat_view),
('/__lbheartbeat__', 'lbheartbeat', self._lbheartbeat_view),
):
self._blueprint.add_url_rule(*view)
self._blueprint.before_app_request(self._before_request)
self._blueprint.after_app_request(self._after_request)
self._blueprint.app_errorhandler(HeartbeatFailure)(self._heartbeat_exception_handler)
app.register_blueprint(self._blueprint)
got_request_exception.connect(self._got_request_exception, sender=app)
if not hasattr(app, 'extensions'): # pragma: nocover
app.extensions = {}
app.extensions['dockerflow'] = self
def _heartbeat_exception_handler(self, error):
"""
An exception handler to act as a middleman to return
a heartbeat view response with a 500 error code.
"""
return error.get_response()
def _before_request(self):
"""
The before_request callback.
"""
g._request_id = str(uuid.uuid4())
g._start_timestamp = time.time()
def _after_request(self, response):
"""
The signal handler for the request_finished signal.
"""
if not getattr(g, '_has_exception', False):
extra = self.summary_extra()
self.summary_logger.info('', extra=extra)
return response
def _got_request_exception(self, sender, exception, **extra):
"""
The signal handler for the got_request_exception signal.
"""
extra = self.summary_extra()
extra['errno'] = 500
self.summary_logger.error(str(exception), extra=extra)
g._has_exception = True
def user_id(self):
"""
Return the ID of the current request's user
"""
# This needs flask-login to be installed
if not has_flask_login:
return
# and the actual login manager installed
if not hasattr(current_app, 'login_manager'):
return
# fail if no current_user was attached to the request context
try:
is_authenticated = current_user.is_authenticated
except AttributeError:
return
# because is_authenticated could be a callable, call it
if callable(is_authenticated):
is_authenticated = is_authenticated()
# and fail if the user isn't authenticated
if not is_authenticated:
return
# finally return the user id
return current_user.get_id()
def summary_extra(self):
"""
Build the extra data for the summary logger.
"""
out = {
'errno': 0,
'agent': request.headers.get('User-Agent', ''),
'lang': request.headers.get('Accept-Language', ''),
'method': request.method,
'path': request.path,
}
# set the uid value to the current user ID
user_id = self.user_id()
if user_id is None:
user_id = ''
out['uid'] = user_id
# the rid value to the current request ID
request_id = g.get('_request_id', None)
if request_id is not None:
out['rid'] = request_id
# and the t value to the time it took to render
start_timestamp = g.get('_start_timestamp', None)
if start_timestamp is not None:
# Duration of request, in milliseconds.
out['t'] = int(1000 * (time.time() - start_timestamp))
return out
def _version_view(self):
"""
View that returns the contents of version.json or a 404.
"""
version_json = self._version_callback(self.version_path)
if version_json is None:
return 'version.json not found', 404
else:
return jsonify(version_json)
def _lbheartbeat_view(self):
"""
Lets the load balancer know the application is running and available.
Must return 200 (not 204) for ELB
http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html
"""
return '', 200
def _heartbeat_check_detail(self, check):
errors = list(filter(lambda e: e.id not in self.silenced_checks, check()))
level = max([0] + [e.level for e in errors])
return {
'status': checks.level_to_text(level),
'level': level,
'messages': {e.id: e.msg for e in errors},
}
def _heartbeat_view(self):
"""
Runs all the registered checks and returns a JSON response with either
a status code of 200 or 500 depending on the results of the checks.
Any check that returns a warning or worse (error, critical) will
return a 500 response.
"""
details = {}
statuses = {}
level = 0
for name, check in self.checks.items():
detail = self._heartbeat_check_detail(check)
statuses[name] = detail['status']
level = max(level, detail['level'])
if detail['level'] > 0:
details[name] = detail
payload = {
'status': checks.level_to_text(level),
'checks': statuses,
'details': details,
}
def render(status_code):
return make_response(jsonify(payload), status_code)
if level < checks.WARNING:
status_code = 200
heartbeat_passed.send(self, level=level)
return render(status_code)
else:
status_code = 500
heartbeat_failed.send(self, level=level)
raise HeartbeatFailure(response=render(status_code))
def version_callback(self, func):
"""
A decorator to optionally register a new Dockerflow version callback
and use that instead of the default of
:func:`dockerflow.version.get_version`.
The callback will be passed the value of the
``version_path`` parameter to the Dockerflow extension object,
which defaults to the parent directory of the Flask app's root path.
The callback should return a dictionary with the
version information as defined in the Dockerflow spec,
or None if no version information could be loaded.
E.g.::
app = Flask(__name__)
dockerflow = Dockerflow(app)
@dockerflow.version_callback
def my_version(root):
return json.loads(os.path.join(root, 'acme_version.json'))
"""
self._version_callback = func
|
mozilla-services/python-dockerflow
|
src/dockerflow/flask/checks/__init__.py
|
check_database_connected
|
python
|
def check_database_connected(db):
from sqlalchemy.exc import DBAPIError, SQLAlchemyError
errors = []
try:
with db.engine.connect() as connection:
connection.execute('SELECT 1;')
except DBAPIError as e:
msg = 'DB-API error: {!s}'.format(e)
errors.append(Error(msg, id=health.ERROR_DB_API_EXCEPTION))
except SQLAlchemyError as e:
msg = 'Database misconfigured: "{!s}"'.format(e)
errors.append(Error(msg, id=health.ERROR_SQLALCHEMY_EXCEPTION))
return errors
|
A built-in check to see if connecting to the configured default
database backend succeeds.
It's automatically added to the list of Dockerflow checks if a
:class:`~flask_sqlalchemy.SQLAlchemy` object is passed
to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
dockerflow = Dockerflow(app, db=db)
|
train
|
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/checks/__init__.py#L14-L46
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
"""
This module contains a few built-in checks for the Flask integration.
"""
from ... import health
from .messages import ( # noqa
DEBUG, INFO, WARNING, ERROR, CRITICAL, STATUSES, level_to_text,
CheckMessage, Debug, Info, Warning, Error, Critical,
)
def check_migrations_applied(migrate):
"""
A built-in check to see if all migrations have been applied correctly.
It's automatically added to the list of Dockerflow checks if a
`flask_migrate.Migrate <https://flask-migrate.readthedocs.io/>`_ object
is passed to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
from flask import Flask
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
dockerflow = Dockerflow(app, db=db, migrate=migrate)
"""
errors = []
from alembic.migration import MigrationContext
from alembic.script import ScriptDirectory
from sqlalchemy.exc import DBAPIError, SQLAlchemyError
# pass in Migrate.directory here explicitly to be compatible with
# older versions of Flask-Migrate that required the directory to be passed
config = migrate.get_config(directory=migrate.directory)
script = ScriptDirectory.from_config(config)
try:
with migrate.db.engine.connect() as connection:
context = MigrationContext.configure(connection)
db_heads = set(context.get_current_heads())
script_heads = set(script.get_heads())
except (DBAPIError, SQLAlchemyError) as e:
msg = "Can't connect to database to check migrations: {!s}".format(e)
return [Info(msg, id=health.INFO_CANT_CHECK_MIGRATIONS)]
if db_heads != script_heads:
msg = "Unapplied migrations found: {}".format(', '.join(script_heads))
errors.append(Warning(msg, id=health.WARNING_UNAPPLIED_MIGRATION))
return errors
def check_redis_connected(client):
"""
A built-in check to connect to Redis using the given client and see
if it responds to the ``PING`` command.
It's automatically added to the list of Dockerflow checks if a
:class:`~redis.StrictRedis` instances is passed
to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
import redis
from flask import Flask
from dockerflow.flask import Dockerflow
app = Flask(__name__)
redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)
dockerflow = Dockerflow(app, redis=redis)
An alternative approach to instantiating a Redis client directly
would be using the `Flask-Redis <https://github.com/underyx/flask-redis>`_
Flask extension::
from flask import Flask
from flask_redis import FlaskRedis
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['REDIS_URL'] = 'redis://:password@localhost:6379/0'
redis_store = FlaskRedis(app)
dockerflow = Dockerflow(app, redis=redis_store)
"""
import redis
errors = []
try:
result = client.ping()
except redis.ConnectionError as e:
msg = 'Could not connect to redis: {!s}'.format(e)
errors.append(Error(msg, id=health.ERROR_CANNOT_CONNECT_REDIS))
except redis.RedisError as e:
errors.append(Error('Redis error: "{!s}"'.format(e),
id=health.ERROR_REDIS_EXCEPTION))
else:
if not result:
errors.append(Error('Redis ping failed',
id=health.ERROR_REDIS_PING_FAILED))
return errors
|
mozilla-services/python-dockerflow
|
src/dockerflow/flask/checks/__init__.py
|
check_migrations_applied
|
python
|
def check_migrations_applied(migrate):
errors = []
from alembic.migration import MigrationContext
from alembic.script import ScriptDirectory
from sqlalchemy.exc import DBAPIError, SQLAlchemyError
# pass in Migrate.directory here explicitly to be compatible with
# older versions of Flask-Migrate that required the directory to be passed
config = migrate.get_config(directory=migrate.directory)
script = ScriptDirectory.from_config(config)
try:
with migrate.db.engine.connect() as connection:
context = MigrationContext.configure(connection)
db_heads = set(context.get_current_heads())
script_heads = set(script.get_heads())
except (DBAPIError, SQLAlchemyError) as e:
msg = "Can't connect to database to check migrations: {!s}".format(e)
return [Info(msg, id=health.INFO_CANT_CHECK_MIGRATIONS)]
if db_heads != script_heads:
msg = "Unapplied migrations found: {}".format(', '.join(script_heads))
errors.append(Warning(msg, id=health.WARNING_UNAPPLIED_MIGRATION))
return errors
|
A built-in check to see if all migrations have been applied correctly.
It's automatically added to the list of Dockerflow checks if a
`flask_migrate.Migrate <https://flask-migrate.readthedocs.io/>`_ object
is passed to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
from flask import Flask
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
dockerflow = Dockerflow(app, db=db, migrate=migrate)
|
train
|
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/checks/__init__.py#L49-L93
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
"""
This module contains a few built-in checks for the Flask integration.
"""
from ... import health
from .messages import ( # noqa
DEBUG, INFO, WARNING, ERROR, CRITICAL, STATUSES, level_to_text,
CheckMessage, Debug, Info, Warning, Error, Critical,
)
def check_database_connected(db):
"""
A built-in check to see if connecting to the configured default
database backend succeeds.
It's automatically added to the list of Dockerflow checks if a
:class:`~flask_sqlalchemy.SQLAlchemy` object is passed
to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
dockerflow = Dockerflow(app, db=db)
"""
from sqlalchemy.exc import DBAPIError, SQLAlchemyError
errors = []
try:
with db.engine.connect() as connection:
connection.execute('SELECT 1;')
except DBAPIError as e:
msg = 'DB-API error: {!s}'.format(e)
errors.append(Error(msg, id=health.ERROR_DB_API_EXCEPTION))
except SQLAlchemyError as e:
msg = 'Database misconfigured: "{!s}"'.format(e)
errors.append(Error(msg, id=health.ERROR_SQLALCHEMY_EXCEPTION))
return errors
def check_redis_connected(client):
"""
A built-in check to connect to Redis using the given client and see
if it responds to the ``PING`` command.
It's automatically added to the list of Dockerflow checks if a
:class:`~redis.StrictRedis` instances is passed
to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
import redis
from flask import Flask
from dockerflow.flask import Dockerflow
app = Flask(__name__)
redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)
dockerflow = Dockerflow(app, redis=redis)
An alternative approach to instantiating a Redis client directly
would be using the `Flask-Redis <https://github.com/underyx/flask-redis>`_
Flask extension::
from flask import Flask
from flask_redis import FlaskRedis
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['REDIS_URL'] = 'redis://:password@localhost:6379/0'
redis_store = FlaskRedis(app)
dockerflow = Dockerflow(app, redis=redis_store)
"""
import redis
errors = []
try:
result = client.ping()
except redis.ConnectionError as e:
msg = 'Could not connect to redis: {!s}'.format(e)
errors.append(Error(msg, id=health.ERROR_CANNOT_CONNECT_REDIS))
except redis.RedisError as e:
errors.append(Error('Redis error: "{!s}"'.format(e),
id=health.ERROR_REDIS_EXCEPTION))
else:
if not result:
errors.append(Error('Redis ping failed',
id=health.ERROR_REDIS_PING_FAILED))
return errors
|
mozilla-services/python-dockerflow
|
src/dockerflow/flask/checks/__init__.py
|
check_redis_connected
|
python
|
def check_redis_connected(client):
import redis
errors = []
try:
result = client.ping()
except redis.ConnectionError as e:
msg = 'Could not connect to redis: {!s}'.format(e)
errors.append(Error(msg, id=health.ERROR_CANNOT_CONNECT_REDIS))
except redis.RedisError as e:
errors.append(Error('Redis error: "{!s}"'.format(e),
id=health.ERROR_REDIS_EXCEPTION))
else:
if not result:
errors.append(Error('Redis ping failed',
id=health.ERROR_REDIS_PING_FAILED))
return errors
|
A built-in check to connect to Redis using the given client and see
if it responds to the ``PING`` command.
It's automatically added to the list of Dockerflow checks if a
:class:`~redis.StrictRedis` instances is passed
to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
import redis
from flask import Flask
from dockerflow.flask import Dockerflow
app = Flask(__name__)
redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)
dockerflow = Dockerflow(app, redis=redis)
An alternative approach to instantiating a Redis client directly
would be using the `Flask-Redis <https://github.com/underyx/flask-redis>`_
Flask extension::
from flask import Flask
from flask_redis import FlaskRedis
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['REDIS_URL'] = 'redis://:password@localhost:6379/0'
redis_store = FlaskRedis(app)
dockerflow = Dockerflow(app, redis=redis_store)
|
train
|
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/checks/__init__.py#L96-L145
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
"""
This module contains a few built-in checks for the Flask integration.
"""
from ... import health
from .messages import ( # noqa
DEBUG, INFO, WARNING, ERROR, CRITICAL, STATUSES, level_to_text,
CheckMessage, Debug, Info, Warning, Error, Critical,
)
def check_database_connected(db):
"""
A built-in check to see if connecting to the configured default
database backend succeeds.
It's automatically added to the list of Dockerflow checks if a
:class:`~flask_sqlalchemy.SQLAlchemy` object is passed
to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
dockerflow = Dockerflow(app, db=db)
"""
from sqlalchemy.exc import DBAPIError, SQLAlchemyError
errors = []
try:
with db.engine.connect() as connection:
connection.execute('SELECT 1;')
except DBAPIError as e:
msg = 'DB-API error: {!s}'.format(e)
errors.append(Error(msg, id=health.ERROR_DB_API_EXCEPTION))
except SQLAlchemyError as e:
msg = 'Database misconfigured: "{!s}"'.format(e)
errors.append(Error(msg, id=health.ERROR_SQLALCHEMY_EXCEPTION))
return errors
def check_migrations_applied(migrate):
"""
A built-in check to see if all migrations have been applied correctly.
It's automatically added to the list of Dockerflow checks if a
`flask_migrate.Migrate <https://flask-migrate.readthedocs.io/>`_ object
is passed to the :class:`~dockerflow.flask.app.Dockerflow` class during
instantiation, e.g.::
from flask import Flask
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from dockerflow.flask import Dockerflow
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
dockerflow = Dockerflow(app, db=db, migrate=migrate)
"""
errors = []
from alembic.migration import MigrationContext
from alembic.script import ScriptDirectory
from sqlalchemy.exc import DBAPIError, SQLAlchemyError
# pass in Migrate.directory here explicitly to be compatible with
# older versions of Flask-Migrate that required the directory to be passed
config = migrate.get_config(directory=migrate.directory)
script = ScriptDirectory.from_config(config)
try:
with migrate.db.engine.connect() as connection:
context = MigrationContext.configure(connection)
db_heads = set(context.get_current_heads())
script_heads = set(script.get_heads())
except (DBAPIError, SQLAlchemyError) as e:
msg = "Can't connect to database to check migrations: {!s}".format(e)
return [Info(msg, id=health.INFO_CANT_CHECK_MIGRATIONS)]
if db_heads != script_heads:
msg = "Unapplied migrations found: {}".format(', '.join(script_heads))
errors.append(Warning(msg, id=health.WARNING_UNAPPLIED_MIGRATION))
return errors
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData.reset
|
python
|
def reset(self):
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
|
Clean any processing data, and prepare object for reuse
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L34-L44
|
[
"def set_state(self, state):\n if state not in OldHEPData.states:\n raise ValueError(\"unknown state\")\n self.current_state = state\n"
] |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
def __init__(self, **kwargs):
"""Constructor
:param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
will be appended to comment section of the output document
:type use_additional_data: bool
:param strict: if set to True, any additional keywords (not specified by documentation)
will raise BadFormat exception during parsing
:type strict: bool
"""
# mapping of OLD HepData format's keywords to proper parsing functions
# all possible keywords are specified here, the ones which aren't used
# by new data format are either mapped to _pass method which does nothing,
# or bound to _add_to_comment which adds the specified data to comment section of the
# output
# functions are specified by states in which parser may be, it may either be parsing table or document
# for those two states different sets of directives are permitted
self.mapping = {
'document': {
'reference': self._parse_reference,
'dataset': self._set_table,
# additional data which have no one to one mapping to new YAML format
'author': self._bind_parse_additional_data('author'),
'doi': self._bind_parse_additional_data('doi'),
'status': self._bind_parse_additional_data('status'),
'experiment': self._bind_parse_additional_data('experiment'),
'detector': self._bind_parse_additional_data('detector'),
'title': self._bind_parse_additional_data('title'),
# add it to record_ids
'spiresId': self._bind_parse_record_ids('spires'),
'inspireId': self._bind_parse_record_ids('inspire'),
'cdsId': self._bind_parse_record_ids('cds'),
'durhamId': self._bind_parse_record_ids('durham'),
'comment': self._parse_document_comment,
'E': self._pass
},
'table': {
'dataend': self._set_document,
'location': self._bind_set_table_metadata('location'),
'dscomment': self._bind_set_table_metadata('description', True),
'dserror': self._parse_dserror,
'reackey': self._parse_reackey,
'qual': self._parse_qual,
'data': self._parse_table_data,
'xheader': self._parse_xheaders,
'yheader': self._parse_yheaders,
'obskey': self._parse_obskey,
'phrase': self._parse_phrase,
'E': self._pass
}
}
OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
def _parse_table_data(self, data):
"""Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
"""
header = data.split(':')
self.current_table.data_header = header
for i, h in enumerate(header):
header[i] = h.strip()
x_count = header.count('x')
y_count = header.count('y')
if not self.current_table.xheaders:
raise BadFormat("*xheader line needs to appear before *data: %s" % data)
if not self.current_table.yheaders:
raise BadFormat("*yheader line needs to appear before *data: %s" % data)
# use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
# TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
'values': []} for i in range(x_count)],
'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
'values': []} for i in range(y_count)]}
xy_mapping = []
current_x_count = 0
current_y_count = 0
for h in header:
if h == 'x':
xy_mapping.append(current_x_count)
current_x_count += 1
if h == 'y':
xy_mapping.append(current_y_count)
current_y_count += 1
last_index = self.current_file.tell()
line = self._strip_comments(self.current_file.readline())
while line and not line.startswith('*'):
data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
if len(data_entry_elements) == len(header):
# this is kind of a big stretch... I assume that x is always first
for i, h in enumerate(header):
single_element = data_entry_elements[i].strip()
# number patterns copied from old subs.pl parsing script
pmnum1 = '[-+]?[\d]+\.?[\d]*'
pmnum2 = '[-+]?\.[\d]+'
pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
# implement same regular expression matching as in old subs.pl parsing script
if h == 'x': # independent variables
r = re.search('^(?P<value>' + pmnum + ')$', single_element)
if r: # "value"
single_element = {'value': r.group('value')}
else:
r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
if r: # "value (BIN=low TO high)"
single_element = {'value': float(r.group('value')),
'low': float(r.group('low')), 'high': float(r.group('high'))}
else:
r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
single_element)
if r: # "low TO high"
single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
else: # everything else: don't try to convert to float
single_element = {'value': single_element}
# TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
# "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
# Probably not: unsupported formats will just be written as a text string.
self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
# extract energy if SQRT(S) is one of the 'x' variables
xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
for energy in single_element.values():
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
elif h == 'y': # dependent variable
pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
element = {'errors': []}
if r: # asymmetric first error
element['value'] = r.group('value').strip()
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
if r: # symmetric first error
element['value'] = r.group('value').strip()
if r.group('error'):
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'symerror': error}]
else:
element['errors'] += [{'symerror': error}]
else: # everything else
element['value'] = single_element
err_sys = []
if r and r.group('err_sys'):
err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
for err in err_sys + self.current_table.dserrors:
err = err.strip(' \t,')
if not err:
continue
error = {}
label = 'sys'
r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
if r: # symmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
error = {'symerror': error}
else:
r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
if r: # asymmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
error = {'asymerror': {'plus': err_p, 'minus': err_m}}
if not r:
# error happened
raise ValueError("Error while parsing data line: %s" % line)
error['label'] = label
if element['value'] != single_element:
element['errors'].append(error)
self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
elif data_entry_elements:
raise BadFormat("%s data entry elements but %s expected: %s" %
(len(data_entry_elements), len(header), line))
last_index = self.current_file.tell()
l = self.current_file.readline()
line = self._strip_comments(l)
self.current_file.seek(last_index)
# extract minimum and maximum from set of energies
if self.set_of_energies:
energy_min = min(self.set_of_energies)
energy_max = max(self.set_of_energies)
if energy_max > energy_min:
energy = str(energy_min) + '-' + str(energy_max)
else:
energy = energy_min
self._parse_energies(energy)
if self.current_table.description:
if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
reformatted = self._reformat_matrix()
def _reformat_matrix(self):
"""Transform a square matrix into a format with two independent variables and one dependent variable.
"""
nxax = len(self.current_table.data['independent_variables'])
nyax = len(self.current_table.data['dependent_variables'])
npts = len(self.current_table.data['dependent_variables'][0]['values'])
# check if 1 x-axis, and npts (>=2) equals number of y-axes
if nxax != 1 or nyax != npts or npts < 2:
return False
# add second independent variable with each value duplicated npts times
if len(self.current_table.xheaders) == 2:
xheader = self.current_table.xheaders[1]
else:
xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
for value in self.current_table.data['independent_variables'][0]['values']:
self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
# duplicate values of first independent variable npts times
self.current_table.data['independent_variables'][0]['values'] \
= [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
# suppress header if different for second y-axis
if self.current_table.data['dependent_variables'][0]['header'] != \
self.current_table.data['dependent_variables'][1]['header']:
self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
# remove qualifier if different for second y-axis
iqdel = [] # list of qualifier indices to be deleted
for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
iqdel.append(iq)
for iq in iqdel[::-1]: # need to delete in reverse order
del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
# append values of second and subsequent y-axes to first dependent variable
for iy in range(1, nyax):
for value in self.current_table.data['dependent_variables'][iy]['values']:
self.current_table.data['dependent_variables'][0]['values'].append(value)
# finally, delete the second and subsequent y-axes in reverse order
for iy in range(nyax-1, 0, -1):
del self.current_table.data['dependent_variables'][iy]
return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._parse_line
|
python
|
def _parse_line(self, file):
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
|
Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L136-L165
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
def __init__(self, **kwargs):
"""Constructor
:param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
will be appended to comment section of the output document
:type use_additional_data: bool
:param strict: if set to True, any additional keywords (not specified by documentation)
will raise BadFormat exception during parsing
:type strict: bool
"""
# mapping of OLD HepData format's keywords to proper parsing functions
# all possible keywords are specified here, the ones which aren't used
# by new data format are either mapped to _pass method which does nothing,
# or bound to _add_to_comment which adds the specified data to comment section of the
# output
# functions are specified by states in which parser may be, it may either be parsing table or document
# for those two states different sets of directives are permitted
self.mapping = {
'document': {
'reference': self._parse_reference,
'dataset': self._set_table,
# additional data which have no one to one mapping to new YAML format
'author': self._bind_parse_additional_data('author'),
'doi': self._bind_parse_additional_data('doi'),
'status': self._bind_parse_additional_data('status'),
'experiment': self._bind_parse_additional_data('experiment'),
'detector': self._bind_parse_additional_data('detector'),
'title': self._bind_parse_additional_data('title'),
# add it to record_ids
'spiresId': self._bind_parse_record_ids('spires'),
'inspireId': self._bind_parse_record_ids('inspire'),
'cdsId': self._bind_parse_record_ids('cds'),
'durhamId': self._bind_parse_record_ids('durham'),
'comment': self._parse_document_comment,
'E': self._pass
},
'table': {
'dataend': self._set_document,
'location': self._bind_set_table_metadata('location'),
'dscomment': self._bind_set_table_metadata('description', True),
'dserror': self._parse_dserror,
'reackey': self._parse_reackey,
'qual': self._parse_qual,
'data': self._parse_table_data,
'xheader': self._parse_xheaders,
'yheader': self._parse_yheaders,
'obskey': self._parse_obskey,
'phrase': self._parse_phrase,
'E': self._pass
}
}
OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
def _parse_table_data(self, data):
"""Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
"""
header = data.split(':')
self.current_table.data_header = header
for i, h in enumerate(header):
header[i] = h.strip()
x_count = header.count('x')
y_count = header.count('y')
if not self.current_table.xheaders:
raise BadFormat("*xheader line needs to appear before *data: %s" % data)
if not self.current_table.yheaders:
raise BadFormat("*yheader line needs to appear before *data: %s" % data)
# use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
# TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
'values': []} for i in range(x_count)],
'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
'values': []} for i in range(y_count)]}
xy_mapping = []
current_x_count = 0
current_y_count = 0
for h in header:
if h == 'x':
xy_mapping.append(current_x_count)
current_x_count += 1
if h == 'y':
xy_mapping.append(current_y_count)
current_y_count += 1
last_index = self.current_file.tell()
line = self._strip_comments(self.current_file.readline())
while line and not line.startswith('*'):
data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
if len(data_entry_elements) == len(header):
# this is kind of a big stretch... I assume that x is always first
for i, h in enumerate(header):
single_element = data_entry_elements[i].strip()
# number patterns copied from old subs.pl parsing script
pmnum1 = '[-+]?[\d]+\.?[\d]*'
pmnum2 = '[-+]?\.[\d]+'
pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
# implement same regular expression matching as in old subs.pl parsing script
if h == 'x': # independent variables
r = re.search('^(?P<value>' + pmnum + ')$', single_element)
if r: # "value"
single_element = {'value': r.group('value')}
else:
r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
if r: # "value (BIN=low TO high)"
single_element = {'value': float(r.group('value')),
'low': float(r.group('low')), 'high': float(r.group('high'))}
else:
r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
single_element)
if r: # "low TO high"
single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
else: # everything else: don't try to convert to float
single_element = {'value': single_element}
# TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
# "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
# Probably not: unsupported formats will just be written as a text string.
self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
# extract energy if SQRT(S) is one of the 'x' variables
xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
for energy in single_element.values():
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
elif h == 'y': # dependent variable
pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
element = {'errors': []}
if r: # asymmetric first error
element['value'] = r.group('value').strip()
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
if r: # symmetric first error
element['value'] = r.group('value').strip()
if r.group('error'):
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'symerror': error}]
else:
element['errors'] += [{'symerror': error}]
else: # everything else
element['value'] = single_element
err_sys = []
if r and r.group('err_sys'):
err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
for err in err_sys + self.current_table.dserrors:
err = err.strip(' \t,')
if not err:
continue
error = {}
label = 'sys'
r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
if r: # symmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
error = {'symerror': error}
else:
r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
if r: # asymmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
error = {'asymerror': {'plus': err_p, 'minus': err_m}}
if not r:
# error happened
raise ValueError("Error while parsing data line: %s" % line)
error['label'] = label
if element['value'] != single_element:
element['errors'].append(error)
self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
elif data_entry_elements:
raise BadFormat("%s data entry elements but %s expected: %s" %
(len(data_entry_elements), len(header), line))
last_index = self.current_file.tell()
l = self.current_file.readline()
line = self._strip_comments(l)
self.current_file.seek(last_index)
# extract minimum and maximum from set of energies
if self.set_of_energies:
energy_min = min(self.set_of_energies)
energy_max = max(self.set_of_energies)
if energy_max > energy_min:
energy = str(energy_min) + '-' + str(energy_max)
else:
energy = energy_min
self._parse_energies(energy)
if self.current_table.description:
if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
reformatted = self._reformat_matrix()
def _reformat_matrix(self):
"""Transform a square matrix into a format with two independent variables and one dependent variable.
"""
nxax = len(self.current_table.data['independent_variables'])
nyax = len(self.current_table.data['dependent_variables'])
npts = len(self.current_table.data['dependent_variables'][0]['values'])
# check if 1 x-axis, and npts (>=2) equals number of y-axes
if nxax != 1 or nyax != npts or npts < 2:
return False
# add second independent variable with each value duplicated npts times
if len(self.current_table.xheaders) == 2:
xheader = self.current_table.xheaders[1]
else:
xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
for value in self.current_table.data['independent_variables'][0]['values']:
self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
# duplicate values of first independent variable npts times
self.current_table.data['independent_variables'][0]['values'] \
= [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
# suppress header if different for second y-axis
if self.current_table.data['dependent_variables'][0]['header'] != \
self.current_table.data['dependent_variables'][1]['header']:
self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
# remove qualifier if different for second y-axis
iqdel = [] # list of qualifier indices to be deleted
for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
iqdel.append(iq)
for iq in iqdel[::-1]: # need to delete in reverse order
del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
# append values of second and subsequent y-axes to first dependent variable
for iy in range(1, nyax):
for value in self.current_table.data['dependent_variables'][iy]['values']:
self.current_table.data['dependent_variables'][0]['values'].append(value)
# finally, delete the second and subsequent y-axes in reverse order
for iy in range(nyax-1, 0, -1):
del self.current_table.data['dependent_variables'][iy]
return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._set_table
|
python
|
def _set_table(self, data):
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
|
Set current parsing state to 'table',
create new table object and add it to tables collection
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L183-L190
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
def __init__(self, **kwargs):
"""Constructor
:param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
will be appended to comment section of the output document
:type use_additional_data: bool
:param strict: if set to True, any additional keywords (not specified by documentation)
will raise BadFormat exception during parsing
:type strict: bool
"""
# mapping of OLD HepData format's keywords to proper parsing functions
# all possible keywords are specified here, the ones which aren't used
# by new data format are either mapped to _pass method which does nothing,
# or bound to _add_to_comment which adds the specified data to comment section of the
# output
# functions are specified by states in which parser may be, it may either be parsing table or document
# for those two states different sets of directives are permitted
self.mapping = {
'document': {
'reference': self._parse_reference,
'dataset': self._set_table,
# additional data which have no one to one mapping to new YAML format
'author': self._bind_parse_additional_data('author'),
'doi': self._bind_parse_additional_data('doi'),
'status': self._bind_parse_additional_data('status'),
'experiment': self._bind_parse_additional_data('experiment'),
'detector': self._bind_parse_additional_data('detector'),
'title': self._bind_parse_additional_data('title'),
# add it to record_ids
'spiresId': self._bind_parse_record_ids('spires'),
'inspireId': self._bind_parse_record_ids('inspire'),
'cdsId': self._bind_parse_record_ids('cds'),
'durhamId': self._bind_parse_record_ids('durham'),
'comment': self._parse_document_comment,
'E': self._pass
},
'table': {
'dataend': self._set_document,
'location': self._bind_set_table_metadata('location'),
'dscomment': self._bind_set_table_metadata('description', True),
'dserror': self._parse_dserror,
'reackey': self._parse_reackey,
'qual': self._parse_qual,
'data': self._parse_table_data,
'xheader': self._parse_xheaders,
'yheader': self._parse_yheaders,
'obskey': self._parse_obskey,
'phrase': self._parse_phrase,
'E': self._pass
}
}
OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
def _parse_table_data(self, data):
    """Parse dataset data of the original HEPData format

    Reads data rows from self.current_file until the next '*' keyword line
    and fills self.current_table.data with 'independent_variables' (one per
    'x' column) and 'dependent_variables' (one per 'y' column) in the new
    YAML-oriented layout.

    :param data: header of the table to be parsed
    :raise ValueError:
    """
    header = data.split(':')
    self.current_table.data_header = header
    for i, h in enumerate(header):
        header[i] = h.strip()
    x_count = header.count('x')
    y_count = header.count('y')
    if not self.current_table.xheaders:
        raise BadFormat("*xheader line needs to appear before *data: %s" % data)
    if not self.current_table.yheaders:
        raise BadFormat("*yheader line needs to appear before *data: %s" % data)
    # use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
    # TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
    # NOTE: when there are more columns than declared headers/qualifiers,
    # the last declared one is reused (deep-copied) for the extra columns.
    self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
                                                          'values': []} for i in range(x_count)],
                               'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
                                                        'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
                                                        'values': []} for i in range(y_count)]}
    # xy_mapping[i] is the index of column i within its own group:
    # x columns are counted separately from y columns.
    xy_mapping = []
    current_x_count = 0
    current_y_count = 0
    for h in header:
        if h == 'x':
            xy_mapping.append(current_x_count)
            current_x_count += 1
        if h == 'y':
            xy_mapping.append(current_y_count)
            current_y_count += 1
    last_index = self.current_file.tell()
    line = self._strip_comments(self.current_file.readline())
    while line and not line.startswith('*'):
        data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
        if len(data_entry_elements) == len(header):
            # this is kind of a big stretch... I assume that x is always first
            for i, h in enumerate(header):
                single_element = data_entry_elements[i].strip()
                # number patterns copied from old subs.pl parsing script
                pmnum1 = '[-+]?[\d]+\.?[\d]*'
                pmnum2 = '[-+]?\.[\d]+'
                pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
                pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
                # implement same regular expression matching as in old subs.pl parsing script
                if h == 'x': # independent variables
                    r = re.search('^(?P<value>' + pmnum + ')$', single_element)
                    if r: # "value"
                        # NOTE(review): bare values stay strings here while
                        # BIN/TO forms below are converted to float — confirm
                        # this asymmetry is intended downstream.
                        single_element = {'value': r.group('value')}
                    else:
                        r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
                                      ')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
                        if r: # "value (BIN=low TO high)"
                            single_element = {'value': float(r.group('value')),
                                              'low': float(r.group('low')), 'high': float(r.group('high'))}
                        else:
                            r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
                                          single_element)
                            if r: # "low TO high"
                                single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
                            else: # everything else: don't try to convert to float
                                single_element = {'value': single_element}
                    # TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
                    # "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
                    # Probably not: unsupported formats will just be written as a text string.
                    self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
                    # extract energy if SQRT(S) is one of the 'x' variables
                    xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
                    # NOTE(review): "in ('gev')" is a substring test on a
                    # string, not tuple membership, and assumes the 'units'
                    # key exists — verify both.
                    if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
                        for energy in single_element.values():
                            try:
                                energy = float(energy)
                                self.set_of_energies.add(energy)
                            except:
                                pass
                elif h == 'y': # dependent variable
                    pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
                    r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
                                  pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
                    element = {'errors': []}
                    if r: # asymmetric first error
                        element['value'] = r.group('value').strip()
                        err_p = r.group('err_p').strip().lstrip('+')
                        if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
                        err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
                        err_m = r.group('err_m').strip().lstrip('+')
                        if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
                        err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
                        # if only the minus error is a percentage, make plus one too
                        if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
                            err_p = err_p + '%'
                        if not err_p and not err_m:
                            raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
                        # a trailing DSYS group means this first error is statistical
                        if r.group('err_sys'):
                            element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
                        else:
                            element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
                    else:
                        r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
                                      pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
                        if r: # symmetric first error
                            element['value'] = r.group('value').strip()
                            if r.group('error'):
                                error = r.group('error').strip().lstrip('+')
                                error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
                                if r.group('err_sys'):
                                    element['errors'] += [{'label': 'stat', 'symerror': error}]
                                else:
                                    element['errors'] += [{'symerror': error}]
                        else: # everything else
                            element['value'] = single_element
                    # collect DSYS systematic errors from this element plus the
                    # table-wide *dserror entries
                    err_sys = []
                    if r and r.group('err_sys'):
                        err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
                    for err in err_sys + self.current_table.dserrors:
                        err = err.strip(' \t,')
                        if not err:
                            continue
                        error = {}
                        label = 'sys'
                        r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
                        if r: # symmetric systematic error
                            if r.group('label'):
                                label += ',' + r.group('label')
                            error = r.group('error').strip().lstrip('+')
                            error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
                            error = {'symerror': error}
                        else:
                            r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
                                          pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
                            if r: # asymmetric systematic error
                                if r.group('label'):
                                    label += ',' + r.group('label')
                                err_p = r.group('err_p').strip().lstrip('+')
                                if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
                                err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
                                err_m = r.group('err_m').strip().lstrip('+')
                                if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
                                err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
                                if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
                                    err_p = err_p + '%'
                                if not err_p and not err_m:
                                    raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
                                error = {'asymerror': {'plus': err_p, 'minus': err_m}}
                        if not r:
                            # error happened
                            raise ValueError("Error while parsing data line: %s" % line)
                        error['label'] = label
                        # only attach errors when the value itself was parsed
                        # (i.e. not the raw-text fallback)
                        if element['value'] != single_element:
                            element['errors'].append(error)
                    self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
        elif data_entry_elements:
            raise BadFormat("%s data entry elements but %s expected: %s" %
                            (len(data_entry_elements), len(header), line))
        # read ahead one line; 'last_index' lets us rewind before a keyword
        last_index = self.current_file.tell()
        l = self.current_file.readline()
        line = self._strip_comments(l)
    # rewind so the '*' keyword line is processed by _parse_line
    self.current_file.seek(last_index)
    # extract minimum and maximum from set of energies
    if self.set_of_energies:
        energy_min = min(self.set_of_energies)
        energy_max = max(self.set_of_energies)
        if energy_max > energy_min:
            energy = str(energy_min) + '-' + str(energy_max)
        else:
            energy = energy_min
        self._parse_energies(energy)
    # square covariance/correlation matrices are rewritten to the
    # two-independent-variable layout
    if self.current_table.description:
        if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
            reformatted = self._reformat_matrix()
def _reformat_matrix(self):
    """Transform a square matrix into a format with two independent variables and one dependent variable.

    The old format stores an N x N matrix as one x-axis of N points plus N
    y-axes of N values each.  This rewrites self.current_table.data in place
    into the new layout: two independent variables (row, column), each with
    N*N values, and a single dependent variable holding all N*N entries.

    :return: True if the table was recognised as a square matrix and
        reformatted, False if it was left untouched
    :rtype: bool
    """
    nxax = len(self.current_table.data['independent_variables'])
    nyax = len(self.current_table.data['dependent_variables'])
    npts = len(self.current_table.data['dependent_variables'][0]['values'])
    # check if 1 x-axis, and npts (>=2) equals number of y-axes
    if nxax != 1 or nyax != npts or npts < 2:
        return False
    # add second independent variable with each value duplicated npts times
    if len(self.current_table.xheaders) == 2:
        xheader = self.current_table.xheaders[1]
    else:
        xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
    self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
    for value in self.current_table.data['independent_variables'][0]['values']:
        self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
    # duplicate values of first independent variable npts times
    self.current_table.data['independent_variables'][0]['values'] \
        = [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
    # suppress header if different for second y-axis
    if self.current_table.data['dependent_variables'][0]['header'] != \
            self.current_table.data['dependent_variables'][1]['header']:
        self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
    # remove qualifier if different for second y-axis
    iqdel = [] # list of qualifier indices to be deleted
    for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
        if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
            iqdel.append(iq)
    for iq in iqdel[::-1]: # need to delete in reverse order
        del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
    # append values of second and subsequent y-axes to first dependent variable
    for iy in range(1, nyax):
        for value in self.current_table.data['dependent_variables'][iy]['values']:
            self.current_table.data['dependent_variables'][0]['values'].append(value)
    # finally, delete the second and subsequent y-axes in reverse order
    for iy in range(nyax-1, 0, -1):
        del self.current_table.data['dependent_variables'][iy]
    return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
# valid parser states: 'document' (top-level metadata) and 'table' (inside a dataset)
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._parse_table_data
|
python
|
def _parse_table_data(self, data):
    """Parse dataset data of the original HEPData format.

    Reads data rows from self.current_file until the next '*' keyword line
    and fills self.current_table.data with 'independent_variables' (one per
    'x' column) and 'dependent_variables' (one per 'y' column).

    :param data: header of the table to be parsed
    :raise ValueError: on malformed error specifications in a data line
    """
    header = data.split(':')
    self.current_table.data_header = header
    for i, h in enumerate(header):
        header[i] = h.strip()
    x_count = header.count('x')
    y_count = header.count('y')
    if not self.current_table.xheaders:
        raise BadFormat("*xheader line needs to appear before *data: %s" % data)
    if not self.current_table.yheaders:
        raise BadFormat("*yheader line needs to appear before *data: %s" % data)
    # use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
    # TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
    # NOTE: the last declared xheader/yheader/qualifier is reused for extra columns.
    self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
                                                          'values': []} for i in range(x_count)],
                               'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
                                                        'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
                                                        'values': []} for i in range(y_count)]}
    # xy_mapping[i]: index of column i within its own (x or y) group
    xy_mapping = []
    current_x_count = 0
    current_y_count = 0
    for h in header:
        if h == 'x':
            xy_mapping.append(current_x_count)
            current_x_count += 1
        if h == 'y':
            xy_mapping.append(current_y_count)
            current_y_count += 1
    last_index = self.current_file.tell()
    line = self._strip_comments(self.current_file.readline())
    while line and not line.startswith('*'):
        data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
        if len(data_entry_elements) == len(header):
            # this is kind of a big stretch... I assume that x is always first
            for i, h in enumerate(header):
                single_element = data_entry_elements[i].strip()
                # number patterns copied from old subs.pl parsing script
                pmnum1 = '[-+]?[\d]+\.?[\d]*'
                pmnum2 = '[-+]?\.[\d]+'
                pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
                pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
                # implement same regular expression matching as in old subs.pl parsing script
                if h == 'x': # independent variables
                    r = re.search('^(?P<value>' + pmnum + ')$', single_element)
                    if r: # "value"
                        single_element = {'value': r.group('value')}
                    else:
                        r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
                                      ')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
                        if r: # "value (BIN=low TO high)"
                            single_element = {'value': float(r.group('value')),
                                              'low': float(r.group('low')), 'high': float(r.group('high'))}
                        else:
                            r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
                                          single_element)
                            if r: # "low TO high"
                                single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
                            else: # everything else: don't try to convert to float
                                single_element = {'value': single_element}
                    # TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
                    # "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
                    # Probably not: unsupported formats will just be written as a text string.
                    self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
                    # extract energy if SQRT(S) is one of the 'x' variables
                    xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
                    # NOTE(review): "in ('gev')" is a substring test (str, not
                    # tuple) and assumes a 'units' key is present — verify.
                    if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
                        for energy in single_element.values():
                            try:
                                energy = float(energy)
                                self.set_of_energies.add(energy)
                            except:
                                pass
                elif h == 'y': # dependent variable
                    pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
                    r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
                                  pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
                    element = {'errors': []}
                    if r: # asymmetric first error
                        element['value'] = r.group('value').strip()
                        err_p = r.group('err_p').strip().lstrip('+')
                        if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
                        err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
                        err_m = r.group('err_m').strip().lstrip('+')
                        if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
                        err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
                        # if only the minus error is a percentage, make plus one too
                        if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
                            err_p = err_p + '%'
                        if not err_p and not err_m:
                            raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
                        # a trailing DSYS group means this first error is statistical
                        if r.group('err_sys'):
                            element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
                        else:
                            element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
                    else:
                        r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
                                      pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
                        if r: # symmetric first error
                            element['value'] = r.group('value').strip()
                            if r.group('error'):
                                error = r.group('error').strip().lstrip('+')
                                error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
                                if r.group('err_sys'):
                                    element['errors'] += [{'label': 'stat', 'symerror': error}]
                                else:
                                    element['errors'] += [{'symerror': error}]
                        else: # everything else
                            element['value'] = single_element
                    # collect DSYS systematics (inline and table-wide *dserror)
                    err_sys = []
                    if r and r.group('err_sys'):
                        err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
                    for err in err_sys + self.current_table.dserrors:
                        err = err.strip(' \t,')
                        if not err:
                            continue
                        error = {}
                        label = 'sys'
                        r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
                        if r: # symmetric systematic error
                            if r.group('label'):
                                label += ',' + r.group('label')
                            error = r.group('error').strip().lstrip('+')
                            error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
                            error = {'symerror': error}
                        else:
                            r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
                                          pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
                            if r: # asymmetric systematic error
                                if r.group('label'):
                                    label += ',' + r.group('label')
                                err_p = r.group('err_p').strip().lstrip('+')
                                if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
                                err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
                                err_m = r.group('err_m').strip().lstrip('+')
                                if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
                                err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
                                if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
                                    err_p = err_p + '%'
                                if not err_p and not err_m:
                                    raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
                                error = {'asymerror': {'plus': err_p, 'minus': err_m}}
                        if not r:
                            # error happened
                            raise ValueError("Error while parsing data line: %s" % line)
                        error['label'] = label
                        # only attach errors when the value was actually parsed
                        if element['value'] != single_element:
                            element['errors'].append(error)
                    self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
        elif data_entry_elements:
            raise BadFormat("%s data entry elements but %s expected: %s" %
                            (len(data_entry_elements), len(header), line))
        # read ahead; 'last_index' lets us rewind before the next keyword
        last_index = self.current_file.tell()
        l = self.current_file.readline()
        line = self._strip_comments(l)
    # rewind so the '*' keyword line is processed by _parse_line
    self.current_file.seek(last_index)
    # extract minimum and maximum from set of energies
    if self.set_of_energies:
        energy_min = min(self.set_of_energies)
        energy_max = max(self.set_of_energies)
        if energy_max > energy_min:
            energy = str(energy_min) + '-' + str(energy_max)
        else:
            energy = energy_min
        self._parse_energies(energy)
    # square covariance/correlation matrices get the two-variable layout
    if self.current_table.description:
        if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
            reformatted = self._reformat_matrix()
|
Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L204-L406
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
    def __init__(self, **kwargs):
        """Constructor
        :param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
        will be appended to comment section of the output document
        :type use_additional_data: bool
        :param strict: if set to True, any additional keywords (not specified by documentation)
        will raise BadFormat exception during parsing
        :type strict: bool
        """
        # mapping of OLD HepData format's keywords to proper parsing functions
        # all possible keywords are specified here, the ones which aren't used
        # by new data format are either mapped to _pass method which does nothing,
        # or bound to _add_to_comment which adds the specified data to comment section of the
        # output
        # functions are specified by states in which parser may be, it may either be parsing table or document
        # for those two states different sets of directives are permitted
        self.mapping = {
            'document': {
                'reference': self._parse_reference,
                'dataset': self._set_table,
                # additional data which have no one to one mapping to new YAML format
                'author': self._bind_parse_additional_data('author'),
                'doi': self._bind_parse_additional_data('doi'),
                'status': self._bind_parse_additional_data('status'),
                'experiment': self._bind_parse_additional_data('experiment'),
                'detector': self._bind_parse_additional_data('detector'),
                'title': self._bind_parse_additional_data('title'),
                # add it to record_ids
                'spiresId': self._bind_parse_record_ids('spires'),
                'inspireId': self._bind_parse_record_ids('inspire'),
                'cdsId': self._bind_parse_record_ids('cds'),
                'durhamId': self._bind_parse_record_ids('durham'),
                'comment': self._parse_document_comment,
                'E': self._pass
            },
            'table': {
                'dataend': self._set_document,
                'location': self._bind_set_table_metadata('location'),
                'dscomment': self._bind_set_table_metadata('description', True),
                'dserror': self._parse_dserror,
                'reackey': self._parse_reackey,
                'qual': self._parse_qual,
                'data': self._parse_table_data,
                'xheader': self._parse_xheaders,
                'yheader': self._parse_yheaders,
                'obskey': self._parse_obskey,
                'phrase': self._parse_phrase,
                'E': self._pass
            }
        }
        # applies kwargs onto the options declared in options() (sets
        # self.strict and self.use_additional_data)
        OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
    def _reformat_matrix(self):
        """Transform a square matrix into a format with two independent variables and one dependent variable.

        The old format stores an N x N matrix as one x-axis of N points plus
        N y-axes of N values each.  This rewrites self.current_table.data in
        place into the new layout: two independent variables (row, column),
        each with N*N values, and one dependent variable with all entries.

        :return: True if the table was recognised as a square matrix and
            reformatted, False if it was left untouched
        :rtype: bool
        """
        nxax = len(self.current_table.data['independent_variables'])
        nyax = len(self.current_table.data['dependent_variables'])
        npts = len(self.current_table.data['dependent_variables'][0]['values'])
        # check if 1 x-axis, and npts (>=2) equals number of y-axes
        if nxax != 1 or nyax != npts or npts < 2:
            return False
        # add second independent variable with each value duplicated npts times
        if len(self.current_table.xheaders) == 2:
            xheader = self.current_table.xheaders[1]
        else:
            xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
        self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
        for value in self.current_table.data['independent_variables'][0]['values']:
            self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
        # duplicate values of first independent variable npts times
        self.current_table.data['independent_variables'][0]['values'] \
            = [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
        # suppress header if different for second y-axis
        if self.current_table.data['dependent_variables'][0]['header'] != \
                self.current_table.data['dependent_variables'][1]['header']:
            self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
        # remove qualifier if different for second y-axis
        iqdel = [] # list of qualifier indices to be deleted
        for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
            if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
                iqdel.append(iq)
        for iq in iqdel[::-1]: # need to delete in reverse order
            del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
        # append values of second and subsequent y-axes to first dependent variable
        for iy in range(1, nyax):
            for value in self.current_table.data['dependent_variables'][iy]['values']:
                self.current_table.data['dependent_variables'][0]['values'].append(value)
        # finally, delete the second and subsequent y-axes in reverse order
        for iy in range(nyax-1, 0, -1):
            del self.current_table.data['dependent_variables'][iy]
        return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
# valid values accepted by set_state(): parsing the document header vs.
# parsing a dataset table
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._reformat_matrix
|
python
|
def _reformat_matrix(self):
"""Transform a square matrix into a format with two independent
variables and one dependent variable.

Requires exactly one x-axis whose number of points equals the number
of y-axes (and is at least 2); otherwise the table is left untouched.

:return: True if the table was reformatted, False otherwise
:rtype: bool
"""
nxax = len(self.current_table.data['independent_variables'])
nyax = len(self.current_table.data['dependent_variables'])
npts = len(self.current_table.data['dependent_variables'][0]['values'])
# check if 1 x-axis, and npts (>=2) equals number of y-axes
if nxax != 1 or nyax != npts or npts < 2:
return False
# add second independent variable with each value duplicated npts times
if len(self.current_table.xheaders) == 2:
xheader = self.current_table.xheaders[1]
else:
xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
for value in self.current_table.data['independent_variables'][0]['values']:
self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
# duplicate values of first independent variable npts times
self.current_table.data['independent_variables'][0]['values'] \
= [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
# suppress header if different for second y-axis
if self.current_table.data['dependent_variables'][0]['header'] != \
self.current_table.data['dependent_variables'][1]['header']:
self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
# remove qualifier if different for second y-axis
iqdel = [] # list of qualifier indices to be deleted
for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
iqdel.append(iq)
for iq in iqdel[::-1]: # need to delete in reverse order
del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
# append values of second and subsequent y-axes to first dependent variable
for iy in range(1, nyax):
for value in self.current_table.data['dependent_variables'][iy]['values']:
self.current_table.data['dependent_variables'][0]['values'].append(value)
# finally, delete the second and subsequent y-axes in reverse order
for iy in range(nyax-1, 0, -1):
del self.current_table.data['dependent_variables'][iy]
return True
|
Transform a square matrix into a format with two independent variables and one dependent variable.
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L408-L454
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
"""Return the parser options, extending the base Parser options with
the OldHEPData-specific ``strict`` and ``use-additional-data`` flags.

:return: mapping of option name to Option descriptor
"""
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
    """Clear all processing state and prepare the parser for reuse."""
    # document-level state
    self.data = [{}]
    self.additional_data = {}
    self.set_of_energies = set()
    # table-level state
    self.current_table = None
    self.tables = []
    self.lines = []
    # file / FSM state
    self.current_file = None
    self.set_state('document')
def set_state(self, state):
    """Switch the parser FSM to *state* ('document' or 'table').

    :param state: new parser state, must appear in ``OldHEPData.states``
    :raises ValueError: if *state* is not a recognised parser state
    """
    if state in OldHEPData.states:
        self.current_state = state
    else:
        raise ValueError("unknown state")
def __init__(self, **kwargs):
"""Constructor
:param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
will be appended to comment section of the output document
:type use_additional_data: bool
:param strict: if set to True, any additional keywords (not specified by documentation)
will raise BadFormat exception during parsing
:type strict: bool
"""
# mapping of OLD HepData format's keywords to proper parsing functions
# all possible keywords are specified here, the ones which aren't used
# by new data format are either mapped to _pass method which does nothing,
# or bound to _add_to_comment which adds the specified data to comment section of the
# output
# functions are specified by states in which parser may be, it may either be parsing table or document
# for those two states different sets of directives are permitted
# NOTE(review): 'E' is mapped to _pass in both states, i.e. deliberately ignored
self.mapping = {
'document': {
'reference': self._parse_reference,
'dataset': self._set_table,
# additional data which have no one to one mapping to new YAML format
'author': self._bind_parse_additional_data('author'),
'doi': self._bind_parse_additional_data('doi'),
'status': self._bind_parse_additional_data('status'),
'experiment': self._bind_parse_additional_data('experiment'),
'detector': self._bind_parse_additional_data('detector'),
'title': self._bind_parse_additional_data('title'),
# add it to record_ids
'spiresId': self._bind_parse_record_ids('spires'),
'inspireId': self._bind_parse_record_ids('inspire'),
'cdsId': self._bind_parse_record_ids('cds'),
'durhamId': self._bind_parse_record_ids('durham'),
'comment': self._parse_document_comment,
'E': self._pass
},
'table': {
'dataend': self._set_document,
'location': self._bind_set_table_metadata('location'),
'dscomment': self._bind_set_table_metadata('description', True),
'dserror': self._parse_dserror,
'reackey': self._parse_reackey,
'qual': self._parse_qual,
'data': self._parse_table_data,
'xheader': self._parse_xheaders,
'yheader': self._parse_yheaders,
'obskey': self._parse_obskey,
'phrase': self._parse_phrase,
'E': self._pass
}
}
# presumably OptionInitMixin sets self.strict / self.use_additional_data
# from kwargs according to options() — verify against OptionInitMixin
OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
"""Drive the line parser over ``self.current_file`` and build the result.

If ``use_additional_data`` is set, values collected in
``self.additional_data`` are appended to the document comment.

:return: parsed document and tables
:rtype: ParsedData
"""
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
"""Parse *data_in*, which is either a file path or an open file object.

:param data_in: path to an oldhepdata file, or a readable file object
:return: parsed document and tables
:rtype: ParsedData
"""
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
# NOTE(review): use of ``unicode`` implies this module targets Python 2
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection

:param data: unused remainder of the ``*dataset`` line
"""
self.set_state('table')
# tables are numbered from 1; the table's metadata dict is also appended
# to self.data so later keywords update both views through one object
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
def _parse_table_data(self, data):
"""Parse dataset data of the original HEPData format

Reads the column layout from *data* (a colon-separated list of ``x``
and ``y`` markers), then consumes the following data rows until the
next ``*`` keyword, filling the table's independent and dependent
variables.  Values and errors are matched against the same numeric
patterns as the legacy ``subs.pl`` script.

:param data: header of the table to be parsed
:raise ValueError: if a data value or error cannot be parsed
:raise BadFormat: if headers are missing or a row has the wrong number of columns
"""
header = data.split(':')
self.current_table.data_header = header
for i, h in enumerate(header):
header[i] = h.strip()
x_count = header.count('x')
y_count = header.count('y')
if not self.current_table.xheaders:
raise BadFormat("*xheader line needs to appear before *data: %s" % data)
if not self.current_table.yheaders:
raise BadFormat("*yheader line needs to appear before *data: %s" % data)
# use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
# TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
'values': []} for i in range(x_count)],
'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
'values': []} for i in range(y_count)]}
# map each column to its index within the x (resp. y) variable lists
xy_mapping = []
current_x_count = 0
current_y_count = 0
for h in header:
if h == 'x':
xy_mapping.append(current_x_count)
current_x_count += 1
if h == 'y':
xy_mapping.append(current_y_count)
current_y_count += 1
last_index = self.current_file.tell()
line = self._strip_comments(self.current_file.readline())
# consume data rows until the next '*' keyword line
while line and not line.startswith('*'):
data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
if len(data_entry_elements) == len(header):
# this is kind of a big stretch... I assume that x is always first
for i, h in enumerate(header):
single_element = data_entry_elements[i].strip()
# number patterns copied from old subs.pl parsing script
pmnum1 = '[-+]?[\d]+\.?[\d]*'
pmnum2 = '[-+]?\.[\d]+'
pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
# implement same regular expression matching as in old subs.pl parsing script
if h == 'x': # independent variables
r = re.search('^(?P<value>' + pmnum + ')$', single_element)
if r: # "value"
single_element = {'value': r.group('value')}
else:
r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
if r: # "value (BIN=low TO high)"
single_element = {'value': float(r.group('value')),
'low': float(r.group('low')), 'high': float(r.group('high'))}
else:
r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
single_element)
if r: # "low TO high"
single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
else: # everything else: don't try to convert to float
single_element = {'value': single_element}
# TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
# "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
# Probably not: unsupported formats will just be written as a text string.
self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
# extract energy if SQRT(S) is one of the 'x' variables
xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
for energy in single_element.values():
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
elif h == 'y': # dependent variable
pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
element = {'errors': []}
if r: # asymmetric first error
element['value'] = r.group('value').strip()
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
if r: # symmetric first error
element['value'] = r.group('value').strip()
if r.group('error'):
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'symerror': error}]
else:
element['errors'] += [{'symerror': error}]
else: # everything else
element['value'] = single_element
err_sys = []
if r and r.group('err_sys'):
err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
for err in err_sys + self.current_table.dserrors:
err = err.strip(' \t,')
if not err:
continue
error = {}
label = 'sys'
r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
if r: # symmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
error = {'symerror': error}
else:
r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
if r: # asymmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
error = {'asymerror': {'plus': err_p, 'minus': err_m}}
if not r:
# error happened
raise ValueError("Error while parsing data line: %s" % line)
error['label'] = label
if element['value'] != single_element:
element['errors'].append(error)
self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
elif data_entry_elements:
raise BadFormat("%s data entry elements but %s expected: %s" %
(len(data_entry_elements), len(header), line))
last_index = self.current_file.tell()
l = self.current_file.readline()
line = self._strip_comments(l)
# push the keyword line back for the main parser loop
self.current_file.seek(last_index)
# extract minimum and maximum from set of energies
if self.set_of_energies:
energy_min = min(self.set_of_energies)
energy_max = max(self.set_of_energies)
if energy_max > energy_min:
energy = str(energy_min) + '-' + str(energy_max)
else:
energy = energy_min
self._parse_energies(energy)
if self.current_table.description:
if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
reformatted = self._reformat_matrix()
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._parse_qual
|
python
|
def _parse_qual(self, data):
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
|
Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L510-L550
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
    def __init__(self, **kwargs):
        """Constructor

        :param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
        will be appended to comment section of the output document
        :type use_additional_data: bool

        :param strict: if set to True, any additional keywords (not specified by documentation)
        will raise BadFormat exception during parsing
        :type strict: bool
        """
        # mapping of OLD HepData format's keywords to proper parsing functions
        # all possible keywords are specified here, the ones which aren't used
        # by new data format are either mapped to _pass method which does nothing,
        # or bound to _add_to_comment which adds the specified data to comment section of the
        # output

        # functions are specified by states in which parser may be, it may either be parsing table or document
        # for those two states different sets of directives are permitted
        self.mapping = {
            'document': {
                'reference': self._parse_reference,
                'dataset': self._set_table,

                # additional data which have no one to one mapping to new YAML format
                'author': self._bind_parse_additional_data('author'),
                'doi': self._bind_parse_additional_data('doi'),
                'status': self._bind_parse_additional_data('status'),
                'experiment': self._bind_parse_additional_data('experiment'),
                'detector': self._bind_parse_additional_data('detector'),
                'title': self._bind_parse_additional_data('title'),

                # add it to record_ids
                'spiresId': self._bind_parse_record_ids('spires'),
                'inspireId': self._bind_parse_record_ids('inspire'),
                'cdsId': self._bind_parse_record_ids('cds'),
                'durhamId': self._bind_parse_record_ids('durham'),

                'comment': self._parse_document_comment,
                # 'E' lines carry no information needed by the new format
                'E': self._pass
            },
            'table': {
                'dataend': self._set_document,
                'location': self._bind_set_table_metadata('location'),
                'dscomment': self._bind_set_table_metadata('description', True),
                'dserror': self._parse_dserror,
                'reackey': self._parse_reackey,
                'qual': self._parse_qual,
                'data': self._parse_table_data,
                'xheader': self._parse_xheaders,
                'yheader': self._parse_yheaders,
                'obskey': self._parse_obskey,
                'phrase': self._parse_phrase,
                'E': self._pass
            }
        }

        # lets OptionInitMixin consume the strict/use_additional_data options
        OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
    def _parse_table_data(self, data):
        """Parse dataset data of the original HEPData format.

        Reads data rows from the input file until the next ``*keyword`` line,
        filling ``self.current_table.data`` with independent (x) and dependent
        (y) variable values, including parsed errors.

        :param data: header of the table to be parsed, e.g. ``x : y : y``
        :type data: str
        :raise BadFormat: if headers are missing or a row has the wrong column count
        :raise ValueError: if an error specification cannot be parsed
        """
        # the *data line itself declares the column layout, e.g. "x : y : y"
        header = data.split(':')
        self.current_table.data_header = header

        for i, h in enumerate(header):
            header[i] = h.strip()

        x_count = header.count('x')
        y_count = header.count('y')

        if not self.current_table.xheaders:
            raise BadFormat("*xheader line needs to appear before *data: %s" % data)
        if not self.current_table.yheaders:
            raise BadFormat("*yheader line needs to appear before *data: %s" % data)

        # use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
        # TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
        # when fewer headers/qualifiers than columns were declared, the last one is reused for the remaining columns
        self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
                                                              'values': []} for i in range(x_count)],
                                   'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
                                                            'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
                                                            'values': []} for i in range(y_count)]}

        # xy_mapping[i] is the index of column i within its own kind (x or y)
        xy_mapping = []

        current_x_count = 0
        current_y_count = 0
        for h in header:
            if h == 'x':
                xy_mapping.append(current_x_count)
                current_x_count += 1
            if h == 'y':
                xy_mapping.append(current_y_count)
                current_y_count += 1

        last_index = self.current_file.tell()
        line = self._strip_comments(self.current_file.readline())

        # consume semicolon-terminated data rows until the next *keyword line or EOF
        while line and not line.startswith('*'):
            data_entry_elements = line.split(';')[:-1]  # split and also strip newline character at the end

            if len(data_entry_elements) == len(header):
                # this is kind of a big stretch... I assume that x is always first
                for i, h in enumerate(header):
                    single_element = data_entry_elements[i].strip()

                    # number patterns copied from old subs.pl parsing script
                    pmnum1 = '[-+]?[\d]+\.?[\d]*'
                    pmnum2 = '[-+]?\.[\d]+'
                    pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
                    pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'

                    # implement same regular expression matching as in old subs.pl parsing script
                    if h == 'x':  # independent variables
                        r = re.search('^(?P<value>' + pmnum + ')$', single_element)
                        if r:  # "value"
                            single_element = {'value': r.group('value')}
                        else:
                            r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
                                          ')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
                            if r:  # "value (BIN=low TO high)"
                                single_element = {'value': float(r.group('value')),
                                                  'low': float(r.group('low')), 'high': float(r.group('high'))}
                            else:
                                r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
                                              single_element)
                                if r:  # "low TO high"
                                    single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
                                else:  # everything else: don't try to convert to float
                                    single_element = {'value': single_element}
                        # TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
                        # "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
                        # Probably not: unsupported formats will just be written as a text string.

                        self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)

                        # extract energy if SQRT(S) is one of the 'x' variables
                        xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
                        # NOTE(review): ``lower`` is not a Python builtin -- presumably imported elsewhere
                        # in this module (Python 2 ``string.lower``); verify the import.  Also note that
                        # ``in ('gev')`` is a substring test against the string 'gev', not tuple membership.
                        if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
                            for energy in single_element.values():
                                try:
                                    energy = float(energy)
                                    self.set_of_energies.add(energy)
                                except:  # NOTE(review): bare except silently skips non-numeric entries
                                    pass

                    elif h == 'y':  # dependent variable
                        pmnum_pct = pmnum + '(\s*PCT)?'  # errors can possibly be given as percentages
                        r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
                                      pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
                        element = {'errors': []}
                        if r:  # asymmetric first error
                            element['value'] = r.group('value').strip()
                            err_p = r.group('err_p').strip().lstrip('+')
                            if err_p == '-': err_p = ''  # represent missing error as '-' in oldhepdata format
                            err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
                            err_m = r.group('err_m').strip().lstrip('+')
                            if err_m == '-': err_m = ''  # represent missing error as '-' in oldhepdata format
                            err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
                            # if only the minus error is a percentage, promote the plus error too
                            if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
                                err_p = err_p + '%'
                            if not err_p and not err_m:
                                raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
                            if r.group('err_sys'):
                                element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
                            else:
                                element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
                        else:
                            r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
                                          pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
                            if r:  # symmetric first error
                                element['value'] = r.group('value').strip()
                                if r.group('error'):
                                    error = r.group('error').strip().lstrip('+')
                                    error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
                                    if r.group('err_sys'):
                                        element['errors'] += [{'label': 'stat', 'symerror': error}]
                                    else:
                                        element['errors'] += [{'symerror': error}]
                            else:  # everything else
                                element['value'] = single_element

                        # combine per-point DSYS(...) errors with table-wide *dserror definitions
                        err_sys = []
                        if r and r.group('err_sys'):
                            err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')

                        for err in err_sys + self.current_table.dserrors:
                            err = err.strip(' \t,')
                            if not err:
                                continue
                            error = {}
                            label = 'sys'
                            r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
                            if r:  # symmetric systematic error
                                if r.group('label'):
                                    label += ',' + r.group('label')
                                error = r.group('error').strip().lstrip('+')
                                error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
                                error = {'symerror': error}
                            else:
                                r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
                                              pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
                                if r:  # asymmetric systematic error
                                    if r.group('label'):
                                        label += ',' + r.group('label')
                                    err_p = r.group('err_p').strip().lstrip('+')
                                    if err_p == '-': err_p = ''  # represent missing error as '-' in oldhepdata format
                                    err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
                                    err_m = r.group('err_m').strip().lstrip('+')
                                    if err_m == '-': err_m = ''  # represent missing error as '-' in oldhepdata format
                                    err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
                                    if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
                                        err_p = err_p + '%'
                                    if not err_p and not err_m:
                                        raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
                                    error = {'asymerror': {'plus': err_p, 'minus': err_m}}
                            if not r:
                                # error happened
                                raise ValueError("Error while parsing data line: %s" % line)
                            error['label'] = label
                            # errors are only attached when the value itself was parsed
                            # (the raw-text fallback keeps element['value'] == single_element)
                            if element['value'] != single_element:
                                element['errors'].append(error)

                        self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)

            elif data_entry_elements:
                raise BadFormat("%s data entry elements but %s expected: %s" %
                                (len(data_entry_elements), len(header), line))

            # remember position so the terminating *keyword line can be re-read by the caller
            last_index = self.current_file.tell()
            l = self.current_file.readline()
            line = self._strip_comments(l)
        self.current_file.seek(last_index)

        # extract minimum and maximum from set of energies
        if self.set_of_energies:
            energy_min = min(self.set_of_energies)
            energy_max = max(self.set_of_energies)
            if energy_max > energy_min:
                energy = str(energy_min) + '-' + str(energy_max)
            else:
                energy = energy_min
            self._parse_energies(energy)

        # covariance/correlation matrices get reshaped into 2 x-axes + 1 y-axis
        if self.current_table.description:
            if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
                reformatted = self._reformat_matrix()  # NOTE(review): return value is unused
    def _reformat_matrix(self):
        """Transform a square matrix into a format with two independent variables and one dependent variable.

        Only applies when the table has exactly one x-axis and as many y-axes
        as data points (with at least two points); otherwise it is left alone.

        :return: True if the table was reformatted, False otherwise
        :rtype: bool
        """
        nxax = len(self.current_table.data['independent_variables'])
        nyax = len(self.current_table.data['dependent_variables'])
        npts = len(self.current_table.data['dependent_variables'][0]['values'])

        # check if 1 x-axis, and npts (>=2) equals number of y-axes
        if nxax != 1 or nyax != npts or npts < 2:
            return False

        # add second independent variable with each value duplicated npts times
        if len(self.current_table.xheaders) == 2:
            xheader = self.current_table.xheaders[1]
        else:
            # no second declared header: reuse the first axis header for the new one
            xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
        self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
        for value in self.current_table.data['independent_variables'][0]['values']:
            self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])

        # duplicate values of first independent variable npts times
        self.current_table.data['independent_variables'][0]['values'] \
            = [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]

        # suppress header if different for second y-axis
        if self.current_table.data['dependent_variables'][0]['header'] != \
                self.current_table.data['dependent_variables'][1]['header']:
            self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}

        # remove qualifier if different for second y-axis
        iqdel = []  # list of qualifier indices to be deleted
        for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
            if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
                iqdel.append(iq)
        for iq in iqdel[::-1]:  # need to delete in reverse order
            del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]

        # append values of second and subsequent y-axes to first dependent variable
        for iy in range(1, nyax):
            for value in self.current_table.data['dependent_variables'][iy]['values']:
                self.current_table.data['dependent_variables'][0]['values'].append(value)

        # finally, delete the second and subsequent y-axes in reverse order
        for iy in range(nyax-1, 0, -1):
            del self.current_table.data['dependent_variables'][iy]

        return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._parse_header
|
python
|
def _parse_header(self, data):
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
|
Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L552-L571
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
def __init__(self, **kwargs):
"""Constructor
:param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
will be appended to comment section of the output document
:type use_additional_data: bool
:param strict: if set to True, any additional keywords (not specified by documentation)
will raise BadFormat exception during parsing
:type strict: bool
"""
# mapping of OLD HepData format's keywords to proper parsing functions
# all possible keywords are specified here, the ones which aren't used
# by new data format are either mapped to _pass method which does nothing,
# or bound to _add_to_comment which adds the specified data to comment section of the
# output
# functions are specified by states in which parser may be, it may either be parsing table or document
# for those two states different sets of directives are permitted
self.mapping = {
'document': {
'reference': self._parse_reference,
'dataset': self._set_table,
# additional data which have no one to one mapping to new YAML format
'author': self._bind_parse_additional_data('author'),
'doi': self._bind_parse_additional_data('doi'),
'status': self._bind_parse_additional_data('status'),
'experiment': self._bind_parse_additional_data('experiment'),
'detector': self._bind_parse_additional_data('detector'),
'title': self._bind_parse_additional_data('title'),
# add it to record_ids
'spiresId': self._bind_parse_record_ids('spires'),
'inspireId': self._bind_parse_record_ids('inspire'),
'cdsId': self._bind_parse_record_ids('cds'),
'durhamId': self._bind_parse_record_ids('durham'),
'comment': self._parse_document_comment,
'E': self._pass
},
'table': {
'dataend': self._set_document,
'location': self._bind_set_table_metadata('location'),
'dscomment': self._bind_set_table_metadata('description', True),
'dserror': self._parse_dserror,
'reackey': self._parse_reackey,
'qual': self._parse_qual,
'data': self._parse_table_data,
'xheader': self._parse_xheaders,
'yheader': self._parse_yheaders,
'obskey': self._parse_obskey,
'phrase': self._parse_phrase,
'E': self._pass
}
}
OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
def _parse_table_data(self, data):
"""Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
"""
header = data.split(':')
self.current_table.data_header = header
for i, h in enumerate(header):
header[i] = h.strip()
x_count = header.count('x')
y_count = header.count('y')
if not self.current_table.xheaders:
raise BadFormat("*xheader line needs to appear before *data: %s" % data)
if not self.current_table.yheaders:
raise BadFormat("*yheader line needs to appear before *data: %s" % data)
# use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
# TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
'values': []} for i in range(x_count)],
'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
'values': []} for i in range(y_count)]}
xy_mapping = []
current_x_count = 0
current_y_count = 0
for h in header:
if h == 'x':
xy_mapping.append(current_x_count)
current_x_count += 1
if h == 'y':
xy_mapping.append(current_y_count)
current_y_count += 1
last_index = self.current_file.tell()
line = self._strip_comments(self.current_file.readline())
while line and not line.startswith('*'):
data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
if len(data_entry_elements) == len(header):
# this is kind of a big stretch... I assume that x is always first
for i, h in enumerate(header):
single_element = data_entry_elements[i].strip()
# number patterns copied from old subs.pl parsing script
pmnum1 = '[-+]?[\d]+\.?[\d]*'
pmnum2 = '[-+]?\.[\d]+'
pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
# implement same regular expression matching as in old subs.pl parsing script
if h == 'x': # independent variables
r = re.search('^(?P<value>' + pmnum + ')$', single_element)
if r: # "value"
single_element = {'value': r.group('value')}
else:
r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
if r: # "value (BIN=low TO high)"
single_element = {'value': float(r.group('value')),
'low': float(r.group('low')), 'high': float(r.group('high'))}
else:
r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
single_element)
if r: # "low TO high"
single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
else: # everything else: don't try to convert to float
single_element = {'value': single_element}
# TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
# "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
# Probably not: unsupported formats will just be written as a text string.
self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
# extract energy if SQRT(S) is one of the 'x' variables
xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
for energy in single_element.values():
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
elif h == 'y': # dependent variable
pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
element = {'errors': []}
if r: # asymmetric first error
element['value'] = r.group('value').strip()
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
if r: # symmetric first error
element['value'] = r.group('value').strip()
if r.group('error'):
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'symerror': error}]
else:
element['errors'] += [{'symerror': error}]
else: # everything else
element['value'] = single_element
err_sys = []
if r and r.group('err_sys'):
err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
for err in err_sys + self.current_table.dserrors:
err = err.strip(' \t,')
if not err:
continue
error = {}
label = 'sys'
r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
if r: # symmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
error = {'symerror': error}
else:
r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
if r: # asymmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
error = {'asymerror': {'plus': err_p, 'minus': err_m}}
if not r:
# error happened
raise ValueError("Error while parsing data line: %s" % line)
error['label'] = label
if element['value'] != single_element:
element['errors'].append(error)
self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
elif data_entry_elements:
raise BadFormat("%s data entry elements but %s expected: %s" %
(len(data_entry_elements), len(header), line))
last_index = self.current_file.tell()
l = self.current_file.readline()
line = self._strip_comments(l)
self.current_file.seek(last_index)
# extract minimum and maximum from set of energies
if self.set_of_energies:
energy_min = min(self.set_of_energies)
energy_max = max(self.set_of_energies)
if energy_max > energy_min:
energy = str(energy_min) + '-' + str(energy_max)
else:
energy = energy_min
self._parse_energies(energy)
if self.current_table.description:
if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
reformatted = self._reformat_matrix()
def _reformat_matrix(self):
"""Transform a square matrix into a format with two independent variables and one dependent variable.
"""
nxax = len(self.current_table.data['independent_variables'])
nyax = len(self.current_table.data['dependent_variables'])
npts = len(self.current_table.data['dependent_variables'][0]['values'])
# check if 1 x-axis, and npts (>=2) equals number of y-axes
if nxax != 1 or nyax != npts or npts < 2:
return False
# add second independent variable with each value duplicated npts times
if len(self.current_table.xheaders) == 2:
xheader = self.current_table.xheaders[1]
else:
xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
for value in self.current_table.data['independent_variables'][0]['values']:
self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
# duplicate values of first independent variable npts times
self.current_table.data['independent_variables'][0]['values'] \
= [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
# suppress header if different for second y-axis
if self.current_table.data['dependent_variables'][0]['header'] != \
self.current_table.data['dependent_variables'][1]['header']:
self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
# remove qualifier if different for second y-axis
iqdel = [] # list of qualifier indices to be deleted
for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
iqdel.append(iq)
for iq in iqdel[::-1]: # need to delete in reverse order
del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
# append values of second and subsequent y-axes to first dependent variable
for iy in range(1, nyax):
for value in self.current_table.data['dependent_variables'][iy]['values']:
self.current_table.data['dependent_variables'][0]['values'].append(value)
# finally, delete the second and subsequent y-axes in reverse order
for iy in range(nyax-1, 0, -1):
del self.current_table.data['dependent_variables'][iy]
return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._strip_comments
|
python
|
def _strip_comments(line):
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
|
Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L590-L606
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
def __init__(self, **kwargs):
"""Constructor
:param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
will be appended to comment section of the output document
:type use_additional_data: bool
:param strict: if set to True, any additional keywords (not specified by documentation)
will raise BadFormat exception during parsing
:type strict: bool
"""
# mapping of OLD HepData format's keywords to proper parsing functions
# all possible keywords are specified here, the ones which aren't used
# by new data format are either mapped to _pass method which does nothing,
# or bound to _add_to_comment which adds the specified data to comment section of the
# output
# functions are specified by states in which parser may be, it may either be parsing table or document
# for those two states different sets of directives are permitted
self.mapping = {
'document': {
'reference': self._parse_reference,
'dataset': self._set_table,
# additional data which have no one to one mapping to new YAML format
'author': self._bind_parse_additional_data('author'),
'doi': self._bind_parse_additional_data('doi'),
'status': self._bind_parse_additional_data('status'),
'experiment': self._bind_parse_additional_data('experiment'),
'detector': self._bind_parse_additional_data('detector'),
'title': self._bind_parse_additional_data('title'),
# add it to record_ids
'spiresId': self._bind_parse_record_ids('spires'),
'inspireId': self._bind_parse_record_ids('inspire'),
'cdsId': self._bind_parse_record_ids('cds'),
'durhamId': self._bind_parse_record_ids('durham'),
'comment': self._parse_document_comment,
'E': self._pass
},
'table': {
'dataend': self._set_document,
'location': self._bind_set_table_metadata('location'),
'dscomment': self._bind_set_table_metadata('description', True),
'dserror': self._parse_dserror,
'reackey': self._parse_reackey,
'qual': self._parse_qual,
'data': self._parse_table_data,
'xheader': self._parse_xheaders,
'yheader': self._parse_yheaders,
'obskey': self._parse_obskey,
'phrase': self._parse_phrase,
'E': self._pass
}
}
OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
def _parse_table_data(self, data):
"""Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
"""
header = data.split(':')
self.current_table.data_header = header
for i, h in enumerate(header):
header[i] = h.strip()
x_count = header.count('x')
y_count = header.count('y')
if not self.current_table.xheaders:
raise BadFormat("*xheader line needs to appear before *data: %s" % data)
if not self.current_table.yheaders:
raise BadFormat("*yheader line needs to appear before *data: %s" % data)
# use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
# TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
'values': []} for i in range(x_count)],
'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
'values': []} for i in range(y_count)]}
xy_mapping = []
current_x_count = 0
current_y_count = 0
for h in header:
if h == 'x':
xy_mapping.append(current_x_count)
current_x_count += 1
if h == 'y':
xy_mapping.append(current_y_count)
current_y_count += 1
last_index = self.current_file.tell()
line = self._strip_comments(self.current_file.readline())
while line and not line.startswith('*'):
data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
if len(data_entry_elements) == len(header):
# this is kind of a big stretch... I assume that x is always first
for i, h in enumerate(header):
single_element = data_entry_elements[i].strip()
# number patterns copied from old subs.pl parsing script
pmnum1 = '[-+]?[\d]+\.?[\d]*'
pmnum2 = '[-+]?\.[\d]+'
pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
# implement same regular expression matching as in old subs.pl parsing script
if h == 'x': # independent variables
r = re.search('^(?P<value>' + pmnum + ')$', single_element)
if r: # "value"
single_element = {'value': r.group('value')}
else:
r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
if r: # "value (BIN=low TO high)"
single_element = {'value': float(r.group('value')),
'low': float(r.group('low')), 'high': float(r.group('high'))}
else:
r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
single_element)
if r: # "low TO high"
single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
else: # everything else: don't try to convert to float
single_element = {'value': single_element}
# TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
# "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
# Probably not: unsupported formats will just be written as a text string.
self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
# extract energy if SQRT(S) is one of the 'x' variables
xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
for energy in single_element.values():
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
elif h == 'y': # dependent variable
pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
element = {'errors': []}
if r: # asymmetric first error
element['value'] = r.group('value').strip()
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
if r: # symmetric first error
element['value'] = r.group('value').strip()
if r.group('error'):
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'symerror': error}]
else:
element['errors'] += [{'symerror': error}]
else: # everything else
element['value'] = single_element
err_sys = []
if r and r.group('err_sys'):
err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
for err in err_sys + self.current_table.dserrors:
err = err.strip(' \t,')
if not err:
continue
error = {}
label = 'sys'
r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
if r: # symmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
error = {'symerror': error}
else:
r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
if r: # asymmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
error = {'asymerror': {'plus': err_p, 'minus': err_m}}
if not r:
# error happened
raise ValueError("Error while parsing data line: %s" % line)
error['label'] = label
if element['value'] != single_element:
element['errors'].append(error)
self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
elif data_entry_elements:
raise BadFormat("%s data entry elements but %s expected: %s" %
(len(data_entry_elements), len(header), line))
last_index = self.current_file.tell()
l = self.current_file.readline()
line = self._strip_comments(l)
self.current_file.seek(last_index)
# extract minimum and maximum from set of energies
if self.set_of_energies:
energy_min = min(self.set_of_energies)
energy_max = max(self.set_of_energies)
if energy_max > energy_min:
energy = str(energy_min) + '-' + str(energy_max)
else:
energy = energy_min
self._parse_energies(energy)
if self.current_table.description:
if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
reformatted = self._reformat_matrix()
def _reformat_matrix(self):
"""Transform a square matrix into a format with two independent variables and one dependent variable.
"""
nxax = len(self.current_table.data['independent_variables'])
nyax = len(self.current_table.data['dependent_variables'])
npts = len(self.current_table.data['dependent_variables'][0]['values'])
# check if 1 x-axis, and npts (>=2) equals number of y-axes
if nxax != 1 or nyax != npts or npts < 2:
return False
# add second independent variable with each value duplicated npts times
if len(self.current_table.xheaders) == 2:
xheader = self.current_table.xheaders[1]
else:
xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
for value in self.current_table.data['independent_variables'][0]['values']:
self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
# duplicate values of first independent variable npts times
self.current_table.data['independent_variables'][0]['values'] \
= [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
# suppress header if different for second y-axis
if self.current_table.data['dependent_variables'][0]['header'] != \
self.current_table.data['dependent_variables'][1]['header']:
self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
# remove qualifier if different for second y-axis
iqdel = [] # list of qualifier indices to be deleted
for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
iqdel.append(iq)
for iq in iqdel[::-1]: # need to delete in reverse order
del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
# append values of second and subsequent y-axes to first dependent variable
for iy in range(1, nyax):
for value in self.current_table.data['dependent_variables'][iy]['values']:
self.current_table.data['dependent_variables'][0]['values'].append(value)
# finally, delete the second and subsequent y-axes in reverse order
for iy in range(nyax-1, 0, -1):
del self.current_table.data['dependent_variables'][iy]
return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._read_multiline
|
python
|
def _read_multiline(self, init_data):
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
|
Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L608-L648
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
def __init__(self, **kwargs):
"""Constructor
:param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
will be appended to comment section of the output document
:type use_additional_data: bool
:param strict: if set to True, any additional keywords (not specified by documentation)
will raise BadFormat exception during parsing
:type strict: bool
"""
# mapping of OLD HepData format's keywords to proper parsing functions
# all possible keywords are specified here, the ones which aren't used
# by new data format are either mapped to _pass method which does nothing,
# or bound to _add_to_comment which adds the specified data to comment section of the
# output
# functions are specified by states in which parser may be, it may either be parsing table or document
# for those two states different sets of directives are permitted
self.mapping = {
'document': {
'reference': self._parse_reference,
'dataset': self._set_table,
# additional data which have no one to one mapping to new YAML format
'author': self._bind_parse_additional_data('author'),
'doi': self._bind_parse_additional_data('doi'),
'status': self._bind_parse_additional_data('status'),
'experiment': self._bind_parse_additional_data('experiment'),
'detector': self._bind_parse_additional_data('detector'),
'title': self._bind_parse_additional_data('title'),
# add it to record_ids
'spiresId': self._bind_parse_record_ids('spires'),
'inspireId': self._bind_parse_record_ids('inspire'),
'cdsId': self._bind_parse_record_ids('cds'),
'durhamId': self._bind_parse_record_ids('durham'),
'comment': self._parse_document_comment,
'E': self._pass
},
'table': {
'dataend': self._set_document,
'location': self._bind_set_table_metadata('location'),
'dscomment': self._bind_set_table_metadata('description', True),
'dserror': self._parse_dserror,
'reackey': self._parse_reackey,
'qual': self._parse_qual,
'data': self._parse_table_data,
'xheader': self._parse_xheaders,
'yheader': self._parse_yheaders,
'obskey': self._parse_obskey,
'phrase': self._parse_phrase,
'E': self._pass
}
}
OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
def _parse_table_data(self, data):
"""Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
"""
header = data.split(':')
self.current_table.data_header = header
for i, h in enumerate(header):
header[i] = h.strip()
x_count = header.count('x')
y_count = header.count('y')
if not self.current_table.xheaders:
raise BadFormat("*xheader line needs to appear before *data: %s" % data)
if not self.current_table.yheaders:
raise BadFormat("*yheader line needs to appear before *data: %s" % data)
# use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
# TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
'values': []} for i in range(x_count)],
'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
'values': []} for i in range(y_count)]}
xy_mapping = []
current_x_count = 0
current_y_count = 0
for h in header:
if h == 'x':
xy_mapping.append(current_x_count)
current_x_count += 1
if h == 'y':
xy_mapping.append(current_y_count)
current_y_count += 1
last_index = self.current_file.tell()
line = self._strip_comments(self.current_file.readline())
while line and not line.startswith('*'):
data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
if len(data_entry_elements) == len(header):
# this is kind of a big stretch... I assume that x is always first
for i, h in enumerate(header):
single_element = data_entry_elements[i].strip()
# number patterns copied from old subs.pl parsing script
pmnum1 = '[-+]?[\d]+\.?[\d]*'
pmnum2 = '[-+]?\.[\d]+'
pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
# implement same regular expression matching as in old subs.pl parsing script
if h == 'x': # independent variables
r = re.search('^(?P<value>' + pmnum + ')$', single_element)
if r: # "value"
single_element = {'value': r.group('value')}
else:
r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
if r: # "value (BIN=low TO high)"
single_element = {'value': float(r.group('value')),
'low': float(r.group('low')), 'high': float(r.group('high'))}
else:
r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
single_element)
if r: # "low TO high"
single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
else: # everything else: don't try to convert to float
single_element = {'value': single_element}
# TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
# "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
# Probably not: unsupported formats will just be written as a text string.
self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
# extract energy if SQRT(S) is one of the 'x' variables
xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
for energy in single_element.values():
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
elif h == 'y': # dependent variable
pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
element = {'errors': []}
if r: # asymmetric first error
element['value'] = r.group('value').strip()
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
if r: # symmetric first error
element['value'] = r.group('value').strip()
if r.group('error'):
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'symerror': error}]
else:
element['errors'] += [{'symerror': error}]
else: # everything else
element['value'] = single_element
err_sys = []
if r and r.group('err_sys'):
err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
for err in err_sys + self.current_table.dserrors:
err = err.strip(' \t,')
if not err:
continue
error = {}
label = 'sys'
r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
if r: # symmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
error = {'symerror': error}
else:
r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
if r: # asymmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
error = {'asymerror': {'plus': err_p, 'minus': err_m}}
if not r:
# error happened
raise ValueError("Error while parsing data line: %s" % line)
error['label'] = label
if element['value'] != single_element:
element['errors'].append(error)
self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
elif data_entry_elements:
raise BadFormat("%s data entry elements but %s expected: %s" %
(len(data_entry_elements), len(header), line))
last_index = self.current_file.tell()
l = self.current_file.readline()
line = self._strip_comments(l)
self.current_file.seek(last_index)
# extract minimum and maximum from set of energies
if self.set_of_energies:
energy_min = min(self.set_of_energies)
energy_max = max(self.set_of_energies)
if energy_max > energy_min:
energy = str(energy_min) + '-' + str(energy_max)
else:
energy = energy_min
self._parse_energies(energy)
if self.current_table.description:
if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
reformatted = self._reformat_matrix()
def _reformat_matrix(self):
"""Transform a square matrix into a format with two independent variables and one dependent variable.
"""
nxax = len(self.current_table.data['independent_variables'])
nyax = len(self.current_table.data['dependent_variables'])
npts = len(self.current_table.data['dependent_variables'][0]['values'])
# check if 1 x-axis, and npts (>=2) equals number of y-axes
if nxax != 1 or nyax != npts or npts < 2:
return False
# add second independent variable with each value duplicated npts times
if len(self.current_table.xheaders) == 2:
xheader = self.current_table.xheaders[1]
else:
xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
for value in self.current_table.data['independent_variables'][0]['values']:
self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
# duplicate values of first independent variable npts times
self.current_table.data['independent_variables'][0]['values'] \
= [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
# suppress header if different for second y-axis
if self.current_table.data['dependent_variables'][0]['header'] != \
self.current_table.data['dependent_variables'][1]['header']:
self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
# remove qualifier if different for second y-axis
iqdel = [] # list of qualifier indices to be deleted
for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
iqdel.append(iq)
for iq in iqdel[::-1]: # need to delete in reverse order
del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
# append values of second and subsequent y-axes to first dependent variable
for iy in range(1, nyax):
for value in self.current_table.data['dependent_variables'][iy]['values']:
self.current_table.data['dependent_variables'][0]['values'].append(value)
# finally, delete the second and subsequent y-axes in reverse order
for iy in range(nyax-1, 0, -1):
del self.current_table.data['dependent_variables'][iy]
return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._bind_set_table_metadata
|
python
|
def _bind_set_table_metadata(self, key, multiline=False):
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
|
Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L653-L672
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
def __init__(self, **kwargs):
"""Constructor
:param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
will be appended to comment section of the output document
:type use_additional_data: bool
:param strict: if set to True, any additional keywords (not specified by documentation)
will raise BadFormat exception during parsing
:type strict: bool
"""
# mapping of OLD HepData format's keywords to proper parsing functions
# all possible keywords are specified here, the ones which aren't used
# by new data format are either mapped to _pass method which does nothing,
# or bound to _add_to_comment which adds the specified data to comment section of the
# output
# functions are specified by states in which parser may be, it may either be parsing table or document
# for those two states different sets of directives are permitted
self.mapping = {
'document': {
'reference': self._parse_reference,
'dataset': self._set_table,
# additional data which have no one to one mapping to new YAML format
'author': self._bind_parse_additional_data('author'),
'doi': self._bind_parse_additional_data('doi'),
'status': self._bind_parse_additional_data('status'),
'experiment': self._bind_parse_additional_data('experiment'),
'detector': self._bind_parse_additional_data('detector'),
'title': self._bind_parse_additional_data('title'),
# add it to record_ids
'spiresId': self._bind_parse_record_ids('spires'),
'inspireId': self._bind_parse_record_ids('inspire'),
'cdsId': self._bind_parse_record_ids('cds'),
'durhamId': self._bind_parse_record_ids('durham'),
'comment': self._parse_document_comment,
'E': self._pass
},
'table': {
'dataend': self._set_document,
'location': self._bind_set_table_metadata('location'),
'dscomment': self._bind_set_table_metadata('description', True),
'dserror': self._parse_dserror,
'reackey': self._parse_reackey,
'qual': self._parse_qual,
'data': self._parse_table_data,
'xheader': self._parse_xheaders,
'yheader': self._parse_yheaders,
'obskey': self._parse_obskey,
'phrase': self._parse_phrase,
'E': self._pass
}
}
OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
def _parse_table_data(self, data):
"""Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
"""
header = data.split(':')
self.current_table.data_header = header
for i, h in enumerate(header):
header[i] = h.strip()
x_count = header.count('x')
y_count = header.count('y')
if not self.current_table.xheaders:
raise BadFormat("*xheader line needs to appear before *data: %s" % data)
if not self.current_table.yheaders:
raise BadFormat("*yheader line needs to appear before *data: %s" % data)
# use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
# TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
'values': []} for i in range(x_count)],
'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
'values': []} for i in range(y_count)]}
xy_mapping = []
current_x_count = 0
current_y_count = 0
for h in header:
if h == 'x':
xy_mapping.append(current_x_count)
current_x_count += 1
if h == 'y':
xy_mapping.append(current_y_count)
current_y_count += 1
last_index = self.current_file.tell()
line = self._strip_comments(self.current_file.readline())
while line and not line.startswith('*'):
data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
if len(data_entry_elements) == len(header):
# this is kind of a big stretch... I assume that x is always first
for i, h in enumerate(header):
single_element = data_entry_elements[i].strip()
# number patterns copied from old subs.pl parsing script
pmnum1 = '[-+]?[\d]+\.?[\d]*'
pmnum2 = '[-+]?\.[\d]+'
pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
# implement same regular expression matching as in old subs.pl parsing script
if h == 'x': # independent variables
r = re.search('^(?P<value>' + pmnum + ')$', single_element)
if r: # "value"
single_element = {'value': r.group('value')}
else:
r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
if r: # "value (BIN=low TO high)"
single_element = {'value': float(r.group('value')),
'low': float(r.group('low')), 'high': float(r.group('high'))}
else:
r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
single_element)
if r: # "low TO high"
single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
else: # everything else: don't try to convert to float
single_element = {'value': single_element}
# TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
# "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
# Probably not: unsupported formats will just be written as a text string.
self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
# extract energy if SQRT(S) is one of the 'x' variables
xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
for energy in single_element.values():
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
elif h == 'y': # dependent variable
pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
element = {'errors': []}
if r: # asymmetric first error
element['value'] = r.group('value').strip()
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
if r: # symmetric first error
element['value'] = r.group('value').strip()
if r.group('error'):
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'symerror': error}]
else:
element['errors'] += [{'symerror': error}]
else: # everything else
element['value'] = single_element
err_sys = []
if r and r.group('err_sys'):
err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
for err in err_sys + self.current_table.dserrors:
err = err.strip(' \t,')
if not err:
continue
error = {}
label = 'sys'
r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
if r: # symmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
error = {'symerror': error}
else:
r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
if r: # asymmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
error = {'asymerror': {'plus': err_p, 'minus': err_m}}
if not r:
# error happened
raise ValueError("Error while parsing data line: %s" % line)
error['label'] = label
if element['value'] != single_element:
element['errors'].append(error)
self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
elif data_entry_elements:
raise BadFormat("%s data entry elements but %s expected: %s" %
(len(data_entry_elements), len(header), line))
last_index = self.current_file.tell()
l = self.current_file.readline()
line = self._strip_comments(l)
self.current_file.seek(last_index)
# extract minimum and maximum from set of energies
if self.set_of_energies:
energy_min = min(self.set_of_energies)
energy_max = max(self.set_of_energies)
if energy_max > energy_min:
energy = str(energy_min) + '-' + str(energy_max)
else:
energy = energy_min
self._parse_energies(energy)
if self.current_table.description:
if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
reformatted = self._reformat_matrix()
def _reformat_matrix(self):
"""Transform a square matrix into a format with two independent variables and one dependent variable.
"""
nxax = len(self.current_table.data['independent_variables'])
nyax = len(self.current_table.data['dependent_variables'])
npts = len(self.current_table.data['dependent_variables'][0]['values'])
# check if 1 x-axis, and npts (>=2) equals number of y-axes
if nxax != 1 or nyax != npts or npts < 2:
return False
# add second independent variable with each value duplicated npts times
if len(self.current_table.xheaders) == 2:
xheader = self.current_table.xheaders[1]
else:
xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
for value in self.current_table.data['independent_variables'][0]['values']:
self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
# duplicate values of first independent variable npts times
self.current_table.data['independent_variables'][0]['values'] \
= [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
# suppress header if different for second y-axis
if self.current_table.data['dependent_variables'][0]['header'] != \
self.current_table.data['dependent_variables'][1]['header']:
self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
# remove qualifier if different for second y-axis
iqdel = [] # list of qualifier indices to be deleted
for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
iqdel.append(iq)
for iq in iqdel[::-1]: # need to delete in reverse order
del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
# append values of second and subsequent y-axes to first dependent variable
for iy in range(1, nyax):
for value in self.current_table.data['dependent_variables'][iy]['values']:
self.current_table.data['dependent_variables'][0]['values'].append(value)
# finally, delete the second and subsequent y-axes in reverse order
for iy in range(nyax-1, 0, -1):
del self.current_table.data['dependent_variables'][iy]
return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/oldhepdata_parser.py
|
OldHEPData._bind_parse_additional_data
|
python
|
def _bind_parse_additional_data(self, key, multiline=False):
def _set_additional_data_bound(self, data):
"""Concrete method for setting additional data
:param self:
:type self: OldHEPData
"""
# if it's multiline, parse it
if multiline:
data = self._read_multiline(data)
if key not in self.additional_data:
self.additional_data[key] = []
self.additional_data[key].append(data)
# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self)
|
Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L689-L715
| null |
class OldHEPData(Parser):
"""Parser for Old HEPData format
"""
help = 'Parses OLD HepData format - example OLD HepData input format: http://hepdata.cedar.ac.uk/resource/sample.input'
@classmethod
def options(cls):
options = Parser.options()
options['strict'] = Option('strict', default=True, type=bool, required=False,
help='if specified any additional keywords in OldHEPData file will raise an error')
options['use_additional_data'] = Option('use-additional-data', default=False, type=bool, required=False, variable_mapping='use_additional_data',
help=('if specified additional data which does not have equivalent in new HEPData format'
' will be appended to comment section of the output document'))
return options
def reset(self):
"""Clean any processing data, and prepare object for reuse
"""
self.current_table = None
self.tables = []
self.data = [{}]
self.additional_data = {}
self.lines = []
self.set_state('document')
self.current_file = None
self.set_of_energies = set()
def set_state(self, state):
if state not in OldHEPData.states:
raise ValueError("unknown state")
self.current_state = state
def __init__(self, **kwargs):
"""Constructor
:param use_additional_data: if set to True additional data which does not have equivalent in new HEPData format
will be appended to comment section of the output document
:type use_additional_data: bool
:param strict: if set to True, any additional keywords (not specified by documentation)
will raise BadFormat exception during parsing
:type strict: bool
"""
# mapping of OLD HepData format's keywords to proper parsing functions
# all possible keywords are specified here, the ones which aren't used
# by new data format are either mapped to _pass method which does nothing,
# or bound to _add_to_comment which adds the specified data to comment section of the
# output
# functions are specified by states in which parser may be, it may either be parsing table or document
# for those two states different sets of directives are permitted
self.mapping = {
'document': {
'reference': self._parse_reference,
'dataset': self._set_table,
# additional data which have no one to one mapping to new YAML format
'author': self._bind_parse_additional_data('author'),
'doi': self._bind_parse_additional_data('doi'),
'status': self._bind_parse_additional_data('status'),
'experiment': self._bind_parse_additional_data('experiment'),
'detector': self._bind_parse_additional_data('detector'),
'title': self._bind_parse_additional_data('title'),
# add it to record_ids
'spiresId': self._bind_parse_record_ids('spires'),
'inspireId': self._bind_parse_record_ids('inspire'),
'cdsId': self._bind_parse_record_ids('cds'),
'durhamId': self._bind_parse_record_ids('durham'),
'comment': self._parse_document_comment,
'E': self._pass
},
'table': {
'dataend': self._set_document,
'location': self._bind_set_table_metadata('location'),
'dscomment': self._bind_set_table_metadata('description', True),
'dserror': self._parse_dserror,
'reackey': self._parse_reackey,
'qual': self._parse_qual,
'data': self._parse_table_data,
'xheader': self._parse_xheaders,
'yheader': self._parse_yheaders,
'obskey': self._parse_obskey,
'phrase': self._parse_phrase,
'E': self._pass
}
}
OptionInitMixin.__init__(self, options=kwargs)
def _parse(self):
# parse the file
while self._parse_line(self.current_file):
pass
if self.use_additional_data:
if self.additional_data:
self.data[0]['comment'] += 'ADDITIONAL DATA IMPORTED FROM OLD HEPDATA FORMAT: \n'
for key in self.additional_data:
for element in self.additional_data[key]:
self.data[0]['comment'] += "%s: %s" % (key, element)
return ParsedData(self.data[0], self.tables)
def parse(self, data_in):
# clean any possible data from previous parsing
self.reset()
# in case of strings we should treat them as filepaths
if isinstance(data_in, (str, unicode)):
with open(data_in, 'r') as self.current_file:
return self._parse()
else:
self.current_file = data_in
return self._parse()
def _parse_line(self, file):
"""Parse single line (or more if particular keyword actually demands it)
:param file:
:type file: file
"""
line = self._strip_comments(file.readline())
# check if the file ended
if not line:
return False
# line was empty or it was a comment, continue
if line.strip() == '':
return True
# retrieve keyword and its value
reg = re.search("^\*(?P<key>[^:#]*)(:\s*(?P<value>.*)\s*)?$", line)
if reg:
key = reg.group('key').strip()
value = reg.group('value')
if key in self.mapping[self.current_state]:
self.mapping[self.current_state][key](value)
elif self.strict:
raise BadFormat("unknown key: *%s" % key)
else:
raise BadFormat("line can not be parsed: %s" % line)
return True
def _parse_reference(self, data):
"""
:param data:
:type data: str
"""
if 'additional_resources' not in self.data[0]:
self.data[0]['additional_resources'] = []
location = data.split(' : ')[0].strip()
if location.startswith('http'):
self.data[0]['additional_resources'].append({
'location': location,
'description': 'web page with auxiliary material'
})
def _set_table(self, data):
"""Set current parsing state to 'table',
create new table object and add it to tables collection
"""
self.set_state('table')
self.current_table = HEPTable(index=len(self.tables) + 1)
self.tables.append(self.current_table)
self.data.append(self.current_table.metadata)
def _set_document(self, data):
"""Set current parsing state to 'document',
set current_table to None
"""
self.set_state('document')
self.current_table = None
def _pass(self, data):
"""Empty processing function, map it to keywords if they're not used in the new YAML format
"""
pass
def _parse_table_data(self, data):
"""Parse dataset data of the original HEPData format
:param data: header of the table to be parsed
:raise ValueError:
"""
header = data.split(':')
self.current_table.data_header = header
for i, h in enumerate(header):
header[i] = h.strip()
x_count = header.count('x')
y_count = header.count('y')
if not self.current_table.xheaders:
raise BadFormat("*xheader line needs to appear before *data: %s" % data)
if not self.current_table.yheaders:
raise BadFormat("*yheader line needs to appear before *data: %s" % data)
# use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored
# TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable
self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]),
'values': []} for i in range(x_count)],
'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]),
'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ],
'values': []} for i in range(y_count)]}
xy_mapping = []
current_x_count = 0
current_y_count = 0
for h in header:
if h == 'x':
xy_mapping.append(current_x_count)
current_x_count += 1
if h == 'y':
xy_mapping.append(current_y_count)
current_y_count += 1
last_index = self.current_file.tell()
line = self._strip_comments(self.current_file.readline())
while line and not line.startswith('*'):
data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end
if len(data_entry_elements) == len(header):
# this is kind of a big stretch... I assume that x is always first
for i, h in enumerate(header):
single_element = data_entry_elements[i].strip()
# number patterns copied from old subs.pl parsing script
pmnum1 = '[-+]?[\d]+\.?[\d]*'
pmnum2 = '[-+]?\.[\d]+'
pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+'
pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')'
# implement same regular expression matching as in old subs.pl parsing script
if h == 'x': # independent variables
r = re.search('^(?P<value>' + pmnum + ')$', single_element)
if r: # "value"
single_element = {'value': r.group('value')}
else:
r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \
')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element)
if r: # "value (BIN=low TO high)"
single_element = {'value': float(r.group('value')),
'low': float(r.group('low')), 'high': float(r.group('high'))}
else:
r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$',
single_element)
if r: # "low TO high"
single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))}
else: # everything else: don't try to convert to float
single_element = {'value': single_element}
# TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted),
# "value +- err", and "value -err_m, +err_p". Do we need to support these formats here?
# Probably not: unsupported formats will just be written as a text string.
self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element)
# extract energy if SQRT(S) is one of the 'x' variables
xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header']
if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'):
for energy in single_element.values():
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
elif h == 'y': # dependent variable
pmnum_pct = pmnum + '(\s*PCT)?' # errors can possibly be given as percentages
r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
element = {'errors': []}
if r: # asymmetric first error
element['value'] = r.group('value').strip()
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}]
else:
r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' +
pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element)
if r: # symmetric first error
element['value'] = r.group('value').strip()
if r.group('error'):
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
if r.group('err_sys'):
element['errors'] += [{'label': 'stat', 'symerror': error}]
else:
element['errors'] += [{'symerror': error}]
else: # everything else
element['value'] = single_element
err_sys = []
if r and r.group('err_sys'):
err_sys = r.group('err_sys').strip(' \t()').split('DSYS=')
for err in err_sys + self.current_table.dserrors:
err = err.strip(' \t,')
if not err:
continue
error = {}
label = 'sys'
r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err)
if r: # symmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
error = r.group('error').strip().lstrip('+')
error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error
error = {'symerror': error}
else:
r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' +
pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err)
if r: # asymmetric systematic error
if r.group('label'):
label += ',' + r.group('label')
err_p = r.group('err_p').strip().lstrip('+')
if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format
err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p
err_m = r.group('err_m').strip().lstrip('+')
if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format
err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m
if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%':
err_p = err_p + '%'
if not err_p and not err_m:
raise ValueError("Both asymmetric errors cannot be '-': %s" % line)
error = {'asymerror': {'plus': err_p, 'minus': err_m}}
if not r:
# error happened
raise ValueError("Error while parsing data line: %s" % line)
error['label'] = label
if element['value'] != single_element:
element['errors'].append(error)
self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element)
elif data_entry_elements:
raise BadFormat("%s data entry elements but %s expected: %s" %
(len(data_entry_elements), len(header), line))
last_index = self.current_file.tell()
l = self.current_file.readline()
line = self._strip_comments(l)
self.current_file.seek(last_index)
# extract minimum and maximum from set of energies
if self.set_of_energies:
energy_min = min(self.set_of_energies)
energy_max = max(self.set_of_energies)
if energy_max > energy_min:
energy = str(energy_min) + '-' + str(energy_max)
else:
energy = energy_min
self._parse_energies(energy)
if self.current_table.description:
if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']):
reformatted = self._reformat_matrix()
def _reformat_matrix(self):
"""Transform a square matrix into a format with two independent variables and one dependent variable.
"""
nxax = len(self.current_table.data['independent_variables'])
nyax = len(self.current_table.data['dependent_variables'])
npts = len(self.current_table.data['dependent_variables'][0]['values'])
# check if 1 x-axis, and npts (>=2) equals number of y-axes
if nxax != 1 or nyax != npts or npts < 2:
return False
# add second independent variable with each value duplicated npts times
if len(self.current_table.xheaders) == 2:
xheader = self.current_table.xheaders[1]
else:
xheader = copy.deepcopy(self.current_table.data['independent_variables'][0]['header'])
self.current_table.data['independent_variables'].append({'header': xheader, 'values': []})
for value in self.current_table.data['independent_variables'][0]['values']:
self.current_table.data['independent_variables'][1]['values'].extend([copy.deepcopy(value) for npt in range(npts)])
# duplicate values of first independent variable npts times
self.current_table.data['independent_variables'][0]['values'] \
= [copy.deepcopy(value) for npt in range(npts) for value in self.current_table.data['independent_variables'][0]['values']]
# suppress header if different for second y-axis
if self.current_table.data['dependent_variables'][0]['header'] != \
self.current_table.data['dependent_variables'][1]['header']:
self.current_table.data['dependent_variables'][0]['header'] = {'name': ''}
# remove qualifier if different for second y-axis
iqdel = [] # list of qualifier indices to be deleted
for iq, qualifier in enumerate(self.current_table.data['dependent_variables'][0]['qualifiers']):
if qualifier != self.current_table.data['dependent_variables'][1]['qualifiers'][iq]:
iqdel.append(iq)
for iq in iqdel[::-1]: # need to delete in reverse order
del self.current_table.data['dependent_variables'][0]['qualifiers'][iq]
# append values of second and subsequent y-axes to first dependent variable
for iy in range(1, nyax):
for value in self.current_table.data['dependent_variables'][iy]['values']:
self.current_table.data['dependent_variables'][0]['values'].append(value)
# finally, delete the second and subsequent y-axes in reverse order
for iy in range(nyax-1, 0, -1):
del self.current_table.data['dependent_variables'][iy]
return True
def _parse_dserror(self, data):
"""Parse dserror attribute of the old HEPData format
example:
*dserror: 7.5 PCT : overall normalization uncertainty
:param data: data to be parsed
:type data: str
"""
self.current_table.dserrors.append(data.strip())
def _parse_reackey(self, data):
"""Parse reackey attribute of the old HEPData format
example:
*reackey: P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
self.current_table.reactions.append(data.strip())
def _parse_obskey(self, data):
"""Parse obskey attribute of the old HEPData format
example:
*obskey: DSIG/DPT
:param data: data to be parsed
:type data: str
"""
self.current_table.observables.append(data.strip())
def _parse_phrase(self, data):
"""Parse phrase attribute of the old HEPData format
example:
*phrase: Z pair Production
:param data: data to be parsed
:type data: str
"""
self.current_table.phrases.append(data.strip())
def _parse_energies(self, data):
"""Add energy given in data to tables energies
this method is here for completeness sake, it's used in only one other place so
can be safely extracted
:param data: data to be appended to table's energies
:type data: str
"""
self.current_table.energies.append(data)
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
"""
list = []
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I) # ignore case
units = None
if len(name) > 1:
units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
xheader = {'name': name}
if units:
xheader['units'] = units
xheader['value'] = header.strip()
list.append(xheader)
# extract energy if SQRT(S) is one of the qualifiers
if name.startswith('SQRT(S)') and lower(units) in ('gev'):
energies = re.split(' TO ', xheader['value'], flags=re.I)
for energy in energies:
try:
energy = float(energy)
self.set_of_energies.add(energy)
except:
pass
self.current_table.qualifiers.append(list)
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list
def _parse_xheaders(self, data):
"""parse xheaders from old HEPData format
:param data: data with xheaders to be parsed
:type data: str
"""
self.current_table.xheaders += self._parse_header(data)
def _parse_yheaders(self, data):
"""parse yheaders from old HEPData format
:param data: data with yheaders to be parsed
:type data: str
"""
self.current_table.yheaders += self._parse_header(data)
@staticmethod
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
"""
if line == '':
return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
line = r.group('line')
if not line.endswith('\n'):
line += '\n'
return line
return '\n'
def _read_multiline(self, init_data):
"""Reads multiline symbols (ususally comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str
"""
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
# don't add newlines from full line comments
if line_raw[0] == '#':
continue
# now strip comments
# TODO - is it appropriate behavior?
data = self._strip_comments(line_raw)
# EOF, stop here
if not data:
break
# we arrived to the next command, step back and break
if len(data.strip()) >= 1 and data.strip()[0] == '*':
self.current_file.seek(last_index)
break
if first:
result += '\n'
first = False
result += data
result = result.strip()
if result and not result.endswith('.'):
result += '.'
return result
def _parse_document_comment(self, data):
self.data[0]['comment'] = self._read_multiline(data)
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
"""
def set_table_metadata(self, data):
if multiline:
data = self._read_multiline(data)
if key == 'location' and data:
data = 'Data from ' + data
self.current_table.metadata[key] = data.strip()
# method must be bound, so we use __get__
return set_table_metadata.__get__(self)
def _bind_parse_record_ids(self, key):
def _parse_record_ids(self, data):
if 'record_ids' not in self.data[0]:
self.data[0]['record_ids'] = []
record_id = {'type': key, 'id': int(data) if data else 0}
if self.data[0]['record_ids'].count(record_id) == 0:
self.data[0]['record_ids'].append(record_id)
elif self.strict:
raise BadFormat("duplicated record: *%s" % key)
# method must be bound, so we use __get__
return _parse_record_ids.__get__(self)
# employing full fledged FSM for only two states is kind of an overkill, so
# I'll just define them here...
states = ['document', 'table']
|
HEPData/hepdata-converter
|
hepdata_converter/writers/utils.py
|
error_value_processor
|
python
|
def error_value_processor(value, error):
if isinstance(error, (str, unicode)):
try:
if "%" in error:
error_float = float(error.replace("%", ""))
error_abs = (value/100) * error_float
return error_abs
elif error == "":
error = 0.0
else:
error = float(error)
except:
pass
return error
|
If an error is a percentage, we convert to a float, then
calculate the percentage of the supplied value.
:param value: base value, e.g. 10
:param error: e.g. 20.0%
:return: the absolute error, e.g. 12 for the above case.
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/writers/utils.py#L2-L24
| null | |
HEPData/hepdata-converter
|
hepdata_converter/__init__.py
|
convert
|
python
|
def convert(input, output=None, options={}):
if 'input_format' not in options and 'output_format' not in options:
raise ValueError("no input_format and output_format specified!")
input_format = options.get('input_format', 'yaml')
output_format = options.get('output_format', 'yaml')
parser = Parser.get_concrete_class(input_format)(**options)
writer = Writer.get_concrete_class(output_format)(**options)
if not output and not writer.single_file_output:
raise ValueError("this output_format requires specifying 'output' argument")
# if no output was specified create proxy output to which writer can insert data
_output = output
if not _output:
_output = StringIO.StringIO()
writer.write(parser.parse(input), _output)
# if no output was specified return output
if not output:
return _output.getvalue()
|
Converts a supported ``input_format`` (*oldhepdata*, *yaml*)
to a supported ``output_format`` (*csv*, *root*, *yaml*, *yoda*).
:param input: location of input file for *oldhepdata* format or input directory for *yaml* format
:param output: location of output directory to which converted files will be written
:param options: additional options such as ``input_format`` and ``output_format`` used for conversion
:type input: str
:type output: str
:type options: dict
:raise ValueError: raised if no ``input_format`` or ``output_format`` is specified
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/__init__.py#L9-L43
|
[
"def get_concrete_class(cls, class_name):\n \"\"\"This method provides easier access to all writers inheriting Writer class\n\n :param class_name: name of the parser (name of the parser class which should be used)\n :type class_name: str\n :return: Writer subclass specified by parser_name\n :rtype: Writer subclass\n :raise ValueError:\n \"\"\"\n def recurrent_class_lookup(cls):\n for cls in cls.__subclasses__():\n if lower(cls.__name__) == lower(class_name):\n return cls\n elif len(cls.__subclasses__()) > 0:\n r = recurrent_class_lookup(cls)\n if r is not None:\n return r\n return None\n\n cls = recurrent_class_lookup(cls)\n if cls:\n return cls\n else:\n raise ValueError(\"'class_name '%s' is invalid\" % class_name)\n"
] |
import StringIO
import argparse
import sys
import version
from parsers import Parser
from writers import Writer
def make_exit(message='', code=0):
return code, message
def generate_help_epilogue():
margin = ' '
r = 'Parsers:\n'
r += '[use them as --input-format parameter]\n'
r += '\n'
for cls in Parser.get_all_subclasses():
r += cls.get_help(margin)
r += '\nWriters:\n'
r += '[use them as --output-format parameter]\n'
r += '\n'
for cls in Writer.get_all_subclasses():
r += cls.get_help(margin)
return r
def _main(arguments=sys.argv):
# if version is specified ignore any other arguments
if '--version' in arguments or '-v' in arguments:
return make_exit(message="hepdata-converter version: %s" % version.__version__)
parser = argparse.ArgumentParser(description="CLI tools for converting between HEP data formats", add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
epilog=generate_help_epilogue())
parser.add_argument("--input-format", '-i', action='store', default='yaml', help='format of the input file/s (default: yaml) [choose one option from Parsers section below]')
parser.add_argument("--output-format", '-o', action='store', default='yaml', help='format of the output file/s (default: yaml) [choose one option from Writers section below]')
parser.add_argument("--version", '-v', action='store_const', const=True, default=False, help='Show hepdata-converter version')
parser.add_argument("--hepdata-doi", '-d', action='store', default='', help='Pass HEPData DOI, e.g. "10.17182/hepdata.74247.v1"')
parser.add_argument("input")
parser.add_argument("output")
if arguments == sys.argv:
arguments = sys.argv[1:]
program_args = vars(parser.parse_known_args(arguments)[0])
input_format = program_args['input_format']
output_format = program_args['output_format']
Parser.get_concrete_class(input_format).register_cli_options(parser)
Writer.get_concrete_class(output_format).register_cli_options(parser)
# reparse arguments, now with added options from concrete parsers / writers
program_args = vars(parser.parse_args(arguments))
try:
convert(program_args['input'], program_args['output'], program_args)
return make_exit()
except ValueError as e:
return make_exit(message="Options error: %s" % str(e), code=1)
def main(arguments=sys.argv):
r, message = _main(arguments)
if r == 0:
print message
else:
print >> sys.stderr, message
sys.exit(r)
|
HEPData/hepdata-converter
|
hepdata_converter/writers/csv_writer.py
|
CSV._write_packed_data
|
python
|
def _write_packed_data(self, data_out, table):
headers = []
data = []
qualifiers_marks = []
qualifiers = {}
self._extract_independent_variables(table, headers, data, qualifiers_marks)
for dependent_variable in table.dependent_variables:
self._parse_dependent_variable(dependent_variable, headers, qualifiers, qualifiers_marks, data)
self._write_metadata(data_out, table)
self._write_csv_data(data_out, qualifiers, qualifiers_marks, headers, data)
|
This is kind of legacy function - this functionality may be useful for some people, so even though
now the default of writing CSV is writing unpacked data (divided by independent variable) this method is
still available and accessible if ```pack``` flag is specified in Writer's options
:param output: output file like object to which data will be written
:param table: input table
:type table: hepdata_converter.parsers.Table
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/writers/csv_writer.py#L82-L102
| null |
class CSV(ArrayWriter):
help = 'Writes to CSV format, it can write either one table (specified by --table parameter) or all tables from the ' \
'input file. In the case of one table output must be filepath to the new csv file, in the case of multiple tables ' \
'the output must be specified to be a directory to which all table files should be written'
@classmethod
def options(cls):
options = ArrayWriter.options()
options['pack'] = Option('pack', type=bool, default=False, required=False,
help=('If specified, dependent variables will be put in one table, instead of creating one '
'table per dependent variable in CSV file'))
options['separator'] = Option('separator', type=str, default=',', required=False,
help='Defines separator for CSV file, the default is comma: ","')
return options
def __init__(self, *args, **kwargs):
super(CSV, self).__init__(*args, **kwargs)
self.extension = 'csv'
def _write_metadata(self, data_out, table):
if self.hepdata_doi:
table_doi = self.hepdata_doi + '/t' + str(table.index)
data_out.write(unicode("#: table_doi: %s\n" % table_doi).encode('utf8', 'replace'))
data_out.write(unicode("#: name: %s\n" % table.metadata['name']).encode('utf8', 'replace'))
data_out.write(unicode("#: description: %s\n" % table.metadata['description']).encode('utf8', 'replace'))
data_out.write(unicode("#: data_file: %s\n" % table.metadata['data_file']).encode('utf8', 'replace'))
#license:
if 'data_license' in table.metadata and table.metadata['data_license']:
license_text = (table.metadata['data_license'].get('name') or '') + ' ' + \
(table.metadata['data_license'].get('url') or '') + ' ' + \
(table.metadata['data_license'].get('description') or '')
data_out.write("#: data_license: %s\n" % license_text)
for keyword in table.metadata.get('keywords', []):
data_out.write("#: keyword %s: %s\n" % (keyword['name'], ' | '.join([str(val) for val in keyword.get('values', [])])))
def _write_table(self, data_out, table):
if self.pack:
self._write_packed_data(data_out, table)
else:
self._write_unpacked_data(data_out, table)
def _write_csv_data(self, output, qualifiers, qualifiers_marks, headers, data):
lineterminator = '\n'
self._write_qualifiers(output, qualifiers, qualifiers_marks, self.separator, lineterminator)
# Use defaults of quotechar='"' and quoting=csv.QUOTE_MINIMAL instead of previous quotechar="'" and quoting=csv.QUOTE_NONNUMERIC.
csv_writer = csv.writer(output, delimiter=self.separator, lineterminator=lineterminator)
csv_writer.writerow(headers)
for i in xrange(len(data[0])):
csv_writer.writerow([unicode(data[j][i]).encode('utf8', 'replace') for j in xrange(len(data))])
return csv_writer
@classmethod
def _write_qualifiers(cls, writer, qualifiers, qualifiers_marks, field_separator, newline):
for qualifier_key in qualifiers:
row = []
i = 0
for qualifier in qualifiers[qualifier_key]:
for i in xrange(i, len(qualifiers_marks)):
if qualifiers_marks[i]:
row.append(qualifier)
i += 1
break
else:
row.append(None)
writer.write(unicode(field_separator.join([str(val) if val is not None else '' for val in
['#: ' + qualifier_key] + row]) + newline).replace('utf8', 'replace'))
def _write_unpacked_data(self, output, table):
headers_original = []
data_original = []
qualifiers_marks_original = []
self._extract_independent_variables(table, headers_original, data_original, qualifiers_marks_original)
self._write_metadata(output, table)
for dependent_variable in table.dependent_variables:
qualifiers = {}
# make a copy of the original list
headers = list(headers_original)
data = list(data_original)
qualifiers_marks = copy.deepcopy(qualifiers_marks_original)
self._parse_dependent_variable(dependent_variable, headers, qualifiers, qualifiers_marks, data)
self._write_csv_data(output, qualifiers, qualifiers_marks, headers, data)
output.write('\n')
|
HEPData/hepdata-converter
|
hepdata_converter/common.py
|
GetConcreteSubclassMixin.get_concrete_class
|
python
|
def get_concrete_class(cls, class_name):
def recurrent_class_lookup(cls):
for cls in cls.__subclasses__():
if lower(cls.__name__) == lower(class_name):
return cls
elif len(cls.__subclasses__()) > 0:
r = recurrent_class_lookup(cls)
if r is not None:
return r
return None
cls = recurrent_class_lookup(cls)
if cls:
return cls
else:
raise ValueError("'class_name '%s' is invalid" % class_name)
|
This method provides easier access to all writers inheriting Writer class
:param class_name: name of the parser (name of the parser class which should be used)
:type class_name: str
:return: Writer subclass specified by parser_name
:rtype: Writer subclass
:raise ValueError:
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/common.py#L92-L115
|
[
"def recurrent_class_lookup(cls):\n for cls in cls.__subclasses__():\n if lower(cls.__name__) == lower(class_name):\n return cls\n elif len(cls.__subclasses__()) > 0:\n r = recurrent_class_lookup(cls)\n if r is not None:\n return r\n return None\n"
] |
class GetConcreteSubclassMixin(object):
@classmethod
@classmethod
def get_all_subclasses(cls, include_abstract=False):
def recurrent_class_list(cls):
r = []
for cls in cls.__subclasses__():
if include_abstract or not inspect.isabstract(cls):
r.append(cls)
if len(cls.__subclasses__()) > 0:
r += recurrent_class_list(cls)
return r
return recurrent_class_list(cls)
|
HEPData/hepdata-converter
|
hepdata_converter/writers/root_writer.py
|
ROOT._prepare_outputs
|
python
|
def _prepare_outputs(self, data_out, outputs):
compress = ROOTModule.ROOT.CompressionSettings(ROOTModule.ROOT.kZLIB, 1)
if isinstance(data_out, (str, unicode)):
self.file_emulation = True
outputs.append(ROOTModule.TFile.Open(data_out, 'RECREATE', '', compress))
# multiple tables - require directory
elif isinstance(data_out, ROOTModule.TFile):
outputs.append(data_out)
else: # assume it's a file like object
self.file_emulation = True
filename = os.path.join(tempfile.mkdtemp(),'tmp.root')
outputs.append(ROOTModule.TFile.Open(filename, 'RECREATE', '', compress))
|
Open a ROOT file with option 'RECREATE' to create a new file (the file will
be overwritten if it already exists), and using the ZLIB compression algorithm
(with compression level 1) for better compatibility with older ROOT versions
(see https://root.cern.ch/doc/v614/release-notes.html#important-notice ).
:param data_out:
:param outputs:
:return:
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/writers/root_writer.py#L405-L425
| null |
class ROOT(ArrayWriter):
help = 'Writes to ROOT format (binary) converts tables into files containing TH1 objects'
class_list = [TH3FRootClass, TH2FRootClass, TH1FRootClass, TGraph2DErrorsClass, TGraphAsymmErrorsRootClass]
def __init__(self, *args, **kwargs):
super(ROOT, self).__init__(*args, **kwargs)
self.extension = 'root'
def _write_table(self, data_out, table):
data_out.mkdir(table.name.replace('/','-').replace('$', '').replace('\\',''))
data_out.cd(table.name.replace('/','-').replace('$', '').replace('\\',''))
# if no independent variables, use bins of unit width and centred on integers (1, 2, 3, etc.)
if not table.independent_variables and table.dependent_variables:
if table.dependent_variables[0]['values']:
table.independent_variables.append({'header': {'name': 'Bin number'}, 'values': []})
for i, value in enumerate(table.dependent_variables[0]['values']):
table.independent_variables[0]['values'].append({'low': i+0.5, 'high': i+1.5})
# if any non-numeric independent variable values, use bins of unit width and centred on integers (1, 2, 3, etc.)
# store original variables as alphanumeric labels to be passed to ROOT histograms
for ii, independent_variable in enumerate(table.independent_variables):
if False in ObjectWrapper.is_number_var(independent_variable):
independent_variable_bins = \
{'header': {'name': independent_variable['header']['name'] + ' bin'},
'values': [], 'labels': []}
for i, value in enumerate(independent_variable['values']):
independent_variable_bins['values'].append({'low': i + 0.5, 'high': i + 1.5})
if 'value' in value:
independent_variable_bins['labels'].append(str(value['value']))
else:
independent_variable_bins['labels'].append(str(value['low']) + '-' + str(value['high']))
table.independent_variables[ii] = independent_variable_bins
if self.hepdata_doi:
table_doi = 'doi:' + self.hepdata_doi + '/t' + str(table.index)
else:
table_doi = table.name
f = ObjectFactory(self.class_list, table.independent_variables, table.dependent_variables)
for graph in f.get_next_object():
graph.SetTitle(table_doi)
graph.Write()
def write(self, data_in, data_out, *args, **kwargs):
"""
:param data_in:
:type data_in: hepconverter.parsers.ParsedData
:param data_out: filelike object
:type data_out: file
:param args:
:param kwargs:
"""
self._get_tables(data_in)
self.file_emulation = False
outputs = []
self._prepare_outputs(data_out, outputs)
output = outputs[0]
for i in xrange(len(self.tables)):
table = self.tables[i]
self._write_table(output, table)
if data_out != output and hasattr(data_out, 'write'):
output.Flush()
output.ReOpen('read')
file_size = output.GetSize()
buff = bytearray(file_size)
output.ReadBuffer(buff, file_size)
data_out.write(buff)
if self.file_emulation:
filename = output.GetName()
output.Close()
|
HEPData/hepdata-converter
|
hepdata_converter/writers/root_writer.py
|
ROOT.write
|
python
|
def write(self, data_in, data_out, *args, **kwargs):
self._get_tables(data_in)
self.file_emulation = False
outputs = []
self._prepare_outputs(data_out, outputs)
output = outputs[0]
for i in xrange(len(self.tables)):
table = self.tables[i]
self._write_table(output, table)
if data_out != output and hasattr(data_out, 'write'):
output.Flush()
output.ReOpen('read')
file_size = output.GetSize()
buff = bytearray(file_size)
output.ReadBuffer(buff, file_size)
data_out.write(buff)
if self.file_emulation:
filename = output.GetName()
output.Close()
|
:param data_in:
:type data_in: hepconverter.parsers.ParsedData
:param data_out: filelike object
:type data_out: file
:param args:
:param kwargs:
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/writers/root_writer.py#L427-L457
|
[
"def _get_tables(self, data_in):\n # get table to work on\n if self.table_id is not None:\n if isinstance(self.table_id, int):\n self.tables.append(data_in.get_table(id=self.table_id))\n else:\n try:\n tab = data_in.get_table(file=self.table_id)\n except IndexError:\n tab = data_in.get_table(name=self.table_id)\n\n self.tables.append(tab)\n else:\n self.tables = data_in.tables\n",
"def _write_table(self, data_out, table):\n data_out.mkdir(table.name.replace('/','-').replace('$', '').replace('\\\\',''))\n data_out.cd(table.name.replace('/','-').replace('$', '').replace('\\\\',''))\n\n # if no independent variables, use bins of unit width and centred on integers (1, 2, 3, etc.)\n if not table.independent_variables and table.dependent_variables:\n if table.dependent_variables[0]['values']:\n table.independent_variables.append({'header': {'name': 'Bin number'}, 'values': []})\n for i, value in enumerate(table.dependent_variables[0]['values']):\n table.independent_variables[0]['values'].append({'low': i+0.5, 'high': i+1.5})\n\n # if any non-numeric independent variable values, use bins of unit width and centred on integers (1, 2, 3, etc.)\n # store original variables as alphanumeric labels to be passed to ROOT histograms\n for ii, independent_variable in enumerate(table.independent_variables):\n if False in ObjectWrapper.is_number_var(independent_variable):\n independent_variable_bins = \\\n {'header': {'name': independent_variable['header']['name'] + ' bin'},\n 'values': [], 'labels': []}\n for i, value in enumerate(independent_variable['values']):\n independent_variable_bins['values'].append({'low': i + 0.5, 'high': i + 1.5})\n if 'value' in value:\n independent_variable_bins['labels'].append(str(value['value']))\n else:\n independent_variable_bins['labels'].append(str(value['low']) + '-' + str(value['high']))\n table.independent_variables[ii] = independent_variable_bins\n\n if self.hepdata_doi:\n table_doi = 'doi:' + self.hepdata_doi + '/t' + str(table.index)\n else:\n table_doi = table.name\n f = ObjectFactory(self.class_list, table.independent_variables, table.dependent_variables)\n for graph in f.get_next_object():\n graph.SetTitle(table_doi)\n graph.Write()\n",
"def _prepare_outputs(self, data_out, outputs):\n \"\"\" Open a ROOT file with option 'RECREATE' to create a new file (the file will\n be overwritten if it already exists), and using the ZLIB compression algorithm\n (with compression level 1) for better compatibility with older ROOT versions\n (see https://root.cern.ch/doc/v614/release-notes.html#important-notice ).\n\n :param data_out:\n :param outputs:\n :return:\n \"\"\"\n compress = ROOTModule.ROOT.CompressionSettings(ROOTModule.ROOT.kZLIB, 1)\n if isinstance(data_out, (str, unicode)):\n self.file_emulation = True\n outputs.append(ROOTModule.TFile.Open(data_out, 'RECREATE', '', compress))\n # multiple tables - require directory\n elif isinstance(data_out, ROOTModule.TFile):\n outputs.append(data_out)\n else: # assume it's a file like object\n self.file_emulation = True\n filename = os.path.join(tempfile.mkdtemp(),'tmp.root')\n outputs.append(ROOTModule.TFile.Open(filename, 'RECREATE', '', compress))\n"
] |
class ROOT(ArrayWriter):
help = 'Writes to ROOT format (binary) converts tables into files containing TH1 objects'
class_list = [TH3FRootClass, TH2FRootClass, TH1FRootClass, TGraph2DErrorsClass, TGraphAsymmErrorsRootClass]
def __init__(self, *args, **kwargs):
super(ROOT, self).__init__(*args, **kwargs)
self.extension = 'root'
def _write_table(self, data_out, table):
data_out.mkdir(table.name.replace('/','-').replace('$', '').replace('\\',''))
data_out.cd(table.name.replace('/','-').replace('$', '').replace('\\',''))
# if no independent variables, use bins of unit width and centred on integers (1, 2, 3, etc.)
if not table.independent_variables and table.dependent_variables:
if table.dependent_variables[0]['values']:
table.independent_variables.append({'header': {'name': 'Bin number'}, 'values': []})
for i, value in enumerate(table.dependent_variables[0]['values']):
table.independent_variables[0]['values'].append({'low': i+0.5, 'high': i+1.5})
# if any non-numeric independent variable values, use bins of unit width and centred on integers (1, 2, 3, etc.)
# store original variables as alphanumeric labels to be passed to ROOT histograms
for ii, independent_variable in enumerate(table.independent_variables):
if False in ObjectWrapper.is_number_var(independent_variable):
independent_variable_bins = \
{'header': {'name': independent_variable['header']['name'] + ' bin'},
'values': [], 'labels': []}
for i, value in enumerate(independent_variable['values']):
independent_variable_bins['values'].append({'low': i + 0.5, 'high': i + 1.5})
if 'value' in value:
independent_variable_bins['labels'].append(str(value['value']))
else:
independent_variable_bins['labels'].append(str(value['low']) + '-' + str(value['high']))
table.independent_variables[ii] = independent_variable_bins
if self.hepdata_doi:
table_doi = 'doi:' + self.hepdata_doi + '/t' + str(table.index)
else:
table_doi = table.name
f = ObjectFactory(self.class_list, table.independent_variables, table.dependent_variables)
for graph in f.get_next_object():
graph.SetTitle(table_doi)
graph.Write()
def _prepare_outputs(self, data_out, outputs):
""" Open a ROOT file with option 'RECREATE' to create a new file (the file will
be overwritten if it already exists), and using the ZLIB compression algorithm
(with compression level 1) for better compatibility with older ROOT versions
(see https://root.cern.ch/doc/v614/release-notes.html#important-notice ).
:param data_out:
:param outputs:
:return:
"""
compress = ROOTModule.ROOT.CompressionSettings(ROOTModule.ROOT.kZLIB, 1)
if isinstance(data_out, (str, unicode)):
self.file_emulation = True
outputs.append(ROOTModule.TFile.Open(data_out, 'RECREATE', '', compress))
# multiple tables - require directory
elif isinstance(data_out, ROOTModule.TFile):
outputs.append(data_out)
else: # assume it's a file like object
self.file_emulation = True
filename = os.path.join(tempfile.mkdtemp(),'tmp.root')
outputs.append(ROOTModule.TFile.Open(filename, 'RECREATE', '', compress))
|
HEPData/hepdata-converter
|
hepdata_converter/writers/array_writer.py
|
ArrayWriter.process_error_labels
|
python
|
def process_error_labels(value):
observed_error_labels = {}
for error in value.get('errors', []):
label = error.get('label', 'error')
if label not in observed_error_labels:
observed_error_labels[label] = 0
observed_error_labels[label] += 1
if observed_error_labels[label] > 1:
error['label'] = label + '_' + str(observed_error_labels[label])
# append "_1" to first error label that has a duplicate
if observed_error_labels[label] == 2:
for error1 in value.get('errors', []):
error1_label = error1.get('label', 'error')
if error1_label == label:
error1['label'] = label + "_1"
break
|
Process the error labels of a dependent variable 'value' to ensure uniqueness.
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/writers/array_writer.py#L140-L160
| null |
class ArrayWriter(Writer):
__metaclass__ = abc.ABCMeta
@staticmethod
@staticmethod
def calculate_total_errors(variable, is_number_list, min_errs, max_errs, values, err_breakdown={}):
for i, entry in enumerate(variable['values']):
if not is_number_list[i]: continue # skip non-numeric y values
if 'value' in entry:
values.append(entry['value'])
if 'errors' in entry:
errors_min = 0.0
errors_max = 0.0
err_breakdown[i] = {}
# process the error labels to ensure uniqueness
ArrayWriter.process_error_labels(entry)
for error in entry['errors']:
label = error.get('label', 'error')
err_breakdown[i][label] = {}
if 'asymerror' in error:
try:
err_minus = error_value_processor(entry['value'], error['asymerror']['minus'])
err_plus = error_value_processor(entry['value'], error['asymerror']['plus'])
errors_min += pow(min(err_plus, err_minus, 0.0), 2)
errors_max += pow(max(err_plus, err_minus, 0.0), 2)
err_breakdown[i][label]['up'] = err_plus # want to maintain directionality of errors
err_breakdown[i][label]['dn'] = err_minus # want to maintain directionality of errors
except TypeError:
log.error('TypeError encountered when parsing {0} and {1}'.format(
unicode(error['asymerror']['minus']).encode('utf8', 'replace'),
unicode(error['asymerror']['plus']).encode('utf8', 'replace')))
elif 'symerror' in error:
try:
err = error_value_processor(entry['value'], error['symerror'])
errors_min += pow(err, 2)
errors_max += pow(err, 2)
err_breakdown[i][label]['up'] = err
err_breakdown[i][label]['dn'] = -err
except TypeError:
log.error('TypeError encountered when parsing {0}'.format(
unicode(error['symerror']).encode('utf8', 'replace'))
)
min_errs.append(sqrt(errors_min))
max_errs.append(sqrt(errors_max))
elif 'low' in entry and 'high' in entry:
min_errs.append(entry['value'] - entry['low'])
max_errs.append(entry['high'] - entry['value'])
else:
min_errs.append(0.0)
max_errs.append(0.0)
else:
middle_val = (entry['high'] - entry['low']) * 0.5 + entry['low']
values.append(middle_val)
min_errs.append(middle_val - entry['low'])
max_errs.append(entry['high'] - middle_val)
@classmethod
def options(cls):
options = super(ArrayWriter, cls).options()
options['table'] = Option('table', 't', required=False, variable_mapping='table_id', default=None,
help=('Specifies which table should be exported, if not specified all tables will be exported '
'(in this case output must be a directory, not a file)'))
return options
def __init__(self, *args, **kwargs):
kwargs['single_file_output'] = True
super(ArrayWriter, self).__init__(*args, **kwargs)
self.tables = []
self.extension = None
@abc.abstractmethod
def _write_table(self, data_out, table):
pass
def _get_tables(self, data_in):
# get table to work on
if self.table_id is not None:
if isinstance(self.table_id, int):
self.tables.append(data_in.get_table(id=self.table_id))
else:
try:
tab = data_in.get_table(file=self.table_id)
except IndexError:
tab = data_in.get_table(name=self.table_id)
self.tables.append(tab)
else:
self.tables = data_in.tables
def _prepare_outputs(self, data_out, outputs):
if isinstance(data_out, (str, unicode)):
self.file_emulation = True
if self.table_id is not None:
f = open(data_out, 'w')
outputs.append(f)
# data_out is a directory
else:
# create output dir if it doesn't exist
self.create_dir(data_out)
for table in self.tables:
outputs.append(open(os.path.join(data_out, table.name.replace(' ','').replace('/','-').replace('$','').replace('\\','') + '.' + self.extension), 'w'))
# multiple tables - require directory
elif len(self.tables) > 1 and not isinstance(data_out, (str, unicode)):
raise ValueError("Multiple tables, output must be a directory")
else:
outputs.append(data_out)
def write(self, data_in, data_out, *args, **kwargs):
"""
:param data_in:
:type data_in: hepconverter.parsers.ParsedData
:param data_out: filelike object
:type data_out: file
:param args:
:param kwargs:
"""
self._get_tables(data_in)
self.file_emulation = False
outputs = []
self._prepare_outputs(data_out, outputs)
for i in xrange(len(self.tables)):
data_out = outputs[i]
table = self.tables[i]
self._write_table(data_out, table)
if self.file_emulation:
data_out.close()
@classmethod
def _extract_independent_variables(cls, table, headers, data, qualifiers_marks):
for independent_variable in table.independent_variables:
name = independent_variable['header']['name']
if 'units' in independent_variable['header']:
name += ' [%s]' % independent_variable['header']['units']
headers.append(unicode(name).encode('utf8', 'replace'))
x_data = []
x_data_low = []
x_data_high = []
for value in independent_variable['values']:
if 'high' in value and 'low' in value:
x_data_low.append(value['low'])
x_data_high.append(value['high'])
if 'value' in value:
x_data.append(value['value'])
else:
x_data.append(0.5*(value['low'] + value['high']))
else:
x_data_low.append(value['value'])
x_data_high.append(value['value'])
x_data.append(value['value'])
data.append(x_data)
if x_data_high != x_data_low:
data.append(x_data_low)
data.append(x_data_high)
header = headers[-1]
headers.append(header + ' LOW')
qualifiers_marks.append(False)
headers.append(header + ' HIGH')
qualifiers_marks.append(False)
@classmethod
def _parse_dependent_variable(cls, dependent_variable, headers, qualifiers, qualifiers_marks, data):
units = ''
if 'units' in dependent_variable['header']:
units = ' [%s]' % dependent_variable['header']['units']
headers.append(unicode(dependent_variable['header']['name'] + units).encode('utf8', 'replace'))
qualifiers_marks.append(True)
# peek at first value and create empty lists
y_order = []
y_data = {'values': []}
y_order.append(y_data['values'])
for value in dependent_variable['values']:
# process the error labels to ensure uniqueness
cls.process_error_labels(value)
# fill error template for all values
for error in value.get('errors', []):
label = error.get('label', 'error')
if label + '_plus' not in y_data:
headers.append(label + ' +')
qualifiers_marks.append(False)
headers.append(label + ' -')
qualifiers_marks.append(False)
plus = []
y_data[label + '_plus'] = plus
y_order.append(plus)
minus = []
y_data[label + '_minus'] = minus
y_order.append(minus)
for value in dependent_variable['values']:
y_data['values'].append(value['value'])
if 'errors' not in value:
for key, val in y_data.items():
if key != 'values':
val.append(0)
else:
for key, val in y_data.items():
has_error = False
for i in xrange(len(value.get('errors', []))):
error = value['errors'][i]
label = error.get('label', 'error')
if 'symerror' in error:
error_plus = error['symerror']
if isinstance(error_plus, (str, unicode)):
error_plus = error_plus.strip()
if len(error_plus) > 1 and error_plus[0] == '-':
error_minus = error_plus[1:]
elif error_plus:
error_minus = '-' + error_plus
else:
error_minus = error_plus
else: # error_plus is numeric
error_minus = -error_plus
else:
error_plus = error['asymerror']['plus']
error_minus = error['asymerror']['minus']
if key == label + '_plus':
val.append(error_plus)
has_error = True
elif key == label + '_minus':
val.append(error_minus)
has_error = True
if key != 'values' and not has_error:
val.append(0)
for entry in y_order:
data.append(entry)
for qualifier in dependent_variable.get('qualifiers', []):
units = ''
if 'units' in qualifier:
units = ' [%s]' % qualifier['units']
name = qualifier['name'] + units
if name not in qualifiers:
qualifiers[name] = []
qualifiers[name].append(qualifier['value'])
|
HEPData/hepdata-converter
|
hepdata_converter/parsers/yaml_parser.py
|
YAML.parse
|
python
|
def parse(self, data_in, *args, **kwargs):
if not os.path.exists(data_in):
raise ValueError("File / Directory does not exist: %s" % data_in)
if os.path.isdir(data_in):
submission_filepath = os.path.join(data_in, 'submission.yaml')
if not os.path.exists(submission_filepath):
submission_filepath = os.path.join(data_in, 'submission.yml')
if not os.path.exists(submission_filepath):
raise ValueError("No submission file in %s" % data_in)
data_in = submission_filepath
# first validate submission file:
with open(data_in, 'r') as submission_file:
submission_data = list(yaml.load_all(submission_file, Loader=Loader))
if len(submission_data) == 0:
raise RuntimeError("Submission file (%s) is empty" % data_in)
submission_file_validator = SubmissionFileValidator()
if not submission_file_validator.validate(file_path=data_in,
data=submission_data):
raise RuntimeError(
"Submission file (%s) did not pass validation: %s" %
(data_in, self._pretty_print_errors(
submission_file_validator.get_messages())))
metadata = {}
tables = []
# validator for table data
data_file_validator = DataFileValidator()
index = 0
for i in range(0, len(submission_data)):
if not submission_data[i]: # empty YAML document
continue
if 'data_file' not in submission_data[i]:
metadata = submission_data[i] # information about whole submission
continue
table_filepath = os.path.join(os.path.dirname(data_in),
submission_data[i]['data_file'])
with open(table_filepath, 'r') as table_file:
if not os.path.exists(table_filepath):
raise ValueError(
"table file: %s does not exist" % table.data_file)
table_data = yaml.load(table_file, Loader=Loader)
if not data_file_validator.validate(data=table_data,
file_path=table_filepath):
raise RuntimeError(
"Data file (%s) did not pass validation: %s" %
(table_filepath, self._pretty_print_errors(
data_file_validator.get_messages())))
index = index + 1
table = Table(index=index, metadata=submission_data[i],
data=table_data)
tables.append(table)
return ParsedData(metadata, tables)
|
:param data_in: path to submission.yaml
:param args:
:param kwargs:
:raise ValueError:
|
train
|
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/yaml_parser.py#L42-L109
| null |
class YAML(Parser):
help = 'Parses New HEPData YAML format. Input parameter should be path to ' \
'the directory where submission.yaml file ' \
'is present (or direct filepath to the submission.yaml file)'
pool = Pool()
def __init__(self, *args, **kwargs):
super(YAML, self).__init__(*args, **kwargs)
def _pretty_print_errors(self, message_dict):
return ' '.join(
['%s: %s' % (key, ' | '.join([e.message for e in val])) for
key, val in message_dict.items()])
|
ryukinix/decorating
|
decorating/decorator.py
|
Decorator.default_arguments
|
python
|
def default_arguments(cls):
func = cls.__init__
args = func.__code__.co_varnames
defaults = func.__defaults__
index = -len(defaults)
return {k: v for k, v in zip(args[index:], defaults)}
|
Returns the available kwargs of the called class
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/decorator.py#L134-L140
| null |
class Decorator(DecoratorManager):
"""Decorator base class to keep easy creating more decorators
triggers:
self.start
self.stop
context_manager:
self.__enter__
self.__exit__
Only this is in generall necessary to implement the class you are writing,
like this:
class Wired(Decorator):
def __init__(self, user='Lain')
self.user = user
def start(self):
self.login()
def stop(self):
self.logoff()
def login(self):
print('Welcome to the Wired, {user}!'.format(user=self.user))
def logoff(self):
print('Close this world, open the next!'.)
And all the black magic is done for you behind the scenes. In theory,
you can use the decorator in these way:
@Wired('lain')
def foo():
pass
@Wired(argument='banana')
def bar():
pass
@Wired
def lain():
pass
@Wired()
def death():
pass
And all are okay! As well, natively, you have support to use as
context managers.
So that you can handle that way:
with Wired:
print("Download the Knight files...")
with Wired():
print("Underlying bugs not anymore")
with Wired("Lerax"):
print("I'm exists?")
with Wired(user="Lerax"):
print("I don't have the real answer.")
And all occurs be fine like you thinks this do.
"""
# a map of instances to handle between the various forms
# of using decorators, like @foo() or @foo.
instances = []
@classmethod
def __call__(cls, *args, **kwargs):
instance = cls.recreate(*args, **kwargs)
cls.instances.append(instance)
if any(args) and callable(args[0]): # pass a function/class
return instance._over_wrapper(args[0])
return instance
def _over_wrapper(self, function):
@wraps(function)
def _wrapper(*args, **kargs):
self.start()
result = function(*args, **kargs)
self.stop()
return result
return _wrapper
@classmethod
@classmethod
def recreate(cls, *args, **kwargs):
"""Recreate the class based in your args, multiple uses"""
cls.check_arguments(kwargs)
first_is_callable = True if any(args) and callable(args[0]) else False
signature = cls.default_arguments()
allowed_arguments = {k: v for k, v in kwargs.items() if k in signature}
if (any(allowed_arguments) or any(args)) and not first_is_callable:
if any(args) and not first_is_callable:
return cls(args[0], **allowed_arguments)
elif any(allowed_arguments):
return cls(**allowed_arguments)
return cls.instances[-1] if any(cls.instances) else cls()
@classmethod
def check_arguments(cls, passed):
"""Put warnings of arguments whose can't be handle by the class"""
defaults = list(cls.default_arguments().keys())
template = ("Pass arg {argument:!r} in {cname:!r}, can be a typo? "
"Supported key arguments: {defaults}")
fails = []
for arg in passed:
if arg not in defaults:
warn(template.format(argument=arg,
cname=cls.__name__,
defaults=defaults))
fails.append(arg)
return any(fails)
|
ryukinix/decorating
|
decorating/decorator.py
|
Decorator.recreate
|
python
|
def recreate(cls, *args, **kwargs):
cls.check_arguments(kwargs)
first_is_callable = True if any(args) and callable(args[0]) else False
signature = cls.default_arguments()
allowed_arguments = {k: v for k, v in kwargs.items() if k in signature}
if (any(allowed_arguments) or any(args)) and not first_is_callable:
if any(args) and not first_is_callable:
return cls(args[0], **allowed_arguments)
elif any(allowed_arguments):
return cls(**allowed_arguments)
return cls.instances[-1] if any(cls.instances) else cls()
|
Recreate the class based in your args, multiple uses
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/decorator.py#L143-L155
| null |
class Decorator(DecoratorManager):
"""Decorator base class to keep easy creating more decorators
triggers:
self.start
self.stop
context_manager:
self.__enter__
self.__exit__
Only this is in generall necessary to implement the class you are writing,
like this:
class Wired(Decorator):
def __init__(self, user='Lain')
self.user = user
def start(self):
self.login()
def stop(self):
self.logoff()
def login(self):
print('Welcome to the Wired, {user}!'.format(user=self.user))
def logoff(self):
print('Close this world, open the next!'.)
And all the black magic is done for you behind the scenes. In theory,
you can use the decorator in these way:
@Wired('lain')
def foo():
pass
@Wired(argument='banana')
def bar():
pass
@Wired
def lain():
pass
@Wired()
def death():
pass
And all are okay! As well, natively, you have support to use as
context managers.
So that you can handle that way:
with Wired:
print("Download the Knight files...")
with Wired():
print("Underlying bugs not anymore")
with Wired("Lerax"):
print("I'm exists?")
with Wired(user="Lerax"):
print("I don't have the real answer.")
And all occurs be fine like you thinks this do.
"""
# a map of instances to handle between the various forms
# of using decorators, like @foo() or @foo.
instances = []
@classmethod
def __call__(cls, *args, **kwargs):
instance = cls.recreate(*args, **kwargs)
cls.instances.append(instance)
if any(args) and callable(args[0]): # pass a function/class
return instance._over_wrapper(args[0])
return instance
def _over_wrapper(self, function):
@wraps(function)
def _wrapper(*args, **kargs):
self.start()
result = function(*args, **kargs)
self.stop()
return result
return _wrapper
@classmethod
def default_arguments(cls):
"""Returns the available kwargs of the called class"""
func = cls.__init__
args = func.__code__.co_varnames
defaults = func.__defaults__
index = -len(defaults)
return {k: v for k, v in zip(args[index:], defaults)}
@classmethod
@classmethod
def check_arguments(cls, passed):
"""Put warnings of arguments whose can't be handle by the class"""
defaults = list(cls.default_arguments().keys())
template = ("Pass arg {argument:!r} in {cname:!r}, can be a typo? "
"Supported key arguments: {defaults}")
fails = []
for arg in passed:
if arg not in defaults:
warn(template.format(argument=arg,
cname=cls.__name__,
defaults=defaults))
fails.append(arg)
return any(fails)
|
ryukinix/decorating
|
decorating/decorator.py
|
Decorator.check_arguments
|
python
|
def check_arguments(cls, passed):
defaults = list(cls.default_arguments().keys())
template = ("Pass arg {argument:!r} in {cname:!r}, can be a typo? "
"Supported key arguments: {defaults}")
fails = []
for arg in passed:
if arg not in defaults:
warn(template.format(argument=arg,
cname=cls.__name__,
defaults=defaults))
fails.append(arg)
return any(fails)
|
Put warnings of arguments whose can't be handle by the class
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/decorator.py#L158-L171
| null |
class Decorator(DecoratorManager):
"""Decorator base class to keep easy creating more decorators
triggers:
self.start
self.stop
context_manager:
self.__enter__
self.__exit__
Only this is in generall necessary to implement the class you are writing,
like this:
class Wired(Decorator):
def __init__(self, user='Lain')
self.user = user
def start(self):
self.login()
def stop(self):
self.logoff()
def login(self):
print('Welcome to the Wired, {user}!'.format(user=self.user))
def logoff(self):
print('Close this world, open the next!'.)
And all the black magic is done for you behind the scenes. In theory,
you can use the decorator in these way:
@Wired('lain')
def foo():
pass
@Wired(argument='banana')
def bar():
pass
@Wired
def lain():
pass
@Wired()
def death():
pass
And all are okay! As well, natively, you have support to use as
context managers.
So that you can handle that way:
with Wired:
print("Download the Knight files...")
with Wired():
print("Underlying bugs not anymore")
with Wired("Lerax"):
print("I'm exists?")
with Wired(user="Lerax"):
print("I don't have the real answer.")
And all occurs be fine like you thinks this do.
"""
# a map of instances to handle between the various forms
# of using decorators, like @foo() or @foo.
instances = []
@classmethod
def __call__(cls, *args, **kwargs):
instance = cls.recreate(*args, **kwargs)
cls.instances.append(instance)
if any(args) and callable(args[0]): # pass a function/class
return instance._over_wrapper(args[0])
return instance
def _over_wrapper(self, function):
@wraps(function)
def _wrapper(*args, **kargs):
self.start()
result = function(*args, **kargs)
self.stop()
return result
return _wrapper
@classmethod
def default_arguments(cls):
"""Returns the available kwargs of the called class"""
func = cls.__init__
args = func.__code__.co_varnames
defaults = func.__defaults__
index = -len(defaults)
return {k: v for k, v in zip(args[index:], defaults)}
@classmethod
def recreate(cls, *args, **kwargs):
"""Recreate the class based in your args, multiple uses"""
cls.check_arguments(kwargs)
first_is_callable = True if any(args) and callable(args[0]) else False
signature = cls.default_arguments()
allowed_arguments = {k: v for k, v in kwargs.items() if k in signature}
if (any(allowed_arguments) or any(args)) and not first_is_callable:
if any(args) and not first_is_callable:
return cls(args[0], **allowed_arguments)
elif any(allowed_arguments):
return cls(**allowed_arguments)
return cls.instances[-1] if any(cls.instances) else cls()
@classmethod
|
ryukinix/decorating
|
decorating/color.py
|
colorize
|
python
|
def colorize(printable, color, style='normal', autoreset=True):
if not COLORED: # disable color
return printable
if color not in COLOR_MAP:
raise RuntimeError('invalid color set, no {}'.format(color))
return '{color}{printable}{reset}'.format(
printable=printable,
color=COLOR_MAP[color].format(style=STYLE_MAP[style]),
reset=COLOR_MAP['reset'] if autoreset else ''
)
|
Colorize some message with ANSI colors specification
:param printable: interface whose has __str__ or __repr__ method
:param color: the colors defined in COLOR_MAP to colorize the text
:style: can be 'normal', 'bold' or 'underline'
:returns: the 'printable' colorized with style
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/color.py#L45-L63
| null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2016
#
# @project: Decorating
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
"""
Module focused in termcolor operations
If the exection is not attatched in any tty,
so colored is disabled
"""
from __future__ import unicode_literals
import sys
COLORED = True
if not sys.stdout.isatty() or sys.platform == 'win32':
COLORED = False # pragma: no cover
COLOR_MAP = {
'brown': '\033[{style};30m',
'red': '\033[{style};31m',
'green': '\033[{style};32m',
'yellow': '\033[{style};33m',
'blue': '\033[{style};34m',
'pink': '\033[{style};35m',
'cyan': '\033[{style};36m',
'gray': '\033[{style};37m',
'white': '\033[{style};40m',
'reset': '\033[00;00m'
}
STYLE_MAP = {
'normal': '00',
'bold': '01',
'underline': '04',
}
|
ryukinix/decorating
|
decorating/general.py
|
cache
|
python
|
def cache(function):
memory = {}
miss = object()
@wraps(function)
def _wrapper(*args):
result = memory.get(args, miss)
if result is miss:
_wrapper.call += 1
result = function(*args)
memory[args] = result
return result
_wrapper.call = 0
return _wrapper
|
Function: cache
Summary: Decorator used to cache the input->output
Examples: An fib memoized executes at O(1) time
instead O(e^n)
Attributes:
@param (function): function
Returns: wrapped function
TODO: Give support to functions with kwargs
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/general.py#L50-L75
| null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2016
#
# @project: Decorating
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
# pylint: disable=redefined-builtin
# pylint: disable=invalid-name
"""
An collection of usefull decorators for debug
and time evaluation of functions flow
"""
# stdlib
from functools import wraps
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2: # pragma: no cover
from itertools import izip
zip = izip
else: # pragma: no cover
zip = zip
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
# Copied from `six' library.
# Copyright (c) 2010-2015 Benjamin Peterson
# License: MIT
class metaclass(meta):
"""Dummy metaclass"""
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
|
ryukinix/decorating
|
decorating/animation.py
|
space_wave
|
python
|
def space_wave(phase, amplitude=12, frequency=0.1):
wave = cycle(horizontal)
return ''.join((next(wave) for x in range
(int((amplitude + 1) * abs(sin(frequency * (phase)))))))
|
Function: space_wave
Summary: This function is used to generate a wave-like padding
spacement based on the variable lambda
Examples: >>> print('\n'.join(space_wave(x) for x in range(100))
█
███
████
██████
███████
████████
█████████
██████████
██████████
██████████
██████████
██████████
██████████
█████████
████████
███████
█████
████
██
█
Attributes:
@param (phase): your positive variable, can be a int or float
@param (char) default='█': the char to construct the space_wave
@param (amplitude) default=10: a float/int number to describe
how long is the space_wave max
@param (frequency) default=0.1: the speed of change
Returns: a unique string of a sequence of 'char'
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L114-L151
| null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2016
#
# @project: Decorating
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
# pylint: disable=no-member
# pylint: disable=C0103
# pylint: disable=too-few-public-methods
"""
This module was be done to handle the beautiful animation using
the sin function (whose cause a pulse in the stdout).
Some examples of using is here:
@animated
def slow():
heavy_stuff()
As well with custom messages
@animated('WOOOOW')
def download_the_universe():
while True:
pass
with animated('loool'):
stuff_from_hell()
@writing
def printer():
lot_of_messages()
with writing(delay=0.5):
print("L O L => IS NOT THE STUPID GAME LOL, LOL.")
"""
from __future__ import unicode_literals
import signal
import sys
import threading
from math import sin
from functools import partial
from itertools import cycle
from . import decorator, color, stream, asciiart
from .general import zip
# THIS IS A LOL ZONE
# /\O | _O | O
# /\/ | //|_ | /_
# /\ | | | |\
# / \ | /| | / |
# LOL LOL | LLOL | LOLLOL
# HACK: Global variables to customize behavior of spinner
horizontal = asciiart.WAVE
vertical1 = ''.join(x * 5 for x in asciiart.BRAILY)
vertical2 = asciiart.VPULSE
animation_color = {
'message': 'red',
'padding': 'blue',
'start': 'cyan',
'end': 'cyan'
}
class SpinnerController(object):
"""Variables to controlling the spinner in parallel
Bias: the initial value of the padding function
is used here because after a animation stop
and other is started in sequence, the padding
for a semantic view need be in the same place.
running: variable signal-like to stop the thread
on the main loop doing the animation
message: the actual messaging on the spinner
stream: the stream to do the animation, needs
implement the AbstractClass stream.Stream
"""
bias = 0
running = False
message = ''
stream = stream.Animation(sys.stderr)
fpadding = None
class AnimationController(object):
"""Used to controlling thread & running
context: the context level added +1 at each nested 'with'
running: the object running in the actual moment
"""
context = 0
thread = None
messages = []
def _spinner(control):
if not sys.stdout.isatty(): # not send to pipe/redirection
return # pragma: no cover
colorize_no_reset = partial(color.colorize, autoreset=False)
template = '{padding} {start} {message} {end}'
iterator = zip(cycle(vertical1), cycle(vertical2))
for i, (start, end) in enumerate(iterator):
padding = control.fpadding(i + control.bias)
message = '\r' + template.format(
message=colorize_no_reset(control.message, animation_color['message']),
padding=colorize_no_reset(padding, animation_color['padding']),
start=colorize_no_reset(start, animation_color['start']),
end=color.colorize(end, animation_color['end'])
)
with control.stream.lock:
control.stream.write(message)
if not control.running:
control.bias = i
break
control.stream.erase(message)
# D
# E
# C
# O
# R
# A
# T
# O
# R
# S
# deal with it
class AnimatedDecorator(decorator.Decorator):
"""The animated decorator from hell
You can use this these way:
@animated
def slow():
heavy_stuff()
As well with custom messages
@animated('WOOOOW')
def download_the_universe():
while True:
pass
with animated('loool'):
stuff_from_hell()
"""
# if nothing is passed, so this is the message
default_message = 'loading'
# to handle various decorated functions
spinner = SpinnerController()
# to know if some instance of this class is running
# and proper handle that, like ctrl + c and exits
animation = AnimationController()
_enabled = True
def __init__(self, message=None, fpadding=space_wave):
super(AnimatedDecorator, self).__init__()
self.message = message
self.spinner.fpadding = fpadding
@property
def enabled(self):
"""True if animation is enabled, false otherwise"""
return AnimatedDecorator._enabled
@enabled.setter
def enabled(self, state):
"""Set a state on AnimatedDecorator._enabled"""
AnimatedDecorator._enabled = state
def start(self, autopush=True):
"""Start a new animation instance"""
if self.enabled:
if autopush:
self.push_message(self.message)
self.spinner.message = ' - '.join(self.animation.messages)
if not self.spinner.running:
self.animation.thread = threading.Thread(target=_spinner,
args=(self.spinner,))
self.spinner.running = True
self.animation.thread.start()
sys.stdout = stream.Clean(sys.stdout, self.spinner.stream)
@classmethod
def stop(cls):
"""Stop the thread animation gracefully and reset_message"""
if AnimatedDecorator._enabled:
if cls.spinner.running:
cls.spinner.running = False
cls.animation.thread.join()
if any(cls.animation.messages):
cls.pop_message()
sys.stdout = sys.__stdout__
def __enter__(self):
if self.enabled:
self.animation.context += 1
self.start()
def __exit__(self, *args):
# if the context manager doesn't running yet
if self.enabled:
self.animation.context -= 1
self.pop_message()
if self.animation.context == 0:
self.stop()
else:
self.start(autopush=False)
@classmethod
def push_message(cls, message):
"""Push a new message for the public messages stack"""
return cls.animation.messages.append(message)
@classmethod
def pop_message(cls):
"""Pop a new message (last) from the public message stack"""
return cls.animation.messages.pop(-1)
@classmethod
def __call__(cls, *args, **kwargs):
obj = super(AnimatedDecorator, cls).__call__(*args, **kwargs)
if any(cls.instances):
last_instance = cls.instances[-1]
last_instance.message = last_instance.auto_message(args)
elif isinstance(obj, cls):
obj.message = obj.auto_message(args)
return obj
def auto_message(self, args):
"""Try guess the message by the args passed
args: a set of args passed on the wrapper __call__ in
the definition above.
if the object already have some message (defined in __init__),
we don't change that. If the first arg is a function, so is decorated
without argument, use the func name as the message.
If not self.message anyway, use the default_message global,
another else use the default self.message already
"""
if any(args) and callable(args[0]) and not self.message:
return args[0].__name__
elif not self.message:
return self.default_message
else:
return self.message
class WritingDecorator(decorator.Decorator):
"""A writing class context to simulate a delayed stream
You can do something like that:
with writing(delay=0.3):
print('LOL!!! This is so awesome!')
Or, as expected for this lib, using as decorator!
@writing
def somebody_talking():
print("Oh man... I'm so sad. Why I even exists?")
print("I don't meeting anybody")
print("I don't want answer my phone")
print("I don't even to live")
print("But dying is so hassle.")
print("I'd wish just disappears.")
delay: the down speed of writing, more bigger, more slow.
"""
# to handle nested streams
streams = []
enabled = True
def __init__(self, delay=0.05):
super(WritingDecorator, self).__init__()
self.stream = stream.Writting(sys.stdout, delay=delay)
if not self.enabled:
self.stream = sys.__stdout__
def start(self):
"""Activate the TypingStream on stdout"""
self.streams.append(sys.stdout)
sys.stdout = self.stream
@classmethod
def stop(cls):
"""Change back the normal stdout after the end"""
if any(cls.streams):
sys.stdout = cls.streams.pop(-1)
else:
sys.stdout = sys.__stdout__
def _killed(): # pragma: no cover
AnimatedDecorator.stop()
WritingDecorator.stop()
AnimatedDecorator.spinner.stream.dump.close()
raise KeyboardInterrupt
signal.signal(signal.SIGINT, lambda x, y: _killed())
animated = AnimatedDecorator('loading')
writing = WritingDecorator()
__all__ = [
'animated',
'writing'
]
|
ryukinix/decorating
|
decorating/animation.py
|
AnimatedDecorator.start
|
python
|
def start(self, autopush=True):
if self.enabled:
if autopush:
self.push_message(self.message)
self.spinner.message = ' - '.join(self.animation.messages)
if not self.spinner.running:
self.animation.thread = threading.Thread(target=_spinner,
args=(self.spinner,))
self.spinner.running = True
self.animation.thread.start()
sys.stdout = stream.Clean(sys.stdout, self.spinner.stream)
|
Start a new animation instance
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L235-L246
|
[
"def push_message(cls, message):\n \"\"\"Push a new message for the public messages stack\"\"\"\n return cls.animation.messages.append(message)\n"
] |
class AnimatedDecorator(decorator.Decorator):
"""The animated decorator from hell
You can use this these way:
@animated
def slow():
heavy_stuff()
As well with custom messages
@animated('WOOOOW')
def download_the_universe():
while True:
pass
with animated('loool'):
stuff_from_hell()
"""
# if nothing is passed, so this is the message
default_message = 'loading'
# to handle various decorated functions
spinner = SpinnerController()
# to know if some instance of this class is running
# and proper handle that, like ctrl + c and exits
animation = AnimationController()
_enabled = True
def __init__(self, message=None, fpadding=space_wave):
super(AnimatedDecorator, self).__init__()
self.message = message
self.spinner.fpadding = fpadding
@property
def enabled(self):
"""True if animation is enabled, false otherwise"""
return AnimatedDecorator._enabled
@enabled.setter
def enabled(self, state):
"""Set a state on AnimatedDecorator._enabled"""
AnimatedDecorator._enabled = state
@classmethod
def stop(cls):
"""Stop the thread animation gracefully and reset_message"""
if AnimatedDecorator._enabled:
if cls.spinner.running:
cls.spinner.running = False
cls.animation.thread.join()
if any(cls.animation.messages):
cls.pop_message()
sys.stdout = sys.__stdout__
def __enter__(self):
if self.enabled:
self.animation.context += 1
self.start()
def __exit__(self, *args):
# if the context manager doesn't running yet
if self.enabled:
self.animation.context -= 1
self.pop_message()
if self.animation.context == 0:
self.stop()
else:
self.start(autopush=False)
@classmethod
def push_message(cls, message):
"""Push a new message for the public messages stack"""
return cls.animation.messages.append(message)
@classmethod
def pop_message(cls):
"""Pop a new message (last) from the public message stack"""
return cls.animation.messages.pop(-1)
@classmethod
def __call__(cls, *args, **kwargs):
obj = super(AnimatedDecorator, cls).__call__(*args, **kwargs)
if any(cls.instances):
last_instance = cls.instances[-1]
last_instance.message = last_instance.auto_message(args)
elif isinstance(obj, cls):
obj.message = obj.auto_message(args)
return obj
def auto_message(self, args):
"""Try guess the message by the args passed
args: a set of args passed on the wrapper __call__ in
the definition above.
if the object already have some message (defined in __init__),
we don't change that. If the first arg is a function, so is decorated
without argument, use the func name as the message.
If not self.message anyway, use the default_message global,
another else use the default self.message already
"""
if any(args) and callable(args[0]) and not self.message:
return args[0].__name__
elif not self.message:
return self.default_message
else:
return self.message
|
ryukinix/decorating
|
decorating/animation.py
|
AnimatedDecorator.stop
|
python
|
def stop(cls):
if AnimatedDecorator._enabled:
if cls.spinner.running:
cls.spinner.running = False
cls.animation.thread.join()
if any(cls.animation.messages):
cls.pop_message()
sys.stdout = sys.__stdout__
|
Stop the thread animation gracefully and reset_message
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L249-L259
|
[
"def pop_message(cls):\n \"\"\"Pop a new message (last) from the public message stack\"\"\"\n return cls.animation.messages.pop(-1)\n"
] |
class AnimatedDecorator(decorator.Decorator):
"""The animated decorator from hell
You can use this these way:
@animated
def slow():
heavy_stuff()
As well with custom messages
@animated('WOOOOW')
def download_the_universe():
while True:
pass
with animated('loool'):
stuff_from_hell()
"""
# if nothing is passed, so this is the message
default_message = 'loading'
# to handle various decorated functions
spinner = SpinnerController()
# to know if some instance of this class is running
# and proper handle that, like ctrl + c and exits
animation = AnimationController()
_enabled = True
def __init__(self, message=None, fpadding=space_wave):
super(AnimatedDecorator, self).__init__()
self.message = message
self.spinner.fpadding = fpadding
@property
def enabled(self):
"""True if animation is enabled, false otherwise"""
return AnimatedDecorator._enabled
@enabled.setter
def enabled(self, state):
"""Set a state on AnimatedDecorator._enabled"""
AnimatedDecorator._enabled = state
def start(self, autopush=True):
"""Start a new animation instance"""
if self.enabled:
if autopush:
self.push_message(self.message)
self.spinner.message = ' - '.join(self.animation.messages)
if not self.spinner.running:
self.animation.thread = threading.Thread(target=_spinner,
args=(self.spinner,))
self.spinner.running = True
self.animation.thread.start()
sys.stdout = stream.Clean(sys.stdout, self.spinner.stream)
@classmethod
def __enter__(self):
if self.enabled:
self.animation.context += 1
self.start()
def __exit__(self, *args):
# if the context manager doesn't running yet
if self.enabled:
self.animation.context -= 1
self.pop_message()
if self.animation.context == 0:
self.stop()
else:
self.start(autopush=False)
@classmethod
def push_message(cls, message):
"""Push a new message for the public messages stack"""
return cls.animation.messages.append(message)
@classmethod
def pop_message(cls):
"""Pop a new message (last) from the public message stack"""
return cls.animation.messages.pop(-1)
@classmethod
def __call__(cls, *args, **kwargs):
obj = super(AnimatedDecorator, cls).__call__(*args, **kwargs)
if any(cls.instances):
last_instance = cls.instances[-1]
last_instance.message = last_instance.auto_message(args)
elif isinstance(obj, cls):
obj.message = obj.auto_message(args)
return obj
def auto_message(self, args):
"""Try guess the message by the args passed
args: a set of args passed on the wrapper __call__ in
the definition above.
if the object already have some message (defined in __init__),
we don't change that. If the first arg is a function, so is decorated
without argument, use the func name as the message.
If not self.message anyway, use the default_message global,
another else use the default self.message already
"""
if any(args) and callable(args[0]) and not self.message:
return args[0].__name__
elif not self.message:
return self.default_message
else:
return self.message
|
ryukinix/decorating
|
decorating/animation.py
|
AnimatedDecorator.auto_message
|
python
|
def auto_message(self, args):
if any(args) and callable(args[0]) and not self.message:
return args[0].__name__
elif not self.message:
return self.default_message
else:
return self.message
|
Try guess the message by the args passed
args: a set of args passed on the wrapper __call__ in
the definition above.
if the object already have some message (defined in __init__),
we don't change that. If the first arg is a function, so is decorated
without argument, use the func name as the message.
If not self.message anyway, use the default_message global,
another else use the default self.message already
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L296-L315
| null |
class AnimatedDecorator(decorator.Decorator):
"""The animated decorator from hell
You can use this these way:
@animated
def slow():
heavy_stuff()
As well with custom messages
@animated('WOOOOW')
def download_the_universe():
while True:
pass
with animated('loool'):
stuff_from_hell()
"""
# if nothing is passed, so this is the message
default_message = 'loading'
# to handle various decorated functions
spinner = SpinnerController()
# to know if some instance of this class is running
# and proper handle that, like ctrl + c and exits
animation = AnimationController()
_enabled = True
def __init__(self, message=None, fpadding=space_wave):
super(AnimatedDecorator, self).__init__()
self.message = message
self.spinner.fpadding = fpadding
@property
def enabled(self):
"""True if animation is enabled, false otherwise"""
return AnimatedDecorator._enabled
@enabled.setter
def enabled(self, state):
"""Set a state on AnimatedDecorator._enabled"""
AnimatedDecorator._enabled = state
def start(self, autopush=True):
"""Start a new animation instance"""
if self.enabled:
if autopush:
self.push_message(self.message)
self.spinner.message = ' - '.join(self.animation.messages)
if not self.spinner.running:
self.animation.thread = threading.Thread(target=_spinner,
args=(self.spinner,))
self.spinner.running = True
self.animation.thread.start()
sys.stdout = stream.Clean(sys.stdout, self.spinner.stream)
@classmethod
def stop(cls):
"""Stop the thread animation gracefully and reset_message"""
if AnimatedDecorator._enabled:
if cls.spinner.running:
cls.spinner.running = False
cls.animation.thread.join()
if any(cls.animation.messages):
cls.pop_message()
sys.stdout = sys.__stdout__
def __enter__(self):
if self.enabled:
self.animation.context += 1
self.start()
def __exit__(self, *args):
# if the context manager doesn't running yet
if self.enabled:
self.animation.context -= 1
self.pop_message()
if self.animation.context == 0:
self.stop()
else:
self.start(autopush=False)
@classmethod
def push_message(cls, message):
"""Push a new message for the public messages stack"""
return cls.animation.messages.append(message)
@classmethod
def pop_message(cls):
"""Pop a new message (last) from the public message stack"""
return cls.animation.messages.pop(-1)
@classmethod
def __call__(cls, *args, **kwargs):
obj = super(AnimatedDecorator, cls).__call__(*args, **kwargs)
if any(cls.instances):
last_instance = cls.instances[-1]
last_instance.message = last_instance.auto_message(args)
elif isinstance(obj, cls):
obj.message = obj.auto_message(args)
return obj
|
ryukinix/decorating
|
decorating/animation.py
|
WritingDecorator.start
|
python
|
def start(self):
self.streams.append(sys.stdout)
sys.stdout = self.stream
|
Activate the TypingStream on stdout
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L353-L356
| null |
class WritingDecorator(decorator.Decorator):
"""A writing class context to simulate a delayed stream
You can do something like that:
with writing(delay=0.3):
print('LOL!!! This is so awesome!')
Or, as expected for this lib, using as decorator!
@writing
def somebody_talking():
print("Oh man... I'm so sad. Why I even exists?")
print("I don't meeting anybody")
print("I don't want answer my phone")
print("I don't even to live")
print("But dying is so hassle.")
print("I'd wish just disappears.")
delay: the down speed of writing, more bigger, more slow.
"""
# to handle nested streams
streams = []
enabled = True
def __init__(self, delay=0.05):
super(WritingDecorator, self).__init__()
self.stream = stream.Writting(sys.stdout, delay=delay)
if not self.enabled:
self.stream = sys.__stdout__
@classmethod
def stop(cls):
"""Change back the normal stdout after the end"""
if any(cls.streams):
sys.stdout = cls.streams.pop(-1)
else:
sys.stdout = sys.__stdout__
|
ryukinix/decorating
|
decorating/animation.py
|
WritingDecorator.stop
|
python
|
def stop(cls):
if any(cls.streams):
sys.stdout = cls.streams.pop(-1)
else:
sys.stdout = sys.__stdout__
|
Change back the normal stdout after the end
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L359-L364
| null |
class WritingDecorator(decorator.Decorator):
"""A writing class context to simulate a delayed stream
You can do something like that:
with writing(delay=0.3):
print('LOL!!! This is so awesome!')
Or, as expected for this lib, using as decorator!
@writing
def somebody_talking():
print("Oh man... I'm so sad. Why I even exists?")
print("I don't meeting anybody")
print("I don't want answer my phone")
print("I don't even to live")
print("But dying is so hassle.")
print("I'd wish just disappears.")
delay: the down speed of writing, more bigger, more slow.
"""
# to handle nested streams
streams = []
enabled = True
def __init__(self, delay=0.05):
super(WritingDecorator, self).__init__()
self.stream = stream.Writting(sys.stdout, delay=delay)
if not self.enabled:
self.stream = sys.__stdout__
def start(self):
"""Activate the TypingStream on stdout"""
self.streams.append(sys.stdout)
sys.stdout = self.stream
@classmethod
|
ryukinix/decorating
|
decorating/stream.py
|
Unbuffered.write
|
python
|
def write(self, message, flush=True):
self.stream.write(message)
if flush:
self.stream.flush()
|
Function: write
Summary: write method on the default stream
Examples: >>> stream.write('message')
'message'
Attributes:
@param (message): str-like content to send on stream
@param (flush) default=True: flush the stdout after write
Returns: None
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/stream.py#L45-L58
| null |
class Unbuffered(Stream):
"""Unbuffered whose flush automaticly
That way we don't need flush after a write.
"""
lock = Lock()
def __init__(self, stream):
super(Unbuffered, self).__init__(stream)
self.stream = stream
def __getattr__(self, attr):
return getattr(self.stream, attr)
|
ryukinix/decorating
|
decorating/stream.py
|
Animation.write
|
python
|
def write(self, message, autoerase=True):
super(Animation, self).write(message)
self.last_message = message
if autoerase:
time.sleep(self.interval)
self.erase(message)
|
Send something for stdout and erased after delay
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/stream.py#L82-L88
|
[
"def write(self, message, flush=True):\n \"\"\"\n Function: write\n Summary: write method on the default stream\n Examples: >>> stream.write('message')\n 'message'\n Attributes:\n @param (message): str-like content to send on stream\n @param (flush) default=True: flush the stdout after write\n Returns: None\n \"\"\"\n self.stream.write(message)\n if flush:\n self.stream.flush()\n",
"def erase(self, message=None):\n \"\"\"Erase something whose you write before: message\"\"\"\n if not message:\n message = self.last_message\n # Move cursor to the beginning of line\n super(Animation, self).write(\"\\033[G\")\n # Erase in line from cursor\n super(Animation, self).write(\"\\033[K\")\n"
] |
class Animation(Unbuffered):
"""A stream unbuffered whose write & erase at interval
After you write something, you can easily clean the buffer
and restart the points of the older message.
stream = Animation(stream, delay=0.5)
self.write('message')
"""
last_message = ''
ansi_escape = re.compile(r'\x1b[^m]*m')
def __init__(self, stream, interval=0.05):
super(Animation, self).__init__(stream)
self.interval = interval
def erase(self, message=None):
"""Erase something whose you write before: message"""
if not message:
message = self.last_message
# Move cursor to the beginning of line
super(Animation, self).write("\033[G")
# Erase in line from cursor
super(Animation, self).write("\033[K")
def __getattr__(self, attr):
return getattr(self.stream, attr)
|
ryukinix/decorating
|
decorating/stream.py
|
Animation.erase
|
python
|
def erase(self, message=None):
if not message:
message = self.last_message
# Move cursor to the beginning of line
super(Animation, self).write("\033[G")
# Erase in line from cursor
super(Animation, self).write("\033[K")
|
Erase something whose you write before: message
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/stream.py#L90-L97
|
[
"def write(self, message, flush=True):\n \"\"\"\n Function: write\n Summary: write method on the default stream\n Examples: >>> stream.write('message')\n 'message'\n Attributes:\n @param (message): str-like content to send on stream\n @param (flush) default=True: flush the stdout after write\n Returns: None\n \"\"\"\n self.stream.write(message)\n if flush:\n self.stream.flush()\n"
] |
class Animation(Unbuffered):
"""A stream unbuffered whose write & erase at interval
After you write something, you can easily clean the buffer
and restart the points of the older message.
stream = Animation(stream, delay=0.5)
self.write('message')
"""
last_message = ''
ansi_escape = re.compile(r'\x1b[^m]*m')
def __init__(self, stream, interval=0.05):
super(Animation, self).__init__(stream)
self.interval = interval
def write(self, message, autoerase=True):
"""Send something for stdout and erased after delay"""
super(Animation, self).write(message)
self.last_message = message
if autoerase:
time.sleep(self.interval)
self.erase(message)
def __getattr__(self, attr):
return getattr(self.stream, attr)
|
ryukinix/decorating
|
decorating/stream.py
|
Clean.write
|
python
|
def write(self, message, flush=False):
# this need be threadsafe because the concurrent spinning running on
# the stderr
with self.lock:
self.paralell_stream.erase()
super(Clean, self).write(message, flush)
|
Write something on the default stream with a prefixed message
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/stream.py#L120-L126
|
[
"def write(self, message, flush=True):\n \"\"\"\n Function: write\n Summary: write method on the default stream\n Examples: >>> stream.write('message')\n 'message'\n Attributes:\n @param (message): str-like content to send on stream\n @param (flush) default=True: flush the stdout after write\n Returns: None\n \"\"\"\n self.stream.write(message)\n if flush:\n self.stream.flush()\n"
] |
class Clean(Unbuffered):
"""A stream wrapper to prepend '\n' in each write
This is used to not break the animations when he is activated
So in the start_animation we do:
sys.stdout = Clean(sys.stdout, <paralell-stream>)
In the stop_animation we do:
sys.stdout = sys.__stdout__Whose paralell_stream is a Animation object.
"""
def __init__(self, stream, paralell_stream):
super(Clean, self).__init__(stream)
self.paralell_stream = paralell_stream
|
ryukinix/decorating
|
decorating/stream.py
|
Writting.write
|
python
|
def write(self, message, flush=True):
if isinstance(message, bytes): # pragma: no cover
message = message.decode('utf-8')
for char in message:
time.sleep(self.delay * (4 if char == '\n' else 1))
super(Writting, self).write(char, flush)
|
A Writting like write method, delayed at each char
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/stream.py#L148-L155
|
[
"def write(self, message, flush=True):\n \"\"\"\n Function: write\n Summary: write method on the default stream\n Examples: >>> stream.write('message')\n 'message'\n Attributes:\n @param (message): str-like content to send on stream\n @param (flush) default=True: flush the stdout after write\n Returns: None\n \"\"\"\n self.stream.write(message)\n if flush:\n self.stream.flush()\n"
] |
class Writting(Unbuffered):
"""
The Writting stream is a delayed stream whose
simulate an user Writting something.
The base class is the AnimationStream
"""
def __init__(self, stream, delay=0.08):
super(Writting, self).__init__(stream)
self.delay = delay
|
ryukinix/decorating
|
decorating/debugging.py
|
debug
|
python
|
def debug(function):
@wraps(function)
def _wrapper(*args, **kwargs):
result = function(*args, **kwargs)
for key, value in kwargs.items():
args += tuple(['{}={!r}'.format(key, value)])
if len(args) == 1:
args = '({})'.format(args[0])
print('@{0}{1} -> {2}'.format(function.__name__, args, result))
_wrapper.last_output = [function.__name__, str(args), result]
return result
_wrapper.last_output = []
return _wrapper
|
Function: debug
Summary: decorator to debug a function
Examples: at the execution of the function wrapped,
the decorator will allows to print the
input and output of each execution
Attributes:
@param (function): function
Returns: wrapped function
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/debugging.py#L20-L43
| null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2016
#
# @project: Decorating
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
"""
An collection of usefull decorators for debug
and time evaluation of functions flow
"""
from __future__ import unicode_literals
from functools import wraps
from time import time
def counter(function):
"""
Function: counter
Summary: Decorator to count the number of a function is executed each time
Examples: You can use that to had a progress of heally heavy
computation without progress feedback
Attributes:
@param (function): function
Returns: wrapped function
"""
@wraps(function)
def _wrapper(*args, **kwargs):
_wrapper.count += 1
res = function(*args, **kwargs)
funcname = function.__name__
count = _wrapper.count
print("{} has been used: {}x".format(funcname, count))
return res
_wrapper.count = 0
return _wrapper
def count_time(function):
"""
Function: count_time
Summary: get the time to finish a function
print at the end that time to stdout
Examples: <NONE>
Attributes:
@param (function): function
Returns: wrapped function
"""
@wraps(function)
def _wrapper(*args, **kwargs):
before = time()
result = function(*args, **kwargs)
diff = time() - before
funcname = function.__name__
print("{!r} func leave it {:.2f} ms to finish".format(funcname, diff))
_wrapper.time = diff
return result
_wrapper.time = 0
return _wrapper
|
ryukinix/decorating
|
decorating/debugging.py
|
counter
|
python
|
def counter(function):
@wraps(function)
def _wrapper(*args, **kwargs):
_wrapper.count += 1
res = function(*args, **kwargs)
funcname = function.__name__
count = _wrapper.count
print("{} has been used: {}x".format(funcname, count))
return res
_wrapper.count = 0
return _wrapper
|
Function: counter
Summary: Decorator to count the number of a function is executed each time
Examples: You can use that to had a progress of heally heavy
computation without progress feedback
Attributes:
@param (function): function
Returns: wrapped function
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/debugging.py#L46-L66
| null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2016
#
# @project: Decorating
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
"""
An collection of usefull decorators for debug
and time evaluation of functions flow
"""
from __future__ import unicode_literals
from functools import wraps
from time import time
def debug(function):
"""
Function: debug
Summary: decorator to debug a function
Examples: at the execution of the function wrapped,
the decorator will allows to print the
input and output of each execution
Attributes:
@param (function): function
Returns: wrapped function
"""
@wraps(function)
def _wrapper(*args, **kwargs):
result = function(*args, **kwargs)
for key, value in kwargs.items():
args += tuple(['{}={!r}'.format(key, value)])
if len(args) == 1:
args = '({})'.format(args[0])
print('@{0}{1} -> {2}'.format(function.__name__, args, result))
_wrapper.last_output = [function.__name__, str(args), result]
return result
_wrapper.last_output = []
return _wrapper
def count_time(function):
"""
Function: count_time
Summary: get the time to finish a function
print at the end that time to stdout
Examples: <NONE>
Attributes:
@param (function): function
Returns: wrapped function
"""
@wraps(function)
def _wrapper(*args, **kwargs):
before = time()
result = function(*args, **kwargs)
diff = time() - before
funcname = function.__name__
print("{!r} func leave it {:.2f} ms to finish".format(funcname, diff))
_wrapper.time = diff
return result
_wrapper.time = 0
return _wrapper
|
ryukinix/decorating
|
decorating/debugging.py
|
count_time
|
python
|
def count_time(function):
@wraps(function)
def _wrapper(*args, **kwargs):
before = time()
result = function(*args, **kwargs)
diff = time() - before
funcname = function.__name__
print("{!r} func leave it {:.2f} ms to finish".format(funcname, diff))
_wrapper.time = diff
return result
_wrapper.time = 0
return _wrapper
|
Function: count_time
Summary: get the time to finish a function
print at the end that time to stdout
Examples: <NONE>
Attributes:
@param (function): function
Returns: wrapped function
|
train
|
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/debugging.py#L69-L90
| null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2016
#
# @project: Decorating
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
"""
An collection of usefull decorators for debug
and time evaluation of functions flow
"""
from __future__ import unicode_literals
from functools import wraps
from time import time
def debug(function):
"""
Function: debug
Summary: decorator to debug a function
Examples: at the execution of the function wrapped,
the decorator will allows to print the
input and output of each execution
Attributes:
@param (function): function
Returns: wrapped function
"""
@wraps(function)
def _wrapper(*args, **kwargs):
result = function(*args, **kwargs)
for key, value in kwargs.items():
args += tuple(['{}={!r}'.format(key, value)])
if len(args) == 1:
args = '({})'.format(args[0])
print('@{0}{1} -> {2}'.format(function.__name__, args, result))
_wrapper.last_output = [function.__name__, str(args), result]
return result
_wrapper.last_output = []
return _wrapper
def counter(function):
"""
Function: counter
Summary: Decorator to count the number of a function is executed each time
Examples: You can use that to had a progress of heally heavy
computation without progress feedback
Attributes:
@param (function): function
Returns: wrapped function
"""
@wraps(function)
def _wrapper(*args, **kwargs):
_wrapper.count += 1
res = function(*args, **kwargs)
funcname = function.__name__
count = _wrapper.count
print("{} has been used: {}x".format(funcname, count))
return res
_wrapper.count = 0
return _wrapper
|
inveniosoftware/invenio-indexer
|
examples/app.py
|
records
|
python
|
def records():
with db.session.begin_nested():
for idx in range(20):
# create the record
id_ = uuid.uuid4()
Record.create({
'title': 'LHC experiment {}'.format(idx),
'description': 'Data from experiment {}.'.format(idx),
'type': 'data',
'recid': idx
}, id_=id_)
PersistentIdentifier.create(
pid_type='recid',
pid_value=idx,
object_type='rec',
object_uuid=id_,
status=PIDStatus.REGISTERED,
)
db.session.commit()
|
Load test data fixture.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/examples/app.py#L84-L104
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
r"""Minimal Flask application example for development.
SPHINX-START
Run ElasticSearch and RabbitMQ server and then run example development server:
.. code-block:: console
$ pip install -e .[all]
$ cd examples
$ ./app-setup.sh
$ ./app-fixtures.sh
Try to get some records:
.. code-block:: console
$ curl -X GET localhost:9200/_cat/indices?v
$ curl -X GET localhost:9200/testrecords-testrecord-v1.0.0/_search | \
python -m json.tool
To be able to uninstall the example app:
.. code-block:: console
$ ./app-teardown.sh
SPHINX-END
"""
from __future__ import absolute_import, print_function
import os
import uuid
from flask import Flask
from flask_celeryext import FlaskCeleryExt
from invenio_db import InvenioDB, db
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records import InvenioRecords
from invenio_records.api import Record
from invenio_search import InvenioSearch
from invenio_indexer import InvenioIndexer
# Create Flask application
index_name = 'testrecords-testrecord-v1.0.0'
app = Flask(__name__)
app.config.update(
CELERY_ALWAYS_EAGER=True,
CELERY_CACHE_BACKEND='memory',
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_RESULT_BACKEND='cache',
INDEXER_DEFAULT_DOC_TYPE='testrecord-v1.0.0',
INDEXER_DEFAULT_INDEX=index_name,
SQLALCHEMY_DATABASE_URI=os.getenv('SQLALCHEMY_DATABASE_URI',
'sqlite:///app.db'),
SQLALCHEMY_TRACK_MODIFICATIONS=True,
)
FlaskCeleryExt(app)
InvenioDB(app)
InvenioRecords(app)
search = InvenioSearch(app)
search.register_mappings('testrecords', 'data')
InvenioIndexer(app)
@app.cli.group()
def fixtures():
"""Command for working with test data."""
@fixtures.command()
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
Producer.publish
|
python
|
def publish(self, data, **kwargs):
assert data.get('op') in {'index', 'create', 'delete', 'update'}
return super(Producer, self).publish(data, **kwargs)
|
Validate operation type.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L36-L39
| null |
class Producer(KombuProducer):
"""Producer validating published messages.
For more information visit :class:`kombu:kombu.Producer`.
"""
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
RecordIndexer.index
|
python
|
def index(self, record):
index, doc_type = self.record_to_index(record)
return self.client.index(
id=str(record.id),
version=record.revision_id,
version_type=self._version_type,
index=index,
doc_type=doc_type,
body=self._prepare_record(record, index, doc_type),
)
|
Index a record.
The caller is responsible for ensuring that the record has already been
committed to the database. If a newer version of a record has already
been indexed then the provided record will not be indexed. This
behavior can be controlled by providing a different ``version_type``
when initializing ``RecordIndexer``.
:param record: Record instance.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L106-L126
|
[
"def record_to_index(self, record):\n \"\"\"Get index/doc_type given a record.\n\n :param record: The record where to look for the information.\n :returns: A tuple (index, doc_type).\n \"\"\"\n return self._record_to_index(record)\n",
"def _prepare_record(record, index, doc_type):\n \"\"\"Prepare record data for indexing.\n\n :param record: The record to prepare.\n :param index: The Elasticsearch index.\n :param doc_type: The Elasticsearch document type.\n :returns: The record metadata.\n \"\"\"\n if current_app.config['INDEXER_REPLACE_REFS']:\n data = copy.deepcopy(record.replace_refs())\n else:\n data = record.dumps()\n\n data['_created'] = pytz.utc.localize(record.created).isoformat() \\\n if record.created else None\n data['_updated'] = pytz.utc.localize(record.updated).isoformat() \\\n if record.updated else None\n\n # Allow modification of data prior to sending to Elasticsearch.\n before_record_index.send(\n current_app._get_current_object(),\n json=data,\n record=record,\n index=index,\n doc_type=doc_type,\n )\n\n return data\n"
] |
class RecordIndexer(object):
r"""Provide an interface for indexing records in Elasticsearch.
Bulk indexing works by queuing requests for indexing records and processing
these requests in bulk.
"""
def __init__(self, search_client=None, exchange=None, queue=None,
routing_key=None, version_type=None, record_to_index=None):
"""Initialize indexer.
:param search_client: Elasticsearch client.
(Default: ``current_search_client``)
:param exchange: A :class:`kombu.Exchange` instance for message queue.
:param queue: A :class:`kombu.Queue` instance for message queue.
:param routing_key: Routing key for message queue.
:param version_type: Elasticsearch version type.
(Default: ``external_gte``)
:param record_to_index: Function to extract the index and doc_type
from the record.
"""
self.client = search_client or current_search_client
self._exchange = exchange
self._queue = queue
self._record_to_index = record_to_index or current_record_to_index
self._routing_key = routing_key
self._version_type = version_type or 'external_gte'
def record_to_index(self, record):
"""Get index/doc_type given a record.
:param record: The record where to look for the information.
:returns: A tuple (index, doc_type).
"""
return self._record_to_index(record)
@property
def mq_queue(self):
"""Message Queue queue.
:returns: The Message Queue queue.
"""
return self._queue or current_app.config['INDEXER_MQ_QUEUE']
@property
def mq_exchange(self):
"""Message Queue exchange.
:returns: The Message Queue exchange.
"""
return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']
@property
def mq_routing_key(self):
"""Message Queue routing key.
:returns: The Message Queue routing key.
"""
return (self._routing_key or
current_app.config['INDEXER_MQ_ROUTING_KEY'])
#
# High-level API
#
def index_by_id(self, record_uuid):
"""Index a record by record identifier.
:param record_uuid: Record identifier.
"""
return self.index(Record.get_record(record_uuid))
def delete(self, record):
"""Delete a record.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
)
def delete_by_id(self, record_uuid):
"""Delete record from index by record identifier."""
self.delete(Record.get_record(record_uuid))
def bulk_index(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'index')
def bulk_delete(self, record_id_iterator):
"""Bulk delete records from index.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'delete')
def process_bulk_queue(self, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
"""
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
**es_bulk_kwargs
)
consumer.close()
return count
@contextmanager
def create_producer(self):
"""Context manager that yields an instance of ``Producer``."""
with current_celery_app.pool.acquire(block=True) as conn:
yield Producer(
conn,
exchange=self.mq_exchange,
routing_key=self.mq_routing_key,
auto_declare=True,
)
#
# Low-level implementation
#
def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
"""Index record in Elasticsearch asynchronously.
:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``,
``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``)
"""
with self.create_producer() as producer:
for rec in record_id_iterator:
producer.publish(dict(
id=str(rec),
op=op_type,
index=index,
doc_type=doc_type
))
def _actionsiter(self, message_iterator):
"""Iterate bulk actions.
:param message_iterator: Iterator yielding messages from a queue.
"""
for message in message_iterator:
payload = message.decode()
try:
if payload['op'] == 'delete':
yield self._delete_action(payload)
else:
yield self._index_action(payload)
message.ack()
except NoResultFound:
message.reject()
except Exception:
message.reject()
current_app.logger.error(
"Failed to index record {0}".format(payload.get('id')),
exc_info=True)
def _delete_action(self, payload):
"""Bulk delete action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'delete' action.
"""
index, doc_type = payload.get('index'), payload.get('doc_type')
if not (index and doc_type):
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'delete',
'_index': index,
'_type': doc_type,
'_id': payload['id'],
}
def _index_action(self, payload):
"""Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
"""
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'index',
'_index': index,
'_type': doc_type,
'_id': str(record.id),
'_version': record.revision_id,
'_version_type': self._version_type,
'_source': self._prepare_record(record, index, doc_type),
}
@staticmethod
def _prepare_record(record, index, doc_type):
"""Prepare record data for indexing.
:param record: The record to prepare.
:param index: The Elasticsearch index.
:param doc_type: The Elasticsearch document type.
:returns: The record metadata.
"""
if current_app.config['INDEXER_REPLACE_REFS']:
data = copy.deepcopy(record.replace_refs())
else:
data = record.dumps()
data['_created'] = pytz.utc.localize(record.created).isoformat() \
if record.created else None
data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
if record.updated else None
# Allow modification of data prior to sending to Elasticsearch.
before_record_index.send(
current_app._get_current_object(),
json=data,
record=record,
index=index,
doc_type=doc_type,
)
return data
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
RecordIndexer.delete
|
python
|
def delete(self, record):
index, doc_type = self.record_to_index(record)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
)
|
Delete a record.
:param record: Record instance.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L135-L146
|
[
"def record_to_index(self, record):\n \"\"\"Get index/doc_type given a record.\n\n :param record: The record where to look for the information.\n :returns: A tuple (index, doc_type).\n \"\"\"\n return self._record_to_index(record)\n"
] |
class RecordIndexer(object):
r"""Provide an interface for indexing records in Elasticsearch.
Bulk indexing works by queuing requests for indexing records and processing
these requests in bulk.
"""
def __init__(self, search_client=None, exchange=None, queue=None,
routing_key=None, version_type=None, record_to_index=None):
"""Initialize indexer.
:param search_client: Elasticsearch client.
(Default: ``current_search_client``)
:param exchange: A :class:`kombu.Exchange` instance for message queue.
:param queue: A :class:`kombu.Queue` instance for message queue.
:param routing_key: Routing key for message queue.
:param version_type: Elasticsearch version type.
(Default: ``external_gte``)
:param record_to_index: Function to extract the index and doc_type
from the record.
"""
self.client = search_client or current_search_client
self._exchange = exchange
self._queue = queue
self._record_to_index = record_to_index or current_record_to_index
self._routing_key = routing_key
self._version_type = version_type or 'external_gte'
def record_to_index(self, record):
"""Get index/doc_type given a record.
:param record: The record where to look for the information.
:returns: A tuple (index, doc_type).
"""
return self._record_to_index(record)
@property
def mq_queue(self):
"""Message Queue queue.
:returns: The Message Queue queue.
"""
return self._queue or current_app.config['INDEXER_MQ_QUEUE']
@property
def mq_exchange(self):
"""Message Queue exchange.
:returns: The Message Queue exchange.
"""
return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']
@property
def mq_routing_key(self):
"""Message Queue routing key.
:returns: The Message Queue routing key.
"""
return (self._routing_key or
current_app.config['INDEXER_MQ_ROUTING_KEY'])
#
# High-level API
#
def index(self, record):
"""Index a record.
The caller is responsible for ensuring that the record has already been
committed to the database. If a newer version of a record has already
been indexed then the provided record will not be indexed. This
behavior can be controlled by providing a different ``version_type``
when initializing ``RecordIndexer``.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.index(
id=str(record.id),
version=record.revision_id,
version_type=self._version_type,
index=index,
doc_type=doc_type,
body=self._prepare_record(record, index, doc_type),
)
def index_by_id(self, record_uuid):
"""Index a record by record identifier.
:param record_uuid: Record identifier.
"""
return self.index(Record.get_record(record_uuid))
def delete_by_id(self, record_uuid):
"""Delete record from index by record identifier."""
self.delete(Record.get_record(record_uuid))
def bulk_index(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'index')
def bulk_delete(self, record_id_iterator):
"""Bulk delete records from index.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'delete')
def process_bulk_queue(self, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
"""
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
**es_bulk_kwargs
)
consumer.close()
return count
@contextmanager
def create_producer(self):
"""Context manager that yields an instance of ``Producer``."""
with current_celery_app.pool.acquire(block=True) as conn:
yield Producer(
conn,
exchange=self.mq_exchange,
routing_key=self.mq_routing_key,
auto_declare=True,
)
#
# Low-level implementation
#
def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
"""Index record in Elasticsearch asynchronously.
:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``,
``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``)
"""
with self.create_producer() as producer:
for rec in record_id_iterator:
producer.publish(dict(
id=str(rec),
op=op_type,
index=index,
doc_type=doc_type
))
def _actionsiter(self, message_iterator):
"""Iterate bulk actions.
:param message_iterator: Iterator yielding messages from a queue.
"""
for message in message_iterator:
payload = message.decode()
try:
if payload['op'] == 'delete':
yield self._delete_action(payload)
else:
yield self._index_action(payload)
message.ack()
except NoResultFound:
message.reject()
except Exception:
message.reject()
current_app.logger.error(
"Failed to index record {0}".format(payload.get('id')),
exc_info=True)
def _delete_action(self, payload):
"""Bulk delete action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'delete' action.
"""
index, doc_type = payload.get('index'), payload.get('doc_type')
if not (index and doc_type):
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'delete',
'_index': index,
'_type': doc_type,
'_id': payload['id'],
}
def _index_action(self, payload):
"""Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
"""
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'index',
'_index': index,
'_type': doc_type,
'_id': str(record.id),
'_version': record.revision_id,
'_version_type': self._version_type,
'_source': self._prepare_record(record, index, doc_type),
}
@staticmethod
def _prepare_record(record, index, doc_type):
"""Prepare record data for indexing.
:param record: The record to prepare.
:param index: The Elasticsearch index.
:param doc_type: The Elasticsearch document type.
:returns: The record metadata.
"""
if current_app.config['INDEXER_REPLACE_REFS']:
data = copy.deepcopy(record.replace_refs())
else:
data = record.dumps()
data['_created'] = pytz.utc.localize(record.created).isoformat() \
if record.created else None
data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
if record.updated else None
# Allow modification of data prior to sending to Elasticsearch.
before_record_index.send(
current_app._get_current_object(),
json=data,
record=record,
index=index,
doc_type=doc_type,
)
return data
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
RecordIndexer.process_bulk_queue
|
python
|
def process_bulk_queue(self, es_bulk_kwargs=None):
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
**es_bulk_kwargs
)
consumer.close()
return count
|
Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L166-L193
|
[
"def _actionsiter(self, message_iterator):\n \"\"\"Iterate bulk actions.\n\n :param message_iterator: Iterator yielding messages from a queue.\n \"\"\"\n for message in message_iterator:\n payload = message.decode()\n try:\n if payload['op'] == 'delete':\n yield self._delete_action(payload)\n else:\n yield self._index_action(payload)\n message.ack()\n except NoResultFound:\n message.reject()\n except Exception:\n message.reject()\n current_app.logger.error(\n \"Failed to index record {0}\".format(payload.get('id')),\n exc_info=True)\n"
] |
class RecordIndexer(object):
r"""Provide an interface for indexing records in Elasticsearch.
Bulk indexing works by queuing requests for indexing records and processing
these requests in bulk.
"""
def __init__(self, search_client=None, exchange=None, queue=None,
routing_key=None, version_type=None, record_to_index=None):
"""Initialize indexer.
:param search_client: Elasticsearch client.
(Default: ``current_search_client``)
:param exchange: A :class:`kombu.Exchange` instance for message queue.
:param queue: A :class:`kombu.Queue` instance for message queue.
:param routing_key: Routing key for message queue.
:param version_type: Elasticsearch version type.
(Default: ``external_gte``)
:param record_to_index: Function to extract the index and doc_type
from the record.
"""
self.client = search_client or current_search_client
self._exchange = exchange
self._queue = queue
self._record_to_index = record_to_index or current_record_to_index
self._routing_key = routing_key
self._version_type = version_type or 'external_gte'
def record_to_index(self, record):
"""Get index/doc_type given a record.
:param record: The record where to look for the information.
:returns: A tuple (index, doc_type).
"""
return self._record_to_index(record)
@property
def mq_queue(self):
"""Message Queue queue.
:returns: The Message Queue queue.
"""
return self._queue or current_app.config['INDEXER_MQ_QUEUE']
@property
def mq_exchange(self):
"""Message Queue exchange.
:returns: The Message Queue exchange.
"""
return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']
@property
def mq_routing_key(self):
"""Message Queue routing key.
:returns: The Message Queue routing key.
"""
return (self._routing_key or
current_app.config['INDEXER_MQ_ROUTING_KEY'])
#
# High-level API
#
def index(self, record):
"""Index a record.
The caller is responsible for ensuring that the record has already been
committed to the database. If a newer version of a record has already
been indexed then the provided record will not be indexed. This
behavior can be controlled by providing a different ``version_type``
when initializing ``RecordIndexer``.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.index(
id=str(record.id),
version=record.revision_id,
version_type=self._version_type,
index=index,
doc_type=doc_type,
body=self._prepare_record(record, index, doc_type),
)
def index_by_id(self, record_uuid):
"""Index a record by record identifier.
:param record_uuid: Record identifier.
"""
return self.index(Record.get_record(record_uuid))
def delete(self, record):
"""Delete a record.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
)
def delete_by_id(self, record_uuid):
"""Delete record from index by record identifier."""
self.delete(Record.get_record(record_uuid))
def bulk_index(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'index')
def bulk_delete(self, record_id_iterator):
"""Bulk delete records from index.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'delete')
@contextmanager
def create_producer(self):
"""Context manager that yields an instance of ``Producer``."""
with current_celery_app.pool.acquire(block=True) as conn:
yield Producer(
conn,
exchange=self.mq_exchange,
routing_key=self.mq_routing_key,
auto_declare=True,
)
#
# Low-level implementation
#
def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
"""Index record in Elasticsearch asynchronously.
:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``,
``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``)
"""
with self.create_producer() as producer:
for rec in record_id_iterator:
producer.publish(dict(
id=str(rec),
op=op_type,
index=index,
doc_type=doc_type
))
def _actionsiter(self, message_iterator):
"""Iterate bulk actions.
:param message_iterator: Iterator yielding messages from a queue.
"""
for message in message_iterator:
payload = message.decode()
try:
if payload['op'] == 'delete':
yield self._delete_action(payload)
else:
yield self._index_action(payload)
message.ack()
except NoResultFound:
message.reject()
except Exception:
message.reject()
current_app.logger.error(
"Failed to index record {0}".format(payload.get('id')),
exc_info=True)
def _delete_action(self, payload):
"""Bulk delete action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'delete' action.
"""
index, doc_type = payload.get('index'), payload.get('doc_type')
if not (index and doc_type):
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'delete',
'_index': index,
'_type': doc_type,
'_id': payload['id'],
}
def _index_action(self, payload):
"""Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
"""
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'index',
'_index': index,
'_type': doc_type,
'_id': str(record.id),
'_version': record.revision_id,
'_version_type': self._version_type,
'_source': self._prepare_record(record, index, doc_type),
}
@staticmethod
def _prepare_record(record, index, doc_type):
"""Prepare record data for indexing.
:param record: The record to prepare.
:param index: The Elasticsearch index.
:param doc_type: The Elasticsearch document type.
:returns: The record metadata.
"""
if current_app.config['INDEXER_REPLACE_REFS']:
data = copy.deepcopy(record.replace_refs())
else:
data = record.dumps()
data['_created'] = pytz.utc.localize(record.created).isoformat() \
if record.created else None
data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
if record.updated else None
# Allow modification of data prior to sending to Elasticsearch.
before_record_index.send(
current_app._get_current_object(),
json=data,
record=record,
index=index,
doc_type=doc_type,
)
return data
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
RecordIndexer.create_producer
|
python
|
def create_producer(self):
with current_celery_app.pool.acquire(block=True) as conn:
yield Producer(
conn,
exchange=self.mq_exchange,
routing_key=self.mq_routing_key,
auto_declare=True,
)
|
Context manager that yields an instance of ``Producer``.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L196-L204
| null |
class RecordIndexer(object):
r"""Provide an interface for indexing records in Elasticsearch.
Bulk indexing works by queuing requests for indexing records and processing
these requests in bulk.
"""
def __init__(self, search_client=None, exchange=None, queue=None,
routing_key=None, version_type=None, record_to_index=None):
"""Initialize indexer.
:param search_client: Elasticsearch client.
(Default: ``current_search_client``)
:param exchange: A :class:`kombu.Exchange` instance for message queue.
:param queue: A :class:`kombu.Queue` instance for message queue.
:param routing_key: Routing key for message queue.
:param version_type: Elasticsearch version type.
(Default: ``external_gte``)
:param record_to_index: Function to extract the index and doc_type
from the record.
"""
self.client = search_client or current_search_client
self._exchange = exchange
self._queue = queue
self._record_to_index = record_to_index or current_record_to_index
self._routing_key = routing_key
self._version_type = version_type or 'external_gte'
def record_to_index(self, record):
"""Get index/doc_type given a record.
:param record: The record where to look for the information.
:returns: A tuple (index, doc_type).
"""
return self._record_to_index(record)
@property
def mq_queue(self):
"""Message Queue queue.
:returns: The Message Queue queue.
"""
return self._queue or current_app.config['INDEXER_MQ_QUEUE']
@property
def mq_exchange(self):
"""Message Queue exchange.
:returns: The Message Queue exchange.
"""
return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']
@property
def mq_routing_key(self):
"""Message Queue routing key.
:returns: The Message Queue routing key.
"""
return (self._routing_key or
current_app.config['INDEXER_MQ_ROUTING_KEY'])
#
# High-level API
#
def index(self, record):
"""Index a record.
The caller is responsible for ensuring that the record has already been
committed to the database. If a newer version of a record has already
been indexed then the provided record will not be indexed. This
behavior can be controlled by providing a different ``version_type``
when initializing ``RecordIndexer``.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.index(
id=str(record.id),
version=record.revision_id,
version_type=self._version_type,
index=index,
doc_type=doc_type,
body=self._prepare_record(record, index, doc_type),
)
def index_by_id(self, record_uuid):
"""Index a record by record identifier.
:param record_uuid: Record identifier.
"""
return self.index(Record.get_record(record_uuid))
def delete(self, record):
"""Delete a record.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
)
def delete_by_id(self, record_uuid):
"""Delete record from index by record identifier."""
self.delete(Record.get_record(record_uuid))
def bulk_index(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'index')
def bulk_delete(self, record_id_iterator):
"""Bulk delete records from index.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'delete')
def process_bulk_queue(self, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
"""
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
**es_bulk_kwargs
)
consumer.close()
return count
@contextmanager
#
# Low-level implementation
#
def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
"""Index record in Elasticsearch asynchronously.
:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``,
``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``)
"""
with self.create_producer() as producer:
for rec in record_id_iterator:
producer.publish(dict(
id=str(rec),
op=op_type,
index=index,
doc_type=doc_type
))
def _actionsiter(self, message_iterator):
"""Iterate bulk actions.
:param message_iterator: Iterator yielding messages from a queue.
"""
for message in message_iterator:
payload = message.decode()
try:
if payload['op'] == 'delete':
yield self._delete_action(payload)
else:
yield self._index_action(payload)
message.ack()
except NoResultFound:
message.reject()
except Exception:
message.reject()
current_app.logger.error(
"Failed to index record {0}".format(payload.get('id')),
exc_info=True)
def _delete_action(self, payload):
"""Bulk delete action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'delete' action.
"""
index, doc_type = payload.get('index'), payload.get('doc_type')
if not (index and doc_type):
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'delete',
'_index': index,
'_type': doc_type,
'_id': payload['id'],
}
def _index_action(self, payload):
"""Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
"""
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'index',
'_index': index,
'_type': doc_type,
'_id': str(record.id),
'_version': record.revision_id,
'_version_type': self._version_type,
'_source': self._prepare_record(record, index, doc_type),
}
@staticmethod
def _prepare_record(record, index, doc_type):
"""Prepare record data for indexing.
:param record: The record to prepare.
:param index: The Elasticsearch index.
:param doc_type: The Elasticsearch document type.
:returns: The record metadata.
"""
if current_app.config['INDEXER_REPLACE_REFS']:
data = copy.deepcopy(record.replace_refs())
else:
data = record.dumps()
data['_created'] = pytz.utc.localize(record.created).isoformat() \
if record.created else None
data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
if record.updated else None
# Allow modification of data prior to sending to Elasticsearch.
before_record_index.send(
current_app._get_current_object(),
json=data,
record=record,
index=index,
doc_type=doc_type,
)
return data
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
RecordIndexer._bulk_op
|
python
|
def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
with self.create_producer() as producer:
for rec in record_id_iterator:
producer.publish(dict(
id=str(rec),
op=op_type,
index=index,
doc_type=doc_type
))
|
Index record in Elasticsearch asynchronously.
:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``,
``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``)
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L209-L225
| null |
class RecordIndexer(object):
r"""Provide an interface for indexing records in Elasticsearch.
Bulk indexing works by queuing requests for indexing records and processing
these requests in bulk.
"""
def __init__(self, search_client=None, exchange=None, queue=None,
routing_key=None, version_type=None, record_to_index=None):
"""Initialize indexer.
:param search_client: Elasticsearch client.
(Default: ``current_search_client``)
:param exchange: A :class:`kombu.Exchange` instance for message queue.
:param queue: A :class:`kombu.Queue` instance for message queue.
:param routing_key: Routing key for message queue.
:param version_type: Elasticsearch version type.
(Default: ``external_gte``)
:param record_to_index: Function to extract the index and doc_type
from the record.
"""
self.client = search_client or current_search_client
self._exchange = exchange
self._queue = queue
self._record_to_index = record_to_index or current_record_to_index
self._routing_key = routing_key
self._version_type = version_type or 'external_gte'
def record_to_index(self, record):
"""Get index/doc_type given a record.
:param record: The record where to look for the information.
:returns: A tuple (index, doc_type).
"""
return self._record_to_index(record)
@property
def mq_queue(self):
"""Message Queue queue.
:returns: The Message Queue queue.
"""
return self._queue or current_app.config['INDEXER_MQ_QUEUE']
@property
def mq_exchange(self):
"""Message Queue exchange.
:returns: The Message Queue exchange.
"""
return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']
@property
def mq_routing_key(self):
"""Message Queue routing key.
:returns: The Message Queue routing key.
"""
return (self._routing_key or
current_app.config['INDEXER_MQ_ROUTING_KEY'])
#
# High-level API
#
def index(self, record):
"""Index a record.
The caller is responsible for ensuring that the record has already been
committed to the database. If a newer version of a record has already
been indexed then the provided record will not be indexed. This
behavior can be controlled by providing a different ``version_type``
when initializing ``RecordIndexer``.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.index(
id=str(record.id),
version=record.revision_id,
version_type=self._version_type,
index=index,
doc_type=doc_type,
body=self._prepare_record(record, index, doc_type),
)
def index_by_id(self, record_uuid):
"""Index a record by record identifier.
:param record_uuid: Record identifier.
"""
return self.index(Record.get_record(record_uuid))
def delete(self, record):
"""Delete a record.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
)
def delete_by_id(self, record_uuid):
"""Delete record from index by record identifier."""
self.delete(Record.get_record(record_uuid))
def bulk_index(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'index')
def bulk_delete(self, record_id_iterator):
"""Bulk delete records from index.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'delete')
def process_bulk_queue(self, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
"""
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
**es_bulk_kwargs
)
consumer.close()
return count
@contextmanager
def create_producer(self):
"""Context manager that yields an instance of ``Producer``."""
with current_celery_app.pool.acquire(block=True) as conn:
yield Producer(
conn,
exchange=self.mq_exchange,
routing_key=self.mq_routing_key,
auto_declare=True,
)
#
# Low-level implementation
#
def _actionsiter(self, message_iterator):
"""Iterate bulk actions.
:param message_iterator: Iterator yielding messages from a queue.
"""
for message in message_iterator:
payload = message.decode()
try:
if payload['op'] == 'delete':
yield self._delete_action(payload)
else:
yield self._index_action(payload)
message.ack()
except NoResultFound:
message.reject()
except Exception:
message.reject()
current_app.logger.error(
"Failed to index record {0}".format(payload.get('id')),
exc_info=True)
def _delete_action(self, payload):
"""Bulk delete action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'delete' action.
"""
index, doc_type = payload.get('index'), payload.get('doc_type')
if not (index and doc_type):
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'delete',
'_index': index,
'_type': doc_type,
'_id': payload['id'],
}
def _index_action(self, payload):
"""Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
"""
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'index',
'_index': index,
'_type': doc_type,
'_id': str(record.id),
'_version': record.revision_id,
'_version_type': self._version_type,
'_source': self._prepare_record(record, index, doc_type),
}
@staticmethod
def _prepare_record(record, index, doc_type):
"""Prepare record data for indexing.
:param record: The record to prepare.
:param index: The Elasticsearch index.
:param doc_type: The Elasticsearch document type.
:returns: The record metadata.
"""
if current_app.config['INDEXER_REPLACE_REFS']:
data = copy.deepcopy(record.replace_refs())
else:
data = record.dumps()
data['_created'] = pytz.utc.localize(record.created).isoformat() \
if record.created else None
data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
if record.updated else None
# Allow modification of data prior to sending to Elasticsearch.
before_record_index.send(
current_app._get_current_object(),
json=data,
record=record,
index=index,
doc_type=doc_type,
)
return data
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
RecordIndexer._actionsiter
|
python
|
def _actionsiter(self, message_iterator):
for message in message_iterator:
payload = message.decode()
try:
if payload['op'] == 'delete':
yield self._delete_action(payload)
else:
yield self._index_action(payload)
message.ack()
except NoResultFound:
message.reject()
except Exception:
message.reject()
current_app.logger.error(
"Failed to index record {0}".format(payload.get('id')),
exc_info=True)
|
Iterate bulk actions.
:param message_iterator: Iterator yielding messages from a queue.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L227-L246
|
[
"def _delete_action(self, payload):\n \"\"\"Bulk delete action.\n\n :param payload: Decoded message body.\n :returns: Dictionary defining an Elasticsearch bulk 'delete' action.\n \"\"\"\n index, doc_type = payload.get('index'), payload.get('doc_type')\n if not (index and doc_type):\n record = Record.get_record(payload['id'])\n index, doc_type = self.record_to_index(record)\n\n return {\n '_op_type': 'delete',\n '_index': index,\n '_type': doc_type,\n '_id': payload['id'],\n }\n",
"def _index_action(self, payload):\n \"\"\"Bulk index action.\n\n :param payload: Decoded message body.\n :returns: Dictionary defining an Elasticsearch bulk 'index' action.\n \"\"\"\n record = Record.get_record(payload['id'])\n index, doc_type = self.record_to_index(record)\n\n return {\n '_op_type': 'index',\n '_index': index,\n '_type': doc_type,\n '_id': str(record.id),\n '_version': record.revision_id,\n '_version_type': self._version_type,\n '_source': self._prepare_record(record, index, doc_type),\n }\n"
] |
class RecordIndexer(object):
r"""Provide an interface for indexing records in Elasticsearch.
Bulk indexing works by queuing requests for indexing records and processing
these requests in bulk.
"""
def __init__(self, search_client=None, exchange=None, queue=None,
routing_key=None, version_type=None, record_to_index=None):
"""Initialize indexer.
:param search_client: Elasticsearch client.
(Default: ``current_search_client``)
:param exchange: A :class:`kombu.Exchange` instance for message queue.
:param queue: A :class:`kombu.Queue` instance for message queue.
:param routing_key: Routing key for message queue.
:param version_type: Elasticsearch version type.
(Default: ``external_gte``)
:param record_to_index: Function to extract the index and doc_type
from the record.
"""
self.client = search_client or current_search_client
self._exchange = exchange
self._queue = queue
self._record_to_index = record_to_index or current_record_to_index
self._routing_key = routing_key
self._version_type = version_type or 'external_gte'
def record_to_index(self, record):
"""Get index/doc_type given a record.
:param record: The record where to look for the information.
:returns: A tuple (index, doc_type).
"""
return self._record_to_index(record)
@property
def mq_queue(self):
"""Message Queue queue.
:returns: The Message Queue queue.
"""
return self._queue or current_app.config['INDEXER_MQ_QUEUE']
@property
def mq_exchange(self):
"""Message Queue exchange.
:returns: The Message Queue exchange.
"""
return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']
@property
def mq_routing_key(self):
"""Message Queue routing key.
:returns: The Message Queue routing key.
"""
return (self._routing_key or
current_app.config['INDEXER_MQ_ROUTING_KEY'])
#
# High-level API
#
def index(self, record):
"""Index a record.
The caller is responsible for ensuring that the record has already been
committed to the database. If a newer version of a record has already
been indexed then the provided record will not be indexed. This
behavior can be controlled by providing a different ``version_type``
when initializing ``RecordIndexer``.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.index(
id=str(record.id),
version=record.revision_id,
version_type=self._version_type,
index=index,
doc_type=doc_type,
body=self._prepare_record(record, index, doc_type),
)
def index_by_id(self, record_uuid):
"""Index a record by record identifier.
:param record_uuid: Record identifier.
"""
return self.index(Record.get_record(record_uuid))
def delete(self, record):
"""Delete a record.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
)
def delete_by_id(self, record_uuid):
"""Delete record from index by record identifier."""
self.delete(Record.get_record(record_uuid))
def bulk_index(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'index')
def bulk_delete(self, record_id_iterator):
"""Bulk delete records from index.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'delete')
def process_bulk_queue(self, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
"""
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
**es_bulk_kwargs
)
consumer.close()
return count
@contextmanager
def create_producer(self):
"""Context manager that yields an instance of ``Producer``."""
with current_celery_app.pool.acquire(block=True) as conn:
yield Producer(
conn,
exchange=self.mq_exchange,
routing_key=self.mq_routing_key,
auto_declare=True,
)
#
# Low-level implementation
#
def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
"""Index record in Elasticsearch asynchronously.
:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``,
``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``)
"""
with self.create_producer() as producer:
for rec in record_id_iterator:
producer.publish(dict(
id=str(rec),
op=op_type,
index=index,
doc_type=doc_type
))
def _delete_action(self, payload):
"""Bulk delete action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'delete' action.
"""
index, doc_type = payload.get('index'), payload.get('doc_type')
if not (index and doc_type):
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'delete',
'_index': index,
'_type': doc_type,
'_id': payload['id'],
}
def _index_action(self, payload):
    """Bulk index action.

    :param payload: Decoded message body.
    :returns: Dictionary defining an Elasticsearch bulk 'index' action.
    """
    record = Record.get_record(payload['id'])
    index, doc_type = self.record_to_index(record)
    return {
        '_op_type': 'index',
        '_index': index,
        '_type': doc_type,
        '_id': str(record.id),
        # External versioning lets Elasticsearch drop stale documents.
        '_version': record.revision_id,
        '_version_type': self._version_type,
        '_source': self._prepare_record(record, index, doc_type),
    }
@staticmethod
def _prepare_record(record, index, doc_type):
    """Prepare record data for indexing.

    :param record: The record to prepare.
    :param index: The Elasticsearch index.
    :param doc_type: The Elasticsearch document type.
    :returns: The record metadata.
    """
    # Optionally resolve JSON references first; deepcopy keeps the
    # original record object untouched.
    if current_app.config['INDEXER_REPLACE_REFS']:
        data = copy.deepcopy(record.replace_refs())
    else:
        data = record.dumps()
    data['_created'] = pytz.utc.localize(record.created).isoformat() \
        if record.created else None
    data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
        if record.updated else None
    # Allow modification of data prior to sending to Elasticsearch.
    before_record_index.send(
        current_app._get_current_object(),
        json=data,
        record=record,
        index=index,
        doc_type=doc_type,
    )
    return data
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
RecordIndexer._delete_action
|
python
|
def _delete_action(self, payload):
index, doc_type = payload.get('index'), payload.get('doc_type')
if not (index and doc_type):
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'delete',
'_index': index,
'_type': doc_type,
'_id': payload['id'],
}
|
Bulk delete action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'delete' action.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L248-L264
|
[
"def record_to_index(self, record):\n \"\"\"Get index/doc_type given a record.\n\n :param record: The record where to look for the information.\n :returns: A tuple (index, doc_type).\n \"\"\"\n return self._record_to_index(record)\n"
] |
class RecordIndexer(object):
    r"""Provide an interface for indexing records in Elasticsearch.

    Bulk indexing works by queuing requests for indexing records and processing
    these requests in bulk.
    """

    def __init__(self, search_client=None, exchange=None, queue=None,
                 routing_key=None, version_type=None, record_to_index=None):
        """Initialize indexer.

        :param search_client: Elasticsearch client.
            (Default: ``current_search_client``)
        :param exchange: A :class:`kombu.Exchange` instance for message queue.
        :param queue: A :class:`kombu.Queue` instance for message queue.
        :param routing_key: Routing key for message queue.
        :param version_type: Elasticsearch version type.
            (Default: ``external_gte``)
        :param record_to_index: Function to extract the index and doc_type
            from the record.
        """
        self.client = search_client or current_search_client
        self._exchange = exchange
        self._queue = queue
        self._record_to_index = record_to_index or current_record_to_index
        self._routing_key = routing_key
        # ``external_gte`` lets Elasticsearch discard out-of-date versions.
        self._version_type = version_type or 'external_gte'

    def record_to_index(self, record):
        """Get index/doc_type given a record.

        :param record: The record where to look for the information.
        :returns: A tuple (index, doc_type).
        """
        return self._record_to_index(record)

    @property
    def mq_queue(self):
        """Message Queue queue.

        :returns: The Message Queue queue.
        """
        return self._queue or current_app.config['INDEXER_MQ_QUEUE']

    @property
    def mq_exchange(self):
        """Message Queue exchange.

        :returns: The Message Queue exchange.
        """
        return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']

    @property
    def mq_routing_key(self):
        """Message Queue routing key.

        :returns: The Message Queue routing key.
        """
        return (self._routing_key or
                current_app.config['INDEXER_MQ_ROUTING_KEY'])

    #
    # High-level API
    #
    def index(self, record):
        """Index a record.

        The caller is responsible for ensuring that the record has already been
        committed to the database. If a newer version of a record has already
        been indexed then the provided record will not be indexed. This
        behavior can be controlled by providing a different ``version_type``
        when initializing ``RecordIndexer``.

        :param record: Record instance.
        """
        index, doc_type = self.record_to_index(record)
        return self.client.index(
            id=str(record.id),
            version=record.revision_id,
            version_type=self._version_type,
            index=index,
            doc_type=doc_type,
            body=self._prepare_record(record, index, doc_type),
        )

    def index_by_id(self, record_uuid):
        """Index a record by record identifier.

        :param record_uuid: Record identifier.
        """
        return self.index(Record.get_record(record_uuid))

    def delete(self, record):
        """Delete a record.

        :param record: Record instance.
        """
        index, doc_type = self.record_to_index(record)
        return self.client.delete(
            id=str(record.id),
            index=index,
            doc_type=doc_type,
        )

    def delete_by_id(self, record_uuid):
        """Delete record from index by record identifier."""
        self.delete(Record.get_record(record_uuid))

    def bulk_index(self, record_id_iterator):
        """Bulk index records.

        :param record_id_iterator: Iterator yielding record UUIDs.
        """
        self._bulk_op(record_id_iterator, 'index')

    def bulk_delete(self, record_id_iterator):
        """Bulk delete records from index.

        :param record_id_iterator: Iterator yielding record UUIDs.
        """
        self._bulk_op(record_id_iterator, 'delete')

    def process_bulk_queue(self, es_bulk_kwargs=None):
        """Process bulk indexing queue.

        :param dict es_bulk_kwargs: Passed to
            :func:`elasticsearch:elasticsearch.helpers.bulk`.
        """
        with current_celery_app.pool.acquire(block=True) as conn:
            consumer = Consumer(
                connection=conn,
                queue=self.mq_queue.name,
                exchange=self.mq_exchange.name,
                routing_key=self.mq_routing_key,
            )
            req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
            es_bulk_kwargs = es_bulk_kwargs or {}
            # ``stats_only`` makes ``bulk`` return success/fail counters.
            count = bulk(
                self.client,
                self._actionsiter(consumer.iterqueue()),
                stats_only=True,
                request_timeout=req_timeout,
                **es_bulk_kwargs
            )
            consumer.close()
            return count

    @contextmanager
    def create_producer(self):
        """Context manager that yields an instance of ``Producer``."""
        with current_celery_app.pool.acquire(block=True) as conn:
            yield Producer(
                conn,
                exchange=self.mq_exchange,
                routing_key=self.mq_routing_key,
                auto_declare=True,
            )

    #
    # Low-level implementation
    #
    def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
        """Index record in Elasticsearch asynchronously.

        :param record_id_iterator: Iterator that yields record UUIDs.
        :param op_type: Indexing operation (one of ``index``, ``create``,
            ``delete`` or ``update``).
        :param index: The Elasticsearch index. (Default: ``None``)
        :param doc_type: The Elasticsearch doc_type. (Default: ``None``)
        """
        with self.create_producer() as producer:
            for rec in record_id_iterator:
                producer.publish(dict(
                    id=str(rec),
                    op=op_type,
                    index=index,
                    doc_type=doc_type
                ))

    def _actionsiter(self, message_iterator):
        """Iterate bulk actions.

        :param message_iterator: Iterator yielding messages from a queue.
        """
        for message in message_iterator:
            payload = message.decode()
            try:
                if payload['op'] == 'delete':
                    yield self._delete_action(payload)
                else:
                    yield self._index_action(payload)
                message.ack()
            except NoResultFound:
                # Record disappeared before the message was processed.
                message.reject()
            except Exception:
                message.reject()
                current_app.logger.error(
                    "Failed to index record {0}".format(payload.get('id')),
                    exc_info=True)

    def _index_action(self, payload):
        """Bulk index action.

        :param payload: Decoded message body.
        :returns: Dictionary defining an Elasticsearch bulk 'index' action.
        """
        record = Record.get_record(payload['id'])
        index, doc_type = self.record_to_index(record)
        return {
            '_op_type': 'index',
            '_index': index,
            '_type': doc_type,
            '_id': str(record.id),
            '_version': record.revision_id,
            '_version_type': self._version_type,
            '_source': self._prepare_record(record, index, doc_type),
        }

    @staticmethod
    def _prepare_record(record, index, doc_type):
        """Prepare record data for indexing.

        :param record: The record to prepare.
        :param index: The Elasticsearch index.
        :param doc_type: The Elasticsearch document type.
        :returns: The record metadata.
        """
        if current_app.config['INDEXER_REPLACE_REFS']:
            data = copy.deepcopy(record.replace_refs())
        else:
            data = record.dumps()
        data['_created'] = pytz.utc.localize(record.created).isoformat() \
            if record.created else None
        data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
            if record.updated else None
        # Allow modification of data prior to sending to Elasticsearch.
        before_record_index.send(
            current_app._get_current_object(),
            json=data,
            record=record,
            index=index,
            doc_type=doc_type,
        )
        return data
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
RecordIndexer._index_action
|
python
|
def _index_action(self, payload):
    """Build an Elasticsearch bulk ``index`` action from a queue payload.

    :param payload: Decoded message body.
    :returns: Dictionary defining an Elasticsearch bulk 'index' action.
    """
    record = Record.get_record(payload['id'])
    index, doc_type = self.record_to_index(record)
    action = {
        '_op_type': 'index',
        '_index': index,
        '_type': doc_type,
        '_id': str(record.id),
    }
    # External version numbers let Elasticsearch discard stale updates.
    action['_version'] = record.revision_id
    action['_version_type'] = self._version_type
    action['_source'] = self._prepare_record(record, index, doc_type)
    return action
|
Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L266-L283
|
[
"def record_to_index(self, record):\n \"\"\"Get index/doc_type given a record.\n\n :param record: The record where to look for the information.\n :returns: A tuple (index, doc_type).\n \"\"\"\n return self._record_to_index(record)\n",
"def _prepare_record(record, index, doc_type):\n \"\"\"Prepare record data for indexing.\n\n :param record: The record to prepare.\n :param index: The Elasticsearch index.\n :param doc_type: The Elasticsearch document type.\n :returns: The record metadata.\n \"\"\"\n if current_app.config['INDEXER_REPLACE_REFS']:\n data = copy.deepcopy(record.replace_refs())\n else:\n data = record.dumps()\n\n data['_created'] = pytz.utc.localize(record.created).isoformat() \\\n if record.created else None\n data['_updated'] = pytz.utc.localize(record.updated).isoformat() \\\n if record.updated else None\n\n # Allow modification of data prior to sending to Elasticsearch.\n before_record_index.send(\n current_app._get_current_object(),\n json=data,\n record=record,\n index=index,\n doc_type=doc_type,\n )\n\n return data\n"
] |
class RecordIndexer(object):
    r"""Provide an interface for indexing records in Elasticsearch.

    Bulk indexing works by queuing requests for indexing records and processing
    these requests in bulk.
    """

    def __init__(self, search_client=None, exchange=None, queue=None,
                 routing_key=None, version_type=None, record_to_index=None):
        """Initialize indexer.

        :param search_client: Elasticsearch client.
            (Default: ``current_search_client``)
        :param exchange: A :class:`kombu.Exchange` instance for message queue.
        :param queue: A :class:`kombu.Queue` instance for message queue.
        :param routing_key: Routing key for message queue.
        :param version_type: Elasticsearch version type.
            (Default: ``external_gte``)
        :param record_to_index: Function to extract the index and doc_type
            from the record.
        """
        self.client = search_client or current_search_client
        self._exchange = exchange
        self._queue = queue
        self._record_to_index = record_to_index or current_record_to_index
        self._routing_key = routing_key
        # ``external_gte`` lets Elasticsearch discard out-of-date versions.
        self._version_type = version_type or 'external_gte'

    def record_to_index(self, record):
        """Get index/doc_type given a record.

        :param record: The record where to look for the information.
        :returns: A tuple (index, doc_type).
        """
        return self._record_to_index(record)

    @property
    def mq_queue(self):
        """Message Queue queue.

        :returns: The Message Queue queue.
        """
        return self._queue or current_app.config['INDEXER_MQ_QUEUE']

    @property
    def mq_exchange(self):
        """Message Queue exchange.

        :returns: The Message Queue exchange.
        """
        return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']

    @property
    def mq_routing_key(self):
        """Message Queue routing key.

        :returns: The Message Queue routing key.
        """
        return (self._routing_key or
                current_app.config['INDEXER_MQ_ROUTING_KEY'])

    #
    # High-level API
    #
    def index(self, record):
        """Index a record.

        The caller is responsible for ensuring that the record has already been
        committed to the database. If a newer version of a record has already
        been indexed then the provided record will not be indexed. This
        behavior can be controlled by providing a different ``version_type``
        when initializing ``RecordIndexer``.

        :param record: Record instance.
        """
        index, doc_type = self.record_to_index(record)
        return self.client.index(
            id=str(record.id),
            version=record.revision_id,
            version_type=self._version_type,
            index=index,
            doc_type=doc_type,
            body=self._prepare_record(record, index, doc_type),
        )

    def index_by_id(self, record_uuid):
        """Index a record by record identifier.

        :param record_uuid: Record identifier.
        """
        return self.index(Record.get_record(record_uuid))

    def delete(self, record):
        """Delete a record.

        :param record: Record instance.
        """
        index, doc_type = self.record_to_index(record)
        return self.client.delete(
            id=str(record.id),
            index=index,
            doc_type=doc_type,
        )

    def delete_by_id(self, record_uuid):
        """Delete record from index by record identifier."""
        self.delete(Record.get_record(record_uuid))

    def bulk_index(self, record_id_iterator):
        """Bulk index records.

        :param record_id_iterator: Iterator yielding record UUIDs.
        """
        self._bulk_op(record_id_iterator, 'index')

    def bulk_delete(self, record_id_iterator):
        """Bulk delete records from index.

        :param record_id_iterator: Iterator yielding record UUIDs.
        """
        self._bulk_op(record_id_iterator, 'delete')

    def process_bulk_queue(self, es_bulk_kwargs=None):
        """Process bulk indexing queue.

        :param dict es_bulk_kwargs: Passed to
            :func:`elasticsearch:elasticsearch.helpers.bulk`.
        """
        with current_celery_app.pool.acquire(block=True) as conn:
            consumer = Consumer(
                connection=conn,
                queue=self.mq_queue.name,
                exchange=self.mq_exchange.name,
                routing_key=self.mq_routing_key,
            )
            req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
            es_bulk_kwargs = es_bulk_kwargs or {}
            # ``stats_only`` makes ``bulk`` return success/fail counters.
            count = bulk(
                self.client,
                self._actionsiter(consumer.iterqueue()),
                stats_only=True,
                request_timeout=req_timeout,
                **es_bulk_kwargs
            )
            consumer.close()
            return count

    @contextmanager
    def create_producer(self):
        """Context manager that yields an instance of ``Producer``."""
        with current_celery_app.pool.acquire(block=True) as conn:
            yield Producer(
                conn,
                exchange=self.mq_exchange,
                routing_key=self.mq_routing_key,
                auto_declare=True,
            )

    #
    # Low-level implementation
    #
    def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
        """Index record in Elasticsearch asynchronously.

        :param record_id_iterator: Iterator that yields record UUIDs.
        :param op_type: Indexing operation (one of ``index``, ``create``,
            ``delete`` or ``update``).
        :param index: The Elasticsearch index. (Default: ``None``)
        :param doc_type: The Elasticsearch doc_type. (Default: ``None``)
        """
        with self.create_producer() as producer:
            for rec in record_id_iterator:
                producer.publish(dict(
                    id=str(rec),
                    op=op_type,
                    index=index,
                    doc_type=doc_type
                ))

    def _actionsiter(self, message_iterator):
        """Iterate bulk actions.

        :param message_iterator: Iterator yielding messages from a queue.
        """
        for message in message_iterator:
            payload = message.decode()
            try:
                if payload['op'] == 'delete':
                    yield self._delete_action(payload)
                else:
                    yield self._index_action(payload)
                message.ack()
            except NoResultFound:
                # Record disappeared before the message was processed.
                message.reject()
            except Exception:
                message.reject()
                current_app.logger.error(
                    "Failed to index record {0}".format(payload.get('id')),
                    exc_info=True)

    def _delete_action(self, payload):
        """Bulk delete action.

        :param payload: Decoded message body.
        :returns: Dictionary defining an Elasticsearch bulk 'delete' action.
        """
        index, doc_type = payload.get('index'), payload.get('doc_type')
        if not (index and doc_type):
            # Message carried no routing info; derive it from the record.
            record = Record.get_record(payload['id'])
            index, doc_type = self.record_to_index(record)
        return {
            '_op_type': 'delete',
            '_index': index,
            '_type': doc_type,
            '_id': payload['id'],
        }

    @staticmethod
    def _prepare_record(record, index, doc_type):
        """Prepare record data for indexing.

        :param record: The record to prepare.
        :param index: The Elasticsearch index.
        :param doc_type: The Elasticsearch document type.
        :returns: The record metadata.
        """
        if current_app.config['INDEXER_REPLACE_REFS']:
            data = copy.deepcopy(record.replace_refs())
        else:
            data = record.dumps()
        data['_created'] = pytz.utc.localize(record.created).isoformat() \
            if record.created else None
        data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
            if record.updated else None
        # Allow modification of data prior to sending to Elasticsearch.
        before_record_index.send(
            current_app._get_current_object(),
            json=data,
            record=record,
            index=index,
            doc_type=doc_type,
        )
        return data
|
inveniosoftware/invenio-indexer
|
invenio_indexer/api.py
|
RecordIndexer._prepare_record
|
python
|
def _prepare_record(record, index, doc_type):
    """Prepare record data for indexing.

    :param record: The record to prepare.
    :param index: The Elasticsearch index.
    :param doc_type: The Elasticsearch document type.
    :returns: The record metadata.
    """
    if current_app.config['INDEXER_REPLACE_REFS']:
        # Deep copy keeps the original record object untouched.
        data = copy.deepcopy(record.replace_refs())
    else:
        data = record.dumps()
    # Stamp creation/update times as UTC ISO strings (None when unset).
    for key, stamp in (('_created', record.created),
                       ('_updated', record.updated)):
        data[key] = pytz.utc.localize(stamp).isoformat() if stamp else None
    # Allow modification of data prior to sending to Elasticsearch.
    before_record_index.send(
        current_app._get_current_object(),
        json=data,
        record=record,
        index=index,
        doc_type=doc_type,
    )
    return data
|
Prepare record data for indexing.
:param record: The record to prepare.
:param index: The Elasticsearch index.
:param doc_type: The Elasticsearch document type.
:returns: The record metadata.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L286-L313
| null |
class RecordIndexer(object):
r"""Provide an interface for indexing records in Elasticsearch.
Bulk indexing works by queuing requests for indexing records and processing
these requests in bulk.
"""
def __init__(self, search_client=None, exchange=None, queue=None,
routing_key=None, version_type=None, record_to_index=None):
"""Initialize indexer.
:param search_client: Elasticsearch client.
(Default: ``current_search_client``)
:param exchange: A :class:`kombu.Exchange` instance for message queue.
:param queue: A :class:`kombu.Queue` instance for message queue.
:param routing_key: Routing key for message queue.
:param version_type: Elasticsearch version type.
(Default: ``external_gte``)
:param record_to_index: Function to extract the index and doc_type
from the record.
"""
self.client = search_client or current_search_client
self._exchange = exchange
self._queue = queue
self._record_to_index = record_to_index or current_record_to_index
self._routing_key = routing_key
self._version_type = version_type or 'external_gte'
def record_to_index(self, record):
"""Get index/doc_type given a record.
:param record: The record where to look for the information.
:returns: A tuple (index, doc_type).
"""
return self._record_to_index(record)
@property
def mq_queue(self):
"""Message Queue queue.
:returns: The Message Queue queue.
"""
return self._queue or current_app.config['INDEXER_MQ_QUEUE']
@property
def mq_exchange(self):
"""Message Queue exchange.
:returns: The Message Queue exchange.
"""
return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']
@property
def mq_routing_key(self):
"""Message Queue routing key.
:returns: The Message Queue routing key.
"""
return (self._routing_key or
current_app.config['INDEXER_MQ_ROUTING_KEY'])
#
# High-level API
#
def index(self, record):
"""Index a record.
The caller is responsible for ensuring that the record has already been
committed to the database. If a newer version of a record has already
been indexed then the provided record will not be indexed. This
behavior can be controlled by providing a different ``version_type``
when initializing ``RecordIndexer``.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.index(
id=str(record.id),
version=record.revision_id,
version_type=self._version_type,
index=index,
doc_type=doc_type,
body=self._prepare_record(record, index, doc_type),
)
def index_by_id(self, record_uuid):
"""Index a record by record identifier.
:param record_uuid: Record identifier.
"""
return self.index(Record.get_record(record_uuid))
def delete(self, record):
"""Delete a record.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
)
def delete_by_id(self, record_uuid):
"""Delete record from index by record identifier."""
self.delete(Record.get_record(record_uuid))
def bulk_index(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'index')
def bulk_delete(self, record_id_iterator):
"""Bulk delete records from index.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'delete')
def process_bulk_queue(self, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
"""
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
**es_bulk_kwargs
)
consumer.close()
return count
@contextmanager
def create_producer(self):
"""Context manager that yields an instance of ``Producer``."""
with current_celery_app.pool.acquire(block=True) as conn:
yield Producer(
conn,
exchange=self.mq_exchange,
routing_key=self.mq_routing_key,
auto_declare=True,
)
#
# Low-level implementation
#
def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
"""Index record in Elasticsearch asynchronously.
:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``,
``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``)
"""
with self.create_producer() as producer:
for rec in record_id_iterator:
producer.publish(dict(
id=str(rec),
op=op_type,
index=index,
doc_type=doc_type
))
def _actionsiter(self, message_iterator):
"""Iterate bulk actions.
:param message_iterator: Iterator yielding messages from a queue.
"""
for message in message_iterator:
payload = message.decode()
try:
if payload['op'] == 'delete':
yield self._delete_action(payload)
else:
yield self._index_action(payload)
message.ack()
except NoResultFound:
message.reject()
except Exception:
message.reject()
current_app.logger.error(
"Failed to index record {0}".format(payload.get('id')),
exc_info=True)
def _delete_action(self, payload):
"""Bulk delete action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'delete' action.
"""
index, doc_type = payload.get('index'), payload.get('doc_type')
if not (index and doc_type):
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'delete',
'_index': index,
'_type': doc_type,
'_id': payload['id'],
}
def _index_action(self, payload):
"""Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
"""
record = Record.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
return {
'_op_type': 'index',
'_index': index,
'_type': doc_type,
'_id': str(record.id),
'_version': record.revision_id,
'_version_type': self._version_type,
'_source': self._prepare_record(record, index, doc_type),
}
@staticmethod
|
inveniosoftware/invenio-indexer
|
invenio_indexer/cli.py
|
run
|
python
|
def run(delayed, concurrency, version_type=None, queue=None,
        raise_on_error=True):
    """Run bulk record indexing."""
    bulk_kwargs = {'raise_on_error': raise_on_error}
    if not delayed:
        # Synchronous mode: drain the queue in this process.
        click.secho('Indexing records...', fg='green')
        RecordIndexer(version_type=version_type).process_bulk_queue(
            es_bulk_kwargs=bulk_kwargs)
        return
    # Delayed mode: fan the work out to Celery workers.
    celery_kwargs = {
        'kwargs': {
            'version_type': version_type,
            'es_bulk_kwargs': bulk_kwargs,
        }
    }
    click.secho(
        'Starting {0} tasks for indexing records...'.format(concurrency),
        fg='green')
    if queue is not None:
        celery_kwargs['queue'] = queue
    for _ in range(concurrency):
        process_bulk_queue.apply_async(**celery_kwargs)
|
Run bulk record indexing.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L43-L63
|
[
"def process_bulk_queue(self, es_bulk_kwargs=None):\n \"\"\"Process bulk indexing queue.\n\n :param dict es_bulk_kwargs: Passed to\n :func:`elasticsearch:elasticsearch.helpers.bulk`.\n \"\"\"\n with current_celery_app.pool.acquire(block=True) as conn:\n consumer = Consumer(\n connection=conn,\n queue=self.mq_queue.name,\n exchange=self.mq_exchange.name,\n routing_key=self.mq_routing_key,\n )\n\n req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']\n\n es_bulk_kwargs = es_bulk_kwargs or {}\n count = bulk(\n self.client,\n self._actionsiter(consumer.iterqueue()),\n stats_only=True,\n request_timeout=req_timeout,\n **es_bulk_kwargs\n )\n\n consumer.close()\n\n return count\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CLI for indexer."""
from __future__ import absolute_import, print_function
import click
from celery.messaging import establish_connection
from flask import current_app
from flask.cli import with_appcontext
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_search.cli import index
from .api import RecordIndexer
from .tasks import process_bulk_queue
def abort_if_false(ctx, param, value):
    """Abort the command if ``value`` is False (click option callback)."""
    if not value:
        ctx.abort()
@index.command()
@click.option(
'--delayed', '-d', is_flag=True, help='Run indexing in background.')
@click.option(
'--concurrency', '-c', default=1, type=int,
help='Number of concurrent indexing tasks to start.')
@click.option('--queue', '-q', type=str,
help='Name of the celery queue used to put the tasks into.')
@click.option('--version-type', help='Elasticsearch version type to use.')
@click.option(
'--raise-on-error/--skip-errors', default=True,
help='Controls if Elasticsearch bulk indexing errors raise an exception.')
@with_appcontext
@index.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
              expose_value=False,
              prompt='Do you really want to reindex all records?')
@click.option('-t', '--pid-type', multiple=True, required=True)
@with_appcontext
def reindex(pid_type):
    """Reindex all records.

    :param pid_type: Pid type.
    """
    click.secho('Sending records to indexing queue ...', fg='green')
    # Stream only the object UUIDs of registered record PIDs; the
    # generator keeps memory usage flat for large result sets.
    query = (x[0] for x in PersistentIdentifier.query.filter_by(
        object_type='rec', status=PIDStatus.REGISTERED
    ).filter(
        PersistentIdentifier.pid_type.in_(pid_type)
    ).values(
        PersistentIdentifier.object_uuid
    ))
    RecordIndexer().bulk_index(query)
    click.secho('Execute "run" command to process the queue!',
                fg='yellow')
@index.group(chain=True)
def queue():
    """Manage indexing queue."""
@queue.resultcallback()
@with_appcontext
def process_actions(actions):
    """Process queue actions."""
    # Bind the configured queue to a fresh broker connection and thread
    # it through each chained action callback.
    queue = current_app.config['INDEXER_MQ_QUEUE']
    with establish_connection() as c:
        q = queue(c)
        for action in actions:
            q = action(q)
@queue.command('init')
def init_queue():
    """Initialize indexing queue."""
    def action(queue):
        # Declare the queue on the broker so messages can be published.
        queue.declare()
        click.secho('Indexing queue has been initialized.', fg='green')
        return queue
    return action
@queue.command('purge')
def purge_queue():
    """Purge indexing queue."""
    def action(queue):
        # Drop all pending messages; the queue itself remains declared.
        queue.purge()
        click.secho('Indexing queue has been purged.', fg='green')
        return queue
    return action
@queue.command('delete')
def delete_queue():
    """Delete indexing queue."""
    def action(queue):
        # Remove the queue from the broker entirely.
        queue.delete()
        click.secho('Indexing queue has been deleted.', fg='green')
        return queue
    return action
|
inveniosoftware/invenio-indexer
|
invenio_indexer/cli.py
|
reindex
|
python
|
def reindex(pid_type):
    """Reindex all records.

    :param pid_type: Pid type.
    """
    click.secho('Sending records to indexing queue ...', fg='green')
    # Lazily stream the object UUIDs of registered record PIDs.
    rows = PersistentIdentifier.query.filter_by(
        object_type='rec', status=PIDStatus.REGISTERED
    ).filter(
        PersistentIdentifier.pid_type.in_(pid_type)
    ).values(
        PersistentIdentifier.object_uuid
    )
    RecordIndexer().bulk_index(row[0] for row in rows)
    click.secho('Execute "run" command to process the queue!',
                fg='yellow')
|
Reindex all records.
:param pid_type: Pid type.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L72-L88
|
[
"def bulk_index(self, record_id_iterator):\n \"\"\"Bulk index records.\n\n :param record_id_iterator: Iterator yielding record UUIDs.\n \"\"\"\n self._bulk_op(record_id_iterator, 'index')\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CLI for indexer."""
from __future__ import absolute_import, print_function
import click
from celery.messaging import establish_connection
from flask import current_app
from flask.cli import with_appcontext
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_search.cli import index
from .api import RecordIndexer
from .tasks import process_bulk_queue
def abort_if_false(ctx, param, value):
    """Abort the command if ``value`` is False (click option callback)."""
    if not value:
        ctx.abort()
@index.command()
@click.option(
    '--delayed', '-d', is_flag=True, help='Run indexing in background.')
@click.option(
    '--concurrency', '-c', default=1, type=int,
    help='Number of concurrent indexing tasks to start.')
@click.option('--queue', '-q', type=str,
              help='Name of the celery queue used to put the tasks into.')
@click.option('--version-type', help='Elasticsearch version type to use.')
@click.option(
    '--raise-on-error/--skip-errors', default=True,
    help='Controls if Elasticsearch bulk indexing errors raise an exception.')
@with_appcontext
def run(delayed, concurrency, version_type=None, queue=None,
        raise_on_error=True):
    """Run bulk record indexing."""
    if delayed:
        # Delayed mode: fan the work out to Celery workers.
        celery_kwargs = {
            'kwargs': {
                'version_type': version_type,
                'es_bulk_kwargs': {'raise_on_error': raise_on_error},
            }
        }
        click.secho(
            'Starting {0} tasks for indexing records...'.format(concurrency),
            fg='green')
        if queue is not None:
            celery_kwargs.update({'queue': queue})
        for c in range(0, concurrency):
            process_bulk_queue.apply_async(**celery_kwargs)
    else:
        # Synchronous mode: drain the queue in this process.
        click.secho('Indexing records...', fg='green')
        RecordIndexer(version_type=version_type).process_bulk_queue(
            es_bulk_kwargs={'raise_on_error': raise_on_error})
@index.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you really want to reindex all records?')
@click.option('-t', '--pid-type', multiple=True, required=True)
@with_appcontext
@index.group(chain=True)
def queue():
    """Manage indexing queue."""
@queue.resultcallback()
@with_appcontext
def process_actions(actions):
    """Process queue actions."""
    # Bind the configured queue to a fresh broker connection and thread
    # it through each chained action callback.
    queue = current_app.config['INDEXER_MQ_QUEUE']
    with establish_connection() as c:
        q = queue(c)
        for action in actions:
            q = action(q)
@queue.command('init')
def init_queue():
    """Initialize indexing queue."""
    def action(queue):
        # Declare the queue on the broker so messages can be published.
        queue.declare()
        click.secho('Indexing queue has been initialized.', fg='green')
        return queue
    return action
@queue.command('purge')
def purge_queue():
    """Purge indexing queue."""
    def action(queue):
        # Drop all pending messages; the queue itself remains declared.
        queue.purge()
        click.secho('Indexing queue has been purged.', fg='green')
        return queue
    return action
@queue.command('delete')
def delete_queue():
"""Delete indexing queue."""
def action(queue):
queue.delete()
click.secho('Indexing queue has been deleted.', fg='green')
return queue
return action
|
inveniosoftware/invenio-indexer
|
invenio_indexer/cli.py
|
process_actions
|
python
|
def process_actions(actions):
queue = current_app.config['INDEXER_MQ_QUEUE']
with establish_connection() as c:
q = queue(c)
for action in actions:
q = action(q)
|
Process queue actions.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L98-L104
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CLI for indexer."""
from __future__ import absolute_import, print_function
import click
from celery.messaging import establish_connection
from flask import current_app
from flask.cli import with_appcontext
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_search.cli import index
from .api import RecordIndexer
from .tasks import process_bulk_queue
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
@index.command()
@click.option(
'--delayed', '-d', is_flag=True, help='Run indexing in background.')
@click.option(
'--concurrency', '-c', default=1, type=int,
help='Number of concurrent indexing tasks to start.')
@click.option('--queue', '-q', type=str,
help='Name of the celery queue used to put the tasks into.')
@click.option('--version-type', help='Elasticsearch version type to use.')
@click.option(
'--raise-on-error/--skip-errors', default=True,
help='Controls if Elasticsearch bulk indexing errors raise an exception.')
@with_appcontext
def run(delayed, concurrency, version_type=None, queue=None,
raise_on_error=True):
"""Run bulk record indexing."""
if delayed:
celery_kwargs = {
'kwargs': {
'version_type': version_type,
'es_bulk_kwargs': {'raise_on_error': raise_on_error},
}
}
click.secho(
'Starting {0} tasks for indexing records...'.format(concurrency),
fg='green')
if queue is not None:
celery_kwargs.update({'queue': queue})
for c in range(0, concurrency):
process_bulk_queue.apply_async(**celery_kwargs)
else:
click.secho('Indexing records...', fg='green')
RecordIndexer(version_type=version_type).process_bulk_queue(
es_bulk_kwargs={'raise_on_error': raise_on_error})
@index.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you really want to reindex all records?')
@click.option('-t', '--pid-type', multiple=True, required=True)
@with_appcontext
def reindex(pid_type):
"""Reindex all records.
:param pid_type: Pid type.
"""
click.secho('Sending records to indexing queue ...', fg='green')
query = (x[0] for x in PersistentIdentifier.query.filter_by(
object_type='rec', status=PIDStatus.REGISTERED
).filter(
PersistentIdentifier.pid_type.in_(pid_type)
).values(
PersistentIdentifier.object_uuid
))
RecordIndexer().bulk_index(query)
click.secho('Execute "run" command to process the queue!',
fg='yellow')
@index.group(chain=True)
def queue():
"""Manage indexing queue."""
@queue.resultcallback()
@with_appcontext
@queue.command('init')
def init_queue():
"""Initialize indexing queue."""
def action(queue):
queue.declare()
click.secho('Indexing queue has been initialized.', fg='green')
return queue
return action
@queue.command('purge')
def purge_queue():
"""Purge indexing queue."""
def action(queue):
queue.purge()
click.secho('Indexing queue has been purged.', fg='green')
return queue
return action
@queue.command('delete')
def delete_queue():
"""Delete indexing queue."""
def action(queue):
queue.delete()
click.secho('Indexing queue has been deleted.', fg='green')
return queue
return action
|
inveniosoftware/invenio-indexer
|
invenio_indexer/cli.py
|
init_queue
|
python
|
def init_queue():
def action(queue):
queue.declare()
click.secho('Indexing queue has been initialized.', fg='green')
return queue
return action
|
Initialize indexing queue.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L108-L114
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CLI for indexer."""
from __future__ import absolute_import, print_function
import click
from celery.messaging import establish_connection
from flask import current_app
from flask.cli import with_appcontext
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_search.cli import index
from .api import RecordIndexer
from .tasks import process_bulk_queue
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
@index.command()
@click.option(
'--delayed', '-d', is_flag=True, help='Run indexing in background.')
@click.option(
'--concurrency', '-c', default=1, type=int,
help='Number of concurrent indexing tasks to start.')
@click.option('--queue', '-q', type=str,
help='Name of the celery queue used to put the tasks into.')
@click.option('--version-type', help='Elasticsearch version type to use.')
@click.option(
'--raise-on-error/--skip-errors', default=True,
help='Controls if Elasticsearch bulk indexing errors raise an exception.')
@with_appcontext
def run(delayed, concurrency, version_type=None, queue=None,
raise_on_error=True):
"""Run bulk record indexing."""
if delayed:
celery_kwargs = {
'kwargs': {
'version_type': version_type,
'es_bulk_kwargs': {'raise_on_error': raise_on_error},
}
}
click.secho(
'Starting {0} tasks for indexing records...'.format(concurrency),
fg='green')
if queue is not None:
celery_kwargs.update({'queue': queue})
for c in range(0, concurrency):
process_bulk_queue.apply_async(**celery_kwargs)
else:
click.secho('Indexing records...', fg='green')
RecordIndexer(version_type=version_type).process_bulk_queue(
es_bulk_kwargs={'raise_on_error': raise_on_error})
@index.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you really want to reindex all records?')
@click.option('-t', '--pid-type', multiple=True, required=True)
@with_appcontext
def reindex(pid_type):
"""Reindex all records.
:param pid_type: Pid type.
"""
click.secho('Sending records to indexing queue ...', fg='green')
query = (x[0] for x in PersistentIdentifier.query.filter_by(
object_type='rec', status=PIDStatus.REGISTERED
).filter(
PersistentIdentifier.pid_type.in_(pid_type)
).values(
PersistentIdentifier.object_uuid
))
RecordIndexer().bulk_index(query)
click.secho('Execute "run" command to process the queue!',
fg='yellow')
@index.group(chain=True)
def queue():
"""Manage indexing queue."""
@queue.resultcallback()
@with_appcontext
def process_actions(actions):
"""Process queue actions."""
queue = current_app.config['INDEXER_MQ_QUEUE']
with establish_connection() as c:
q = queue(c)
for action in actions:
q = action(q)
@queue.command('init')
@queue.command('purge')
def purge_queue():
"""Purge indexing queue."""
def action(queue):
queue.purge()
click.secho('Indexing queue has been purged.', fg='green')
return queue
return action
@queue.command('delete')
def delete_queue():
"""Delete indexing queue."""
def action(queue):
queue.delete()
click.secho('Indexing queue has been deleted.', fg='green')
return queue
return action
|
inveniosoftware/invenio-indexer
|
invenio_indexer/cli.py
|
purge_queue
|
python
|
def purge_queue():
def action(queue):
queue.purge()
click.secho('Indexing queue has been purged.', fg='green')
return queue
return action
|
Purge indexing queue.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L118-L124
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CLI for indexer."""
from __future__ import absolute_import, print_function
import click
from celery.messaging import establish_connection
from flask import current_app
from flask.cli import with_appcontext
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_search.cli import index
from .api import RecordIndexer
from .tasks import process_bulk_queue
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
@index.command()
@click.option(
'--delayed', '-d', is_flag=True, help='Run indexing in background.')
@click.option(
'--concurrency', '-c', default=1, type=int,
help='Number of concurrent indexing tasks to start.')
@click.option('--queue', '-q', type=str,
help='Name of the celery queue used to put the tasks into.')
@click.option('--version-type', help='Elasticsearch version type to use.')
@click.option(
'--raise-on-error/--skip-errors', default=True,
help='Controls if Elasticsearch bulk indexing errors raise an exception.')
@with_appcontext
def run(delayed, concurrency, version_type=None, queue=None,
raise_on_error=True):
"""Run bulk record indexing."""
if delayed:
celery_kwargs = {
'kwargs': {
'version_type': version_type,
'es_bulk_kwargs': {'raise_on_error': raise_on_error},
}
}
click.secho(
'Starting {0} tasks for indexing records...'.format(concurrency),
fg='green')
if queue is not None:
celery_kwargs.update({'queue': queue})
for c in range(0, concurrency):
process_bulk_queue.apply_async(**celery_kwargs)
else:
click.secho('Indexing records...', fg='green')
RecordIndexer(version_type=version_type).process_bulk_queue(
es_bulk_kwargs={'raise_on_error': raise_on_error})
@index.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you really want to reindex all records?')
@click.option('-t', '--pid-type', multiple=True, required=True)
@with_appcontext
def reindex(pid_type):
"""Reindex all records.
:param pid_type: Pid type.
"""
click.secho('Sending records to indexing queue ...', fg='green')
query = (x[0] for x in PersistentIdentifier.query.filter_by(
object_type='rec', status=PIDStatus.REGISTERED
).filter(
PersistentIdentifier.pid_type.in_(pid_type)
).values(
PersistentIdentifier.object_uuid
))
RecordIndexer().bulk_index(query)
click.secho('Execute "run" command to process the queue!',
fg='yellow')
@index.group(chain=True)
def queue():
"""Manage indexing queue."""
@queue.resultcallback()
@with_appcontext
def process_actions(actions):
"""Process queue actions."""
queue = current_app.config['INDEXER_MQ_QUEUE']
with establish_connection() as c:
q = queue(c)
for action in actions:
q = action(q)
@queue.command('init')
def init_queue():
"""Initialize indexing queue."""
def action(queue):
queue.declare()
click.secho('Indexing queue has been initialized.', fg='green')
return queue
return action
@queue.command('purge')
@queue.command('delete')
def delete_queue():
"""Delete indexing queue."""
def action(queue):
queue.delete()
click.secho('Indexing queue has been deleted.', fg='green')
return queue
return action
|
inveniosoftware/invenio-indexer
|
invenio_indexer/cli.py
|
delete_queue
|
python
|
def delete_queue():
def action(queue):
queue.delete()
click.secho('Indexing queue has been deleted.', fg='green')
return queue
return action
|
Delete indexing queue.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L128-L134
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CLI for indexer."""
from __future__ import absolute_import, print_function
import click
from celery.messaging import establish_connection
from flask import current_app
from flask.cli import with_appcontext
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_search.cli import index
from .api import RecordIndexer
from .tasks import process_bulk_queue
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
@index.command()
@click.option(
'--delayed', '-d', is_flag=True, help='Run indexing in background.')
@click.option(
'--concurrency', '-c', default=1, type=int,
help='Number of concurrent indexing tasks to start.')
@click.option('--queue', '-q', type=str,
help='Name of the celery queue used to put the tasks into.')
@click.option('--version-type', help='Elasticsearch version type to use.')
@click.option(
'--raise-on-error/--skip-errors', default=True,
help='Controls if Elasticsearch bulk indexing errors raise an exception.')
@with_appcontext
def run(delayed, concurrency, version_type=None, queue=None,
raise_on_error=True):
"""Run bulk record indexing."""
if delayed:
celery_kwargs = {
'kwargs': {
'version_type': version_type,
'es_bulk_kwargs': {'raise_on_error': raise_on_error},
}
}
click.secho(
'Starting {0} tasks for indexing records...'.format(concurrency),
fg='green')
if queue is not None:
celery_kwargs.update({'queue': queue})
for c in range(0, concurrency):
process_bulk_queue.apply_async(**celery_kwargs)
else:
click.secho('Indexing records...', fg='green')
RecordIndexer(version_type=version_type).process_bulk_queue(
es_bulk_kwargs={'raise_on_error': raise_on_error})
@index.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you really want to reindex all records?')
@click.option('-t', '--pid-type', multiple=True, required=True)
@with_appcontext
def reindex(pid_type):
"""Reindex all records.
:param pid_type: Pid type.
"""
click.secho('Sending records to indexing queue ...', fg='green')
query = (x[0] for x in PersistentIdentifier.query.filter_by(
object_type='rec', status=PIDStatus.REGISTERED
).filter(
PersistentIdentifier.pid_type.in_(pid_type)
).values(
PersistentIdentifier.object_uuid
))
RecordIndexer().bulk_index(query)
click.secho('Execute "run" command to process the queue!',
fg='yellow')
@index.group(chain=True)
def queue():
"""Manage indexing queue."""
@queue.resultcallback()
@with_appcontext
def process_actions(actions):
"""Process queue actions."""
queue = current_app.config['INDEXER_MQ_QUEUE']
with establish_connection() as c:
q = queue(c)
for action in actions:
q = action(q)
@queue.command('init')
def init_queue():
"""Initialize indexing queue."""
def action(queue):
queue.declare()
click.secho('Indexing queue has been initialized.', fg='green')
return queue
return action
@queue.command('purge')
def purge_queue():
"""Purge indexing queue."""
def action(queue):
queue.purge()
click.secho('Indexing queue has been purged.', fg='green')
return queue
return action
@queue.command('delete')
|
inveniosoftware/invenio-indexer
|
invenio_indexer/utils.py
|
default_record_to_index
|
python
|
def default_record_to_index(record):
index_names = current_search.mappings.keys()
schema = record.get('$schema', '')
if isinstance(schema, dict):
schema = schema.get('$ref', '')
index, doc_type = schema_to_index(schema, index_names=index_names)
if index and doc_type:
return index, doc_type
else:
return (current_app.config['INDEXER_DEFAULT_INDEX'],
current_app.config['INDEXER_DEFAULT_DOC_TYPE'])
|
Get index/doc_type given a record.
It tries to extract from `record['$schema']` the index and doc_type.
If it fails, return the default values.
:param record: The record object.
:returns: Tuple (index, doc_type).
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/utils.py#L16-L36
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Utility functions for data processing."""
from flask import current_app
from invenio_search import current_search
from invenio_search.utils import schema_to_index
|
inveniosoftware/invenio-indexer
|
invenio_indexer/ext.py
|
InvenioIndexer.init_app
|
python
|
def init_app(self, app):
self.init_config(app)
app.extensions['invenio-indexer'] = self
hooks = app.config.get('INDEXER_BEFORE_INDEX_HOOKS', [])
for hook in hooks:
if isinstance(hook, six.string_types):
hook = import_string(hook)
before_record_index.connect_via(app)(hook)
|
Flask application initialization.
:param app: The Flask application.
|
train
|
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/ext.py#L34-L46
|
[
"def init_config(self, app):\n \"\"\"Initialize configuration.\n\n :param app: The Flask application.\n \"\"\"\n for k in dir(config):\n if k.startswith('INDEXER_'):\n app.config.setdefault(k, getattr(config, k))\n"
] |
class InvenioIndexer(object):
"""Invenio-Indexer extension."""
def __init__(self, app=None):
"""Extension initialization.
:param app: The Flask application. (Default: ``None``)
"""
if app:
self.init_app(app)
def init_config(self, app):
"""Initialize configuration.
:param app: The Flask application.
"""
for k in dir(config):
if k.startswith('INDEXER_'):
app.config.setdefault(k, getattr(config, k))
@cached_property
def record_to_index(self):
"""Import the configurable 'record_to_index' function."""
return import_string(current_app.config.get('INDEXER_RECORD_TO_INDEX'))
|
nocarryr/python-dispatch
|
pydispatch/dispatch.py
|
Dispatcher.register_event
|
python
|
def register_event(self, *names):
for name in names:
if name in self.__events:
continue
self.__events[name] = Event(name)
|
Registers new events after instance creation
Args:
*names (str): Name or names of the events to register
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L125-L134
| null |
class Dispatcher(object):
"""Core class used to enable all functionality in the library
Interfaces with :class:`Event` and :class:`~pydispatch.properties.Property`
objects upon instance creation.
Events can be created by calling :meth:`register_event` or by the subclass
definition::
class Foo(Dispatcher):
_events_ = ['awesome_event', 'on_less_awesome_event']
Once defined, an event can be dispatched to listeners by calling :meth:`emit`.
"""
__initialized_subclasses = set()
__skip_initialized = True
def __new__(cls, *args, **kwargs):
def iter_bases(_cls):
if _cls is not object:
yield _cls
for b in _cls.__bases__:
for _cls_ in iter_bases(b):
yield _cls_
skip_initialized = Dispatcher._Dispatcher__skip_initialized
if not skip_initialized or cls not in Dispatcher._Dispatcher__initialized_subclasses:
props = {}
events = set()
for _cls in iter_bases(cls):
for attr in dir(_cls):
prop = getattr(_cls, attr)
if attr not in props and isinstance(prop, Property):
props[attr] = prop
prop.name = attr
_events = getattr(_cls, '_events_', [])
events |= set(_events)
cls._PROPERTIES_ = props
cls._EVENTS_ = events
if skip_initialized:
Dispatcher._Dispatcher__initialized_subclasses.add(cls)
obj = super(Dispatcher, cls).__new__(cls)
obj._Dispatcher__init_events()
return obj
def __init__(self, *args, **kwargs):
# Everything is handled by __new__
# This is only here to prevent exceptions being raised
pass
def __init_events(self):
if hasattr(self, '_Dispatcher__events'):
return
self.__events = {}
for name in self._EVENTS_:
self.__events[name] = Event(name)
self.__property_events = {}
for name, prop in self._PROPERTIES_.items():
self.__property_events[name] = Event(name)
prop._add_instance(self)
def bind(self, **kwargs):
"""Subscribes to events or to :class:`~pydispatch.properties.Property` updates
Keyword arguments are used with the Event or Property names as keys
and the callbacks as values::
class Foo(Dispatcher):
name = Property()
foo = Foo()
foo.bind(name=my_listener.on_foo_name_changed)
foo.bind(name=other_listener.on_name,
value=other_listener.on_value)
The callbacks are stored as weak references and their order is not
maintained relative to the order of binding.
**Async Callbacks**:
Callbacks may be :term:`coroutine functions <coroutine function>`
(defined using :keyword:`async def` or decorated with
:func:`@asyncio.coroutine <asyncio.coroutine>`), but an event loop
must be explicitly provided with the keyword
argument ``"__aio_loop__"`` (an instance of
:class:`asyncio.BaseEventLoop`)::
import asyncio
from pydispatch import Dispatcher
class Foo(Dispatcher):
_events_ = ['test_event']
class Bar(object):
def __init__(self):
self.got_foo_event = asyncio.Event()
async def wait_for_foo(self):
await self.got_foo_event.wait()
print('got foo!')
async def on_foo_test_event(self, *args, **kwargs):
self.got_foo_event.set()
foo = Foo()
bar = Bar()
loop = asyncio.get_event_loop()
foo.bind(test_event=bar.on_foo_test_event, __aio_loop__=loop)
loop.run_until_complete(bar.wait_for_foo())
This can also be done using :meth:`bind_async`.
.. versionadded:: 0.1.0
"""
aio_loop = kwargs.pop('__aio_loop__', None)
props = self.__property_events
events = self.__events
for name, cb in kwargs.items():
if name in props:
e = props[name]
else:
e = events[name]
e.add_listener(cb, __aio_loop__=aio_loop)
def unbind(self, *args):
"""Unsubscribes from events or :class:`~pydispatch.properties.Property` updates
Multiple arguments can be given. Each of which can be either the method
that was used for the original call to :meth:`bind` or an instance
object.
If an instance of an object is supplied, any previously bound Events and
Properties will be 'unbound'.
"""
props = self.__property_events.values()
events = self.__events.values()
for arg in args:
for prop in props:
prop.remove_listener(arg)
for e in events:
e.remove_listener(arg)
def bind_async(self, loop, **kwargs):
"""Subscribes to events with async callbacks
Functionality is matches the :meth:`bind` method, except the provided
callbacks should be coroutine functions. When the event is dispatched,
callbacks will be placed on the given event loop.
For keyword arguments, see :meth:`bind`.
Args:
loop: The :class:`EventLoop <asyncio.BaseEventLoop>` to use when
events are dispatched
Availability:
Python>=3.5
.. versionadded:: 0.1.0
"""
kwargs['__aio_loop__'] = loop
self.bind(**kwargs)
def emit(self, name, *args, **kwargs):
"""Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e(*args, **kwargs)
def get_dispatcher_event(self, name):
"""Retrieves an Event object by name
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property` object to retrieve
Returns:
The :class:`Event` instance for the event or property definition
.. versionadded:: 0.1.0
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e
def emission_lock(self, name):
"""Holds emission of events and dispatches the last event on release
The context manager returned will store the last event data called by
:meth:`emit` and prevent callbacks until it exits. On exit, it will
dispatch the last event captured (if any)::
class Foo(Dispatcher):
_events_ = ['my_event']
def on_my_event(value):
print(value)
foo = Foo()
foo.bind(my_event=on_my_event)
with foo.emission_lock('my_event'):
foo.emit('my_event', 1)
foo.emit('my_event', 2)
>>> 2
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property`
Returns:
A context manager to be used by the :keyword:`with` statement.
If available, this will also be an async context manager to be used
with the :keyword:`async with` statement (see `PEP 492`_).
Note:
The context manager is re-entrant, meaning that multiple calls to
this method within nested context scopes are possible.
.. _PEP 492: https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e.emission_lock
|
nocarryr/python-dispatch
|
pydispatch/dispatch.py
|
Dispatcher.bind
|
python
|
def bind(self, **kwargs):
aio_loop = kwargs.pop('__aio_loop__', None)
props = self.__property_events
events = self.__events
for name, cb in kwargs.items():
if name in props:
e = props[name]
else:
e = events[name]
e.add_listener(cb, __aio_loop__=aio_loop)
|
Subscribes to events or to :class:`~pydispatch.properties.Property` updates
Keyword arguments are used with the Event or Property names as keys
and the callbacks as values::
class Foo(Dispatcher):
name = Property()
foo = Foo()
foo.bind(name=my_listener.on_foo_name_changed)
foo.bind(name=other_listener.on_name,
value=other_listener.on_value)
The callbacks are stored as weak references and their order is not
maintained relative to the order of binding.
**Async Callbacks**:
Callbacks may be :term:`coroutine functions <coroutine function>`
(defined using :keyword:`async def` or decorated with
:func:`@asyncio.coroutine <asyncio.coroutine>`), but an event loop
must be explicitly provided with the keyword
argument ``"__aio_loop__"`` (an instance of
:class:`asyncio.BaseEventLoop`)::
import asyncio
from pydispatch import Dispatcher
class Foo(Dispatcher):
_events_ = ['test_event']
class Bar(object):
def __init__(self):
self.got_foo_event = asyncio.Event()
async def wait_for_foo(self):
await self.got_foo_event.wait()
print('got foo!')
async def on_foo_test_event(self, *args, **kwargs):
self.got_foo_event.set()
foo = Foo()
bar = Bar()
loop = asyncio.get_event_loop()
foo.bind(test_event=bar.on_foo_test_event, __aio_loop__=loop)
loop.run_until_complete(bar.wait_for_foo())
This can also be done using :meth:`bind_async`.
.. versionadded:: 0.1.0
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L135-L198
| null |
class Dispatcher(object):
"""Core class used to enable all functionality in the library
Interfaces with :class:`Event` and :class:`~pydispatch.properties.Property`
objects upon instance creation.
Events can be created by calling :meth:`register_event` or by the subclass
definition::
class Foo(Dispatcher):
_events_ = ['awesome_event', 'on_less_awesome_event']
Once defined, an event can be dispatched to listeners by calling :meth:`emit`.
"""
__initialized_subclasses = set()
__skip_initialized = True
def __new__(cls, *args, **kwargs):
def iter_bases(_cls):
if _cls is not object:
yield _cls
for b in _cls.__bases__:
for _cls_ in iter_bases(b):
yield _cls_
skip_initialized = Dispatcher._Dispatcher__skip_initialized
if not skip_initialized or cls not in Dispatcher._Dispatcher__initialized_subclasses:
props = {}
events = set()
for _cls in iter_bases(cls):
for attr in dir(_cls):
prop = getattr(_cls, attr)
if attr not in props and isinstance(prop, Property):
props[attr] = prop
prop.name = attr
_events = getattr(_cls, '_events_', [])
events |= set(_events)
cls._PROPERTIES_ = props
cls._EVENTS_ = events
if skip_initialized:
Dispatcher._Dispatcher__initialized_subclasses.add(cls)
obj = super(Dispatcher, cls).__new__(cls)
obj._Dispatcher__init_events()
return obj
def __init__(self, *args, **kwargs):
# Everything is handled by __new__
# This is only here to prevent exceptions being raised
pass
def __init_events(self):
if hasattr(self, '_Dispatcher__events'):
return
self.__events = {}
for name in self._EVENTS_:
self.__events[name] = Event(name)
self.__property_events = {}
for name, prop in self._PROPERTIES_.items():
self.__property_events[name] = Event(name)
prop._add_instance(self)
def register_event(self, *names):
"""Registers new events after instance creation
Args:
*names (str): Name or names of the events to register
"""
for name in names:
if name in self.__events:
continue
self.__events[name] = Event(name)
def unbind(self, *args):
"""Unsubscribes from events or :class:`~pydispatch.properties.Property` updates
Multiple arguments can be given. Each of which can be either the method
that was used for the original call to :meth:`bind` or an instance
object.
If an instance of an object is supplied, any previously bound Events and
Properties will be 'unbound'.
"""
props = self.__property_events.values()
events = self.__events.values()
for arg in args:
for prop in props:
prop.remove_listener(arg)
for e in events:
e.remove_listener(arg)
def bind_async(self, loop, **kwargs):
"""Subscribes to events with async callbacks
Functionality is matches the :meth:`bind` method, except the provided
callbacks should be coroutine functions. When the event is dispatched,
callbacks will be placed on the given event loop.
For keyword arguments, see :meth:`bind`.
Args:
loop: The :class:`EventLoop <asyncio.BaseEventLoop>` to use when
events are dispatched
Availability:
Python>=3.5
.. versionadded:: 0.1.0
"""
kwargs['__aio_loop__'] = loop
self.bind(**kwargs)
def emit(self, name, *args, **kwargs):
"""Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e(*args, **kwargs)
def get_dispatcher_event(self, name):
"""Retrieves an Event object by name
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property` object to retrieve
Returns:
The :class:`Event` instance for the event or property definition
.. versionadded:: 0.1.0
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e
def emission_lock(self, name):
"""Holds emission of events and dispatches the last event on release
The context manager returned will store the last event data called by
:meth:`emit` and prevent callbacks until it exits. On exit, it will
dispatch the last event captured (if any)::
class Foo(Dispatcher):
_events_ = ['my_event']
def on_my_event(value):
print(value)
foo = Foo()
foo.bind(my_event=on_my_event)
with foo.emission_lock('my_event'):
foo.emit('my_event', 1)
foo.emit('my_event', 2)
>>> 2
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property`
Returns:
A context manager to be used by the :keyword:`with` statement.
If available, this will also be an async context manager to be used
with the :keyword:`async with` statement (see `PEP 492`_).
Note:
The context manager is re-entrant, meaning that multiple calls to
this method within nested context scopes are possible.
.. _PEP 492: https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e.emission_lock
|
nocarryr/python-dispatch
|
pydispatch/dispatch.py
|
Dispatcher.unbind
|
python
|
def unbind(self, *args):
props = self.__property_events.values()
events = self.__events.values()
for arg in args:
for prop in props:
prop.remove_listener(arg)
for e in events:
e.remove_listener(arg)
|
Unsubscribes from events or :class:`~pydispatch.properties.Property` updates
Multiple arguments can be given. Each of which can be either the method
that was used for the original call to :meth:`bind` or an instance
object.
If an instance of an object is supplied, any previously bound Events and
Properties will be 'unbound'.
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L199-L215
| null |
class Dispatcher(object):
"""Core class used to enable all functionality in the library
Interfaces with :class:`Event` and :class:`~pydispatch.properties.Property`
objects upon instance creation.
Events can be created by calling :meth:`register_event` or by the subclass
definition::
class Foo(Dispatcher):
_events_ = ['awesome_event', 'on_less_awesome_event']
Once defined, an event can be dispatched to listeners by calling :meth:`emit`.
"""
__initialized_subclasses = set()
__skip_initialized = True
def __new__(cls, *args, **kwargs):
def iter_bases(_cls):
if _cls is not object:
yield _cls
for b in _cls.__bases__:
for _cls_ in iter_bases(b):
yield _cls_
skip_initialized = Dispatcher._Dispatcher__skip_initialized
if not skip_initialized or cls not in Dispatcher._Dispatcher__initialized_subclasses:
props = {}
events = set()
for _cls in iter_bases(cls):
for attr in dir(_cls):
prop = getattr(_cls, attr)
if attr not in props and isinstance(prop, Property):
props[attr] = prop
prop.name = attr
_events = getattr(_cls, '_events_', [])
events |= set(_events)
cls._PROPERTIES_ = props
cls._EVENTS_ = events
if skip_initialized:
Dispatcher._Dispatcher__initialized_subclasses.add(cls)
obj = super(Dispatcher, cls).__new__(cls)
obj._Dispatcher__init_events()
return obj
def __init__(self, *args, **kwargs):
# Everything is handled by __new__
# This is only here to prevent exceptions being raised
pass
def __init_events(self):
if hasattr(self, '_Dispatcher__events'):
return
self.__events = {}
for name in self._EVENTS_:
self.__events[name] = Event(name)
self.__property_events = {}
for name, prop in self._PROPERTIES_.items():
self.__property_events[name] = Event(name)
prop._add_instance(self)
def register_event(self, *names):
"""Registers new events after instance creation
Args:
*names (str): Name or names of the events to register
"""
for name in names:
if name in self.__events:
continue
self.__events[name] = Event(name)
def bind(self, **kwargs):
"""Subscribes to events or to :class:`~pydispatch.properties.Property` updates
Keyword arguments are used with the Event or Property names as keys
and the callbacks as values::
class Foo(Dispatcher):
name = Property()
foo = Foo()
foo.bind(name=my_listener.on_foo_name_changed)
foo.bind(name=other_listener.on_name,
value=other_listener.on_value)
The callbacks are stored as weak references and their order is not
maintained relative to the order of binding.
**Async Callbacks**:
Callbacks may be :term:`coroutine functions <coroutine function>`
(defined using :keyword:`async def` or decorated with
:func:`@asyncio.coroutine <asyncio.coroutine>`), but an event loop
must be explicitly provided with the keyword
argument ``"__aio_loop__"`` (an instance of
:class:`asyncio.BaseEventLoop`)::
import asyncio
from pydispatch import Dispatcher
class Foo(Dispatcher):
_events_ = ['test_event']
class Bar(object):
def __init__(self):
self.got_foo_event = asyncio.Event()
async def wait_for_foo(self):
await self.got_foo_event.wait()
print('got foo!')
async def on_foo_test_event(self, *args, **kwargs):
self.got_foo_event.set()
foo = Foo()
bar = Bar()
loop = asyncio.get_event_loop()
foo.bind(test_event=bar.on_foo_test_event, __aio_loop__=loop)
loop.run_until_complete(bar.wait_for_foo())
This can also be done using :meth:`bind_async`.
.. versionadded:: 0.1.0
"""
aio_loop = kwargs.pop('__aio_loop__', None)
props = self.__property_events
events = self.__events
for name, cb in kwargs.items():
if name in props:
e = props[name]
else:
e = events[name]
e.add_listener(cb, __aio_loop__=aio_loop)
def bind_async(self, loop, **kwargs):
"""Subscribes to events with async callbacks
Functionality is matches the :meth:`bind` method, except the provided
callbacks should be coroutine functions. When the event is dispatched,
callbacks will be placed on the given event loop.
For keyword arguments, see :meth:`bind`.
Args:
loop: The :class:`EventLoop <asyncio.BaseEventLoop>` to use when
events are dispatched
Availability:
Python>=3.5
.. versionadded:: 0.1.0
"""
kwargs['__aio_loop__'] = loop
self.bind(**kwargs)
def emit(self, name, *args, **kwargs):
"""Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e(*args, **kwargs)
def get_dispatcher_event(self, name):
"""Retrieves an Event object by name
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property` object to retrieve
Returns:
The :class:`Event` instance for the event or property definition
.. versionadded:: 0.1.0
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e
def emission_lock(self, name):
"""Holds emission of events and dispatches the last event on release
The context manager returned will store the last event data called by
:meth:`emit` and prevent callbacks until it exits. On exit, it will
dispatch the last event captured (if any)::
class Foo(Dispatcher):
_events_ = ['my_event']
def on_my_event(value):
print(value)
foo = Foo()
foo.bind(my_event=on_my_event)
with foo.emission_lock('my_event'):
foo.emit('my_event', 1)
foo.emit('my_event', 2)
>>> 2
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property`
Returns:
A context manager to be used by the :keyword:`with` statement.
If available, this will also be an async context manager to be used
with the :keyword:`async with` statement (see `PEP 492`_).
Note:
The context manager is re-entrant, meaning that multiple calls to
this method within nested context scopes are possible.
.. _PEP 492: https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e.emission_lock
|
nocarryr/python-dispatch
|
pydispatch/dispatch.py
|
Dispatcher.emit
|
python
|
def emit(self, name, *args, **kwargs):
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e(*args, **kwargs)
|
Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L236-L251
| null |
class Dispatcher(object):
"""Core class used to enable all functionality in the library
Interfaces with :class:`Event` and :class:`~pydispatch.properties.Property`
objects upon instance creation.
Events can be created by calling :meth:`register_event` or by the subclass
definition::
class Foo(Dispatcher):
_events_ = ['awesome_event', 'on_less_awesome_event']
Once defined, an event can be dispatched to listeners by calling :meth:`emit`.
"""
__initialized_subclasses = set()
__skip_initialized = True
def __new__(cls, *args, **kwargs):
def iter_bases(_cls):
if _cls is not object:
yield _cls
for b in _cls.__bases__:
for _cls_ in iter_bases(b):
yield _cls_
skip_initialized = Dispatcher._Dispatcher__skip_initialized
if not skip_initialized or cls not in Dispatcher._Dispatcher__initialized_subclasses:
props = {}
events = set()
for _cls in iter_bases(cls):
for attr in dir(_cls):
prop = getattr(_cls, attr)
if attr not in props and isinstance(prop, Property):
props[attr] = prop
prop.name = attr
_events = getattr(_cls, '_events_', [])
events |= set(_events)
cls._PROPERTIES_ = props
cls._EVENTS_ = events
if skip_initialized:
Dispatcher._Dispatcher__initialized_subclasses.add(cls)
obj = super(Dispatcher, cls).__new__(cls)
obj._Dispatcher__init_events()
return obj
def __init__(self, *args, **kwargs):
# Everything is handled by __new__
# This is only here to prevent exceptions being raised
pass
def __init_events(self):
if hasattr(self, '_Dispatcher__events'):
return
self.__events = {}
for name in self._EVENTS_:
self.__events[name] = Event(name)
self.__property_events = {}
for name, prop in self._PROPERTIES_.items():
self.__property_events[name] = Event(name)
prop._add_instance(self)
def register_event(self, *names):
"""Registers new events after instance creation
Args:
*names (str): Name or names of the events to register
"""
for name in names:
if name in self.__events:
continue
self.__events[name] = Event(name)
def bind(self, **kwargs):
"""Subscribes to events or to :class:`~pydispatch.properties.Property` updates
Keyword arguments are used with the Event or Property names as keys
and the callbacks as values::
class Foo(Dispatcher):
name = Property()
foo = Foo()
foo.bind(name=my_listener.on_foo_name_changed)
foo.bind(name=other_listener.on_name,
value=other_listener.on_value)
The callbacks are stored as weak references and their order is not
maintained relative to the order of binding.
**Async Callbacks**:
Callbacks may be :term:`coroutine functions <coroutine function>`
(defined using :keyword:`async def` or decorated with
:func:`@asyncio.coroutine <asyncio.coroutine>`), but an event loop
must be explicitly provided with the keyword
argument ``"__aio_loop__"`` (an instance of
:class:`asyncio.BaseEventLoop`)::
import asyncio
from pydispatch import Dispatcher
class Foo(Dispatcher):
_events_ = ['test_event']
class Bar(object):
def __init__(self):
self.got_foo_event = asyncio.Event()
async def wait_for_foo(self):
await self.got_foo_event.wait()
print('got foo!')
async def on_foo_test_event(self, *args, **kwargs):
self.got_foo_event.set()
foo = Foo()
bar = Bar()
loop = asyncio.get_event_loop()
foo.bind(test_event=bar.on_foo_test_event, __aio_loop__=loop)
loop.run_until_complete(bar.wait_for_foo())
This can also be done using :meth:`bind_async`.
.. versionadded:: 0.1.0
"""
aio_loop = kwargs.pop('__aio_loop__', None)
props = self.__property_events
events = self.__events
for name, cb in kwargs.items():
if name in props:
e = props[name]
else:
e = events[name]
e.add_listener(cb, __aio_loop__=aio_loop)
def unbind(self, *args):
"""Unsubscribes from events or :class:`~pydispatch.properties.Property` updates
Multiple arguments can be given. Each of which can be either the method
that was used for the original call to :meth:`bind` or an instance
object.
If an instance of an object is supplied, any previously bound Events and
Properties will be 'unbound'.
"""
props = self.__property_events.values()
events = self.__events.values()
for arg in args:
for prop in props:
prop.remove_listener(arg)
for e in events:
e.remove_listener(arg)
def bind_async(self, loop, **kwargs):
"""Subscribes to events with async callbacks
Functionality is matches the :meth:`bind` method, except the provided
callbacks should be coroutine functions. When the event is dispatched,
callbacks will be placed on the given event loop.
For keyword arguments, see :meth:`bind`.
Args:
loop: The :class:`EventLoop <asyncio.BaseEventLoop>` to use when
events are dispatched
Availability:
Python>=3.5
.. versionadded:: 0.1.0
"""
kwargs['__aio_loop__'] = loop
self.bind(**kwargs)
def get_dispatcher_event(self, name):
"""Retrieves an Event object by name
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property` object to retrieve
Returns:
The :class:`Event` instance for the event or property definition
.. versionadded:: 0.1.0
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e
def emission_lock(self, name):
"""Holds emission of events and dispatches the last event on release
The context manager returned will store the last event data called by
:meth:`emit` and prevent callbacks until it exits. On exit, it will
dispatch the last event captured (if any)::
class Foo(Dispatcher):
_events_ = ['my_event']
def on_my_event(value):
print(value)
foo = Foo()
foo.bind(my_event=on_my_event)
with foo.emission_lock('my_event'):
foo.emit('my_event', 1)
foo.emit('my_event', 2)
>>> 2
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property`
Returns:
A context manager to be used by the :keyword:`with` statement.
If available, this will also be an async context manager to be used
with the :keyword:`async with` statement (see `PEP 492`_).
Note:
The context manager is re-entrant, meaning that multiple calls to
this method within nested context scopes are possible.
.. _PEP 492: https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e.emission_lock
|
nocarryr/python-dispatch
|
pydispatch/dispatch.py
|
Dispatcher.get_dispatcher_event
|
python
|
def get_dispatcher_event(self, name):
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e
|
Retrieves an Event object by name
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property` object to retrieve
Returns:
The :class:`Event` instance for the event or property definition
.. versionadded:: 0.1.0
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L252-L267
| null |
class Dispatcher(object):
"""Core class used to enable all functionality in the library
Interfaces with :class:`Event` and :class:`~pydispatch.properties.Property`
objects upon instance creation.
Events can be created by calling :meth:`register_event` or by the subclass
definition::
class Foo(Dispatcher):
_events_ = ['awesome_event', 'on_less_awesome_event']
Once defined, an event can be dispatched to listeners by calling :meth:`emit`.
"""
__initialized_subclasses = set()
__skip_initialized = True
def __new__(cls, *args, **kwargs):
def iter_bases(_cls):
if _cls is not object:
yield _cls
for b in _cls.__bases__:
for _cls_ in iter_bases(b):
yield _cls_
skip_initialized = Dispatcher._Dispatcher__skip_initialized
if not skip_initialized or cls not in Dispatcher._Dispatcher__initialized_subclasses:
props = {}
events = set()
for _cls in iter_bases(cls):
for attr in dir(_cls):
prop = getattr(_cls, attr)
if attr not in props and isinstance(prop, Property):
props[attr] = prop
prop.name = attr
_events = getattr(_cls, '_events_', [])
events |= set(_events)
cls._PROPERTIES_ = props
cls._EVENTS_ = events
if skip_initialized:
Dispatcher._Dispatcher__initialized_subclasses.add(cls)
obj = super(Dispatcher, cls).__new__(cls)
obj._Dispatcher__init_events()
return obj
def __init__(self, *args, **kwargs):
# Everything is handled by __new__
# This is only here to prevent exceptions being raised
pass
def __init_events(self):
if hasattr(self, '_Dispatcher__events'):
return
self.__events = {}
for name in self._EVENTS_:
self.__events[name] = Event(name)
self.__property_events = {}
for name, prop in self._PROPERTIES_.items():
self.__property_events[name] = Event(name)
prop._add_instance(self)
def register_event(self, *names):
"""Registers new events after instance creation
Args:
*names (str): Name or names of the events to register
"""
for name in names:
if name in self.__events:
continue
self.__events[name] = Event(name)
def bind(self, **kwargs):
"""Subscribes to events or to :class:`~pydispatch.properties.Property` updates
Keyword arguments are used with the Event or Property names as keys
and the callbacks as values::
class Foo(Dispatcher):
name = Property()
foo = Foo()
foo.bind(name=my_listener.on_foo_name_changed)
foo.bind(name=other_listener.on_name,
value=other_listener.on_value)
The callbacks are stored as weak references and their order is not
maintained relative to the order of binding.
**Async Callbacks**:
Callbacks may be :term:`coroutine functions <coroutine function>`
(defined using :keyword:`async def` or decorated with
:func:`@asyncio.coroutine <asyncio.coroutine>`), but an event loop
must be explicitly provided with the keyword
argument ``"__aio_loop__"`` (an instance of
:class:`asyncio.BaseEventLoop`)::
import asyncio
from pydispatch import Dispatcher
class Foo(Dispatcher):
_events_ = ['test_event']
class Bar(object):
def __init__(self):
self.got_foo_event = asyncio.Event()
async def wait_for_foo(self):
await self.got_foo_event.wait()
print('got foo!')
async def on_foo_test_event(self, *args, **kwargs):
self.got_foo_event.set()
foo = Foo()
bar = Bar()
loop = asyncio.get_event_loop()
foo.bind(test_event=bar.on_foo_test_event, __aio_loop__=loop)
loop.run_until_complete(bar.wait_for_foo())
This can also be done using :meth:`bind_async`.
.. versionadded:: 0.1.0
"""
aio_loop = kwargs.pop('__aio_loop__', None)
props = self.__property_events
events = self.__events
for name, cb in kwargs.items():
if name in props:
e = props[name]
else:
e = events[name]
e.add_listener(cb, __aio_loop__=aio_loop)
def unbind(self, *args):
"""Unsubscribes from events or :class:`~pydispatch.properties.Property` updates
Multiple arguments can be given. Each of which can be either the method
that was used for the original call to :meth:`bind` or an instance
object.
If an instance of an object is supplied, any previously bound Events and
Properties will be 'unbound'.
"""
props = self.__property_events.values()
events = self.__events.values()
for arg in args:
for prop in props:
prop.remove_listener(arg)
for e in events:
e.remove_listener(arg)
def bind_async(self, loop, **kwargs):
"""Subscribes to events with async callbacks
Functionality is matches the :meth:`bind` method, except the provided
callbacks should be coroutine functions. When the event is dispatched,
callbacks will be placed on the given event loop.
For keyword arguments, see :meth:`bind`.
Args:
loop: The :class:`EventLoop <asyncio.BaseEventLoop>` to use when
events are dispatched
Availability:
Python>=3.5
.. versionadded:: 0.1.0
"""
kwargs['__aio_loop__'] = loop
self.bind(**kwargs)
def emit(self, name, *args, **kwargs):
"""Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e(*args, **kwargs)
def emission_lock(self, name):
"""Holds emission of events and dispatches the last event on release
The context manager returned will store the last event data called by
:meth:`emit` and prevent callbacks until it exits. On exit, it will
dispatch the last event captured (if any)::
class Foo(Dispatcher):
_events_ = ['my_event']
def on_my_event(value):
print(value)
foo = Foo()
foo.bind(my_event=on_my_event)
with foo.emission_lock('my_event'):
foo.emit('my_event', 1)
foo.emit('my_event', 2)
>>> 2
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property`
Returns:
A context manager to be used by the :keyword:`with` statement.
If available, this will also be an async context manager to be used
with the :keyword:`async with` statement (see `PEP 492`_).
Note:
The context manager is re-entrant, meaning that multiple calls to
this method within nested context scopes are possible.
.. _PEP 492: https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e.emission_lock
|
nocarryr/python-dispatch
|
pydispatch/dispatch.py
|
Dispatcher.emission_lock
|
python
|
def emission_lock(self, name):
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e.emission_lock
|
Holds emission of events and dispatches the last event on release
The context manager returned will store the last event data called by
:meth:`emit` and prevent callbacks until it exits. On exit, it will
dispatch the last event captured (if any)::
class Foo(Dispatcher):
_events_ = ['my_event']
def on_my_event(value):
print(value)
foo = Foo()
foo.bind(my_event=on_my_event)
with foo.emission_lock('my_event'):
foo.emit('my_event', 1)
foo.emit('my_event', 2)
>>> 2
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property`
Returns:
A context manager to be used by the :keyword:`with` statement.
If available, this will also be an async context manager to be used
with the :keyword:`async with` statement (see `PEP 492`_).
Note:
The context manager is re-entrant, meaning that multiple calls to
this method within nested context scopes are possible.
.. _PEP 492: https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L268-L309
| null |
class Dispatcher(object):
"""Core class used to enable all functionality in the library
Interfaces with :class:`Event` and :class:`~pydispatch.properties.Property`
objects upon instance creation.
Events can be created by calling :meth:`register_event` or by the subclass
definition::
class Foo(Dispatcher):
_events_ = ['awesome_event', 'on_less_awesome_event']
Once defined, an event can be dispatched to listeners by calling :meth:`emit`.
"""
__initialized_subclasses = set()
__skip_initialized = True
def __new__(cls, *args, **kwargs):
def iter_bases(_cls):
if _cls is not object:
yield _cls
for b in _cls.__bases__:
for _cls_ in iter_bases(b):
yield _cls_
skip_initialized = Dispatcher._Dispatcher__skip_initialized
if not skip_initialized or cls not in Dispatcher._Dispatcher__initialized_subclasses:
props = {}
events = set()
for _cls in iter_bases(cls):
for attr in dir(_cls):
prop = getattr(_cls, attr)
if attr not in props and isinstance(prop, Property):
props[attr] = prop
prop.name = attr
_events = getattr(_cls, '_events_', [])
events |= set(_events)
cls._PROPERTIES_ = props
cls._EVENTS_ = events
if skip_initialized:
Dispatcher._Dispatcher__initialized_subclasses.add(cls)
obj = super(Dispatcher, cls).__new__(cls)
obj._Dispatcher__init_events()
return obj
def __init__(self, *args, **kwargs):
# Everything is handled by __new__
# This is only here to prevent exceptions being raised
pass
def __init_events(self):
if hasattr(self, '_Dispatcher__events'):
return
self.__events = {}
for name in self._EVENTS_:
self.__events[name] = Event(name)
self.__property_events = {}
for name, prop in self._PROPERTIES_.items():
self.__property_events[name] = Event(name)
prop._add_instance(self)
def register_event(self, *names):
"""Registers new events after instance creation
Args:
*names (str): Name or names of the events to register
"""
for name in names:
if name in self.__events:
continue
self.__events[name] = Event(name)
def bind(self, **kwargs):
"""Subscribes to events or to :class:`~pydispatch.properties.Property` updates
Keyword arguments are used with the Event or Property names as keys
and the callbacks as values::
class Foo(Dispatcher):
name = Property()
foo = Foo()
foo.bind(name=my_listener.on_foo_name_changed)
foo.bind(name=other_listener.on_name,
value=other_listener.on_value)
The callbacks are stored as weak references and their order is not
maintained relative to the order of binding.
**Async Callbacks**:
Callbacks may be :term:`coroutine functions <coroutine function>`
(defined using :keyword:`async def` or decorated with
:func:`@asyncio.coroutine <asyncio.coroutine>`), but an event loop
must be explicitly provided with the keyword
argument ``"__aio_loop__"`` (an instance of
:class:`asyncio.BaseEventLoop`)::
import asyncio
from pydispatch import Dispatcher
class Foo(Dispatcher):
_events_ = ['test_event']
class Bar(object):
def __init__(self):
self.got_foo_event = asyncio.Event()
async def wait_for_foo(self):
await self.got_foo_event.wait()
print('got foo!')
async def on_foo_test_event(self, *args, **kwargs):
self.got_foo_event.set()
foo = Foo()
bar = Bar()
loop = asyncio.get_event_loop()
foo.bind(test_event=bar.on_foo_test_event, __aio_loop__=loop)
loop.run_until_complete(bar.wait_for_foo())
This can also be done using :meth:`bind_async`.
.. versionadded:: 0.1.0
"""
aio_loop = kwargs.pop('__aio_loop__', None)
props = self.__property_events
events = self.__events
for name, cb in kwargs.items():
if name in props:
e = props[name]
else:
e = events[name]
e.add_listener(cb, __aio_loop__=aio_loop)
def unbind(self, *args):
"""Unsubscribes from events or :class:`~pydispatch.properties.Property` updates
Multiple arguments can be given. Each of which can be either the method
that was used for the original call to :meth:`bind` or an instance
object.
If an instance of an object is supplied, any previously bound Events and
Properties will be 'unbound'.
"""
props = self.__property_events.values()
events = self.__events.values()
for arg in args:
for prop in props:
prop.remove_listener(arg)
for e in events:
e.remove_listener(arg)
def bind_async(self, loop, **kwargs):
"""Subscribes to events with async callbacks
Functionality is matches the :meth:`bind` method, except the provided
callbacks should be coroutine functions. When the event is dispatched,
callbacks will be placed on the given event loop.
For keyword arguments, see :meth:`bind`.
Args:
loop: The :class:`EventLoop <asyncio.BaseEventLoop>` to use when
events are dispatched
Availability:
Python>=3.5
.. versionadded:: 0.1.0
"""
kwargs['__aio_loop__'] = loop
self.bind(**kwargs)
def emit(self, name, *args, **kwargs):
"""Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e(*args, **kwargs)
def get_dispatcher_event(self, name):
"""Retrieves an Event object by name
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property` object to retrieve
Returns:
The :class:`Event` instance for the event or property definition
.. versionadded:: 0.1.0
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e
|
nocarryr/python-dispatch
|
pydispatch/properties.py
|
Property._on_change
|
python
|
def _on_change(self, obj, old, value, **kwargs):
kwargs['property'] = self
obj.emit(self.name, obj, value, old=old, **kwargs)
|
Called internally to emit changes from the instance object
The keyword arguments here will be passed to callbacks through the
instance object's :meth:`~pydispatch.dispatch.Dispatcher.emit` method.
Keyword Args:
property: The :class:`Property` instance. This is useful if multiple
properties are bound to the same callback. The attribute name
keys (optional): If the :class:`Property` is a container type
(:class:`ListProperty` or :class:`DictProperty`), the changes
may be found here.
This is not implemented for nested containers and will only be
available for operations that do not alter the size of the
container.
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/properties.py#L96-L114
| null |
class Property(object):
"""Defined on the class level to create an observable attribute
Args:
default (Optional): If supplied, this will be the default value of the
Property for all instances of the class. Otherwise :obj:`None`
Attributes:
name (str): The name of the Property as defined in the class definition.
This will match the attribute name for the
:class:`~pydispatch.dispatch.Dispatcher` instance.
"""
def __init__(self, default=None):
self._name = ''
self.default = default
self.__storage = {}
self.__weakrefs = InformativeWVDict(del_callback=self._on_weakref_fin)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if self._name != '':
return
self._name = value
def _add_instance(self, obj, default=None):
if default is None:
default = self.default
self.__storage[id(obj)] = self.default
self.__weakrefs[id(obj)] = obj
def _del_instance(self, obj):
del self.__storage[id(obj)]
def _on_weakref_fin(self, obj_id):
if obj_id in self.__storage:
del self.__storage[obj_id]
def __get__(self, obj, objcls=None):
if obj is None:
return self
obj_id = id(obj)
if obj_id not in self.__storage:
self._add_instance(obj)
return self.__storage[obj_id]
def __set__(self, obj, value):
obj_id = id(obj)
if obj_id not in self.__storage:
self._add_instance(obj)
current = self.__storage[obj_id]
if current == value:
return
self.__storage[obj_id] = value
self._on_change(obj, current, value)
def __repr__(self):
return '<{}: {}>'.format(self.__class__, self)
def __str__(self):
return self.name
|
nocarryr/python-dispatch
|
pydispatch/utils.py
|
WeakMethodContainer.add_method
|
python
|
def add_method(self, m, **kwargs):
if isinstance(m, types.FunctionType):
self['function', id(m)] = m
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
self[wrkey] = obj
|
Add an instance method or function
Args:
m: The instance method or function to store
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L49-L60
|
[
"def get_method_vars(m):\n if PY2:\n f = m.im_func\n obj = m.im_self\n else:\n f = m.__func__\n obj = m.__self__\n return f, obj\n"
] |
class WeakMethodContainer(weakref.WeakValueDictionary):
"""Container to store weak references to callbacks
Instance methods are stored using the underlying :term:`function` object
and the instance id (using :func:`id(obj) <id>`) as the key (a two-tuple)
and the object itself as the value. This ensures proper weak referencing.
Functions are stored using the string "function" and the id of the function
as the key (a two-tuple).
"""
def keys(self):
if PY2:
return self.iterkeys()
return super(WeakMethodContainer, self).keys()
def del_method(self, m):
"""Remove an instance method or function if it exists
Args:
m: The instance method or function to remove
"""
if isinstance(m, types.FunctionType) and not iscoroutinefunction(m):
wrkey = ('function', id(m))
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
if wrkey in self:
del self[wrkey]
def del_instance(self, obj):
"""Remove any stored instance methods that belong to an object
Args:
obj: The instance object to remove
"""
to_remove = set()
for wrkey, _obj in self.iter_instances():
if obj is _obj:
to_remove.add(wrkey)
for wrkey in to_remove:
del self[wrkey]
def iter_instances(self):
"""Iterate over the stored objects
Yields:
wrkey: The two-tuple key used to store the object
obj: The instance or function object
"""
for wrkey in set(self.keys()):
obj = self.get(wrkey)
if obj is None:
continue
yield wrkey, obj
def iter_methods(self):
"""Iterate over stored functions and instance methods
Yields:
Instance methods or function objects
"""
for wrkey, obj in self.iter_instances():
f, obj_id = wrkey
if f == 'function':
yield self[wrkey]
else:
yield getattr(obj, f.__name__)
|
nocarryr/python-dispatch
|
pydispatch/utils.py
|
WeakMethodContainer.del_method
|
python
|
def del_method(self, m):
if isinstance(m, types.FunctionType) and not iscoroutinefunction(m):
wrkey = ('function', id(m))
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
if wrkey in self:
del self[wrkey]
|
Remove an instance method or function if it exists
Args:
m: The instance method or function to remove
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L61-L73
|
[
"def get_method_vars(m):\n if PY2:\n f = m.im_func\n obj = m.im_self\n else:\n f = m.__func__\n obj = m.__self__\n return f, obj\n",
"def iscoroutinefunction(obj):\n if AIO_AVAILABLE:\n return asyncio.iscoroutinefunction(obj)\n return False\n"
] |
class WeakMethodContainer(weakref.WeakValueDictionary):
"""Container to store weak references to callbacks
Instance methods are stored using the underlying :term:`function` object
and the instance id (using :func:`id(obj) <id>`) as the key (a two-tuple)
and the object itself as the value. This ensures proper weak referencing.
Functions are stored using the string "function" and the id of the function
as the key (a two-tuple).
"""
def keys(self):
if PY2:
return self.iterkeys()
return super(WeakMethodContainer, self).keys()
def add_method(self, m, **kwargs):
"""Add an instance method or function
Args:
m: The instance method or function to store
"""
if isinstance(m, types.FunctionType):
self['function', id(m)] = m
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
self[wrkey] = obj
def del_instance(self, obj):
"""Remove any stored instance methods that belong to an object
Args:
obj: The instance object to remove
"""
to_remove = set()
for wrkey, _obj in self.iter_instances():
if obj is _obj:
to_remove.add(wrkey)
for wrkey in to_remove:
del self[wrkey]
def iter_instances(self):
"""Iterate over the stored objects
Yields:
wrkey: The two-tuple key used to store the object
obj: The instance or function object
"""
for wrkey in set(self.keys()):
obj = self.get(wrkey)
if obj is None:
continue
yield wrkey, obj
def iter_methods(self):
"""Iterate over stored functions and instance methods
Yields:
Instance methods or function objects
"""
for wrkey, obj in self.iter_instances():
f, obj_id = wrkey
if f == 'function':
yield self[wrkey]
else:
yield getattr(obj, f.__name__)
|
nocarryr/python-dispatch
|
pydispatch/utils.py
|
WeakMethodContainer.del_instance
|
python
|
def del_instance(self, obj):
to_remove = set()
for wrkey, _obj in self.iter_instances():
if obj is _obj:
to_remove.add(wrkey)
for wrkey in to_remove:
del self[wrkey]
|
Remove any stored instance methods that belong to an object
Args:
obj: The instance object to remove
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L74-L85
|
[
"def iter_instances(self):\n \"\"\"Iterate over the stored objects\n\n Yields:\n wrkey: The two-tuple key used to store the object\n obj: The instance or function object\n \"\"\"\n for wrkey in set(self.keys()):\n obj = self.get(wrkey)\n if obj is None:\n continue\n yield wrkey, obj\n"
] |
class WeakMethodContainer(weakref.WeakValueDictionary):
"""Container to store weak references to callbacks
Instance methods are stored using the underlying :term:`function` object
and the instance id (using :func:`id(obj) <id>`) as the key (a two-tuple)
and the object itself as the value. This ensures proper weak referencing.
Functions are stored using the string "function" and the id of the function
as the key (a two-tuple).
"""
def keys(self):
if PY2:
return self.iterkeys()
return super(WeakMethodContainer, self).keys()
def add_method(self, m, **kwargs):
"""Add an instance method or function
Args:
m: The instance method or function to store
"""
if isinstance(m, types.FunctionType):
self['function', id(m)] = m
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
self[wrkey] = obj
def del_method(self, m):
"""Remove an instance method or function if it exists
Args:
m: The instance method or function to remove
"""
if isinstance(m, types.FunctionType) and not iscoroutinefunction(m):
wrkey = ('function', id(m))
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
if wrkey in self:
del self[wrkey]
def iter_instances(self):
"""Iterate over the stored objects
Yields:
wrkey: The two-tuple key used to store the object
obj: The instance or function object
"""
for wrkey in set(self.keys()):
obj = self.get(wrkey)
if obj is None:
continue
yield wrkey, obj
def iter_methods(self):
"""Iterate over stored functions and instance methods
Yields:
Instance methods or function objects
"""
for wrkey, obj in self.iter_instances():
f, obj_id = wrkey
if f == 'function':
yield self[wrkey]
else:
yield getattr(obj, f.__name__)
|
nocarryr/python-dispatch
|
pydispatch/utils.py
|
WeakMethodContainer.iter_instances
|
python
|
def iter_instances(self):
for wrkey in set(self.keys()):
obj = self.get(wrkey)
if obj is None:
continue
yield wrkey, obj
|
Iterate over the stored objects
Yields:
wrkey: The two-tuple key used to store the object
obj: The instance or function object
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L86-L97
|
[
"def keys(self):\n if PY2:\n return self.iterkeys()\n return super(WeakMethodContainer, self).keys()\n"
] |
class WeakMethodContainer(weakref.WeakValueDictionary):
"""Container to store weak references to callbacks
Instance methods are stored using the underlying :term:`function` object
and the instance id (using :func:`id(obj) <id>`) as the key (a two-tuple)
and the object itself as the value. This ensures proper weak referencing.
Functions are stored using the string "function" and the id of the function
as the key (a two-tuple).
"""
def keys(self):
if PY2:
return self.iterkeys()
return super(WeakMethodContainer, self).keys()
def add_method(self, m, **kwargs):
"""Add an instance method or function
Args:
m: The instance method or function to store
"""
if isinstance(m, types.FunctionType):
self['function', id(m)] = m
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
self[wrkey] = obj
def del_method(self, m):
"""Remove an instance method or function if it exists
Args:
m: The instance method or function to remove
"""
if isinstance(m, types.FunctionType) and not iscoroutinefunction(m):
wrkey = ('function', id(m))
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
if wrkey in self:
del self[wrkey]
def del_instance(self, obj):
"""Remove any stored instance methods that belong to an object
Args:
obj: The instance object to remove
"""
to_remove = set()
for wrkey, _obj in self.iter_instances():
if obj is _obj:
to_remove.add(wrkey)
for wrkey in to_remove:
del self[wrkey]
def iter_methods(self):
"""Iterate over stored functions and instance methods
Yields:
Instance methods or function objects
"""
for wrkey, obj in self.iter_instances():
f, obj_id = wrkey
if f == 'function':
yield self[wrkey]
else:
yield getattr(obj, f.__name__)
|
nocarryr/python-dispatch
|
pydispatch/utils.py
|
WeakMethodContainer.iter_methods
|
python
|
def iter_methods(self):
for wrkey, obj in self.iter_instances():
f, obj_id = wrkey
if f == 'function':
yield self[wrkey]
else:
yield getattr(obj, f.__name__)
|
Iterate over stored functions and instance methods
Yields:
Instance methods or function objects
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L98-L109
|
[
"def iter_instances(self):\n \"\"\"Iterate over the stored objects\n\n Yields:\n wrkey: The two-tuple key used to store the object\n obj: The instance or function object\n \"\"\"\n for wrkey in set(self.keys()):\n obj = self.get(wrkey)\n if obj is None:\n continue\n yield wrkey, obj\n"
] |
class WeakMethodContainer(weakref.WeakValueDictionary):
"""Container to store weak references to callbacks
Instance methods are stored using the underlying :term:`function` object
and the instance id (using :func:`id(obj) <id>`) as the key (a two-tuple)
and the object itself as the value. This ensures proper weak referencing.
Functions are stored using the string "function" and the id of the function
as the key (a two-tuple).
"""
def keys(self):
if PY2:
return self.iterkeys()
return super(WeakMethodContainer, self).keys()
def add_method(self, m, **kwargs):
"""Add an instance method or function
Args:
m: The instance method or function to store
"""
if isinstance(m, types.FunctionType):
self['function', id(m)] = m
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
self[wrkey] = obj
def del_method(self, m):
"""Remove an instance method or function if it exists
Args:
m: The instance method or function to remove
"""
if isinstance(m, types.FunctionType) and not iscoroutinefunction(m):
wrkey = ('function', id(m))
else:
f, obj = get_method_vars(m)
wrkey = (f, id(obj))
if wrkey in self:
del self[wrkey]
def del_instance(self, obj):
"""Remove any stored instance methods that belong to an object
Args:
obj: The instance object to remove
"""
to_remove = set()
for wrkey, _obj in self.iter_instances():
if obj is _obj:
to_remove.add(wrkey)
for wrkey in to_remove:
del self[wrkey]
def iter_instances(self):
"""Iterate over the stored objects
Yields:
wrkey: The two-tuple key used to store the object
obj: The instance or function object
"""
for wrkey in set(self.keys()):
obj = self.get(wrkey)
if obj is None:
continue
yield wrkey, obj
|
nocarryr/python-dispatch
|
pydispatch/aioutils.py
|
AioSimpleLock.acquire
|
python
|
def acquire(self, blocking=True, timeout=-1):
result = self.lock.acquire(blocking, timeout)
return result
|
Acquire the :attr:`lock`
Args:
blocking (bool): See :meth:`threading.Lock.acquire`
timeout (float): See :meth:`threading.Lock.acquire`
Returns:
bool: :obj:`True` if the lock was acquired, otherwise :obj:`False`
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L61-L73
| null |
class AioSimpleLock(object):
""":class:`asyncio.Lock` alternative backed by a :class:`threading.Lock`
This is a context manager that supports use in both :keyword:`with` and
:keyword:`async with` context blocks.
Attributes:
lock: Instance of :class:`threading.Lock`
.. versionadded:: 0.1.0
"""
__slots__ = ('lock')
def __init__(self):
self.lock = threading.Lock()
def release(self):
"""Release the :attr:`lock`
"""
self.lock.release()
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args):
self.release()
async def acquire_async(self):
"""Acquire the :attr:`lock` asynchronously
"""
r = self.acquire(blocking=False)
while not r:
await asyncio.sleep(.01)
r = self.acquire(blocking=False)
async def __aenter__(self):
await self.acquire_async()
return self
async def __aexit__(self, *args):
self.release()
|
nocarryr/python-dispatch
|
pydispatch/aioutils.py
|
AioSimpleLock.acquire_async
|
python
|
async def acquire_async(self):
r = self.acquire(blocking=False)
while not r:
await asyncio.sleep(.01)
r = self.acquire(blocking=False)
|
Acquire the :attr:`lock` asynchronously
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L83-L90
|
[
"def acquire(self, blocking=True, timeout=-1):\n \"\"\"Acquire the :attr:`lock`\n\n Args:\n blocking (bool): See :meth:`threading.Lock.acquire`\n timeout (float): See :meth:`threading.Lock.acquire`\n\n Returns:\n bool: :obj:`True` if the lock was acquired, otherwise :obj:`False`\n\n \"\"\"\n result = self.lock.acquire(blocking, timeout)\n return result\n"
] |
class AioSimpleLock(object):
""":class:`asyncio.Lock` alternative backed by a :class:`threading.Lock`
This is a context manager that supports use in both :keyword:`with` and
:keyword:`async with` context blocks.
Attributes:
lock: Instance of :class:`threading.Lock`
.. versionadded:: 0.1.0
"""
__slots__ = ('lock')
def __init__(self):
self.lock = threading.Lock()
def acquire(self, blocking=True, timeout=-1):
"""Acquire the :attr:`lock`
Args:
blocking (bool): See :meth:`threading.Lock.acquire`
timeout (float): See :meth:`threading.Lock.acquire`
Returns:
bool: :obj:`True` if the lock was acquired, otherwise :obj:`False`
"""
result = self.lock.acquire(blocking, timeout)
return result
def release(self):
"""Release the :attr:`lock`
"""
self.lock.release()
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args):
self.release()
async def __aenter__(self):
await self.acquire_async()
return self
async def __aexit__(self, *args):
self.release()
|
nocarryr/python-dispatch
|
pydispatch/aioutils.py
|
AioEventWaiter.trigger
|
python
|
def trigger(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.aio_event.set()
|
Called on event emission and notifies the :meth:`wait` method
Called by :class:`AioEventWaiters` when the
:class:`~pydispatch.dispatch.Event` instance is dispatched.
Positional and keyword arguments are stored as instance attributes for
use in the :meth:`wait` method and :attr:`aio_event` is set.
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L118-L129
| null |
class AioEventWaiter(object):
"""Stores necessary information for a single "waiter"
Used by :class:`AioEventWaiters` to handle :keyword:`awaiting <await>`
an :class:`~pydispatch.dispatch.Event` on a specific
:class:`event loop <asyncio.BaseEventLoop>`
Attributes:
loop: The :class:`EventLoop <asyncio.BaseEventLoop>` instance
aio_event: An :class:`asyncio.Event` used to track event emission
args (list): The positional arguments attached to the event
kwargs (dict): The keyword arguments attached to the event
.. versionadded:: 0.1.0
"""
__slots__ = ('loop', 'aio_event', 'args', 'kwargs')
def __init__(self, loop):
self.loop = loop
self.aio_event = asyncio.Event(loop=loop)
self.args = []
self.kwargs = {}
async def wait(self):
"""Waits for event emission and returns the event parameters
Returns:
args (list): Positional arguments attached to the event
kwargs (dict): Keyword arguments attached to the event
"""
await self.aio_event.wait()
return self.args, self.kwargs
def __await__(self):
task = asyncio.ensure_future(self.wait())
return task.__await__()
|
nocarryr/python-dispatch
|
pydispatch/aioutils.py
|
AioEventWaiters.add_waiter
|
python
|
async def add_waiter(self):
loop = asyncio.get_event_loop()
async with self.lock:
waiter = AioEventWaiter(loop)
self.waiters.add(waiter)
return waiter
|
Add a :class:`AioEventWaiter` to the :attr:`waiters` container
The event loop to use for :attr:`AioEventWaiter.loop` is found in the
current context using :func:`asyncio.get_event_loop`
Returns:
waiter: The created :class:`AioEventWaiter` instance
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L162-L176
| null |
class AioEventWaiters(object):
"""Container used to manage :keyword:`await` use with events
Used by :class:`pydispatch.dispatch.Event` when it is
:keyword:`awaited <await>`
Attributes:
waiters (set): Instances of :class:`AioEventWaiter` currently "awaiting"
the event
lock (AioSimpleLock): A sync/async lock to guard modification to the
:attr:`waiters` container during event emission
.. versionadded:: 0.1.0
"""
__slots__ = ('waiters', 'lock')
def __init__(self):
self.waiters = set()
self.lock = AioSimpleLock()
async def wait(self):
"""Creates a :class:`waiter <AioEventWaiter>` and "awaits" its result
This method is used by :class:`pydispatch.dispatch.Event` instances when
they are "awaited" and is the primary functionality of
:class:`AioEventWaiters` and :class:`AioEventWaiter`.
Returns:
args (list): Positional arguments attached to the event
kwargs (dict): Keyword arguments attached to the event
"""
waiter = await self.add_waiter()
return await waiter
def __await__(self):
task = asyncio.ensure_future(self.wait())
return task.__await__()
def __call__(self, *args, **kwargs):
"""Triggers any stored :class:`waiters <AioEventWaiter>`
Calls :meth:`AioEventWaiter.trigger` method on all instances stored in
:attr:`waiters`. After completion, the :attr:`waiters` are removed.
Args:
*args: Positional arguments to pass to :meth:`AioEventWaiter.trigger`
**kwargs: Keyword arguments to pass to :meth:`AioEventWaiter.trigger`
"""
with self.lock:
for waiter in self.waiters:
waiter.trigger(*args, **kwargs)
self.waiters.clear()
|
nocarryr/python-dispatch
|
pydispatch/aioutils.py
|
AioWeakMethodContainer.add_method
|
python
|
def add_method(self, loop, callback):
f, obj = get_method_vars(callback)
wrkey = (f, id(obj))
self[wrkey] = obj
self.event_loop_map[wrkey] = loop
|
Add a coroutine function
Args:
loop: The :class:`event loop <asyncio.BaseEventLoop>` instance
on which to schedule callbacks
callback: The :term:`coroutine function` to add
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L229-L240
|
[
"def get_method_vars(m):\n if PY2:\n f = m.im_func\n obj = m.im_self\n else:\n f = m.__func__\n obj = m.__self__\n return f, obj\n"
] |
class AioWeakMethodContainer(WeakMethodContainer):
"""Storage for coroutine functions as weak references
.. versionadded:: 0.1.0
"""
def __init__(self):
super().__init__()
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
# Atomic removal is necessary since this function
# can be called asynchronously by the GC
_remove_dead_weakref(self.data, wr.key)
self._on_weakref_fin(wr.key)
self._remove = remove
self.event_loop_map = {}
def iter_instances(self):
"""Iterate over the stored objects
.. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances`
"""
with _IterationGuard(self):
yield from super().iter_instances()
def iter_methods(self):
"""Iterate over stored coroutine functions
Yields:
Stored :term:`coroutine function` objects
.. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances`
"""
for wrkey, obj in self.iter_instances():
f, obj_id = wrkey
loop = self.event_loop_map[wrkey]
m = getattr(obj, f.__name__)
yield loop, m
def _on_weakref_fin(self, key):
if key in self.event_loop_map:
del self.event_loop_map[key]
def submit_coroutine(self, coro, loop):
"""Schedule and await a coroutine on the specified loop
The coroutine is wrapped and scheduled using
:func:`asyncio.run_coroutine_threadsafe`. While the coroutine is
"awaited", the result is not available as method returns immediately.
Args:
coro: The :term:`coroutine` to schedule
loop: The :class:`event loop <asyncio.BaseEventLoop>` on which to
schedule the coroutine
Note:
This method is used internally by :meth:`__call__` and is not meant
to be called directly.
"""
async def _do_call(_coro):
with _IterationGuard(self):
await _coro
asyncio.run_coroutine_threadsafe(_do_call(coro), loop=loop)
def __call__(self, *args, **kwargs):
"""Triggers all stored callbacks (coroutines)
Args:
*args: Positional arguments to pass to callbacks
**kwargs: Keyword arguments to pass to callbacks
"""
for loop, m in self.iter_methods():
coro = m(*args, **kwargs)
self.submit_coroutine(coro, loop)
def __delitem__(self, key):
if key in self.event_loop_map:
del self.event_loop_map[key]
return super().__delitem__(key)
|
nocarryr/python-dispatch
|
pydispatch/aioutils.py
|
AioWeakMethodContainer.iter_methods
|
python
|
def iter_methods(self):
for wrkey, obj in self.iter_instances():
f, obj_id = wrkey
loop = self.event_loop_map[wrkey]
m = getattr(obj, f.__name__)
yield loop, m
|
Iterate over stored coroutine functions
Yields:
Stored :term:`coroutine function` objects
.. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances`
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L248-L260
|
[
"def iter_instances(self):\n \"\"\"Iterate over the stored objects\n\n .. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances`\n \"\"\"\n with _IterationGuard(self):\n yield from super().iter_instances()\n"
] |
class AioWeakMethodContainer(WeakMethodContainer):
"""Storage for coroutine functions as weak references
.. versionadded:: 0.1.0
"""
def __init__(self):
super().__init__()
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
# Atomic removal is necessary since this function
# can be called asynchronously by the GC
_remove_dead_weakref(self.data, wr.key)
self._on_weakref_fin(wr.key)
self._remove = remove
self.event_loop_map = {}
def add_method(self, loop, callback):
"""Add a coroutine function
Args:
loop: The :class:`event loop <asyncio.BaseEventLoop>` instance
on which to schedule callbacks
callback: The :term:`coroutine function` to add
"""
f, obj = get_method_vars(callback)
wrkey = (f, id(obj))
self[wrkey] = obj
self.event_loop_map[wrkey] = loop
def iter_instances(self):
"""Iterate over the stored objects
.. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances`
"""
with _IterationGuard(self):
yield from super().iter_instances()
def _on_weakref_fin(self, key):
if key in self.event_loop_map:
del self.event_loop_map[key]
def submit_coroutine(self, coro, loop):
"""Schedule and await a coroutine on the specified loop
The coroutine is wrapped and scheduled using
:func:`asyncio.run_coroutine_threadsafe`. While the coroutine is
"awaited", the result is not available as method returns immediately.
Args:
coro: The :term:`coroutine` to schedule
loop: The :class:`event loop <asyncio.BaseEventLoop>` on which to
schedule the coroutine
Note:
This method is used internally by :meth:`__call__` and is not meant
to be called directly.
"""
async def _do_call(_coro):
with _IterationGuard(self):
await _coro
asyncio.run_coroutine_threadsafe(_do_call(coro), loop=loop)
def __call__(self, *args, **kwargs):
"""Triggers all stored callbacks (coroutines)
Args:
*args: Positional arguments to pass to callbacks
**kwargs: Keyword arguments to pass to callbacks
"""
for loop, m in self.iter_methods():
coro = m(*args, **kwargs)
self.submit_coroutine(coro, loop)
def __delitem__(self, key):
if key in self.event_loop_map:
del self.event_loop_map[key]
return super().__delitem__(key)
|
nocarryr/python-dispatch
|
pydispatch/aioutils.py
|
AioWeakMethodContainer.submit_coroutine
|
python
|
def submit_coroutine(self, coro, loop):
async def _do_call(_coro):
with _IterationGuard(self):
await _coro
asyncio.run_coroutine_threadsafe(_do_call(coro), loop=loop)
|
Schedule and await a coroutine on the specified loop
The coroutine is wrapped and scheduled using
:func:`asyncio.run_coroutine_threadsafe`. While the coroutine is
"awaited", the result is not available as method returns immediately.
Args:
coro: The :term:`coroutine` to schedule
loop: The :class:`event loop <asyncio.BaseEventLoop>` on which to
schedule the coroutine
Note:
This method is used internally by :meth:`__call__` and is not meant
to be called directly.
|
train
|
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/aioutils.py#L264-L283
|
[
"async def _do_call(_coro):\n with _IterationGuard(self):\n await _coro\n"
] |
class AioWeakMethodContainer(WeakMethodContainer):
"""Storage for coroutine functions as weak references
.. versionadded:: 0.1.0
"""
def __init__(self):
super().__init__()
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
# Atomic removal is necessary since this function
# can be called asynchronously by the GC
_remove_dead_weakref(self.data, wr.key)
self._on_weakref_fin(wr.key)
self._remove = remove
self.event_loop_map = {}
def add_method(self, loop, callback):
"""Add a coroutine function
Args:
loop: The :class:`event loop <asyncio.BaseEventLoop>` instance
on which to schedule callbacks
callback: The :term:`coroutine function` to add
"""
f, obj = get_method_vars(callback)
wrkey = (f, id(obj))
self[wrkey] = obj
self.event_loop_map[wrkey] = loop
def iter_instances(self):
"""Iterate over the stored objects
.. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances`
"""
with _IterationGuard(self):
yield from super().iter_instances()
def iter_methods(self):
"""Iterate over stored coroutine functions
Yields:
Stored :term:`coroutine function` objects
.. seealso:: :meth:`pydispatch.utils.WeakMethodContainer.iter_instances`
"""
for wrkey, obj in self.iter_instances():
f, obj_id = wrkey
loop = self.event_loop_map[wrkey]
m = getattr(obj, f.__name__)
yield loop, m
def _on_weakref_fin(self, key):
if key in self.event_loop_map:
del self.event_loop_map[key]
def __call__(self, *args, **kwargs):
"""Triggers all stored callbacks (coroutines)
Args:
*args: Positional arguments to pass to callbacks
**kwargs: Keyword arguments to pass to callbacks
"""
for loop, m in self.iter_methods():
coro = m(*args, **kwargs)
self.submit_coroutine(coro, loop)
def __delitem__(self, key):
if key in self.event_loop_map:
del self.event_loop_map[key]
return super().__delitem__(key)
|
wroberts/fsed
|
fsed/utils.py
|
open_file
|
python
|
def open_file(filename, mode='rb'):
if (('r' not in mode or hasattr(filename, 'read')) and
(('a' not in mode and 'w' not in mode) or hasattr(filename, 'write')) and
hasattr(filename, '__iter__')):
return filename
elif isinstance(filename, string_type):
if filename == '-' and 'r' in mode:
if PY3:
return sys.stdin.buffer
return sys.stdin
elif filename == '-' and ('w' in mode or 'a' in mode):
if PY3:
return sys.stdout.buffer
return sys.stdout
if filename.lower().count('.zip:'):
assert 'r' in mode
assert filename.count(':') == 1
import zipfile
zipped_file = zipfile.ZipFile(filename.split(':')[0])
unzipped_file = zipped_file.open(filename.split(':')[1], 'r')
zipped_file.close()
return unzipped_file
elif filename.lower().endswith('.gz'):
import gzip
return gzip.open(filename, mode)
elif filename.lower().endswith('.xz'):
import lzma
tmp = lzma.LZMAFile(filename, mode)
dir(tmp)
return tmp
else:
return open(filename, mode)
else:
raise Exception('Unknown type for argument filename')
|
Opens a file for access with the given mode. This function
transparently wraps gzip and xz files as well as normal files.
You can also open zip files using syntax like:
f = utils.open_file('../semcor-parsed.zip:semcor000.txt')
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/utils.py#L15-L55
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
utils.py
(c) Will Roberts 12 December, 2015
Utility functions.
'''
from __future__ import absolute_import
from fsed.compat import PY3, string_type
import sys
|
wroberts/fsed
|
fsed/ahocorasick.py
|
boundary_transform
|
python
|
def boundary_transform(seq, force_edges = True):
'''
Wraps all word transitions with a boundary token character (\x00).
If desired (with ``force_edges`` set to ``True``), this inserts
the boundary character at the beginning and end of the string.
Arguments:
- `seq`:
- `force_edges = True`:
'''
gen = boundary_words(seq)
if force_edges:
gen = boundary_edges(gen)
gen = remove_duplicates(gen)
for char in gen:
yield char
|
Wraps all word transitions with a boundary token character (\x00).
If desired (with ``force_edges`` set to ``True``), this inserts
the boundary character at the beginning and end of the string.
Arguments:
- `seq`:
- `force_edges = True`:
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L404-L419
|
[
"def boundary_words(seq):\n '''\n Wraps all word transitions with a boundary token character (\\x00).\n\n Arguments:\n - `seq`:\n '''\n in_word = None\n for char in seq:\n if char == '\\x00' and in_word is not None:\n in_word = not in_word\n elif char in WHITESPACE_CHARS:\n if in_word is not None and in_word:\n yield '\\x00'\n in_word = False\n else:\n if in_word is not None and not in_word:\n yield '\\x00'\n in_word = True\n yield char\n",
"def boundary_edges(seq):\n '''\n Inserts the boundary token character before and after the given\n string.\n\n Arguments:\n - `seq`:\n '''\n yield '\\x00'\n for char in seq:\n yield char\n yield '\\x00'\n",
"def remove_duplicates(seq):\n '''\n Removes duplicate boundary token characters from the given\n character iterable.\n\n Arguments:\n - `seq`:\n '''\n last_boundary = False\n for char in seq:\n if char == '\\x00':\n if not last_boundary:\n last_boundary = True\n yield char\n else:\n last_boundary = False\n yield char\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
ahocorasick.py
(c) Will Roberts 18 June, 2015
Second attempt writing Aho-Corasick string matching in Python.
'''
from __future__ import absolute_import, print_function, unicode_literals
from collections import deque
# ============================================================
# NODE IN A TRIE
# ============================================================
class TrieNode(dict):
'''A node in a Trie.'''
def __init__(self):
'''Constructor.'''
super(TrieNode, self).__init__()
self._value = None
self.has_value = False
self.depth = 0
self.prefix = ''
self.uplink = None
# suffix is a tuple of (TrieNode, length)
self.suffix = None
# likewise, dict_suffix is a tuple of (TrieNode, length)
self.dict_suffix = None
# longest_prefix is a TrieNode (possibly None) which points to
# the longest prefix of this node that is an accepting state
# in the trie FSM
self.longest_prefix = None
def __unicode__(self):
if self.depth == 0:
return '<ROOT>'
retval = self.prefix
if self.has_value:
retval += ' (value = "{}")'.format(self._value)
if self.has_suffix:
retval += ' (suffix = "{}")'.format(self.suffix.prefix)
if self.has_dict_suffix:
retval += ' (dict_suffix = "{}")'.format(self.dict_suffix.prefix)
return retval
def __repr__(self):
return '<TrieNode prefix "{}" keys {}>'.format(self.prefix, self.keys())
@property
def value(self):
'''Gets this node's value.'''
return self._value
@value.setter
def value(self, newval):
'''Sets this node's value.'''
self._value = newval
self.has_value = True
@property
def has_suffix(self):
'''
Boolean: does this node have a suffix link or not?
'''
return self.suffix is not None
@property
def has_dict_suffix(self):
'''
Boolean: does this node have a dictionary link or not?
'''
return self.dict_suffix is not None
# ============================================================
# BASE TRIE
# ============================================================
class Trie(object):
'''A Trie which stores values.'''
def __init__(self):
'''Constructor.'''
self.root = TrieNode()
def __contains__(self, seq):
current = self.root
for char in seq:
if char not in current:
return False
current = current[char]
return current.has_value
def __getitem__(self, seq):
current = self.root
for char in seq:
if char not in current:
raise KeyError(current.prefix + char)
current = current[char]
if not current.has_value:
raise KeyError(current.prefix)
return current.value
def __setitem__(self, seq, value):
current = self.root
for char in seq:
if char not in current:
new_node = TrieNode()
new_node.uplink = char
new_node.depth = current.depth + 1
new_node.prefix = current.prefix + char
current[char] = new_node
current = current[char]
current.value = value
def dfs(self):
'''
Depth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
'''
yield (self.root, None)
todo = [(self.root[char], self.root) for char in self.root]
while todo:
current, parent = todo.pop()
yield (current, parent)
for char in current:
todo.append((current[char], current))
def bfs(self):
'''
Breadth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
'''
yield (self.root, None)
todo = deque([(self.root[char], self.root) for char in self.root])
while todo:
current, parent = todo.popleft()
yield (current, parent)
for char in current:
todo.append((current[char], current))
def pretty_print_str(self):
'''
Create a string to pretty-print this trie to standard output.
'''
retval = ''
# dfs
todo = [self.root]
while todo:
current = todo.pop()
for char in reversed(sorted(current.keys())):
todo.append(current[char])
indent = ' ' * (current.depth * 2)
retval += indent + current.__unicode__() + '\n'
return retval.rstrip('\n')
def pretty_print(self):
'''
Prints this trie's structure to standard output for debugging.
'''
print(self.pretty_print_str())
# ============================================================
# AHO-CORASICK TRIE
# ============================================================
class AhoCorasickTrie(Trie):
'''A Trie object for performing Aho-Corasick string matching.'''
def __init__(self):
'''Constructor.'''
super(AhoCorasickTrie, self).__init__()
self._suffix_links_set = False
def __setitem__(self, seq, value):
super(AhoCorasickTrie, self).__setitem__(seq, value)
if self._suffix_links_set:
self._reset_suffix_links()
def _reset_suffix_links(self):
'''
Reset all suffix links in all nodes in this trie.
'''
self._suffix_links_set = False
for current, _parent in self.dfs():
current.suffix = None
current.dict_suffix = None
current.longest_prefix = None
def _set_suffix_links(self):
'''
Sets all suffix links in all nodes in this trie.
'''
self._suffix_links_set = True
for current, parent in self.bfs():
# skip the root node
if parent is None:
continue
current.longest_prefix = parent.longest_prefix
if parent.has_value:
current.longest_prefix = parent
# the root doesn't get a suffix link
# also, skip previously set suffix links
if current.has_suffix:
continue
# current is not the root and has no suffix
# set current's suffix to parent's suffix
suffix = parent
while True:
if not suffix.has_suffix:
current.suffix = self.root
break
else:
suffix = suffix.suffix
if current.uplink in suffix:
current.suffix = suffix[current.uplink]
break
# now find the dict_suffix value
suffix = current.suffix
while not suffix.has_value and suffix.has_suffix:
suffix = suffix.suffix
if suffix.has_value:
current.dict_suffix = suffix
def find_all(self, seq):
'''
Generator expression. Yields tuples of `(begin, length, value)`,
where:
- `begin` is an integer identifying the character index where
the match begins (0-based)
- `length` is an integer indicating the length of the match (1
or more characters)
- `value` is the value of the matching node in the trie
'''
if not self._suffix_links_set:
self._set_suffix_links()
current = self.root
for pos, char in enumerate(seq):
# find a state where we can transition on char
while char not in current and current.has_suffix:
current = current.suffix
if char in current:
# transition
current = current[char]
else:
# we must be at the root node
assert current is self.root
# throw out the char without doing anything
# pass
# now perform any matching on the current node
if current.has_value:
yield (1 + pos - current.depth, current.depth, current.value)
dict_suffix = current
while dict_suffix.has_dict_suffix:
dict_suffix = dict_suffix.dict_suffix
yield (1 + pos - dict_suffix.depth, dict_suffix.depth, dict_suffix.value)
def replace(self, seq):
'''
Performs search and replace on the given input string `seq` using
the values stored in this trie. This method uses a O(n**2)
chart-parsing algorithm to find the optimal way of replacing
matches in the input.
Arguments:
- `seq`:
'''
# #1: seq must be stored in a container with a len() function
seq = list(seq)
# chart is a (n-1) X (n) table
# chart[0] represents all matches of length (0+1) = 1
# chart[n-1] represents all matches/rewrites of length (n-1+1) = n
# chart[0][0] represents a match of length 1 starting at character 0
# chart[0][n-1] represents a match of length 1 starting at character n-1
# cells in the chart are tuples:
# (score, list)
# we initialise chart by filling in row 0:
# each cell gets assigned (0, char), where char is the character at
# the corresponding position in the input string
chart = [ [None for _i in range(len(seq)) ] for _i in range(len(seq)) ]
chart[0] = [(0, char) for char in seq]
# now we fill in the chart using the results from the aho-corasick
# string matches
for (begin, length, value) in self.find_all(seq):
chart[length-1][begin] = (length, value)
# now we need to fill in the chart row by row, starting with row 1
for row in range(1, len(chart)):
# each row is 1 cell shorter than the last
for col in range(len(seq) - row):
# the entry in [row][col] is the choice with the highest score; to
# find this, we must search the possible partitions of the cell
#
# things on row 2 have only one possible partition: 1 + 1
# things on row 3 have two: 1 + 2, 2 + 1
# things on row 4 have three: 1+3, 3+1, 2+2
#
# we assume that any pre-existing entry found by aho-corasick
# in a cell is already optimal
#print('scanning [{}][{}]'.format(row, col))
if chart[row][col] is not None:
continue
# chart[1][2] is the cell of matches of length 2 starting at
# character position 2;
# it can only be composed of chart[0][2] + chart[0][3]
#
# partition_point is the length of the first of the two parts
# of the cell
#print('cell[{}][{}] => '.format(row, col))
best_score = -1
best_value = None
for partition_point in range(row):
# the two cells will be [partition_point][col] and
# [row - partition_point - 2][col+partition_point+1]
x1 = partition_point
y1 = col
x2 = row - partition_point - 1
y2 = col + partition_point + 1
#print(' [{}][{}] + [{}][{}]'.format(x1, y1, x2, y2))
s1, v1 = chart[x1][y1]
s2, v2 = chart[x2][y2]
# compute the score
score = s1 + s2
#print(' = {} + {}'.format((s1, v1), (s2, v2)))
#print(' = score {}'.format(score))
if best_score < score:
best_score = score
best_value = v1 + v2
chart[row][col] = (best_score, best_value)
#print(' sets new best score with value {}'.format(
# best_value))
# now the optimal solution is stored at the top of the chart
return chart[len(seq)-1][0][1]
def greedy_replace(self, seq):
'''
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on
'''
if not self._suffix_links_set:
self._set_suffix_links()
# start at the root
current = self.root
buffered = ''
outstr = ''
for char in seq:
while char not in current:
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
break
elif current.has_suffix:
current = current.suffix
if current.depth:
outstr += buffered[:-current.depth]
buffered = buffered[-current.depth:]
else:
outstr += buffered
buffered = ''
break
else:
current = self.root
outstr += buffered
buffered = ''
break
if char in current:
buffered += char
current = current[char]
if current.has_value:
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
else:
assert current is self.root
outstr += buffered + char
buffered = ''
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
else:
outstr += buffered
return outstr
# ============================================================
# WORD BOUNDARY DETECTION
# ============================================================
WHITESPACE_CHARS = ' \t\v\r\n'
def boundary_words(seq):
'''
Wraps all word transitions with a boundary token character (\x00).
Arguments:
- `seq`:
'''
in_word = None
for char in seq:
if char == '\x00' and in_word is not None:
in_word = not in_word
elif char in WHITESPACE_CHARS:
if in_word is not None and in_word:
yield '\x00'
in_word = False
else:
if in_word is not None and not in_word:
yield '\x00'
in_word = True
yield char
def boundary_edges(seq):
'''
Inserts the boundary token character before and after the given
string.
Arguments:
- `seq`:
'''
yield '\x00'
for char in seq:
yield char
yield '\x00'
def remove_duplicates(seq):
'''
Removes duplicate boundary token characters from the given
character iterable.
Arguments:
- `seq`:
'''
last_boundary = False
for char in seq:
if char == '\x00':
if not last_boundary:
last_boundary = True
yield char
else:
last_boundary = False
yield char
def boundary_untransform(seq):
'''
Removes boundary token characters from the given character
iterable.
Arguments:
- `seq`:
'''
for char in seq:
if char != '\x00':
yield char
|
wroberts/fsed
|
fsed/ahocorasick.py
|
boundary_words
|
python
|
def boundary_words(seq):
'''
Wraps all word transitions with a boundary token character (\x00).
Arguments:
- `seq`:
'''
in_word = None
for char in seq:
if char == '\x00' and in_word is not None:
in_word = not in_word
elif char in WHITESPACE_CHARS:
if in_word is not None and in_word:
yield '\x00'
in_word = False
else:
if in_word is not None and not in_word:
yield '\x00'
in_word = True
yield char
|
Wraps all word transitions with a boundary token character (\x00).
Arguments:
- `seq`:
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L421-L440
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
ahocorasick.py
(c) Will Roberts 18 June, 2015
Second attempt writing Aho-Corasick string matching in Python.
'''
from __future__ import absolute_import, print_function, unicode_literals
from collections import deque
# ============================================================
# NODE IN A TRIE
# ============================================================
class TrieNode(dict):
'''A node in a Trie.'''
def __init__(self):
'''Constructor.'''
super(TrieNode, self).__init__()
self._value = None
self.has_value = False
self.depth = 0
self.prefix = ''
self.uplink = None
# suffix is a tuple of (TrieNode, length)
self.suffix = None
# likewise, dict_suffix is a tuple of (TrieNode, length)
self.dict_suffix = None
# longest_prefix is a TrieNode (possibly None) which points to
# the longest prefix of this node that is an accepting state
# in the trie FSM
self.longest_prefix = None
def __unicode__(self):
if self.depth == 0:
return '<ROOT>'
retval = self.prefix
if self.has_value:
retval += ' (value = "{}")'.format(self._value)
if self.has_suffix:
retval += ' (suffix = "{}")'.format(self.suffix.prefix)
if self.has_dict_suffix:
retval += ' (dict_suffix = "{}")'.format(self.dict_suffix.prefix)
return retval
def __repr__(self):
return '<TrieNode prefix "{}" keys {}>'.format(self.prefix, self.keys())
@property
def value(self):
'''Gets this node's value.'''
return self._value
@value.setter
def value(self, newval):
'''Sets this node's value.'''
self._value = newval
self.has_value = True
@property
def has_suffix(self):
'''
Boolean: does this node have a suffix link or not?
'''
return self.suffix is not None
@property
def has_dict_suffix(self):
'''
Boolean: does this node have a dictionary link or not?
'''
return self.dict_suffix is not None
# ============================================================
# BASE TRIE
# ============================================================
class Trie(object):
'''A Trie which stores values.'''
def __init__(self):
'''Constructor.'''
self.root = TrieNode()
def __contains__(self, seq):
current = self.root
for char in seq:
if char not in current:
return False
current = current[char]
return current.has_value
def __getitem__(self, seq):
current = self.root
for char in seq:
if char not in current:
raise KeyError(current.prefix + char)
current = current[char]
if not current.has_value:
raise KeyError(current.prefix)
return current.value
def __setitem__(self, seq, value):
current = self.root
for char in seq:
if char not in current:
new_node = TrieNode()
new_node.uplink = char
new_node.depth = current.depth + 1
new_node.prefix = current.prefix + char
current[char] = new_node
current = current[char]
current.value = value
def dfs(self):
'''
Depth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
'''
yield (self.root, None)
todo = [(self.root[char], self.root) for char in self.root]
while todo:
current, parent = todo.pop()
yield (current, parent)
for char in current:
todo.append((current[char], current))
def bfs(self):
'''
Breadth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
'''
yield (self.root, None)
todo = deque([(self.root[char], self.root) for char in self.root])
while todo:
current, parent = todo.popleft()
yield (current, parent)
for char in current:
todo.append((current[char], current))
def pretty_print_str(self):
'''
Create a string to pretty-print this trie to standard output.
'''
retval = ''
# dfs
todo = [self.root]
while todo:
current = todo.pop()
for char in reversed(sorted(current.keys())):
todo.append(current[char])
indent = ' ' * (current.depth * 2)
retval += indent + current.__unicode__() + '\n'
return retval.rstrip('\n')
def pretty_print(self):
'''
Prints this trie's structure to standard output for debugging.
'''
print(self.pretty_print_str())
# ============================================================
# AHO-CORASICK TRIE
# ============================================================
class AhoCorasickTrie(Trie):
'''A Trie object for performing Aho-Corasick string matching.'''
def __init__(self):
'''Constructor.'''
super(AhoCorasickTrie, self).__init__()
self._suffix_links_set = False
def __setitem__(self, seq, value):
super(AhoCorasickTrie, self).__setitem__(seq, value)
if self._suffix_links_set:
self._reset_suffix_links()
def _reset_suffix_links(self):
'''
Reset all suffix links in all nodes in this trie.
'''
self._suffix_links_set = False
for current, _parent in self.dfs():
current.suffix = None
current.dict_suffix = None
current.longest_prefix = None
def _set_suffix_links(self):
'''
Sets all suffix links in all nodes in this trie.
'''
self._suffix_links_set = True
for current, parent in self.bfs():
# skip the root node
if parent is None:
continue
current.longest_prefix = parent.longest_prefix
if parent.has_value:
current.longest_prefix = parent
# the root doesn't get a suffix link
# also, skip previously set suffix links
if current.has_suffix:
continue
# current is not the root and has no suffix
# set current's suffix to parent's suffix
suffix = parent
while True:
if not suffix.has_suffix:
current.suffix = self.root
break
else:
suffix = suffix.suffix
if current.uplink in suffix:
current.suffix = suffix[current.uplink]
break
# now find the dict_suffix value
suffix = current.suffix
while not suffix.has_value and suffix.has_suffix:
suffix = suffix.suffix
if suffix.has_value:
current.dict_suffix = suffix
def find_all(self, seq):
'''
Generator expression. Yields tuples of `(begin, length, value)`,
where:
- `begin` is an integer identifying the character index where
the match begins (0-based)
- `length` is an integer indicating the length of the match (1
or more characters)
- `value` is the value of the matching node in the trie
'''
if not self._suffix_links_set:
self._set_suffix_links()
current = self.root
for pos, char in enumerate(seq):
# find a state where we can transition on char
while char not in current and current.has_suffix:
current = current.suffix
if char in current:
# transition
current = current[char]
else:
# we must be at the root node
assert current is self.root
# throw out the char without doing anything
# pass
# now perform any matching on the current node
if current.has_value:
yield (1 + pos - current.depth, current.depth, current.value)
dict_suffix = current
while dict_suffix.has_dict_suffix:
dict_suffix = dict_suffix.dict_suffix
yield (1 + pos - dict_suffix.depth, dict_suffix.depth, dict_suffix.value)
def replace(self, seq):
'''
Performs search and replace on the given input string `seq` using
the values stored in this trie. This method uses a O(n**2)
chart-parsing algorithm to find the optimal way of replacing
matches in the input.
Arguments:
- `seq`:
'''
# #1: seq must be stored in a container with a len() function
seq = list(seq)
# chart is a (n-1) X (n) table
# chart[0] represents all matches of length (0+1) = 1
# chart[n-1] represents all matches/rewrites of length (n-1+1) = n
# chart[0][0] represents a match of length 1 starting at character 0
# chart[0][n-1] represents a match of length 1 starting at character n-1
# cells in the chart are tuples:
# (score, list)
# we initialise chart by filling in row 0:
# each cell gets assigned (0, char), where char is the character at
# the corresponding position in the input string
chart = [ [None for _i in range(len(seq)) ] for _i in range(len(seq)) ]
chart[0] = [(0, char) for char in seq]
# now we fill in the chart using the results from the aho-corasick
# string matches
for (begin, length, value) in self.find_all(seq):
chart[length-1][begin] = (length, value)
# now we need to fill in the chart row by row, starting with row 1
for row in range(1, len(chart)):
# each row is 1 cell shorter than the last
for col in range(len(seq) - row):
# the entry in [row][col] is the choice with the highest score; to
# find this, we must search the possible partitions of the cell
#
# things on row 2 have only one possible partition: 1 + 1
# things on row 3 have two: 1 + 2, 2 + 1
# things on row 4 have three: 1+3, 3+1, 2+2
#
# we assume that any pre-existing entry found by aho-corasick
# in a cell is already optimal
#print('scanning [{}][{}]'.format(row, col))
if chart[row][col] is not None:
continue
# chart[1][2] is the cell of matches of length 2 starting at
# character position 2;
# it can only be composed of chart[0][2] + chart[0][3]
#
# partition_point is the length of the first of the two parts
# of the cell
#print('cell[{}][{}] => '.format(row, col))
best_score = -1
best_value = None
for partition_point in range(row):
# the two cells will be [partition_point][col] and
# [row - partition_point - 2][col+partition_point+1]
x1 = partition_point
y1 = col
x2 = row - partition_point - 1
y2 = col + partition_point + 1
#print(' [{}][{}] + [{}][{}]'.format(x1, y1, x2, y2))
s1, v1 = chart[x1][y1]
s2, v2 = chart[x2][y2]
# compute the score
score = s1 + s2
#print(' = {} + {}'.format((s1, v1), (s2, v2)))
#print(' = score {}'.format(score))
if best_score < score:
best_score = score
best_value = v1 + v2
chart[row][col] = (best_score, best_value)
#print(' sets new best score with value {}'.format(
# best_value))
# now the optimal solution is stored at the top of the chart
return chart[len(seq)-1][0][1]
def greedy_replace(self, seq):
'''
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on
'''
if not self._suffix_links_set:
self._set_suffix_links()
# start at the root
current = self.root
buffered = ''
outstr = ''
for char in seq:
while char not in current:
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
break
elif current.has_suffix:
current = current.suffix
if current.depth:
outstr += buffered[:-current.depth]
buffered = buffered[-current.depth:]
else:
outstr += buffered
buffered = ''
break
else:
current = self.root
outstr += buffered
buffered = ''
break
if char in current:
buffered += char
current = current[char]
if current.has_value:
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
else:
assert current is self.root
outstr += buffered + char
buffered = ''
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
else:
outstr += buffered
return outstr
# ============================================================
# WORD BOUNDARY DETECTION
# ============================================================
WHITESPACE_CHARS = ' \t\v\r\n'
def boundary_transform(seq, force_edges = True):
'''
Wraps all word transitions with a boundary token character (\x00).
If desired (with ``force_edges`` set to ``True``), this inserts
the boundary character at the beginning and end of the string.
Arguments:
- `seq`:
- `force_edges = True`:
'''
gen = boundary_words(seq)
if force_edges:
gen = boundary_edges(gen)
gen = remove_duplicates(gen)
for char in gen:
yield char
def boundary_edges(seq):
'''
Inserts the boundary token character before and after the given
string.
Arguments:
- `seq`:
'''
yield '\x00'
for char in seq:
yield char
yield '\x00'
def remove_duplicates(seq):
'''
Removes duplicate boundary token characters from the given
character iterable.
Arguments:
- `seq`:
'''
last_boundary = False
for char in seq:
if char == '\x00':
if not last_boundary:
last_boundary = True
yield char
else:
last_boundary = False
yield char
def boundary_untransform(seq):
'''
Removes boundary token characters from the given character
iterable.
Arguments:
- `seq`:
'''
for char in seq:
if char != '\x00':
yield char
|
wroberts/fsed
|
fsed/ahocorasick.py
|
remove_duplicates
|
python
|
def remove_duplicates(seq):
'''
Removes duplicate boundary token characters from the given
character iterable.
Arguments:
- `seq`:
'''
last_boundary = False
for char in seq:
if char == '\x00':
if not last_boundary:
last_boundary = True
yield char
else:
last_boundary = False
yield char
|
Removes duplicate boundary token characters from the given
character iterable.
Arguments:
- `seq`:
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L455-L471
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
ahocorasick.py
(c) Will Roberts 18 June, 2015
Second attempt writing Aho-Corasick string matching in Python.
'''
from __future__ import absolute_import, print_function, unicode_literals
from collections import deque
# ============================================================
# NODE IN A TRIE
# ============================================================
class TrieNode(dict):
'''A node in a Trie.'''
def __init__(self):
'''Constructor.'''
super(TrieNode, self).__init__()
self._value = None
self.has_value = False
self.depth = 0
self.prefix = ''
self.uplink = None
# suffix is a tuple of (TrieNode, length)
self.suffix = None
# likewise, dict_suffix is a tuple of (TrieNode, length)
self.dict_suffix = None
# longest_prefix is a TrieNode (possibly None) which points to
# the longest prefix of this node that is an accepting state
# in the trie FSM
self.longest_prefix = None
def __unicode__(self):
if self.depth == 0:
return '<ROOT>'
retval = self.prefix
if self.has_value:
retval += ' (value = "{}")'.format(self._value)
if self.has_suffix:
retval += ' (suffix = "{}")'.format(self.suffix.prefix)
if self.has_dict_suffix:
retval += ' (dict_suffix = "{}")'.format(self.dict_suffix.prefix)
return retval
def __repr__(self):
return '<TrieNode prefix "{}" keys {}>'.format(self.prefix, self.keys())
@property
def value(self):
'''Gets this node's value.'''
return self._value
@value.setter
def value(self, newval):
'''Sets this node's value.'''
self._value = newval
self.has_value = True
@property
def has_suffix(self):
'''
Boolean: does this node have a suffix link or not?
'''
return self.suffix is not None
@property
def has_dict_suffix(self):
'''
Boolean: does this node have a dictionary link or not?
'''
return self.dict_suffix is not None
# ============================================================
# BASE TRIE
# ============================================================
class Trie(object):
'''A Trie which stores values.'''
def __init__(self):
'''Constructor.'''
self.root = TrieNode()
def __contains__(self, seq):
current = self.root
for char in seq:
if char not in current:
return False
current = current[char]
return current.has_value
def __getitem__(self, seq):
current = self.root
for char in seq:
if char not in current:
raise KeyError(current.prefix + char)
current = current[char]
if not current.has_value:
raise KeyError(current.prefix)
return current.value
def __setitem__(self, seq, value):
current = self.root
for char in seq:
if char not in current:
new_node = TrieNode()
new_node.uplink = char
new_node.depth = current.depth + 1
new_node.prefix = current.prefix + char
current[char] = new_node
current = current[char]
current.value = value
def dfs(self):
'''
Depth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
'''
yield (self.root, None)
todo = [(self.root[char], self.root) for char in self.root]
while todo:
current, parent = todo.pop()
yield (current, parent)
for char in current:
todo.append((current[char], current))
def bfs(self):
'''
Breadth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
'''
yield (self.root, None)
todo = deque([(self.root[char], self.root) for char in self.root])
while todo:
current, parent = todo.popleft()
yield (current, parent)
for char in current:
todo.append((current[char], current))
def pretty_print_str(self):
'''
Create a string to pretty-print this trie to standard output.
'''
retval = ''
# dfs
todo = [self.root]
while todo:
current = todo.pop()
for char in reversed(sorted(current.keys())):
todo.append(current[char])
indent = ' ' * (current.depth * 2)
retval += indent + current.__unicode__() + '\n'
return retval.rstrip('\n')
def pretty_print(self):
'''
Prints this trie's structure to standard output for debugging.
'''
print(self.pretty_print_str())
# ============================================================
# AHO-CORASICK TRIE
# ============================================================
class AhoCorasickTrie(Trie):
'''A Trie object for performing Aho-Corasick string matching.'''
def __init__(self):
'''Constructor.'''
super(AhoCorasickTrie, self).__init__()
self._suffix_links_set = False
def __setitem__(self, seq, value):
super(AhoCorasickTrie, self).__setitem__(seq, value)
if self._suffix_links_set:
self._reset_suffix_links()
def _reset_suffix_links(self):
'''
Reset all suffix links in all nodes in this trie.
'''
self._suffix_links_set = False
for current, _parent in self.dfs():
current.suffix = None
current.dict_suffix = None
current.longest_prefix = None
def _set_suffix_links(self):
'''
Sets all suffix links in all nodes in this trie.
'''
self._suffix_links_set = True
for current, parent in self.bfs():
# skip the root node
if parent is None:
continue
current.longest_prefix = parent.longest_prefix
if parent.has_value:
current.longest_prefix = parent
# the root doesn't get a suffix link
# also, skip previously set suffix links
if current.has_suffix:
continue
# current is not the root and has no suffix
# set current's suffix to parent's suffix
suffix = parent
while True:
if not suffix.has_suffix:
current.suffix = self.root
break
else:
suffix = suffix.suffix
if current.uplink in suffix:
current.suffix = suffix[current.uplink]
break
# now find the dict_suffix value
suffix = current.suffix
while not suffix.has_value and suffix.has_suffix:
suffix = suffix.suffix
if suffix.has_value:
current.dict_suffix = suffix
def find_all(self, seq):
'''
Generator expression. Yields tuples of `(begin, length, value)`,
where:
- `begin` is an integer identifying the character index where
the match begins (0-based)
- `length` is an integer indicating the length of the match (1
or more characters)
- `value` is the value of the matching node in the trie
'''
if not self._suffix_links_set:
self._set_suffix_links()
current = self.root
for pos, char in enumerate(seq):
# find a state where we can transition on char
while char not in current and current.has_suffix:
current = current.suffix
if char in current:
# transition
current = current[char]
else:
# we must be at the root node
assert current is self.root
# throw out the char without doing anything
# pass
# now perform any matching on the current node
if current.has_value:
yield (1 + pos - current.depth, current.depth, current.value)
dict_suffix = current
while dict_suffix.has_dict_suffix:
dict_suffix = dict_suffix.dict_suffix
yield (1 + pos - dict_suffix.depth, dict_suffix.depth, dict_suffix.value)
def replace(self, seq):
'''
Performs search and replace on the given input string `seq` using
the values stored in this trie. This method uses a O(n**2)
chart-parsing algorithm to find the optimal way of replacing
matches in the input.
Arguments:
- `seq`:
'''
# #1: seq must be stored in a container with a len() function
seq = list(seq)
# chart is a (n-1) X (n) table
# chart[0] represents all matches of length (0+1) = 1
# chart[n-1] represents all matches/rewrites of length (n-1+1) = n
# chart[0][0] represents a match of length 1 starting at character 0
# chart[0][n-1] represents a match of length 1 starting at character n-1
# cells in the chart are tuples:
# (score, list)
# we initialise chart by filling in row 0:
# each cell gets assigned (0, char), where char is the character at
# the corresponding position in the input string
chart = [ [None for _i in range(len(seq)) ] for _i in range(len(seq)) ]
chart[0] = [(0, char) for char in seq]
# now we fill in the chart using the results from the aho-corasick
# string matches
for (begin, length, value) in self.find_all(seq):
chart[length-1][begin] = (length, value)
# now we need to fill in the chart row by row, starting with row 1
for row in range(1, len(chart)):
# each row is 1 cell shorter than the last
for col in range(len(seq) - row):
# the entry in [row][col] is the choice with the highest score; to
# find this, we must search the possible partitions of the cell
#
# things on row 2 have only one possible partition: 1 + 1
# things on row 3 have two: 1 + 2, 2 + 1
# things on row 4 have three: 1+3, 3+1, 2+2
#
# we assume that any pre-existing entry found by aho-corasick
# in a cell is already optimal
#print('scanning [{}][{}]'.format(row, col))
if chart[row][col] is not None:
continue
# chart[1][2] is the cell of matches of length 2 starting at
# character position 2;
# it can only be composed of chart[0][2] + chart[0][3]
#
# partition_point is the length of the first of the two parts
# of the cell
#print('cell[{}][{}] => '.format(row, col))
best_score = -1
best_value = None
for partition_point in range(row):
# the two cells will be [partition_point][col] and
# [row - partition_point - 2][col+partition_point+1]
x1 = partition_point
y1 = col
x2 = row - partition_point - 1
y2 = col + partition_point + 1
#print(' [{}][{}] + [{}][{}]'.format(x1, y1, x2, y2))
s1, v1 = chart[x1][y1]
s2, v2 = chart[x2][y2]
# compute the score
score = s1 + s2
#print(' = {} + {}'.format((s1, v1), (s2, v2)))
#print(' = score {}'.format(score))
if best_score < score:
best_score = score
best_value = v1 + v2
chart[row][col] = (best_score, best_value)
#print(' sets new best score with value {}'.format(
# best_value))
# now the optimal solution is stored at the top of the chart
return chart[len(seq)-1][0][1]
def greedy_replace(self, seq):
'''
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on
'''
if not self._suffix_links_set:
self._set_suffix_links()
# start at the root
current = self.root
buffered = ''
outstr = ''
for char in seq:
while char not in current:
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
break
elif current.has_suffix:
current = current.suffix
if current.depth:
outstr += buffered[:-current.depth]
buffered = buffered[-current.depth:]
else:
outstr += buffered
buffered = ''
break
else:
current = self.root
outstr += buffered
buffered = ''
break
if char in current:
buffered += char
current = current[char]
if current.has_value:
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
else:
assert current is self.root
outstr += buffered + char
buffered = ''
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
else:
outstr += buffered
return outstr
# ============================================================
# WORD BOUNDARY DETECTION
# ============================================================
WHITESPACE_CHARS = ' \t\v\r\n'
def boundary_transform(seq, force_edges = True):
'''
Wraps all word transitions with a boundary token character (\x00).
If desired (with ``force_edges`` set to ``True``), this inserts
the boundary character at the beginning and end of the string.
Arguments:
- `seq`:
- `force_edges = True`:
'''
gen = boundary_words(seq)
if force_edges:
gen = boundary_edges(gen)
gen = remove_duplicates(gen)
for char in gen:
yield char
def boundary_words(seq):
'''
Wraps all word transitions with a boundary token character (\x00).
Arguments:
- `seq`:
'''
in_word = None
for char in seq:
if char == '\x00' and in_word is not None:
in_word = not in_word
elif char in WHITESPACE_CHARS:
if in_word is not None and in_word:
yield '\x00'
in_word = False
else:
if in_word is not None and not in_word:
yield '\x00'
in_word = True
yield char
def boundary_edges(seq):
    '''
    Yield the boundary token character (``'\\x00'``), then every
    character of ``seq``, then one final boundary token character.

    Arguments:
    - `seq`: an iterable of characters
    '''
    yield '\x00'
    yield from seq
    yield '\x00'
def boundary_untransform(seq):
    '''
    Yield the characters of ``seq`` with every boundary token
    character (``'\\x00'``) removed.

    Arguments:
    - `seq`: an iterable of characters
    '''
    yield from (ch for ch in seq if ch != '\x00')
|
wroberts/fsed
|
fsed/ahocorasick.py
|
Trie.dfs
|
python
|
def dfs(self):
    '''
    Depth-first search generator.  Yields a ``(node, parent)`` pair for
    every node in the tree, starting with ``(self.root, None)``.
    '''
    # explicit stack instead of recursion; seeding it with the root
    # yields the root first, then its subtrees in LIFO order
    stack = [(self.root, None)]
    while stack:
        node, parent = stack.pop()
        yield (node, parent)
        for key in node:
            stack.append((node[key], node))
|
Depth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L121-L132
| null |
class Trie(object):
    '''A Trie which stores values.'''
    def __init__(self):
        '''Constructor.'''
        # the root node represents the empty prefix
        self.root = TrieNode()
    def __contains__(self, seq):
        '''True iff the complete key ``seq`` is stored with a value.'''
        current = self.root
        for char in seq:
            if char not in current:
                return False
            current = current[char]
        # a reachable node only counts as stored if a value was set on it
        return current.has_value
    def __getitem__(self, seq):
        '''Return the value stored under ``seq``; raise KeyError if absent.'''
        current = self.root
        for char in seq:
            if char not in current:
                # report the longest walkable prefix plus the failing char
                raise KeyError(current.prefix + char)
            current = current[char]
        if not current.has_value:
            raise KeyError(current.prefix)
        return current.value
    def __setitem__(self, seq, value):
        '''Store ``value`` under the key ``seq``, creating nodes as needed.'''
        current = self.root
        for char in seq:
            if char not in current:
                # extend the trie: record the edge character, depth and
                # accumulated prefix on the newly created node
                new_node = TrieNode()
                new_node.uplink = char
                new_node.depth = current.depth + 1
                new_node.prefix = current.prefix + char
                current[char] = new_node
            current = current[char]
        current.value = value
    def bfs(self):
        '''
        Breadth-first search generator. Yields `(node, parent)` for every
        node in the tree, beginning with `(self.root, None)`.
        '''
        yield (self.root, None)
        todo = deque([(self.root[char], self.root) for char in self.root])
        while todo:
            current, parent = todo.popleft()
            yield (current, parent)
            for char in current:
                todo.append((current[char], current))
    def pretty_print_str(self):
        '''
        Create a string to pretty-print this trie to standard output.
        '''
        retval = ''
        # dfs; children are pushed in reverse sorted order so they are
        # popped (and printed) in sorted order
        todo = [self.root]
        while todo:
            current = todo.pop()
            for char in reversed(sorted(current.keys())):
                todo.append(current[char])
            indent = ' ' * (current.depth * 2)
            retval += indent + current.__unicode__() + '\n'
        return retval.rstrip('\n')
    def pretty_print(self):
        '''
        Prints this trie's structure to standard output for debugging.
        '''
        print(self.pretty_print_str())
|
wroberts/fsed
|
fsed/ahocorasick.py
|
Trie.bfs
|
python
|
def bfs(self):
    '''
    Breadth-first search generator.  Yields a ``(node, parent)`` pair
    for every node in the tree, beginning with ``(self.root, None)``.
    '''
    # seed the queue with the root itself; popping from the left gives
    # level-by-level order
    queue = deque([(self.root, None)])
    while queue:
        node, parent = queue.popleft()
        yield (node, parent)
        for key in node:
            queue.append((node[key], node))
|
Breadth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L134-L145
| null |
class Trie(object):
'''A Trie which stores values.'''
def __init__(self):
'''Constructor.'''
self.root = TrieNode()
def __contains__(self, seq):
current = self.root
for char in seq:
if char not in current:
return False
current = current[char]
return current.has_value
def __getitem__(self, seq):
current = self.root
for char in seq:
if char not in current:
raise KeyError(current.prefix + char)
current = current[char]
if not current.has_value:
raise KeyError(current.prefix)
return current.value
def __setitem__(self, seq, value):
current = self.root
for char in seq:
if char not in current:
new_node = TrieNode()
new_node.uplink = char
new_node.depth = current.depth + 1
new_node.prefix = current.prefix + char
current[char] = new_node
current = current[char]
current.value = value
def dfs(self):
'''
Depth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
'''
yield (self.root, None)
todo = [(self.root[char], self.root) for char in self.root]
while todo:
current, parent = todo.pop()
yield (current, parent)
for char in current:
todo.append((current[char], current))
def pretty_print_str(self):
'''
Create a string to pretty-print this trie to standard output.
'''
retval = ''
# dfs
todo = [self.root]
while todo:
current = todo.pop()
for char in reversed(sorted(current.keys())):
todo.append(current[char])
indent = ' ' * (current.depth * 2)
retval += indent + current.__unicode__() + '\n'
return retval.rstrip('\n')
def pretty_print(self):
'''
Prints this trie's structure to standard output for debugging.
'''
print(self.pretty_print_str())
|
wroberts/fsed
|
fsed/ahocorasick.py
|
Trie.pretty_print_str
|
python
|
def pretty_print_str(self):
    '''
    Build a multi-line string representation of this trie, one node
    per line, indented two spaces per level of depth.
    '''
    # iterative depth-first traversal; children are pushed in reverse
    # sorted order so they are popped (and rendered) in sorted order
    pieces = []
    stack = [self.root]
    while stack:
        node = stack.pop()
        for key in reversed(sorted(node.keys())):
            stack.append(node[key])
        pieces.append(' ' * (node.depth * 2) + node.__unicode__() + '\n')
    return ''.join(pieces).rstrip('\n')
|
Create a string to pretty-print this trie to standard output.
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L147-L160
| null |
class Trie(object):
'''A Trie which stores values.'''
def __init__(self):
'''Constructor.'''
self.root = TrieNode()
def __contains__(self, seq):
current = self.root
for char in seq:
if char not in current:
return False
current = current[char]
return current.has_value
def __getitem__(self, seq):
current = self.root
for char in seq:
if char not in current:
raise KeyError(current.prefix + char)
current = current[char]
if not current.has_value:
raise KeyError(current.prefix)
return current.value
def __setitem__(self, seq, value):
current = self.root
for char in seq:
if char not in current:
new_node = TrieNode()
new_node.uplink = char
new_node.depth = current.depth + 1
new_node.prefix = current.prefix + char
current[char] = new_node
current = current[char]
current.value = value
def dfs(self):
'''
Depth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
'''
yield (self.root, None)
todo = [(self.root[char], self.root) for char in self.root]
while todo:
current, parent = todo.pop()
yield (current, parent)
for char in current:
todo.append((current[char], current))
def bfs(self):
'''
Breadth-first search generator. Yields `(node, parent)` for every
node in the tree, beginning with `(self.root, None)`.
'''
yield (self.root, None)
todo = deque([(self.root[char], self.root) for char in self.root])
while todo:
current, parent = todo.popleft()
yield (current, parent)
for char in current:
todo.append((current[char], current))
def pretty_print(self):
'''
Prints this trie's structure to standard output for debugging.
'''
print(self.pretty_print_str())
|
wroberts/fsed
|
fsed/ahocorasick.py
|
AhoCorasickTrie._reset_suffix_links
|
python
|
def _reset_suffix_links(self):
'''
Reset all suffix links in all nodes in this trie.
'''
self._suffix_links_set = False
for current, _parent in self.dfs():
current.suffix = None
current.dict_suffix = None
current.longest_prefix = None
|
Reset all suffix links in all nodes in this trie.
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L185-L193
|
[
"def dfs(self):\n '''\n Depth-first search generator. Yields `(node, parent)` for every\n node in the tree, beginning with `(self.root, None)`.\n '''\n yield (self.root, None)\n todo = [(self.root[char], self.root) for char in self.root]\n while todo:\n current, parent = todo.pop()\n yield (current, parent)\n for char in current:\n todo.append((current[char], current))\n"
] |
class AhoCorasickTrie(Trie):
'''A Trie object for performing Aho-Corasick string matching.'''
def __init__(self):
'''Constructor.'''
super(AhoCorasickTrie, self).__init__()
self._suffix_links_set = False
def __setitem__(self, seq, value):
super(AhoCorasickTrie, self).__setitem__(seq, value)
if self._suffix_links_set:
self._reset_suffix_links()
def _set_suffix_links(self):
'''
Sets all suffix links in all nodes in this trie.
'''
self._suffix_links_set = True
for current, parent in self.bfs():
# skip the root node
if parent is None:
continue
current.longest_prefix = parent.longest_prefix
if parent.has_value:
current.longest_prefix = parent
# the root doesn't get a suffix link
# also, skip previously set suffix links
if current.has_suffix:
continue
# current is not the root and has no suffix
# set current's suffix to parent's suffix
suffix = parent
while True:
if not suffix.has_suffix:
current.suffix = self.root
break
else:
suffix = suffix.suffix
if current.uplink in suffix:
current.suffix = suffix[current.uplink]
break
# now find the dict_suffix value
suffix = current.suffix
while not suffix.has_value and suffix.has_suffix:
suffix = suffix.suffix
if suffix.has_value:
current.dict_suffix = suffix
def find_all(self, seq):
'''
Generator expression. Yields tuples of `(begin, length, value)`,
where:
- `begin` is an integer identifying the character index where
the match begins (0-based)
- `length` is an integer indicating the length of the match (1
or more characters)
- `value` is the value of the matching node in the trie
'''
if not self._suffix_links_set:
self._set_suffix_links()
current = self.root
for pos, char in enumerate(seq):
# find a state where we can transition on char
while char not in current and current.has_suffix:
current = current.suffix
if char in current:
# transition
current = current[char]
else:
# we must be at the root node
assert current is self.root
# throw out the char without doing anything
# pass
# now perform any matching on the current node
if current.has_value:
yield (1 + pos - current.depth, current.depth, current.value)
dict_suffix = current
while dict_suffix.has_dict_suffix:
dict_suffix = dict_suffix.dict_suffix
yield (1 + pos - dict_suffix.depth, dict_suffix.depth, dict_suffix.value)
def replace(self, seq):
'''
Performs search and replace on the given input string `seq` using
the values stored in this trie. This method uses a O(n**2)
chart-parsing algorithm to find the optimal way of replacing
matches in the input.
Arguments:
- `seq`:
'''
# #1: seq must be stored in a container with a len() function
seq = list(seq)
# chart is a (n-1) X (n) table
# chart[0] represents all matches of length (0+1) = 1
# chart[n-1] represents all matches/rewrites of length (n-1+1) = n
# chart[0][0] represents a match of length 1 starting at character 0
# chart[0][n-1] represents a match of length 1 starting at character n-1
# cells in the chart are tuples:
# (score, list)
# we initialise chart by filling in row 0:
# each cell gets assigned (0, char), where char is the character at
# the corresponding position in the input string
chart = [ [None for _i in range(len(seq)) ] for _i in range(len(seq)) ]
chart[0] = [(0, char) for char in seq]
# now we fill in the chart using the results from the aho-corasick
# string matches
for (begin, length, value) in self.find_all(seq):
chart[length-1][begin] = (length, value)
# now we need to fill in the chart row by row, starting with row 1
for row in range(1, len(chart)):
# each row is 1 cell shorter than the last
for col in range(len(seq) - row):
# the entry in [row][col] is the choice with the highest score; to
# find this, we must search the possible partitions of the cell
#
# things on row 2 have only one possible partition: 1 + 1
# things on row 3 have two: 1 + 2, 2 + 1
# things on row 4 have three: 1+3, 3+1, 2+2
#
# we assume that any pre-existing entry found by aho-corasick
# in a cell is already optimal
#print('scanning [{}][{}]'.format(row, col))
if chart[row][col] is not None:
continue
# chart[1][2] is the cell of matches of length 2 starting at
# character position 2;
# it can only be composed of chart[0][2] + chart[0][3]
#
# partition_point is the length of the first of the two parts
# of the cell
#print('cell[{}][{}] => '.format(row, col))
best_score = -1
best_value = None
for partition_point in range(row):
# the two cells will be [partition_point][col] and
# [row - partition_point - 2][col+partition_point+1]
x1 = partition_point
y1 = col
x2 = row - partition_point - 1
y2 = col + partition_point + 1
#print(' [{}][{}] + [{}][{}]'.format(x1, y1, x2, y2))
s1, v1 = chart[x1][y1]
s2, v2 = chart[x2][y2]
# compute the score
score = s1 + s2
#print(' = {} + {}'.format((s1, v1), (s2, v2)))
#print(' = score {}'.format(score))
if best_score < score:
best_score = score
best_value = v1 + v2
chart[row][col] = (best_score, best_value)
#print(' sets new best score with value {}'.format(
# best_value))
# now the optimal solution is stored at the top of the chart
return chart[len(seq)-1][0][1]
def greedy_replace(self, seq):
'''
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on
'''
if not self._suffix_links_set:
self._set_suffix_links()
# start at the root
current = self.root
buffered = ''
outstr = ''
for char in seq:
while char not in current:
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
break
elif current.has_suffix:
current = current.suffix
if current.depth:
outstr += buffered[:-current.depth]
buffered = buffered[-current.depth:]
else:
outstr += buffered
buffered = ''
break
else:
current = self.root
outstr += buffered
buffered = ''
break
if char in current:
buffered += char
current = current[char]
if current.has_value:
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
else:
assert current is self.root
outstr += buffered + char
buffered = ''
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
else:
outstr += buffered
return outstr
|
wroberts/fsed
|
fsed/ahocorasick.py
|
AhoCorasickTrie._set_suffix_links
|
python
|
def _set_suffix_links(self):
    '''
    Sets all suffix links in all nodes in this trie.

    Processing nodes in breadth-first order (so parents and shorter
    prefixes are finished first), this computes for every node:
    - `longest_prefix`: the nearest proper ancestor carrying a value
    - `suffix`: the node whose prefix is the longest proper suffix of
      this node's prefix (the Aho-Corasick failure link)
    - `dict_suffix`: the first value-bearing node on the suffix chain
    '''
    self._suffix_links_set = True
    for current, parent in self.bfs():
        # skip the root node
        if parent is None:
            continue
        # longest_prefix is inherited from the parent, unless the
        # parent itself holds a value
        current.longest_prefix = parent.longest_prefix
        if parent.has_value:
            current.longest_prefix = parent
        # the root doesn't get a suffix link
        # also, skip previously set suffix links
        if current.has_suffix:
            continue
        # current is not the root and has no suffix; walk up the
        # parent's suffix chain until we find a node that can extend
        # with current's uplink character, or fall back to the root
        suffix = parent
        while True:
            if not suffix.has_suffix:
                current.suffix = self.root
                break
            else:
                suffix = suffix.suffix
                if current.uplink in suffix:
                    current.suffix = suffix[current.uplink]
                    break
        # now find the dict_suffix value: the first value-bearing node
        # along the suffix chain (may not exist)
        suffix = current.suffix
        while not suffix.has_value and suffix.has_suffix:
            suffix = suffix.suffix
        if suffix.has_value:
            current.dict_suffix = suffix
|
Sets all suffix links in all nodes in this trie.
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L195-L228
|
[
"def bfs(self):\n '''\n Breadth-first search generator. Yields `(node, parent)` for every\n node in the tree, beginning with `(self.root, None)`.\n '''\n yield (self.root, None)\n todo = deque([(self.root[char], self.root) for char in self.root])\n while todo:\n current, parent = todo.popleft()\n yield (current, parent)\n for char in current:\n todo.append((current[char], current))\n"
] |
class AhoCorasickTrie(Trie):
'''A Trie object for performing Aho-Corasick string matching.'''
def __init__(self):
'''Constructor.'''
super(AhoCorasickTrie, self).__init__()
self._suffix_links_set = False
def __setitem__(self, seq, value):
super(AhoCorasickTrie, self).__setitem__(seq, value)
if self._suffix_links_set:
self._reset_suffix_links()
def _reset_suffix_links(self):
'''
Reset all suffix links in all nodes in this trie.
'''
self._suffix_links_set = False
for current, _parent in self.dfs():
current.suffix = None
current.dict_suffix = None
current.longest_prefix = None
def find_all(self, seq):
'''
Generator expression. Yields tuples of `(begin, length, value)`,
where:
- `begin` is an integer identifying the character index where
the match begins (0-based)
- `length` is an integer indicating the length of the match (1
or more characters)
- `value` is the value of the matching node in the trie
'''
if not self._suffix_links_set:
self._set_suffix_links()
current = self.root
for pos, char in enumerate(seq):
# find a state where we can transition on char
while char not in current and current.has_suffix:
current = current.suffix
if char in current:
# transition
current = current[char]
else:
# we must be at the root node
assert current is self.root
# throw out the char without doing anything
# pass
# now perform any matching on the current node
if current.has_value:
yield (1 + pos - current.depth, current.depth, current.value)
dict_suffix = current
while dict_suffix.has_dict_suffix:
dict_suffix = dict_suffix.dict_suffix
yield (1 + pos - dict_suffix.depth, dict_suffix.depth, dict_suffix.value)
def replace(self, seq):
'''
Performs search and replace on the given input string `seq` using
the values stored in this trie. This method uses a O(n**2)
chart-parsing algorithm to find the optimal way of replacing
matches in the input.
Arguments:
- `seq`:
'''
# #1: seq must be stored in a container with a len() function
seq = list(seq)
# chart is a (n-1) X (n) table
# chart[0] represents all matches of length (0+1) = 1
# chart[n-1] represents all matches/rewrites of length (n-1+1) = n
# chart[0][0] represents a match of length 1 starting at character 0
# chart[0][n-1] represents a match of length 1 starting at character n-1
# cells in the chart are tuples:
# (score, list)
# we initialise chart by filling in row 0:
# each cell gets assigned (0, char), where char is the character at
# the corresponding position in the input string
chart = [ [None for _i in range(len(seq)) ] for _i in range(len(seq)) ]
chart[0] = [(0, char) for char in seq]
# now we fill in the chart using the results from the aho-corasick
# string matches
for (begin, length, value) in self.find_all(seq):
chart[length-1][begin] = (length, value)
# now we need to fill in the chart row by row, starting with row 1
for row in range(1, len(chart)):
# each row is 1 cell shorter than the last
for col in range(len(seq) - row):
# the entry in [row][col] is the choice with the highest score; to
# find this, we must search the possible partitions of the cell
#
# things on row 2 have only one possible partition: 1 + 1
# things on row 3 have two: 1 + 2, 2 + 1
# things on row 4 have three: 1+3, 3+1, 2+2
#
# we assume that any pre-existing entry found by aho-corasick
# in a cell is already optimal
#print('scanning [{}][{}]'.format(row, col))
if chart[row][col] is not None:
continue
# chart[1][2] is the cell of matches of length 2 starting at
# character position 2;
# it can only be composed of chart[0][2] + chart[0][3]
#
# partition_point is the length of the first of the two parts
# of the cell
#print('cell[{}][{}] => '.format(row, col))
best_score = -1
best_value = None
for partition_point in range(row):
# the two cells will be [partition_point][col] and
# [row - partition_point - 2][col+partition_point+1]
x1 = partition_point
y1 = col
x2 = row - partition_point - 1
y2 = col + partition_point + 1
#print(' [{}][{}] + [{}][{}]'.format(x1, y1, x2, y2))
s1, v1 = chart[x1][y1]
s2, v2 = chart[x2][y2]
# compute the score
score = s1 + s2
#print(' = {} + {}'.format((s1, v1), (s2, v2)))
#print(' = score {}'.format(score))
if best_score < score:
best_score = score
best_value = v1 + v2
chart[row][col] = (best_score, best_value)
#print(' sets new best score with value {}'.format(
# best_value))
# now the optimal solution is stored at the top of the chart
return chart[len(seq)-1][0][1]
def greedy_replace(self, seq):
'''
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on
'''
if not self._suffix_links_set:
self._set_suffix_links()
# start at the root
current = self.root
buffered = ''
outstr = ''
for char in seq:
while char not in current:
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
break
elif current.has_suffix:
current = current.suffix
if current.depth:
outstr += buffered[:-current.depth]
buffered = buffered[-current.depth:]
else:
outstr += buffered
buffered = ''
break
else:
current = self.root
outstr += buffered
buffered = ''
break
if char in current:
buffered += char
current = current[char]
if current.has_value:
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
else:
assert current is self.root
outstr += buffered + char
buffered = ''
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
else:
outstr += buffered
return outstr
|
wroberts/fsed
|
fsed/ahocorasick.py
|
AhoCorasickTrie.find_all
|
python
|
def find_all(self, seq):
    '''
    Generator.  Yields tuples of `(begin, length, value)` for every
    stored key that matches inside ``seq``, where:

    - `begin` is an integer identifying the character index where
      the match begins (0-based)
    - `length` is an integer indicating the length of the match (1
      or more characters)
    - `value` is the value of the matching node in the trie
    '''
    # build the Aho-Corasick failure/dict links lazily
    if not self._suffix_links_set:
        self._set_suffix_links()
    current = self.root
    for pos, char in enumerate(seq):
        # find a state where we can transition on char, following
        # suffix (failure) links back toward the root
        while char not in current and current.has_suffix:
            current = current.suffix
        if char in current:
            # transition
            current = current[char]
        else:
            # we must be at the root node
            assert current is self.root
            # throw out the char without doing anything
            # pass
        # now perform any matching on the current node
        if current.has_value:
            yield (1 + pos - current.depth, current.depth, current.value)
        # also report every shorter, overlapping match reachable via
        # the dict_suffix chain
        dict_suffix = current
        while dict_suffix.has_dict_suffix:
            dict_suffix = dict_suffix.dict_suffix
            yield (1 + pos - dict_suffix.depth, dict_suffix.depth, dict_suffix.value)
|
Generator expression. Yields tuples of `(begin, length, value)`,
where:
- `begin` is an integer identifying the character index where
the match begins (0-based)
- `length` is an integer indicating the length of the match (1
or more characters)
- `value` is the value of the matching node in the trie
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L230-L262
|
[
"def _set_suffix_links(self):\n '''\n Sets all suffix links in all nodes in this trie.\n '''\n self._suffix_links_set = True\n for current, parent in self.bfs():\n # skip the root node\n if parent is None:\n continue\n current.longest_prefix = parent.longest_prefix\n if parent.has_value:\n current.longest_prefix = parent\n # the root doesn't get a suffix link\n # also, skip previously set suffix links\n if current.has_suffix:\n continue\n # current is not the root and has no suffix\n # set current's suffix to parent's suffix\n suffix = parent\n while True:\n if not suffix.has_suffix:\n current.suffix = self.root\n break\n else:\n suffix = suffix.suffix\n if current.uplink in suffix:\n current.suffix = suffix[current.uplink]\n break\n # now find the dict_suffix value\n suffix = current.suffix\n while not suffix.has_value and suffix.has_suffix:\n suffix = suffix.suffix\n if suffix.has_value:\n current.dict_suffix = suffix\n"
] |
class AhoCorasickTrie(Trie):
'''A Trie object for performing Aho-Corasick string matching.'''
def __init__(self):
'''Constructor.'''
super(AhoCorasickTrie, self).__init__()
self._suffix_links_set = False
def __setitem__(self, seq, value):
super(AhoCorasickTrie, self).__setitem__(seq, value)
if self._suffix_links_set:
self._reset_suffix_links()
def _reset_suffix_links(self):
'''
Reset all suffix links in all nodes in this trie.
'''
self._suffix_links_set = False
for current, _parent in self.dfs():
current.suffix = None
current.dict_suffix = None
current.longest_prefix = None
def _set_suffix_links(self):
'''
Sets all suffix links in all nodes in this trie.
'''
self._suffix_links_set = True
for current, parent in self.bfs():
# skip the root node
if parent is None:
continue
current.longest_prefix = parent.longest_prefix
if parent.has_value:
current.longest_prefix = parent
# the root doesn't get a suffix link
# also, skip previously set suffix links
if current.has_suffix:
continue
# current is not the root and has no suffix
# set current's suffix to parent's suffix
suffix = parent
while True:
if not suffix.has_suffix:
current.suffix = self.root
break
else:
suffix = suffix.suffix
if current.uplink in suffix:
current.suffix = suffix[current.uplink]
break
# now find the dict_suffix value
suffix = current.suffix
while not suffix.has_value and suffix.has_suffix:
suffix = suffix.suffix
if suffix.has_value:
current.dict_suffix = suffix
def replace(self, seq):
'''
Performs search and replace on the given input string `seq` using
the values stored in this trie. This method uses a O(n**2)
chart-parsing algorithm to find the optimal way of replacing
matches in the input.
Arguments:
- `seq`:
'''
# #1: seq must be stored in a container with a len() function
seq = list(seq)
# chart is a (n-1) X (n) table
# chart[0] represents all matches of length (0+1) = 1
# chart[n-1] represents all matches/rewrites of length (n-1+1) = n
# chart[0][0] represents a match of length 1 starting at character 0
# chart[0][n-1] represents a match of length 1 starting at character n-1
# cells in the chart are tuples:
# (score, list)
# we initialise chart by filling in row 0:
# each cell gets assigned (0, char), where char is the character at
# the corresponding position in the input string
chart = [ [None for _i in range(len(seq)) ] for _i in range(len(seq)) ]
chart[0] = [(0, char) for char in seq]
# now we fill in the chart using the results from the aho-corasick
# string matches
for (begin, length, value) in self.find_all(seq):
chart[length-1][begin] = (length, value)
# now we need to fill in the chart row by row, starting with row 1
for row in range(1, len(chart)):
# each row is 1 cell shorter than the last
for col in range(len(seq) - row):
# the entry in [row][col] is the choice with the highest score; to
# find this, we must search the possible partitions of the cell
#
# things on row 2 have only one possible partition: 1 + 1
# things on row 3 have two: 1 + 2, 2 + 1
# things on row 4 have three: 1+3, 3+1, 2+2
#
# we assume that any pre-existing entry found by aho-corasick
# in a cell is already optimal
#print('scanning [{}][{}]'.format(row, col))
if chart[row][col] is not None:
continue
# chart[1][2] is the cell of matches of length 2 starting at
# character position 2;
# it can only be composed of chart[0][2] + chart[0][3]
#
# partition_point is the length of the first of the two parts
# of the cell
#print('cell[{}][{}] => '.format(row, col))
best_score = -1
best_value = None
for partition_point in range(row):
# the two cells will be [partition_point][col] and
# [row - partition_point - 2][col+partition_point+1]
x1 = partition_point
y1 = col
x2 = row - partition_point - 1
y2 = col + partition_point + 1
#print(' [{}][{}] + [{}][{}]'.format(x1, y1, x2, y2))
s1, v1 = chart[x1][y1]
s2, v2 = chart[x2][y2]
# compute the score
score = s1 + s2
#print(' = {} + {}'.format((s1, v1), (s2, v2)))
#print(' = score {}'.format(score))
if best_score < score:
best_score = score
best_value = v1 + v2
chart[row][col] = (best_score, best_value)
#print(' sets new best score with value {}'.format(
# best_value))
# now the optimal solution is stored at the top of the chart
return chart[len(seq)-1][0][1]
def greedy_replace(self, seq):
'''
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on
'''
if not self._suffix_links_set:
self._set_suffix_links()
# start at the root
current = self.root
buffered = ''
outstr = ''
for char in seq:
while char not in current:
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
break
elif current.has_suffix:
current = current.suffix
if current.depth:
outstr += buffered[:-current.depth]
buffered = buffered[-current.depth:]
else:
outstr += buffered
buffered = ''
break
else:
current = self.root
outstr += buffered
buffered = ''
break
if char in current:
buffered += char
current = current[char]
if current.has_value:
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
else:
assert current is self.root
outstr += buffered + char
buffered = ''
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
else:
outstr += buffered
return outstr
|
wroberts/fsed
|
fsed/ahocorasick.py
|
AhoCorasickTrie.replace
|
python
|
def replace(self, seq):
    '''
    Performs search and replace on the given input string `seq` using
    the values stored in this trie. This method uses a O(n**2)
    chart-parsing algorithm to find the optimal way of replacing
    matches in the input.

    Arguments:
    - `seq`: an iterable of characters to perform search-and-replace on
    '''
    # #1: seq must be stored in a container with a len() function
    seq = list(seq)
    # chart is an n X n table (row r only uses its first n-r columns)
    # chart[0] represents all matches of length (0+1) = 1
    # chart[n-1] represents all matches/rewrites of length (n-1+1) = n
    # chart[0][0] represents a match of length 1 starting at character 0
    # chart[0][n-1] represents a match of length 1 starting at character n-1
    # cells in the chart are tuples:
    # (score, list)
    # we initialise chart by filling in row 0:
    # each cell gets assigned (0, char), where char is the character at
    # the corresponding position in the input string
    chart = [ [None for _i in range(len(seq)) ] for _i in range(len(seq)) ]
    chart[0] = [(0, char) for char in seq]
    # now we fill in the chart using the results from the aho-corasick
    # string matches; the score of a direct match is its length
    for (begin, length, value) in self.find_all(seq):
        chart[length-1][begin] = (length, value)
    # now we need to fill in the chart row by row, starting with row 1
    for row in range(1, len(chart)):
        # each row is 1 cell shorter than the last
        for col in range(len(seq) - row):
            # the entry in [row][col] is the choice with the highest score; to
            # find this, we must search the possible partitions of the cell
            #
            # things on row 2 have only one possible partition: 1 + 1
            # things on row 3 have two: 1 + 2, 2 + 1
            # things on row 4 have three: 1+3, 3+1, 2+2
            #
            # we assume that any pre-existing entry found by aho-corasick
            # in a cell is already optimal
            #print('scanning [{}][{}]'.format(row, col))
            if chart[row][col] is not None:
                continue
            # chart[1][2] is the cell of matches of length 2 starting at
            # character position 2;
            # it can only be composed of chart[0][2] + chart[0][3]
            #
            # partition_point is the length of the first of the two parts
            # of the cell
            #print('cell[{}][{}] => '.format(row, col))
            best_score = -1
            best_value = None
            for partition_point in range(row):
                # the two cells will be [partition_point][col] and
                # [row - partition_point - 1][col + partition_point + 1]
                x1 = partition_point
                y1 = col
                x2 = row - partition_point - 1
                y2 = col + partition_point + 1
                #print(' [{}][{}] + [{}][{}]'.format(x1, y1, x2, y2))
                s1, v1 = chart[x1][y1]
                s2, v2 = chart[x2][y2]
                # compute the score: total characters covered by matches
                score = s1 + s2
                #print(' = {} + {}'.format((s1, v1), (s2, v2)))
                #print(' = score {}'.format(score))
                if best_score < score:
                    best_score = score
                    best_value = v1 + v2
                chart[row][col] = (best_score, best_value)
                #print(' sets new best score with value {}'.format(
                # best_value))
    # now the optimal solution is stored at the top of the chart
    return chart[len(seq)-1][0][1]
|
Performs search and replace on the given input string `seq` using
the values stored in this trie. This method uses a O(n**2)
chart-parsing algorithm to find the optimal way of replacing
matches in the input.
Arguments:
- `seq`:
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L264-L338
|
[
"def find_all(self, seq):\n '''\n Generator expression. Yields tuples of `(begin, length, value)`,\n where:\n\n - `begin` is an integer identifying the character index where\n the match begins (0-based)\n - `length` is an integer indicating the length of the match (1\n or more characters)\n - `value` is the value of the matching node in the trie\n '''\n if not self._suffix_links_set:\n self._set_suffix_links()\n current = self.root\n for pos, char in enumerate(seq):\n # find a state where we can transition on char\n while char not in current and current.has_suffix:\n current = current.suffix\n if char in current:\n # transition\n current = current[char]\n else:\n # we must be at the root node\n assert current is self.root\n # throw out the char without doing anything\n # pass\n # now perform any matching on the current node\n if current.has_value:\n yield (1 + pos - current.depth, current.depth, current.value)\n dict_suffix = current\n while dict_suffix.has_dict_suffix:\n dict_suffix = dict_suffix.dict_suffix\n yield (1 + pos - dict_suffix.depth, dict_suffix.depth, dict_suffix.value)\n"
] |
class AhoCorasickTrie(Trie):
    '''A Trie object for performing Aho-Corasick string matching.'''

    def __init__(self):
        '''Constructor.'''
        super(AhoCorasickTrie, self).__init__()
        # suffix links are computed lazily, the first time matching is done
        self._suffix_links_set = False

    def __setitem__(self, seq, value):
        # inserting a new pattern invalidates previously computed links
        super(AhoCorasickTrie, self).__setitem__(seq, value)
        if self._suffix_links_set:
            self._reset_suffix_links()

    def _reset_suffix_links(self):
        '''
        Reset all suffix links in all nodes in this trie.
        '''
        self._suffix_links_set = False
        for current, _parent in self.dfs():
            current.suffix = None
            current.dict_suffix = None
            current.longest_prefix = None

    def _set_suffix_links(self):
        '''
        Sets all suffix links in all nodes in this trie.

        Uses a breadth-first traversal so that a node's parent (and the
        parent's suffix chain) is always processed before the node itself.
        '''
        self._suffix_links_set = True
        for current, parent in self.bfs():
            # skip the root node
            if parent is None:
                continue
            # longest_prefix points at the nearest ancestor carrying a value
            current.longest_prefix = parent.longest_prefix
            if parent.has_value:
                current.longest_prefix = parent
            # the root doesn't get a suffix link
            # also, skip previously set suffix links
            if current.has_suffix:
                continue
            # current is not the root and has no suffix
            # set current's suffix to parent's suffix
            suffix = parent
            while True:
                if not suffix.has_suffix:
                    # fell off the top of the trie: fail over to the root
                    current.suffix = self.root
                    break
                else:
                    suffix = suffix.suffix
                    if current.uplink in suffix:
                        current.suffix = suffix[current.uplink]
                        break
            # now find the dict_suffix value: the nearest node on the
            # suffix chain that holds a value
            suffix = current.suffix
            while not suffix.has_value and suffix.has_suffix:
                suffix = suffix.suffix
            if suffix.has_value:
                current.dict_suffix = suffix

    def find_all(self, seq):
        '''
        Generator expression. Yields tuples of `(begin, length, value)`,
        where:

        - `begin` is an integer identifying the character index where
          the match begins (0-based)
        - `length` is an integer indicating the length of the match (1
          or more characters)
        - `value` is the value of the matching node in the trie
        '''
        if not self._suffix_links_set:
            self._set_suffix_links()
        current = self.root
        for pos, char in enumerate(seq):
            # find a state where we can transition on char
            while char not in current and current.has_suffix:
                current = current.suffix
            if char in current:
                # transition
                current = current[char]
            else:
                # we must be at the root node
                assert current is self.root
                # throw out the char without doing anything
                # pass
            # now perform any matching on the current node
            if current.has_value:
                yield (1 + pos - current.depth, current.depth, current.value)
            # also emit every shorter match reachable through the
            # dictionary-suffix chain
            dict_suffix = current
            while dict_suffix.has_dict_suffix:
                dict_suffix = dict_suffix.dict_suffix
                yield (1 + pos - dict_suffix.depth, dict_suffix.depth, dict_suffix.value)

    def greedy_replace(self, seq):
        '''
        Greedily matches strings in ``seq``, and replaces them with their
        node values.

        Arguments:
        - `seq`: an iterable of characters to perform search-and-replace on

        Returns the rewritten string.
        '''
        if not self._suffix_links_set:
            self._set_suffix_links()
        # start at the root
        current = self.root
        # `buffered` holds input characters that may still be part of a match
        buffered = ''
        outstr = ''
        for char in seq:
            while char not in current:
                if current.has_dict_suffix:
                    # a shorter match ends here: emit its replacement
                    current = current.dict_suffix
                    outstr += buffered[:-current.depth]
                    outstr += current.value
                    buffered = ''
                    current = self.root
                    break
                elif current.has_suffix:
                    # fall back along the suffix link, flushing any
                    # characters that can no longer match
                    current = current.suffix
                    if current.depth:
                        outstr += buffered[:-current.depth]
                        buffered = buffered[-current.depth:]
                    else:
                        outstr += buffered
                        buffered = ''
                        break
                else:
                    # stuck at the root: flush the buffer unchanged
                    current = self.root
                    outstr += buffered
                    buffered = ''
                    break
            if char in current:
                buffered += char
                current = current[char]
                if current.has_value:
                    # complete match: replace it with the stored value
                    outstr += buffered[:-current.depth]
                    outstr += current.value
                    buffered = ''
                    current = self.root
            else:
                assert current is self.root
                outstr += buffered + char
                buffered = ''
        # end of input: flush any pending partial match
        if current.has_dict_suffix:
            current = current.dict_suffix
            outstr += buffered[:-current.depth]
            outstr += current.value
        else:
            outstr += buffered
        return outstr
|
wroberts/fsed
|
fsed/ahocorasick.py
|
AhoCorasickTrie.greedy_replace
|
python
|
def greedy_replace(self, seq):
'''
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on
'''
if not self._suffix_links_set:
self._set_suffix_links()
# start at the root
current = self.root
buffered = ''
outstr = ''
for char in seq:
while char not in current:
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
break
elif current.has_suffix:
current = current.suffix
if current.depth:
outstr += buffered[:-current.depth]
buffered = buffered[-current.depth:]
else:
outstr += buffered
buffered = ''
break
else:
current = self.root
outstr += buffered
buffered = ''
break
if char in current:
buffered += char
current = current[char]
if current.has_value:
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
else:
assert current is self.root
outstr += buffered + char
buffered = ''
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
else:
outstr += buffered
return outstr
|
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/ahocorasick.py#L340-L395
|
[
"def _set_suffix_links(self):\n '''\n Sets all suffix links in all nodes in this trie.\n '''\n self._suffix_links_set = True\n for current, parent in self.bfs():\n # skip the root node\n if parent is None:\n continue\n current.longest_prefix = parent.longest_prefix\n if parent.has_value:\n current.longest_prefix = parent\n # the root doesn't get a suffix link\n # also, skip previously set suffix links\n if current.has_suffix:\n continue\n # current is not the root and has no suffix\n # set current's suffix to parent's suffix\n suffix = parent\n while True:\n if not suffix.has_suffix:\n current.suffix = self.root\n break\n else:\n suffix = suffix.suffix\n if current.uplink in suffix:\n current.suffix = suffix[current.uplink]\n break\n # now find the dict_suffix value\n suffix = current.suffix\n while not suffix.has_value and suffix.has_suffix:\n suffix = suffix.suffix\n if suffix.has_value:\n current.dict_suffix = suffix\n"
] |
class AhoCorasickTrie(Trie):
    '''A Trie object for performing Aho-Corasick string matching.'''

    def __init__(self):
        '''Constructor.'''
        super(AhoCorasickTrie, self).__init__()
        # suffix links are computed lazily, the first time matching is done
        self._suffix_links_set = False

    def __setitem__(self, seq, value):
        # inserting a new pattern invalidates previously computed links
        super(AhoCorasickTrie, self).__setitem__(seq, value)
        if self._suffix_links_set:
            self._reset_suffix_links()

    def _reset_suffix_links(self):
        '''
        Reset all suffix links in all nodes in this trie.
        '''
        self._suffix_links_set = False
        for current, _parent in self.dfs():
            current.suffix = None
            current.dict_suffix = None
            current.longest_prefix = None

    def _set_suffix_links(self):
        '''
        Sets all suffix links in all nodes in this trie.

        Uses a breadth-first traversal so that a node's parent (and the
        parent's suffix chain) is always processed before the node itself.
        '''
        self._suffix_links_set = True
        for current, parent in self.bfs():
            # skip the root node
            if parent is None:
                continue
            # longest_prefix points at the nearest ancestor carrying a value
            current.longest_prefix = parent.longest_prefix
            if parent.has_value:
                current.longest_prefix = parent
            # the root doesn't get a suffix link
            # also, skip previously set suffix links
            if current.has_suffix:
                continue
            # current is not the root and has no suffix
            # set current's suffix to parent's suffix
            suffix = parent
            while True:
                if not suffix.has_suffix:
                    # fell off the top of the trie: fail over to the root
                    current.suffix = self.root
                    break
                else:
                    suffix = suffix.suffix
                    if current.uplink in suffix:
                        current.suffix = suffix[current.uplink]
                        break
            # now find the dict_suffix value: the nearest node on the
            # suffix chain that holds a value
            suffix = current.suffix
            while not suffix.has_value and suffix.has_suffix:
                suffix = suffix.suffix
            if suffix.has_value:
                current.dict_suffix = suffix

    def find_all(self, seq):
        '''
        Generator expression. Yields tuples of `(begin, length, value)`,
        where:

        - `begin` is an integer identifying the character index where
          the match begins (0-based)
        - `length` is an integer indicating the length of the match (1
          or more characters)
        - `value` is the value of the matching node in the trie
        '''
        if not self._suffix_links_set:
            self._set_suffix_links()
        current = self.root
        for pos, char in enumerate(seq):
            # find a state where we can transition on char
            while char not in current and current.has_suffix:
                current = current.suffix
            if char in current:
                # transition
                current = current[char]
            else:
                # we must be at the root node
                assert current is self.root
                # throw out the char without doing anything
                # pass
            # now perform any matching on the current node
            if current.has_value:
                yield (1 + pos - current.depth, current.depth, current.value)
            # also emit every shorter match reachable through the
            # dictionary-suffix chain
            dict_suffix = current
            while dict_suffix.has_dict_suffix:
                dict_suffix = dict_suffix.dict_suffix
                yield (1 + pos - dict_suffix.depth, dict_suffix.depth, dict_suffix.value)

    def replace(self, seq):
        '''
        Performs search and replace on the given input string `seq` using
        the values stored in this trie. This method uses a O(n**2)
        chart-parsing algorithm to find the optimal way of replacing
        matches in the input.

        Arguments:
        - `seq`: an iterable of characters to perform search-and-replace on

        Returns the rewritten string.
        '''
        # #1: seq must be stored in a container with a len() function
        seq = list(seq)
        # chart is a (n-1) X (n) table
        # chart[0] represents all matches of length (0+1) = 1
        # chart[n-1] represents all matches/rewrites of length (n-1+1) = n
        # chart[0][0] represents a match of length 1 starting at character 0
        # chart[0][n-1] represents a match of length 1 starting at character n-1
        # cells in the chart are tuples:
        # (score, list)
        # we initialise chart by filling in row 0:
        # each cell gets assigned (0, char), where char is the character at
        # the corresponding position in the input string
        chart = [[None for _i in range(len(seq))] for _i in range(len(seq))]
        chart[0] = [(0, char) for char in seq]
        # now we fill in the chart using the results from the aho-corasick
        # string matches
        for (begin, length, value) in self.find_all(seq):
            chart[length-1][begin] = (length, value)
        # now we need to fill in the chart row by row, starting with row 1
        for row in range(1, len(chart)):
            # each row is 1 cell shorter than the last
            for col in range(len(seq) - row):
                # the entry in [row][col] is the choice with the highest score; to
                # find this, we must search the possible partitions of the cell
                #
                # things on row 2 have only one possible partition: 1 + 1
                # things on row 3 have two: 1 + 2, 2 + 1
                # things on row 4 have three: 1+3, 3+1, 2+2
                #
                # we assume that any pre-existing entry found by aho-corasick
                # in a cell is already optimal
                #print('scanning [{}][{}]'.format(row, col))
                if chart[row][col] is not None:
                    continue
                # chart[1][2] is the cell of matches of length 2 starting at
                # character position 2;
                # it can only be composed of chart[0][2] + chart[0][3]
                #
                # partition_point is the length of the first of the two parts
                # of the cell
                #print('cell[{}][{}] => '.format(row, col))
                best_score = -1
                best_value = None
                for partition_point in range(row):
                    # the two cells will be [partition_point][col] and
                    # [row - partition_point - 2][col+partition_point+1]
                    x1 = partition_point
                    y1 = col
                    x2 = row - partition_point - 1
                    y2 = col + partition_point + 1
                    #print('   [{}][{}] + [{}][{}]'.format(x1, y1, x2, y2))
                    s1, v1 = chart[x1][y1]
                    s2, v2 = chart[x2][y2]
                    # compute the score: longer rewritten spans win
                    score = s1 + s2
                    #print('   = {} + {}'.format((s1, v1), (s2, v2)))
                    #print('   = score {}'.format(score))
                    if best_score < score:
                        best_score = score
                        best_value = v1 + v2
                chart[row][col] = (best_score, best_value)
                #print('   sets new best score with value {}'.format(
                #    best_value))
        # now the optimal solution is stored at the top of the chart
        return chart[len(seq)-1][0][1]
|
wroberts/fsed
|
fsed/fsed.py
|
set_log_level
|
python
|
def set_log_level(verbose, quiet):
    '''
    Sets the logging level of the script based on command line options.

    Arguments:
    - `verbose`: integer count of ``-v`` flags given on the command line
    - `quiet`: if True, suppress everything below CRITICAL
    '''
    # --quiet overrides any -v flags
    if quiet:
        verbose = -1
    # translate the verbosity count into a logging level
    if verbose < 0:
        level = logging.CRITICAL
    elif verbose == 0:
        level = logging.WARNING
    elif verbose == 1:
        level = logging.INFO
    else:
        level = logging.DEBUG
    LOGGER.setLevel(level)
|
Ses the logging level of the script based on command line options.
Arguments:
- `verbose`:
- `quiet`:
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L23-L41
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
fsed.py
(c) Will Roberts 12 December, 2015
Main module for the ``fsed`` command line utility.
'''
from __future__ import absolute_import, print_function, unicode_literals
from fsed.utils import open_file
import click
import fsed.ahocorasick
import logging
import re
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
stream=sys.stderr, level=logging.INFO)
LOGGER = logging.getLogger(__name__)
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries):
    '''
    Automatically detects the pattern file format, and determines
    whether the Aho-Corasick string matching should pay attention to
    word boundaries or not.

    Arguments:
    - `pattern_filename`: path of the pattern file to inspect
    - `encoding`: character encoding used to decode the file's lines
    - `on_word_boundaries`: if True, boundary handling is forced on

    Returns a tuple of two booleans ``(tsv, boundaries)``.
    '''
    is_tsv = True
    needs_boundaries = on_word_boundaries
    with open_file(pattern_filename) as pattern_file:
        for raw_line in pattern_file:
            decoded = raw_line.decode(encoding)
            # a tsv pattern file has exactly one tab on every line
            if decoded.count('\t') != 1:
                is_tsv = False
            # a literal "\b" anywhere switches on word-boundary mode
            if '\\b' in decoded:
                needs_boundaries = True
            # both questions settled: no need to read the rest
            if needs_boundaries and not is_tsv:
                break
    return is_tsv, needs_boundaries
def sub_escapes(sval):
    '''
    Process escaped characters in ``sval``.

    Recognised escapes are ``\\a``, ``\\b`` (the word-boundary marker,
    encoded as a NUL byte), ``\\f``, ``\\n``, ``\\r``, ``\\t``, ``\\v``
    and ``\\\\``; unrecognised escape sequences are left untouched.

    The string is processed in a single left-to-right pass, so an escaped
    backslash never combines with the following character: ``\\\\n``
    yields a literal backslash followed by ``n``, not a newline.  (The
    previous chained-``str.replace`` implementation handled ``\\\\``
    last and got this case wrong.)

    Arguments:
    - `sval`: the string to unescape

    Returns the unescaped string.
    '''
    escape_map = {
        'a': '\a',
        'b': '\x00',  # \b marks a word boundary; encoded as NUL
        'f': '\f',
        'n': '\n',
        'r': '\r',
        't': '\t',
        'v': '\v',
        '\\': '\\',
    }
    # each backslash consumes exactly one following character; unknown
    # escapes are reproduced verbatim
    return re.sub(r'\\(.)',
                  lambda match: escape_map.get(match.group(1), match.group(0)),
                  sval)
def build_trie(pattern_filename, pattern_format, encoding, on_word_boundaries):
    '''
    Constructs a finite state machine for performing string rewriting.

    Arguments:
    - `pattern_filename`: path of the pattern file to load
    - `pattern_format`: 'auto', 'tsv' or 'sed'
    - `encoding`: character encoding used to decode the pattern file
    - `on_word_boundaries`: if True, force matching on word boundaries

    Returns a tuple ``(trie, boundaries)`` of the loaded
    ``AhoCorasickTrie`` and the (possibly auto-detected) boundary flag.
    '''
    boundaries = on_word_boundaries
    if pattern_format == 'auto' or not on_word_boundaries:
        tsv, boundaries = detect_pattern_format(pattern_filename, encoding,
                                                on_word_boundaries)
    if pattern_format == 'auto':
        if tsv:
            pattern_format = 'tsv'
        else:
            pattern_format = 'sed'
    trie = fsed.ahocorasick.AhoCorasickTrie()
    num_candidates = 0
    with open_file(pattern_filename) as pattern_file:
        for lineno, line in enumerate(pattern_file):
            line = line.decode(encoding).rstrip('\n')
            # skip blank lines
            if not line.strip():
                continue
            # decode the line
            if pattern_format == 'tsv':
                fields = line.split('\t')
                if len(fields) != 2:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in tab-separated format): {}').format(lineno, line))
                    continue
                before, after = fields
            elif pattern_format == 'sed':
                before = after = None
                line = line.lstrip()
                # only "s<delim>before<delim>after<delim>" commands are supported
                if line[0] == 's':
                    delim = line[1]
                    # delim might be a regex special character;
                    # escape it if necessary
                    if delim in '.^$*+?[](){}|\\':
                        delim = '\\' + delim
                    # split on unescaped delimiters only
                    fields = re.split(r'(?<!\\){}'.format(delim), line)
                    if len(fields) == 4:
                        before, after = fields[1], fields[2]
                        # unescape any escaped delimiters in the two parts
                        before = re.sub(r'(?<!\\)\\{}'.format(delim), delim, before)
                        after = re.sub(r'(?<!\\)\\{}'.format(delim), delim, after)
                if before is None or after is None:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in sed format): {}').format(lineno, line))
                    continue
            num_candidates += 1
            if on_word_boundaries and before != before.strip():
                LOGGER.warning(('before pattern on line {} padded whitespace; '
                                'this may interact strangely with the --words '
                                'option: {}').format(lineno, line))
            before = sub_escapes(before)
            after = sub_escapes(after)
            if boundaries:
                before = fsed.ahocorasick.boundary_transform(before, on_word_boundaries)
            trie[before] = after
    LOGGER.info('{} patterns loaded from {}'.format(num_candidates,
                                                    pattern_filename))
    return trie, boundaries
def warn_prefix_values(trie):
    '''
    Prints warning messages for every node that has both a value and a
    longest_prefix.

    Such a node is a pattern that is a superstring of another stored
    pattern, which greedy matching will never reach, so the user is
    warned about it.

    Arguments:
    - `trie`: the `AhoCorasickTrie` to inspect
    '''
    for current, _parent in trie.dfs():
        if current.has_value and current.longest_prefix is not None:
            # Logger.warn is a deprecated alias of Logger.warning
            LOGGER.warning(('pattern {} (value {}) is a superstring of pattern '
                            '{} (value {}) and will never be matched').format(
                                current.prefix, current.value,
                                current.longest_prefix.prefix,
                                current.longest_prefix.value))
def rewrite_str_with_trie(sval, trie, boundaries=False, slow=False):
    '''
    Rewrites a string using the given trie object.

    Arguments:
    - `sval`: the string to rewrite
    - `trie`: the `AhoCorasickTrie` holding the rewrite patterns
    - `boundaries`: if True, wrap the string in word-boundary markers
      before matching and strip them afterwards
    - `slow`: if True, use the exhaustive chart-parsing `replace`
      instead of greedy matching

    Returns the rewritten string.
    '''
    if boundaries:
        sval = fsed.ahocorasick.boundary_transform(sval)
    # pick the matching strategy once, then apply it
    rewrite = trie.replace if slow else trie.greedy_replace
    sval = rewrite(sval)
    if boundaries:
        sval = ''.join(fsed.ahocorasick.boundary_untransform(sval))
    return sval
@click.command()
@click.argument('pattern_filename', type=click.Path(exists=True),
                metavar='PATTERN_FILE')
@click.argument('input_filenames', nargs=-1,
                type=click.Path(exists=True), metavar='[INPUT_FILES]')
@click.option('--pattern-format', type=click.Choice(['auto', 'tsv', 'sed']),
              default='auto', show_default=True,
              help='Specify the format of PATTERN_FILE')
@click.option('-o', '--output', 'output_filename', type=click.Path(),
              help='Program output is written '
              'to this file. Default is to write '
              'to standard output.')
@click.option('-e', '--encoding', default='utf-8', show_default=True,
              help='The character encoding to use')
@click.option('-w', '--words', is_flag=True,
              help='Match only on word boundaries: '
              'appends "\\b" to the beginning and '
              'end of every pattern in PATTERN_FILE.')
@click.option('--by-line/--across-lines', default=False,
              help='Process the input line by '
              'line or character by character; the default is --across-lines.')
@click.option('--slow', is_flag=True,
              help='Try very hard to '
              'find the longest matches on the input; this is very slow, '
              'and forces --by-line.')
@click.option('-v', '--verbose', default=0, count=True,
              help='Turns on debugging output.')
@click.option('-q', '--quiet', is_flag=True,
              help='Quiet operation, do not emit warnings.')
def main(pattern_filename, input_filenames, pattern_format,
         output_filename,
         encoding, words, by_line, slow, verbose, quiet):
    '''
    Search and replace on INPUT_FILE(s) (or standard input), with
    matching on fixed strings.
    '''
    set_log_level(verbose, quiet)
    # the chart-parsing matcher only works line by line
    if slow:
        by_line = True
    by_line = True  # TODO: implement non-line-based rewriting
    # load the patterns
    LOGGER.info('fsed {} input {} output {}'.format(pattern_filename,
                                                    input_filenames,
                                                    output_filename))
    # '-' stands for standard input / standard output
    if not input_filenames:
        input_filenames = ('-',)
    if not output_filename:
        output_filename = '-'
    # build trie machine for matching
    trie, boundaries = build_trie(pattern_filename, pattern_format, encoding, words)
    if not slow:
        # greedy matching cannot reach superstring patterns; tell the user
        warn_prefix_values(trie)
    LOGGER.info('writing to {}'.format(output_filename))
    with open_file(output_filename, 'wb') as output_file:
        for input_filename in input_filenames:
            # search and replace
            with open_file(input_filename) as input_file:
                LOGGER.info('reading {}'.format(input_filename))
                if by_line:
                    num_lines = 0
                    for line in input_file:
                        line = line.decode(encoding).rstrip('\n')
                        line = rewrite_str_with_trie(line, trie, boundaries, slow)
                        output_file.write((line + '\n').encode(encoding))
                        num_lines += 1
                    LOGGER.info('{} lines written'.format(num_lines))
                else:
                    raise NotImplementedError
# command line entry point: delegate to the click command
if __name__ == '__main__':
    main()
|
wroberts/fsed
|
fsed/fsed.py
|
detect_pattern_format
|
python
|
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries):
    '''
    Automatically detects the pattern file format, and determines
    whether the Aho-Corasick string matching should pay attention to
    word boundaries or not.

    Arguments:
    - `pattern_filename`: path of the pattern file to inspect
    - `encoding`: character encoding used to decode the file's lines
    - `on_word_boundaries`: if True, boundary handling is forced on

    Returns a tuple of two booleans ``(tsv, boundaries)``.
    '''
    tsv = True
    boundaries = on_word_boundaries
    with open_file(pattern_filename) as input_file:
        for line in input_file:
            line = line.decode(encoding)
            # a tsv pattern file has exactly one tab on every line
            if line.count('\t') != 1:
                tsv = False
            # a literal "\b" anywhere switches on word-boundary mode
            if '\\b' in line:
                boundaries = True
            # both questions settled: stop reading early
            if boundaries and not tsv:
                break
    return tsv, boundaries
|
Automatically detects the pattern file format, and determines
whether the Aho-Corasick string matching should pay attention to
word boundaries or not.
Arguments:
- `pattern_filename`:
- `encoding`:
- `on_word_boundaries`:
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L43-L65
|
[
"def open_file(filename, mode='rb'):\n \"\"\"\n Opens a file for access with the given mode. This function\n transparently wraps gzip and xz files as well as normal files.\n You can also open zip files using syntax like:\n\n f = utils.open_file('../semcor-parsed.zip:semcor000.txt')\n \"\"\"\n if (('r' not in mode or hasattr(filename, 'read')) and\n (('a' not in mode and 'w' not in mode) or hasattr(filename, 'write')) and\n hasattr(filename, '__iter__')):\n return filename\n elif isinstance(filename, string_type):\n if filename == '-' and 'r' in mode:\n if PY3:\n return sys.stdin.buffer\n return sys.stdin\n elif filename == '-' and ('w' in mode or 'a' in mode):\n if PY3:\n return sys.stdout.buffer\n return sys.stdout\n if filename.lower().count('.zip:'):\n assert 'r' in mode\n assert filename.count(':') == 1\n import zipfile\n zipped_file = zipfile.ZipFile(filename.split(':')[0])\n unzipped_file = zipped_file.open(filename.split(':')[1], 'r')\n zipped_file.close()\n return unzipped_file\n elif filename.lower().endswith('.gz'):\n import gzip\n return gzip.open(filename, mode)\n elif filename.lower().endswith('.xz'):\n import lzma\n tmp = lzma.LZMAFile(filename, mode)\n dir(tmp)\n return tmp\n else:\n return open(filename, mode)\n else:\n raise Exception('Unknown type for argument filename')\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
fsed.py
(c) Will Roberts  12 December, 2015

Main module for the ``fsed`` command line utility.
'''

from __future__ import absolute_import, print_function, unicode_literals
from fsed.utils import open_file
import click
import fsed.ahocorasick
import logging
import re
import sys

# all diagnostics go to stderr so that stdout stays clean for program output
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    stream=sys.stderr, level=logging.INFO)
LOGGER = logging.getLogger(__name__)

def set_log_level(verbose, quiet):
    '''
    Sets the logging level of the script based on command line options.

    Arguments:
    - `verbose`: integer count of -v flags given on the command line
    - `quiet`: if True, suppress everything below CRITICAL
    '''
    # --quiet overrides any -v flags
    if quiet:
        verbose = -1
    # translate the verbosity count into a logging level
    if verbose < 0:
        verbose = logging.CRITICAL
    elif verbose == 0:
        verbose = logging.WARNING
    elif verbose == 1:
        verbose = logging.INFO
    elif 1 < verbose:
        verbose = logging.DEBUG
    LOGGER.setLevel(verbose)

def sub_escapes(sval):
    '''
    Process escaped characters in ``sval``.

    Arguments:
    - `sval`: the string to unescape

    Returns the string with recognised backslash escapes replaced.
    '''
    sval = sval.replace('\\a', '\a')
    # \b is used as the word-boundary marker; it is encoded as NUL
    sval = sval.replace('\\b', '\x00')
    sval = sval.replace('\\f', '\f')
    sval = sval.replace('\\n', '\n')
    sval = sval.replace('\\r', '\r')
    sval = sval.replace('\\t', '\t')
    sval = sval.replace('\\v', '\v')
    # NOTE(review): '\\\\' is processed last, so a literal backslash
    # followed by one of the letters above is rewritten by the earlier
    # replacements first -- confirm this ordering is intended
    sval = sval.replace('\\\\', '\\')
    return sval

def build_trie(pattern_filename, pattern_format, encoding, on_word_boundaries):
    '''
    Constructs a finite state machine for performing string rewriting.

    Arguments:
    - `pattern_filename`: path of the pattern file to load
    - `pattern_format`: 'auto', 'tsv' or 'sed'
    - `encoding`: character encoding used to decode the pattern file
    - `on_word_boundaries`: if True, force matching on word boundaries

    Returns a tuple ``(trie, boundaries)``.
    '''
    boundaries = on_word_boundaries
    if pattern_format == 'auto' or not on_word_boundaries:
        tsv, boundaries = detect_pattern_format(pattern_filename, encoding,
                                                on_word_boundaries)
    if pattern_format == 'auto':
        if tsv:
            pattern_format = 'tsv'
        else:
            pattern_format = 'sed'
    trie = fsed.ahocorasick.AhoCorasickTrie()
    num_candidates = 0
    with open_file(pattern_filename) as pattern_file:
        for lineno, line in enumerate(pattern_file):
            line = line.decode(encoding).rstrip('\n')
            # skip blank lines
            if not line.strip():
                continue
            # decode the line
            if pattern_format == 'tsv':
                fields = line.split('\t')
                if len(fields) != 2:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in tab-separated format): {}').format(lineno, line))
                    continue
                before, after = fields
            elif pattern_format == 'sed':
                before = after = None
                line = line.lstrip()
                # only "s<delim>before<delim>after<delim>" commands are supported
                if line[0] == 's':
                    delim = line[1]
                    # delim might be a regex special character;
                    # escape it if necessary
                    if delim in '.^$*+?[](){}|\\':
                        delim = '\\' + delim
                    # split on unescaped delimiters only
                    fields = re.split(r'(?<!\\){}'.format(delim), line)
                    if len(fields) == 4:
                        before, after = fields[1], fields[2]
                        before = re.sub(r'(?<!\\)\\{}'.format(delim), delim, before)
                        after = re.sub(r'(?<!\\)\\{}'.format(delim), delim, after)
                if before is None or after is None:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in sed format): {}').format(lineno, line))
                    continue
            num_candidates += 1
            if on_word_boundaries and before != before.strip():
                LOGGER.warning(('before pattern on line {} padded whitespace; '
                                'this may interact strangely with the --words '
                                'option: {}').format(lineno, line))
            before = sub_escapes(before)
            after = sub_escapes(after)
            if boundaries:
                before = fsed.ahocorasick.boundary_transform(before, on_word_boundaries)
            trie[before] = after
    LOGGER.info('{} patterns loaded from {}'.format(num_candidates,
                                                    pattern_filename))
    return trie, boundaries

def warn_prefix_values(trie):
    '''
    Prints warning messages for every node that has both a value and a
    longest_prefix.
    '''
    for current, _parent in trie.dfs():
        if current.has_value and current.longest_prefix is not None:
            LOGGER.warn(('pattern {} (value {}) is a superstring of pattern '
                         '{} (value {}) and will never be matched').format(
                             current.prefix, current.value,
                             current.longest_prefix.prefix, current.longest_prefix.value))

def rewrite_str_with_trie(sval, trie, boundaries = False, slow = False):
    '''
    Rewrites a string using the given trie object.

    Arguments:
    - `sval`: the string to rewrite
    - `trie`: the `AhoCorasickTrie` holding the rewrite patterns
    - `boundaries`: if True, apply the word-boundary transform around matching
    - `slow`: if True, use the exhaustive `replace` instead of greedy matching

    Returns the rewritten string.
    '''
    if boundaries:
        sval = fsed.ahocorasick.boundary_transform(sval)
    if slow:
        sval = trie.replace(sval)
    else:
        sval = trie.greedy_replace(sval)
    if boundaries:
        sval = ''.join(fsed.ahocorasick.boundary_untransform(sval))
    return sval

@click.command()
@click.argument('pattern_filename', type=click.Path(exists=True),
                metavar='PATTERN_FILE')
@click.argument('input_filenames', nargs=-1,
                type=click.Path(exists=True), metavar='[INPUT_FILES]')
@click.option('--pattern-format', type=click.Choice(['auto', 'tsv', 'sed']),
              default='auto', show_default=True,
              help='Specify the format of PATTERN_FILE')
@click.option('-o', '--output', 'output_filename', type=click.Path(),
              help='Program output is written '
              'to this file. Default is to write '
              'to standard output.')
@click.option('-e', '--encoding', default='utf-8', show_default=True,
              help='The character encoding to use')
@click.option('-w', '--words', is_flag=True,
              help='Match only on word boundaries: '
              'appends "\\b" to the beginning and '
              'end of every pattern in PATTERN_FILE.')
@click.option('--by-line/--across-lines', default=False,
              help='Process the input line by '
              'line or character by character; the default is --across-lines.')
@click.option('--slow', is_flag=True,
              help='Try very hard to '
              'find the longest matches on the input; this is very slow, '
              'and forces --by-line.')
@click.option('-v', '--verbose', default=0, count=True,
              help='Turns on debugging output.')
@click.option('-q', '--quiet', is_flag=True,
              help='Quiet operation, do not emit warnings.')
def main(pattern_filename, input_filenames, pattern_format,
         output_filename,
         encoding, words, by_line, slow, verbose, quiet):
    '''
    Search and replace on INPUT_FILE(s) (or standard input), with
    matching on fixed strings.
    '''
    set_log_level(verbose, quiet)
    # the chart-parsing matcher only works line by line
    if slow:
        by_line = True
    by_line = True  # TODO: implement non-line-based rewriting
    # load the patterns
    LOGGER.info('fsed {} input {} output {}'.format(pattern_filename,
                                                    input_filenames,
                                                    output_filename))
    # '-' stands for standard input / standard output
    if not input_filenames:
        input_filenames = ('-',)
    if not output_filename:
        output_filename = '-'
    # build trie machine for matching
    trie, boundaries = build_trie(pattern_filename, pattern_format, encoding, words)
    if not slow:
        warn_prefix_values(trie)
    LOGGER.info('writing to {}'.format(output_filename))
    with open_file(output_filename, 'wb') as output_file:
        for input_filename in input_filenames:
            # search and replace
            with open_file(input_filename) as input_file:
                LOGGER.info('reading {}'.format(input_filename))
                if by_line:
                    num_lines = 0
                    for line in input_file:
                        line = line.decode(encoding).rstrip('\n')
                        line = rewrite_str_with_trie(line, trie, boundaries, slow)
                        output_file.write((line + '\n').encode(encoding))
                        num_lines += 1
                    LOGGER.info('{} lines written'.format(num_lines))
                else:
                    raise NotImplementedError

# command line entry point: delegate to the click command
if __name__ == '__main__':
    main()
|
wroberts/fsed
|
fsed/fsed.py
|
sub_escapes
|
python
|
def sub_escapes(sval):
    '''
    Process escaped characters in ``sval``.

    Arguments:
    - `sval`: the string to unescape

    Returns the string with recognised backslash escapes replaced.
    '''
    sval = sval.replace('\\a', '\a')
    # \b is used as the word-boundary marker; it is encoded as NUL
    sval = sval.replace('\\b', '\x00')
    sval = sval.replace('\\f', '\f')
    sval = sval.replace('\\n', '\n')
    sval = sval.replace('\\r', '\r')
    sval = sval.replace('\\t', '\t')
    sval = sval.replace('\\v', '\v')
    # NOTE(review): '\\\\' is processed last, so a literal backslash
    # followed by one of the letters above is rewritten by the earlier
    # replacements first -- confirm this ordering is intended
    sval = sval.replace('\\\\', '\\')
    return sval
|
Process escaped characters in ``sval``.
Arguments:
- `sval`:
|
train
|
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L67-L82
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
fsed.py
(c) Will Roberts 12 December, 2015
Main module for the ``fsed`` command line utility.
'''
from __future__ import absolute_import, print_function, unicode_literals
from fsed.utils import open_file
import click
import fsed.ahocorasick
import logging
import re
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
stream=sys.stderr, level=logging.INFO)
LOGGER = logging.getLogger(__name__)
def set_log_level(verbose, quiet):
    '''
    Sets the logging level of the script based on command line options.

    Arguments:
    - `verbose`: integer count of -v flags given on the command line
    - `quiet`: if True, suppress everything below CRITICAL
    '''
    # --quiet overrides any -v flags
    if quiet:
        verbose = -1
    # translate the verbosity count into a logging level
    if verbose < 0:
        verbose = logging.CRITICAL
    elif verbose == 0:
        verbose = logging.WARNING
    elif verbose == 1:
        verbose = logging.INFO
    elif 1 < verbose:
        verbose = logging.DEBUG
    LOGGER.setLevel(verbose)
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries):
    '''
    Automatically detects the pattern file format, and determines
    whether the Aho-Corasick string matching should pay attention to
    word boundaries or not.

    Arguments:
    - `pattern_filename`: path of the pattern file to inspect
    - `encoding`: character encoding used to decode the file's lines
    - `on_word_boundaries`: if True, boundary handling is forced on

    Returns a tuple of two booleans ``(tsv, boundaries)``.
    '''
    tsv = True
    boundaries = on_word_boundaries
    with open_file(pattern_filename) as input_file:
        for line in input_file:
            line = line.decode(encoding)
            # a tsv pattern file has exactly one tab on every line
            if line.count('\t') != 1:
                tsv = False
            # a literal "\b" anywhere switches on word-boundary mode
            if '\\b' in line:
                boundaries = True
            # both questions settled: stop reading early
            if boundaries and not tsv:
                break
    return tsv, boundaries
def build_trie(pattern_filename, pattern_format, encoding, on_word_boundaries):
    '''
    Constructs a finite state machine for performing string rewriting.

    Arguments:
    - `pattern_filename`: path of the file containing the rewrite patterns
    - `pattern_format`: one of 'auto', 'tsv' or 'sed'; 'auto' sniffs the
      file to decide between the other two
    - `encoding`: character encoding used to decode the pattern file
    - `on_word_boundaries`: if True, force matching on word boundaries

    Returns a ``(trie, boundaries)`` pair: the loaded
    ``AhoCorasickTrie`` mapping search strings to replacements, and a
    flag indicating whether word-boundary transformation is in effect.
    '''
    boundaries = on_word_boundaries
    # Sniff the file when the format is 'auto', or when we still need to
    # learn whether any pattern contains an explicit \b word boundary.
    if pattern_format == 'auto' or not on_word_boundaries:
        tsv, boundaries = detect_pattern_format(pattern_filename, encoding,
                                                on_word_boundaries)
        if pattern_format == 'auto':
            if tsv:
                pattern_format = 'tsv'
            else:
                pattern_format = 'sed'
    trie = fsed.ahocorasick.AhoCorasickTrie()
    num_candidates = 0
    with open_file(pattern_filename) as pattern_file:
        for lineno, line in enumerate(pattern_file):
            line = line.decode(encoding).rstrip('\n')
            # skip blank lines
            if not line.strip():
                continue
            # decode the line
            if pattern_format == 'tsv':
                # tsv: exactly one tab separating BEFORE and AFTER
                fields = line.split('\t')
                if len(fields) != 2:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in tab-separated format): {}').format(lineno, line))
                    continue
                before, after = fields
            elif pattern_format == 'sed':
                # sed: lines of the form s/BEFORE/AFTER/ with an
                # arbitrary delimiter character after the 's'
                before = after = None
                line = line.lstrip()
                if line[0] == 's':
                    delim = line[1]
                    # delim might be a regex special character;
                    # escape it if necessary
                    if delim in '.^$*+?[](){}|\\':
                        delim = '\\' + delim
                    # split on unescaped delimiters only (negative
                    # lookbehind for a backslash)
                    fields = re.split(r'(?<!\\){}'.format(delim), line)
                    if len(fields) == 4:
                        before, after = fields[1], fields[2]
                        # unescape any escaped delimiters inside the fields
                        before = re.sub(r'(?<!\\)\\{}'.format(delim), delim, before)
                        after = re.sub(r'(?<!\\)\\{}'.format(delim), delim, after)
                if before is None or after is None:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in sed format): {}').format(lineno, line))
                    continue
            num_candidates += 1
            # leading/trailing whitespace in BEFORE interacts oddly with
            # --words, since the boundary markers wrap the padded string
            if on_word_boundaries and before != before.strip():
                LOGGER.warning(('before pattern on line {} padded whitespace; '
                                'this may interact strangely with the --words '
                                'option: {}').format(lineno, line))
            # translate backslash escapes (\n, \t, ...) into literal chars
            before = sub_escapes(before)
            after = sub_escapes(after)
            if boundaries:
                before = fsed.ahocorasick.boundary_transform(before, on_word_boundaries)
            trie[before] = after
    LOGGER.info('{} patterns loaded from {}'.format(num_candidates,
                                                    pattern_filename))
    return trie, boundaries
def warn_prefix_values(trie):
    '''
    Prints a warning message for every trie node that both carries a
    value and has a ``longest_prefix``: under greedy matching, the
    shorter prefix pattern always fires first, so the longer pattern can
    never match.

    Arguments:
    - `trie`: the Aho-Corasick trie to check
    '''
    for current, _parent in trie.dfs():
        if current.has_value and current.longest_prefix is not None:
            # ``Logger.warn`` is a deprecated alias; use ``warning``.
            LOGGER.warning(('pattern {} (value {}) is a superstring of pattern '
                            '{} (value {}) and will never be matched').format(
                                current.prefix, current.value,
                                current.longest_prefix.prefix,
                                current.longest_prefix.value))
def rewrite_str_with_trie(sval, trie, boundaries = False, slow = False):
    '''
    Rewrites a string using the patterns stored in the given trie.

    Arguments:
    - `sval`: the string to rewrite
    - `trie`: the Aho-Corasick trie mapping patterns to replacements
    - `boundaries`: if True, apply the word-boundary transform before
      matching and strip it afterwards
    - `slow`: if True, use the exhaustive ``replace`` strategy instead
      of the default ``greedy_replace``

    Returns the rewritten string.
    '''
    transformed = sval
    if boundaries:
        transformed = fsed.ahocorasick.boundary_transform(transformed)
    # Pick the replacement strategy up front, then apply it.
    replace_fn = trie.replace if slow else trie.greedy_replace
    transformed = replace_fn(transformed)
    if boundaries:
        transformed = ''.join(fsed.ahocorasick.boundary_untransform(transformed))
    return transformed
@click.command()
@click.argument('pattern_filename', type=click.Path(exists=True),
                metavar='PATTERN_FILE')
@click.argument('input_filenames', nargs=-1,
                type=click.Path(exists=True), metavar='[INPUT_FILES]')
@click.option('--pattern-format', type=click.Choice(['auto', 'tsv', 'sed']),
              default='auto', show_default=True,
              help='Specify the format of PATTERN_FILE')
@click.option('-o', '--output', 'output_filename', type=click.Path(),
              help='Program output is written '
              'to this file. Default is to write '
              'to standard output.')
@click.option('-e', '--encoding', default='utf-8', show_default=True,
              help='The character encoding to use')
@click.option('-w', '--words', is_flag=True,
              help='Match only on word boundaries: '
              'appends "\\b" to the beginning and '
              'end of every pattern in PATTERN_FILE.')
@click.option('--by-line/--across-lines', default=False,
              help='Process the input line by '
              'line or character by character; the default is --across-lines.')
@click.option('--slow', is_flag=True,
              help='Try very hard to '
              'find the longest matches on the input; this is very slow, '
              'and forces --by-line.')
@click.option('-v', '--verbose', default=0, count=True,
              help='Turns on debugging output.')
@click.option('-q', '--quiet', is_flag=True,
              help='Quiet operation, do not emit warnings.')
def main(pattern_filename, input_filenames, pattern_format,
         output_filename,
         encoding, words, by_line, slow, verbose, quiet):
    '''
    Search and replace on INPUT_FILE(s) (or standard input), with
    matching on fixed strings.
    '''
    set_log_level(verbose, quiet)
    # --slow implies line-by-line processing
    if slow:
        by_line = True
    by_line = True  # TODO: implement non-line-based rewriting
    # load the patterns
    LOGGER.info('fsed {} input {} output {}'.format(pattern_filename,
                                                    input_filenames,
                                                    output_filename))
    # '-' denotes standard input / standard output for open_file()
    if not input_filenames:
        input_filenames = ('-',)
    if not output_filename:
        output_filename = '-'
    # build trie machine for matching
    trie, boundaries = build_trie(pattern_filename, pattern_format, encoding, words)
    if not slow:
        # greedy matching never reaches a pattern shadowed by a shorter
        # prefix pattern; warn the user about any such patterns
        warn_prefix_values(trie)
    LOGGER.info('writing to {}'.format(output_filename))
    with open_file(output_filename, 'wb') as output_file:
        for input_filename in input_filenames:
            # search and replace
            with open_file(input_filename) as input_file:
                LOGGER.info('reading {}'.format(input_filename))
                if by_line:
                    num_lines = 0
                    for line in input_file:
                        line = line.decode(encoding).rstrip('\n')
                        line = rewrite_str_with_trie(line, trie, boundaries, slow)
                        output_file.write((line + '\n').encode(encoding))
                        num_lines += 1
                    LOGGER.info('{} lines written'.format(num_lines))
                else:
                    # character-by-character (across-lines) rewriting is
                    # not implemented yet; unreachable while by_line is
                    # forced True above
                    raise NotImplementedError
if __name__ == '__main__':
    # Invoked as a script: click parses sys.argv and dispatches to main().
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.