code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
from __future__ import print_function
import logging
import requests
from tenacity import retry, wait_fixed, TryAgain
from django.conf import settings
from .utils import format_msisdn, read_cvs_file
# Request headers for the BulkSMS HTTP API.
# Bug fix: the original value 'application/x-www-form-urlencode' is missing
# the trailing 'd' and is not a registered MIME type; strict servers reject
# or misparse the request body.
headers = {'Content-Type': 'application/x-www-form-urlencoded'}

logger = logging.getLogger('bulksms')

# API authentication credentials, read from Django settings.
username = getattr(settings, 'BULKSMS_AUTH_USERNAME', '')
password = getattr(settings, 'BULKSMS_AUTH_PASSWORD', '')
# Mapping of endpoint names ('single', 'batch') to URLs.
api_url = getattr(settings, 'BULKSMS_API_URL', '')

# Whether to normalise MSISDNs (insert country codes) before sending.
clean_msisdn_number = getattr(settings, 'CLEAN_MSISDN_NUMBER', False)
@retry(wait=wait_fixed(2))
def send_single(msisdn=None, message=None):
    """
    Send an SMS to a single number in several countries.

    :param str msisdn: the destination number, in international format.
    :param str message: the message to be sent to ``msisdn``.
    :return: the API response split on ``'|'`` (``[statusCode, statusString]``),
        or an error string when the request could not be completed.
    """
    if clean_msisdn_number:
        msisdn = format_msisdn(msisdn)
    payload = {
        'username': username,
        'password': password,
        'msisdn': msisdn,
        'message': message,
    }
    results = ''
    try:
        response = requests.post(api_url.get('single', None),
                                 params=payload, headers=headers)
        if not 200 <= response.status_code < 300:
            return 'Bad request. {}'.format(response.status_code)
        # Bug fix: use .text (unicode) rather than .content (bytes) so the
        # '|' split works on both Python 2 and Python 3.
        results = response.text.split('|')
    except (requests.exceptions.Timeout,
            requests.exceptions.HTTPError,
            requests.exceptions.ConnectionError) as e:
        # Transient network failure: let tenacity retry after 2 seconds.
        logger.info(e)
        raise TryAgain
    except requests.exceptions.TooManyRedirects as e:
        return 'Invalid url. {}'.format(e)
    except requests.exceptions.RequestException as e:
        # Bug fix: the original called the root logger and passed ``e`` as a
        # stray positional argument that was silently dropped; use the module
        # logger with lazy %-formatting so the exception detail is recorded.
        logger.error('Catastrophic error occurred. %s', e)
    return results
@retry(wait=wait_fixed(2))
def send_bulk(filename=None):
    """
    Send bulk SMS. The API expects the given CSV file to be in the format
    shown below. Batch data is passed as query parameters using an HTTP GET
    request.

    |-----------------------------------------|
    msisdn,message
    "27831234567","Message 1"
    "27831234566","Message 2"
    |-----------------------------------------|

    :param str filename: path to a CSV file of msisdn/message pairs.
    :return: the API response split on ``'|'``, or an error string.
    """
    api_endpoint = api_url.get('batch', None)
    results = ''
    try:
        # Bug fix: let requests build and URL-encode the query string rather
        # than interpolating raw credentials and CSV data into the URL by
        # hand, which broke on any value containing reserved characters
        # (&, =, #, quotes, newlines from the CSV payload).
        params = {
            'username': username,
            'password': password,
            'batch_data': read_cvs_file(filename),
        }
        response = requests.get(api_endpoint, params=params, headers=headers)
        if not 200 <= response.status_code < 300:
            return 'Bad request: {}'.format(response.status_code)
        # .text (unicode) rather than .content (bytes) for Python 3 compat.
        results = response.text.split('|')
    except (requests.exceptions.Timeout,
            requests.exceptions.HTTPError,
            requests.exceptions.ConnectionError) as e:
        # Bug fix: the original raised TryAgain without a @retry decorator,
        # so the exception escaped to the caller; the decorator added above
        # makes this retry (matching send_single's behaviour).
        logger.info(e)
        raise TryAgain
    except requests.exceptions.TooManyRedirects as e:
        return 'Invalid url: {}'.format(e)
    except requests.exceptions.RequestException as e:
        # Lazy %-formatting on the module logger; the original dropped ``e``.
        logger.error('Catastrophic error occurred: %s', e)
    return results
{
"name": "axios"
} | json | github | https://github.com/axios/axios | test/manual/fixture.json |
# orm/events.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""ORM event interfaces.
"""
from .. import event, exc, util
orm = util.importlater("sqlalchemy", "orm")
import inspect
import weakref
class InstrumentationEvents(event.Events):
    """Events related to class instrumentation events.

    The listeners here support being established against
    any new style class, that is any object that is a subclass
    of 'type'.  Events will then be fired off for events
    against that class.  If the "propagate=True" flag is passed
    to event.listen(), the event will fire off for subclasses
    of that class as well.

    The Python ``type`` builtin is also accepted as a target,
    which when used has the effect of events being emitted
    for all classes.

    Note the "propagate" flag here is defaulted to ``True``,
    unlike the other class level events where it defaults
    to ``False``.  This means that new subclasses will also
    be the subject of these events, when a listener
    is established on a superclass.

    .. versionchanged:: 0.8 - events here will emit based
       on comparing the incoming class to the type of class
       passed to :func:`.event.listen`.  Previously, the
       event would fire for any class unconditionally regardless
       of what class was sent for listening, despite
       documentation which stated the contrary.

    """

    @classmethod
    def _accept_with(cls, target):
        # TODO: there's no coverage for this
        if isinstance(target, type):
            # Wrap the class in a temporary hold object; _listen() below
            # unwraps it via target.class_.
            return _InstrumentationEventsHold(target)
        else:
            return None

    @classmethod
    def _listen(cls, target, identifier, fn, propagate=True):
        # All instrumentation events are emitted from the single global
        # _instrumentation_factory; the ``listen`` wrapper below filters
        # incoming classes against the class originally listened for.

        def listen(target_cls, *arg):
            # ``target`` is rebound below to a weakref.ref of the
            # listened-for class.
            listen_cls = target()
            if propagate and issubclass(target_cls, listen_cls):
                return fn(target_cls, *arg)
            elif not propagate and target_cls is listen_cls:
                return fn(target_cls, *arg)

        def remove(ref):
            # weakref callback: detach the wrapper once the listened-for
            # class is garbage collected, so the global factory doesn't
            # accumulate dead listeners.
            event.Events._remove(orm.instrumentation._instrumentation_factory,
                                 identifier, listen)

        target = weakref.ref(target.class_, remove)
        event.Events._listen(orm.instrumentation._instrumentation_factory,
                             identifier, listen)

    @classmethod
    def _remove(cls, identifier, target, fn):
        raise NotImplementedError("Removal of instrumentation events "
                                  "not yet implemented")

    @classmethod
    def _clear(cls):
        super(InstrumentationEvents, cls)._clear()
        orm.instrumentation._instrumentation_factory.dispatch._clear()

    def class_instrument(self, cls):
        """Called after the given class is instrumented.

        To get at the :class:`.ClassManager`, use
        :func:`.manager_of_class`.

        """

    def class_uninstrument(self, cls):
        """Called before the given class is uninstrumented.

        To get at the :class:`.ClassManager`, use
        :func:`.manager_of_class`.

        """

    def attribute_instrument(self, cls, key, inst):
        """Called when an attribute is instrumented."""
class _InstrumentationEventsHold(object):
    """temporary marker object used to transfer from _accept_with() to
    _listen() on the InstrumentationEvents class.

    """

    def __init__(self, class_):
        # The class the listener was established against;
        # InstrumentationEvents._listen() wraps it in a weakref.
        self.class_ = class_

    dispatch = event.dispatcher(InstrumentationEvents)
class InstanceEvents(event.Events):
    """Define events specific to object lifecycle.

    e.g.::

        from sqlalchemy import event

        def my_load_listener(target, context):
            print "on load!"

        event.listen(SomeMappedClass, 'load', my_load_listener)

    Available targets include mapped classes, instances of
    :class:`.Mapper` (i.e. returned by :func:`.mapper`,
    :func:`.class_mapper` and similar), as well as the
    :class:`.Mapper` class and :func:`.mapper` function itself
    for global event reception::

        from sqlalchemy.orm import mapper

        def some_listener(target, context):
            log.debug("Instance %s being loaded" % target)

        # attach to all mappers
        event.listen(mapper, 'load', some_listener)

    Instance events are closely related to mapper events, but
    are more specific to the instance and its instrumentation,
    rather than its system of persistence.

    When using :class:`.InstanceEvents`, several modifiers are
    available to the :func:`.event.listen` function.

    :param propagate=False: When True, the event listener should
       be applied to all inheriting classes as well as the
       class which is the target of this listener.
    :param raw=False: When True, the "target" argument passed
       to applicable event listener functions will be the
       instance's :class:`.InstanceState` management
       object, rather than the mapped instance itself.

    """

    @classmethod
    def _accept_with(cls, target):
        # Normalize the accepted listener targets (a ClassManager, a Mapper
        # instance, the mapper() callable, the Mapper class, or a plain
        # mapped class) down to a ClassManager, or to an
        # _InstanceEventsHold for classes that aren't instrumented yet.
        if isinstance(target, orm.instrumentation.ClassManager):
            return target
        elif isinstance(target, orm.Mapper):
            return target.class_manager
        elif target is orm.mapper:
            # the mapper() function itself means "all mappings globally".
            return orm.instrumentation.ClassManager
        elif isinstance(target, type):
            if issubclass(target, orm.Mapper):
                return orm.instrumentation.ClassManager
            else:
                manager = orm.instrumentation.manager_of_class(target)
                if manager:
                    return manager
                else:
                    # Class isn't mapped/instrumented yet; hold the
                    # listener until instrumentation occurs.
                    return _InstanceEventsHold(target)
        return None

    @classmethod
    def _listen(cls, target, identifier, fn, raw=False, propagate=False):
        if not raw:
            # By default unwrap the InstanceState and deliver the mapped
            # object itself to the listener function.
            orig_fn = fn

            def wrap(state, *arg, **kw):
                return orig_fn(state.obj(), *arg, **kw)
            fn = wrap

        event.Events._listen(target, identifier, fn, propagate=propagate)
        if propagate:
            # Also establish the listener on all currently-known subclass
            # managers.
            for mgr in target.subclass_managers(True):
                event.Events._listen(mgr, identifier, fn, True)

    @classmethod
    def _remove(cls, identifier, target, fn):
        msg = "Removal of instance events not yet implemented"
        raise NotImplementedError(msg)

    @classmethod
    def _clear(cls):
        super(InstanceEvents, cls)._clear()
        _InstanceEventsHold._clear()

    def first_init(self, manager, cls):
        """Called when the first instance of a particular mapping is called.

        """

    def init(self, target, args, kwargs):
        """Receive an instance when its constructor is called.

        This method is only called during a userland construction of
        an object.  It is not called when an object is loaded from the
        database.

        """

    def init_failure(self, target, args, kwargs):
        """Receive an instance when its constructor has been called,
        and raised an exception.

        This method is only called during a userland construction of
        an object.  It is not called when an object is loaded from the
        database.

        """

    def load(self, target, context):
        """Receive an object instance after it has been created via
        ``__new__``, and after initial attribute population has
        occurred.

        This typically occurs when the instance is created based on
        incoming result rows, and is only called once for that
        instance's lifetime.

        Note that during a result-row load, this method is called upon
        the first row received for this instance.  Note that some
        attributes and collections may or may not be loaded or even
        initialized, depending on what's present in the result rows.

        :param target: the mapped instance.  If
         the event is configured with ``raw=True``, this will
         instead be the :class:`.InstanceState` state-management
         object associated with the instance.
        :param context: the :class:`.QueryContext` corresponding to the
         current :class:`.Query` in progress.  This argument may be
         ``None`` if the load does not correspond to a :class:`.Query`,
         such as during :meth:`.Session.merge`.

        """

    def refresh(self, target, context, attrs):
        """Receive an object instance after one or more attributes have
        been refreshed from a query.

        :param target: the mapped instance.  If
         the event is configured with ``raw=True``, this will
         instead be the :class:`.InstanceState` state-management
         object associated with the instance.
        :param context: the :class:`.QueryContext` corresponding to the
         current :class:`.Query` in progress.
        :param attrs: iterable collection of attribute names which
         were populated, or None if all column-mapped, non-deferred
         attributes were populated.

        """

    def expire(self, target, attrs):
        """Receive an object instance after its attributes or some subset
        have been expired.

        'keys' is a list of attribute names.  If None, the entire
        state was expired.

        :param target: the mapped instance.  If
         the event is configured with ``raw=True``, this will
         instead be the :class:`.InstanceState` state-management
         object associated with the instance.
        :param attrs: iterable collection of attribute
         names which were expired, or None if all attributes were
         expired.

        """

    def resurrect(self, target):
        """Receive an object instance as it is 'resurrected' from
        garbage collection, which occurs when a "dirty" state falls
        out of scope.

        :param target: the mapped instance.  If
         the event is configured with ``raw=True``, this will
         instead be the :class:`.InstanceState` state-management
         object associated with the instance.

        """

    def pickle(self, target, state_dict):
        """Receive an object instance when its associated state is
        being pickled.

        :param target: the mapped instance.  If
         the event is configured with ``raw=True``, this will
         instead be the :class:`.InstanceState` state-management
         object associated with the instance.
        :param state_dict: the dictionary returned by
         :class:`.InstanceState.__getstate__`, containing the state
         to be pickled.

        """

    def unpickle(self, target, state_dict):
        """Receive an object instance after its associated state has
        been unpickled.

        :param target: the mapped instance.  If
         the event is configured with ``raw=True``, this will
         instead be the :class:`.InstanceState` state-management
         object associated with the instance.
        :param state_dict: the dictionary sent to
         :class:`.InstanceState.__setstate__`, containing the state
         dictionary which was pickled.

        """
class _EventsHold(object):
    """Hold onto listeners against unmapped, uninstrumented classes.

    Establish _listen() for that class' mapper/instrumentation when
    those objects are created for that class.

    """

    def __init__(self, class_):
        # The not-yet-instrumented class the listener targets.
        self.class_ = class_

    @classmethod
    def _clear(cls):
        cls.all_holds.clear()

    class HoldEvents(object):
        # Mixin combined with a concrete Events class (see
        # HoldInstanceEvents) to record listeners until the real event
        # target exists.

        @classmethod
        def _listen(cls, target, identifier, fn, raw=False, propagate=False):
            # Queue the listener spec under the held class; populate()
            # replays it once instrumentation/mapping occurs.
            if target.class_ in target.all_holds:
                collection = target.all_holds[target.class_]
            else:
                collection = target.all_holds[target.class_] = []

            collection.append((identifier, fn, raw, propagate))

            if propagate:
                # Breadth-first walk of existing subclasses; any already
                # resolvable (instrumented) subclass gets the listener now.
                stack = list(target.class_.__subclasses__())
                while stack:
                    subclass = stack.pop(0)
                    stack.extend(subclass.__subclasses__())
                    subject = target.resolve(subclass)
                    if subject is not None:
                        subject.dispatch._listen(subject, identifier, fn,
                                                 raw=raw, propagate=propagate)

    @classmethod
    def populate(cls, class_, subject):
        # Replay held listeners onto ``subject`` (the newly-created
        # manager/mapper for ``class_``), walking the MRO so propagated
        # listeners on superclasses are applied too.
        for subclass in class_.__mro__:
            if subclass in cls.all_holds:
                if subclass is class_:
                    # the class itself is now set up; the hold is consumed.
                    collection = cls.all_holds.pop(subclass)
                else:
                    collection = cls.all_holds[subclass]
                for ident, fn, raw, propagate in collection:
                    if propagate or subclass is class_:
                        subject.dispatch._listen(subject, ident,
                                                 fn, raw, propagate)
class _InstanceEventsHold(_EventsHold):
    # Weak map so held classes can still be garbage collected.
    all_holds = weakref.WeakKeyDictionary()

    def resolve(self, class_):
        # Returns the ClassManager if ``class_`` is already instrumented,
        # else None.
        return orm.instrumentation.manager_of_class(class_)

    class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents):
        pass

    dispatch = event.dispatcher(HoldInstanceEvents)
class MapperEvents(event.Events):
"""Define events specific to mappings.
e.g.::
from sqlalchemy import event
def my_before_insert_listener(mapper, connection, target):
# execute a stored procedure upon INSERT,
# apply the value to the row to be inserted
target.calculated_value = connection.scalar(
"select my_special_function(%d)"
% target.special_number)
# associate the listener function with SomeMappedClass,
# to execute during the "before_insert" hook
event.listen(
SomeMappedClass, 'before_insert', my_before_insert_listener)
Available targets include mapped classes, instances of
:class:`.Mapper` (i.e. returned by :func:`.mapper`,
:func:`.class_mapper` and similar), as well as the
:class:`.Mapper` class and :func:`.mapper` function itself
for global event reception::
from sqlalchemy.orm import mapper
def some_listener(mapper, connection, target):
log.debug("Instance %s being inserted" % target)
# attach to all mappers
event.listen(mapper, 'before_insert', some_listener)
Mapper events provide hooks into critical sections of the
mapper, including those related to object instrumentation,
object loading, and object persistence. In particular, the
persistence methods :meth:`~.MapperEvents.before_insert`,
and :meth:`~.MapperEvents.before_update` are popular
places to augment the state being persisted - however, these
methods operate with several significant restrictions. The
user is encouraged to evaluate the
:meth:`.SessionEvents.before_flush` and
:meth:`.SessionEvents.after_flush` methods as more
flexible and user-friendly hooks in which to apply
additional database state during a flush.
When using :class:`.MapperEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers and/or the mappers of
inheriting classes, as well as any
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event function
must have a return value, the purpose of which is either to
control subsequent event propagation, or to otherwise alter
the operation in progress by the mapper. Possible return
values are:
* ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event
processing normally.
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners,
such as :meth:`~.MapperEvents.translate_row` or
:meth:`~.MapperEvents.create_instance`.
"""
@classmethod
def _accept_with(cls, target):
if target is orm.mapper:
return orm.Mapper
elif isinstance(target, type):
if issubclass(target, orm.Mapper):
return target
else:
mapper = orm.util._mapper_or_none(target)
if mapper is not None:
return mapper
else:
return _MapperEventsHold(target)
else:
return target
@classmethod
def _listen(cls, target, identifier, fn,
raw=False, retval=False, propagate=False):
if not raw or not retval:
if not raw:
meth = getattr(cls, identifier)
try:
target_index = \
inspect.getargspec(meth)[0].index('target') - 1
except ValueError:
target_index = None
wrapped_fn = fn
def wrap(*arg, **kw):
if not raw and target_index is not None:
arg = list(arg)
arg[target_index] = arg[target_index].obj()
if not retval:
wrapped_fn(*arg, **kw)
return orm.interfaces.EXT_CONTINUE
else:
return wrapped_fn(*arg, **kw)
fn = wrap
if propagate:
for mapper in target.self_and_descendants:
event.Events._listen(mapper, identifier, fn, propagate=True)
else:
event.Events._listen(target, identifier, fn)
@classmethod
def _clear(cls):
super(MapperEvents, cls)._clear()
_MapperEventsHold._clear()
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can generally only be applied to the :class:`.Mapper`
class overall.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
def mapper_configured(self, mapper, class_):
"""Called when the mapper for the class is fully configured.
This event is the latest phase of mapper construction, and
is invoked when the mapped classes are first used, so that
relationships between mappers can be resolved. When the event is
called, the mapper should be in its final state.
While the configuration event normally occurs automatically,
it can be forced to occur ahead of time, in the case where the event
is needed before any actual mapper usage, by using the
:func:`.configure_mappers` function.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
# TODO: need coverage for this event
def after_configured(self):
"""Called after a series of mappers have been configured.
This corresponds to the :func:`.orm.configure_mappers` call, which
note is usually called automatically as mappings are first
used.
Theoretically this event is called once per
application, but is actually called any time new mappers
have been affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event can be called again.
"""
def translate_row(self, mapper, context, row):
"""Perform pre-processing on the given result row and return a
new row instance.
This listener is typically registered with ``retval=True``.
It is called when the mapper first receives a row, before
the object identity or the instance itself has been derived
from that row. The given row may or may not be a
:class:`.RowProxy` object - it will always be a dictionary-like
object which contains mapped columns as keys. The
returned object should also be a dictionary-like object
which recognizes mapped columns as keys.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param context: the :class:`.QueryContext`, which includes
a handle to the current :class:`.Query` in progress as well
as additional state information.
:param row: the result row being handled. This may be
an actual :class:`.RowProxy` or may be a dictionary containing
:class:`.Column` objects as keys.
:return: When configured with ``retval=True``, the function
should return a dictionary-like row object, or ``EXT_CONTINUE``,
indicating the original row should be used.
"""
def create_instance(self, mapper, context, row, class_):
"""Receive a row when a new object instance is about to be
created from that row.
The method can choose to create the instance itself, or it can return
EXT_CONTINUE to indicate normal object creation should take place.
This listener is typically registered with ``retval=True``.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param context: the :class:`.QueryContext`, which includes
a handle to the current :class:`.Query` in progress as well
as additional state information.
:param row: the result row being handled. This may be
an actual :class:`.RowProxy` or may be a dictionary containing
:class:`.Column` objects as keys.
:param class\_: the mapped class.
:return: When configured with ``retval=True``, the return value
should be a newly created instance of the mapped class,
or ``EXT_CONTINUE`` indicating that default object construction
should take place.
"""
def append_result(self, mapper, context, row, target,
result, **flags):
"""Receive an object instance before that instance is appended
to a result list.
This is a rarely used hook which can be used to alter
the construction of a result list returned by :class:`.Query`.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param context: the :class:`.QueryContext`, which includes
a handle to the current :class:`.Query` in progress as well
as additional state information.
:param row: the result row being handled. This may be
an actual :class:`.RowProxy` or may be a dictionary containing
:class:`.Column` objects as keys.
:param target: the mapped instance being populated. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param result: a list-like object where results are being
appended.
:param \**flags: Additional state information about the
current handling of the row.
:return: If this method is registered with ``retval=True``,
a return value of ``EXT_STOP`` will prevent the instance
from being appended to the given result list, whereas a
return value of ``EXT_CONTINUE`` will result in the default
behavior of appending the value to the result list.
"""
def populate_instance(self, mapper, context, row,
target, **flags):
"""Receive an instance before that instance has
its attributes populated.
This usually corresponds to a newly loaded instance but may
also correspond to an already-loaded instance which has
unloaded attributes to be populated. The method may be called
many times for a single instance, as multiple result rows are
used to populate eagerly loaded collections.
Most usages of this hook are obsolete. For a
generic "object has been newly created from a row" hook, use
:meth:`.InstanceEvents.load`.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param context: the :class:`.QueryContext`, which includes
a handle to the current :class:`.Query` in progress as well
as additional state information.
:param row: the result row being handled. This may be
an actual :class:`.RowProxy` or may be a dictionary containing
:class:`.Column` objects as keys.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: When configured with ``retval=True``, a return
value of ``EXT_STOP`` will bypass instance population by
the mapper. A value of ``EXT_CONTINUE`` indicates that
default instance population should take place.
"""
def before_insert(self, mapper, connection, target):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class before their INSERT statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
and via SQL operations with the given**
:class:`.Connection` **only.** Handlers here should **not** make
alterations to the state of the :class:`.Session` overall, and
in general should not affect any :func:`.relationship` -mapped
attributes, as session cascade rules will not function properly,
nor is it always known if the related class has already been
handled. Operations that **are not supported in mapper
events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events,
i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself, or
another method designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
* Within the :meth:`.SessionEvents.before_flush` event.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
"""
def after_insert(self, mapper, connection, target):
"""Receive an object instance after an INSERT statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class after their INSERT statements have been
emitted at once in a previous step. In the extremely
rare case that this is not desirable, the
:func:`.mapper` can be configured with ``batch=False``,
which will cause batches of instances to be broken up
into individual (and more poorly performing)
event->persist->event steps.
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
and via SQL operations with the given**
:class:`.Connection` **only.** Handlers here should **not** make
alterations to the state of the :class:`.Session` overall, and in
general should not affect any :func:`.relationship` -mapped
attributes, as session cascade rules will not function properly,
nor is it always known if the related class has already been
handled. Operations that **are not supported in mapper
events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events,
i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself,
or another method designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
* Within the :meth:`.SessionEvents.before_flush` event.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
"""
def before_update(self, mapper, connection, target):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.before_update` is
*not* a guarantee that an UPDATE statement will be
issued, although you can affect the outcome here by
modifying attributes so that a net change in value does
exist.
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class before their UPDATE statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection`
**only.** Handlers here should **not** make alterations to the
state of the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events,
i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself,
or another method designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
* Within the :meth:`.SessionEvents.before_flush` event.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
"""
def after_update(self, mapper, connection, target):
"""Receive an object instance after an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*, and for which
no UPDATE statement has proceeded. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.after_update` is
*not* a guarantee that an UPDATE statement has been
issued.
To detect if the column-based attributes on the object have net
changes, and therefore resulted in an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class after their UPDATE statements have been emitted at
once in a previous step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection`
**only.** Handlers here should **not** make alterations to the
state of the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events,
i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself,
or another method designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
* Within the :meth:`.SessionEvents.before_flush` event.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
"""
def before_delete(self, mapper, connection, target):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class before their DELETE statements are emitted at
once in a later step.
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection`
**only.** Handlers here should **not** make alterations to the
state of the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events,
i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself,
or another method designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
* Within the :meth:`.SessionEvents.before_flush` event.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
"""
def after_delete(self, mapper, connection, target):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class after their DELETE statements have been emitted at
once in a previous step.
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection`
**only.** Handlers here should **not** make alterations to the
state of the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events,
i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself,
or another method designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
* Within the :meth:`.SessionEvents.before_flush` event.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
"""
@classmethod
def _remove(cls, identifier, target, fn):
"Removal of mapper events not yet implemented"
raise NotImplementedError(msg)
class _MapperEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return orm.util._mapper_or_none(class_)
class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents):
pass
dispatch = event.dispatcher(HoldMapperEvents)
class SessionEvents(event.Events):
"""Define events specific to :class:`.Session` lifecycle.
e.g.::
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def my_before_commit(session):
print "before commit!"
Session = sessionmaker()
event.listen(Session, "before_commit", my_before_commit)
The :func:`~.event.listen` function will accept
:class:`.Session` objects as well as the return result
of :func:`.sessionmaker` and :func:`.scoped_session`.
Additionally, it accepts the :class:`.Session` class which
will apply listeners to all :class:`.Session` instances
globally.
"""
@classmethod
def _accept_with(cls, target):
if isinstance(target, orm.scoped_session):
target = target.session_factory
if not isinstance(target, orm.sessionmaker) and \
(
not isinstance(target, type) or
not issubclass(target, orm.Session)
):
raise exc.ArgumentError(
"Session event listen on a scoped_session "
"requires that its creation callable "
"is associated with the Session class.")
if isinstance(target, orm.sessionmaker):
return target.class_
elif isinstance(target, type):
if issubclass(target, orm.scoped_session):
return orm.Session
elif issubclass(target, orm.Session):
return target
elif isinstance(target, orm.Session):
return target
else:
return None
@classmethod
def _remove(cls, identifier, target, fn):
msg = "Removal of session events not yet implemented"
raise NotImplementedError(msg)
def after_transaction_create(self, session, transaction):
"""Execute when a new :class:`.SessionTransaction` is created.
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
.. versionadded:: 0.8
"""
def after_transaction_end(self, session, transaction):
"""Execute when the span of a :class:`.SessionTransaction` ends.
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
.. versionadded:: 0.8
"""
def before_commit(self, session):
"""Execute before commit is called.
Note that this may not be per-flush if a longer running
transaction is ongoing.
:param session: The target :class:`.Session`.
"""
def after_commit(self, session):
"""Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing.
:param session: The target :class:`.Session`.
"""
def after_rollback(self, session):
"""Execute after a real DBAPI rollback has occurred.
Note that this event only fires when the *actual* rollback against
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
DBAPI transaction has already been rolled back. In many
cases, the :class:`.Session` will not be in
an "active" state during this event, as the current
transaction is not valid. To acquire a :class:`.Session`
which is active after the outermost rollback has proceeded,
use the :meth:`.SessionEvents.after_soft_rollback` event, checking the
:attr:`.Session.is_active` flag.
:param session: The target :class:`.Session`.
"""
def after_soft_rollback(self, session, previous_transaction):
"""Execute after any rollback has occurred, including "soft"
rollbacks that don't actually emit at the DBAPI level.
This corresponds to both nested and outer rollbacks, i.e.
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
calls that only pop themselves from the transaction stack.
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
by first checking the :attr:`.Session.is_active` flag::
@event.listens_for(Session, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
session.execute("select * from some_table")
:param session: The target :class:`.Session`.
:param previous_transaction: The :class:`.SessionTransaction`
transactional marker object which was just closed. The current
:class:`.SessionTransaction` for the given :class:`.Session` is
available via the :attr:`.Session.transaction` attribute.
.. versionadded:: 0.7.3
"""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param instances: Usually ``None``, this is the collection of
objects which can be passed to the :meth:`.Session.flush` method
(note this usage is deprecated).
"""
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
"""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
"""
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
:param session: The target :class:`.Session`.
:param transaction: The :class:`.SessionTransaction`.
:param connection: The :class:`~.engine.Connection` object
which will be used for SQL statements.
"""
def before_attach(self, session, instance):
"""Execute before an instance is attached to a session.
This is called before an add, delete or merge causes
the object to be part of the session.
.. versionadded:: 0.8. Note that :meth:`.after_attach` now
fires off after the item is part of the session.
:meth:`.before_attach` is provided for those cases where
the item should not yet be part of the session state.
"""
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge.
.. note::
As of 0.8, this event fires off *after* the item
has been fully associated with the session, which is
different than previous releases. For event
handlers that require the object not yet
be part of session state (such as handlers which
may autoflush while the target object is not
yet complete) consider the
new :meth:`.before_attach` event.
"""
def after_bulk_update(self, session, query, query_context, result):
"""Execute after a bulk update operation to the session.
This is called as a result of the :meth:`.Query.update` method.
:param query: the :class:`.Query` object that this update operation was
called upon.
:param query_context: The :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
:param result: the :class:`.ResultProxy` returned as a result of the
bulk UPDATE operation.
"""
def after_bulk_delete(self, session, query, query_context, result):
"""Execute after a bulk delete operation to the session.
This is called as a result of the :meth:`.Query.delete` method.
:param query: the :class:`.Query` object that this update operation was
called upon.
:param query_context: The :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
:param result: the :class:`.ResultProxy` returned as a result of the
bulk DELETE operation.
"""
class AttributeEvents(event.Events):
"""Define events for object attributes.
These are typically defined on the class-bound descriptor for the
target class.
e.g.::
from sqlalchemy import event
def my_append_listener(target, value, initiator):
print "received append event for target: %s" % target
event.listen(MyClass.collection, 'append', my_append_listener)
Listeners have the option to return a possibly modified version
of the value, when the ``retval=True`` flag is passed
to :func:`~.event.listen`::
def validate_phone(target, value, oldvalue, initiator):
"Strip non-numeric characters from a phone number"
return re.sub(r'(?![0-9])', '', value)
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
listen(UserContact.phone, 'set', validate_phone, retval=True)
A validation function like the above can also raise an exception
such as :class:`.ValueError` to halt the operation.
Several modifiers are available to the :func:`~.event.listen` function.
:param active_history=False: When True, indicates that the
"set" event would like to receive the "old" value being
replaced unconditionally, even if this requires firing off
database loads. Note that ``active_history`` can also be
set directly via :func:`.column_property` and
:func:`.relationship`.
:param propagate=False: When True, the listener function will
be established not just for the class attribute given, but
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
instrumentation events.
:param raw=False: When True, the "target" argument to the
event will be the :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
function. This gives the listening function the opportunity
to change the value that is ultimately used for a "set"
or "append" event.
"""
@classmethod
def _accept_with(cls, target):
# TODO: coverage
if isinstance(target, orm.interfaces.MapperProperty):
return getattr(target.parent.class_, target.key)
else:
return target
@classmethod
def _listen(cls, target, identifier, fn, active_history=False,
raw=False, retval=False,
propagate=False):
if active_history:
target.dispatch._active_history = True
# TODO: for removal, need to package the identity
# of the wrapper with the original function.
if not raw or not retval:
orig_fn = fn
def wrap(target, value, *arg):
if not raw:
target = target.obj()
if not retval:
orig_fn(target, value, *arg)
return value
else:
return orig_fn(target, value, *arg)
fn = wrap
event.Events._listen(target, identifier, fn, propagate)
if propagate:
manager = orm.instrumentation.manager_of_class(target.class_)
for mgr in manager.subclass_managers(True):
event.Events._listen(mgr[target.key], identifier, fn, True)
@classmethod
def _remove(cls, identifier, target, fn):
msg = "Removal of attribute events not yet implemented"
raise NotImplementedError(msg)
def append(self, target, value, initiator):
"""Receive a collection append event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being appended. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param initiator: the attribute implementation object
which initiated this event.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
"""
def remove(self, target, value, initiator):
"""Receive a collection remove event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being removed.
:param initiator: the attribute implementation object
which initiated this event.
:return: No return value is defined for this event.
"""
def set(self, target, value, oldvalue, initiator):
"""Receive a scalar set event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being set. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param oldvalue: the previous value being replaced. This
may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
If the listener is registered with ``active_history=True``,
the previous value of the attribute will be loaded from
the database if the existing value is currently unloaded
or expired.
:param initiator: the attribute implementation object
which initiated this event.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
""" | unknown | codeparrot/codeparrot-clean | ||
package kotlinx.coroutines.flow
import kotlinx.coroutines.testing.*
import kotlinx.coroutines.*
import kotlin.test.*
class LintTest: TestBase() {
/**
* Tests that using [SharedFlow.toList] and similar functions by passing a mutable collection does add values
* to the provided collection.
*/
@Test
fun testSharedFlowToCollection() = runTest {
val sharedFlow = MutableSharedFlow<Int>()
val list = mutableListOf<Int>()
val set = mutableSetOf<Int>()
val jobs = listOf(suspend { sharedFlow.toList(list) }, { sharedFlow.toSet(set) }).map {
launch(Dispatchers.Unconfined) { it() }
}
repeat(10) {
sharedFlow.emit(it)
}
jobs.forEach { it.cancelAndJoin() }
assertEquals((0..9).toList(), list)
assertEquals((0..9).toSet(), set)
}
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | kotlinx-coroutines-core/common/test/flow/operators/LintTest.kt |
# Django settings for voodoo project.
# -*- coding:utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
at_project_root = lambda *args: os.path.join(PROJECT_ROOT, *args)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'django_db', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': '111', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Kiev'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# LANGUAGE_CODE = 'ru-RU'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# DATE_FORMAT = 'd-m-Y'
# DATETIME_FORMAT = 'd-m-Y H:i'
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
ADMIN_MEDIA_PREFIX = '/static/admin/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'xb0=l03e=xp^r9fl+!p-iyzl@8#+=)hf##5b!m&2m!ee7@*6*s'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
AUTH_PROFILE_MODULE = "mainsite.profile"
# Context processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'voodoo.admin_center.utils.context_processors.global_vars',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'voodoo.mainsite.middleware.BasketMiddlWare',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'voodoo.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'voodoo.wsgi.application'
import os.path
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__),'templates').replace('\\','/'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# прописываем dashboards для нашего экземпляра AdminSite
ADMIN_TOOLS_INDEX_DASHBOARD = {
'russian_admin.admin.site': 'voodoo.custom_admin.dashboard.CustomIndexDashboard',
}
ADMIN_TOOLS_APP_INDEX_DASHBOARD = {
'russian_admin.admin.site': 'voodoo.custom_admin.dashboard.CustomAppIndexDashboard',
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'voodoo.mainsite',
'voodoo.admin_center',
'voodoo.mainsite.basket',
'registration',
'bootstrap_toolkit',
'captcha',
'pymorphy',
'django.contrib.admin',
'voodoo.admin_center.dhtmlScheduler',
'xlrd',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
# LOGIN_REDIRECT_URL='/index/'
# LOGIN_URL='/admin_center/login/'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'disable_existing_loggers': False,
'version': 1,
'handlers': {
'console': {
# logging handler that outputs log messages to terminal
'class': 'logging.StreamHandler',
'level': 'INFO', # message level to be written to console
},
},
'loggers': {
'': {
# this sets root level logger to log debug and higher level
# logs to console. All other loggers inherit settings from
# root level logger.
'handlers': ['console'],
'level': 'INFO',
'propagate': False, # this tells logger to send logging message
# to its parent (will send if set to True)
},
'django.db': {
# django also has database level logging
},
},
}
PYMORPHY_DICTS = {'ru': { 'dir': at_project_root('files', 'dicts')}} | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.message.DeleteAclsResponseData;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourceType;
import org.junit.jupiter.api.Test;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class DeleteAclsResponseTest {
private static final short V1 = 1;
private static final DeleteAclsMatchingAcl LITERAL_ACL1 = new DeleteAclsMatchingAcl()
.setResourceType(ResourceType.TOPIC.code())
.setResourceName("foo")
.setPatternType(PatternType.LITERAL.code())
.setPrincipal("User:ANONYMOUS")
.setHost("127.0.0.1")
.setOperation(AclOperation.READ.code())
.setPermissionType(AclPermissionType.DENY.code());
private static final DeleteAclsMatchingAcl LITERAL_ACL2 = new DeleteAclsMatchingAcl()
.setResourceType(ResourceType.GROUP.code())
.setResourceName("group")
.setPatternType(PatternType.LITERAL.code())
.setPrincipal("User:*")
.setHost("127.0.0.1")
.setOperation(AclOperation.WRITE.code())
.setPermissionType(AclPermissionType.ALLOW.code());
private static final DeleteAclsMatchingAcl PREFIXED_ACL1 = new DeleteAclsMatchingAcl()
.setResourceType(ResourceType.GROUP.code())
.setResourceName("prefix")
.setPatternType(PatternType.PREFIXED.code())
.setPrincipal("User:*")
.setHost("127.0.0.1")
.setOperation(AclOperation.CREATE.code())
.setPermissionType(AclPermissionType.ALLOW.code());
private static final DeleteAclsMatchingAcl UNKNOWN_ACL = new DeleteAclsMatchingAcl()
.setResourceType(ResourceType.UNKNOWN.code())
.setResourceName("group")
.setPatternType(PatternType.LITERAL.code())
.setPrincipal("User:*")
.setHost("127.0.0.1")
.setOperation(AclOperation.WRITE.code())
.setPermissionType(AclPermissionType.ALLOW.code());
private static final DeleteAclsFilterResult LITERAL_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(asList(
LITERAL_ACL1, LITERAL_ACL2));
private static final DeleteAclsFilterResult PREFIXED_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(asList(
LITERAL_ACL1, PREFIXED_ACL1));
private static final DeleteAclsFilterResult UNKNOWN_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(singletonList(
UNKNOWN_ACL));
@Test
public void shouldThrowOnIfUnknown() {
assertThrows(IllegalArgumentException.class, () -> new DeleteAclsResponse(
new DeleteAclsResponseData()
.setThrottleTimeMs(10)
.setFilterResults(singletonList(UNKNOWN_RESPONSE)),
V1));
}
@Test
public void shouldRoundTripV1() {
final DeleteAclsResponse original = new DeleteAclsResponse(
new DeleteAclsResponseData()
.setThrottleTimeMs(10)
.setFilterResults(asList(LITERAL_RESPONSE, PREFIXED_RESPONSE)),
V1);
final Readable readable = original.serialize(V1);
final DeleteAclsResponse result = DeleteAclsResponse.parse(readable, V1);
assertEquals(original.filterResults(), result.filterResults());
}
} | java | github | https://github.com/apache/kafka | clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java |
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
//go:build !windows
package cli
import (
"context"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strconv"
"strings"
"github.com/cockroachdb/cockroach/pkg/cli/cliflags"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/sdnotify"
"github.com/cockroachdb/cockroach/pkg/util/sysutil"
"github.com/cockroachdb/errors"
"golang.org/x/sys/unix"
)
// DrainSignals is the list of signals that trigger the start of a shutdown
// sequence ("server drain").
//
// The first time they're received, both signals initiate a drain just the same.
// The behavior between the two differs if they're received a second time (or,
// more generally, after the drain had started):
//   - a second SIGTERM is ignored.
//   - a second SIGINT terminates the process abruptly.
var DrainSignals = []os.Signal{unix.SIGINT, unix.SIGTERM}

// termSignal is the signal that causes an idempotent graceful
// shutdown (i.e. second occurrence does not incur hard shutdown).
var termSignal os.Signal = unix.SIGTERM

// quitSignal is the signal to recognize to dump Go stacks.
var quitSignal os.Signal = unix.SIGQUIT

// debugSignal is the signal to open a pprof debugging server.
var debugSignal os.Signal = unix.SIGUSR2

// exitAbruptlySignal is the signal to make the process exit immediately. It is
// preferable to SIGKILL when running with coverage instrumentation because the
// coverage profile gets dumped on exit.
var exitAbruptlySignal os.Signal = unix.SIGUSR1
// handleSignalDuringShutdown re-delivers sig to this process with default
// disposition so the exit status reflects the signal, then blocks forever
// waiting for the kernel to terminate us.
func handleSignalDuringShutdown(sig os.Signal) {
	// On Unix, a signal that was not handled gracefully by the application
	// should be reraised so it is visible in the exit code.

	// Reset signal to its original disposition.
	signal.Reset(sig)

	// Reraise the signal. os.Signal is always sysutil.Signal.
	if err := unix.Kill(unix.Getpid(), sig.(sysutil.Signal)); err != nil {
		// Sending a valid signal to ourselves should never fail.
		//
		// Unfortunately it appears (#34354) that some users
		// run CockroachDB in containers that only support
		// a subset of all syscalls. If this ever happens, we
		// still need to quit immediately.
		log.Dev.Fatalf(context.Background(), "unable to forward signal %v: %v", sig, err)
	}

	// Block while we wait for the signal to be delivered.
	select {}
}
// backgroundFlagDefined indicates the --background flag exists on this
// platform (it does on all Unix builds; the Windows build sets false).
const backgroundFlagDefined = true

// findGoodNotifyDir determines a good target directory
// to create the unix socket used to signal successful
// background startup (via sdnotify).
// A directory is "good" if it seems writable and its
// name is short enough.
func findGoodNotifyDir() (string, error) {
	goodEnough := func(s string) bool {
		if len(s) >= 104-1-len("sdnotify/notify.sock")-10 {
			// On BSD, binding to a socket is limited to a path length of 104 characters
			// (including the NUL terminator). In glibc, this limit is 108 characters.
			// macOS also has a tendency to produce very long temporary directory names.
			return false
		}
		st, err := os.Stat(s)
		if err != nil {
			return false
		}
		if !st.IsDir() || st.Mode()&0222 == 0 /* any write bits? */ {
			// Note: we're confident the directory is unsuitable if none of the
			// write bits are set, however there could be a false positive
			// if some write bits are set.
			//
			// For example, if the process runs as a UID that does not match
			// the directory owner UID or GID, and the write mode is 0220 or
			// less, the sd socket creation will still fail.
			// As such, the mode check here is merely a heuristic. We're
			// OK with that: the actual write failure will produce a clear
			// error message.
			return false
		}
		return true
	}
	// Candidates are tried in order of preference.
	// Was --socket-dir configured? Try to use that.
	if serverSocketDir != "" && goodEnough(serverSocketDir) {
		return serverSocketDir, nil
	}
	// Do we have a temp directory? Try to use that.
	if tmpDir := os.TempDir(); goodEnough(tmpDir) {
		return tmpDir, nil
	}
	// Can we perhaps use the current directory?
	if cwd, err := os.Getwd(); err == nil && goodEnough(cwd) {
		return cwd, nil
	}
	// Note: we do not attempt to use the configured on-disk store
	// directory(ies), because they may point to a filesystem that does
	// not support unix sockets.
	return "", errors.WithHintf(
		errors.Newf("no suitable directory found for the --background notify socket"),
		"Avoid using --%s altogether (preferred), or use a shorter directory name "+
			"for --socket-dir, TMPDIR or the current directory.", cliflags.Background.Name)
}
// maybeRerunBackground re-executes the current process as a detached child
// when --background was requested. It returns (true, err) when the caller
// should exit (the child has taken over or setup failed), and (false, nil)
// when execution should continue in the current process.
func maybeRerunBackground() (bool, error) {
	if startBackground {
		notifyDir, err := findGoodNotifyDir()
		if err != nil {
			return true, err
		}

		// Rebuild the argument list without any --background flag so the
		// child does not recurse; add an explicit --background=false as a
		// safety net if none was found.
		args := make([]string, 0, len(os.Args))
		foundBackground := false
		for _, arg := range os.Args {
			if arg == "--background" || strings.HasPrefix(arg, "--background=") {
				foundBackground = true
				continue
			}
			args = append(args, arg)
		}
		if !foundBackground {
			args = append(args, "--background=false")
		}
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = stderr

		// Notify to ourselves that we're restarting.
		_ = os.Setenv(backgroundEnvVar, "1")

		// sdnotify.Exec blocks until the child signals readiness over the
		// notify socket.
		return true, sdnotify.Exec(cmd, notifyDir)
	}
	return false, nil
}
// disableOtherPermissionBits tightens the process umask so that files
// created by this process are never accessible to "other" users.
func disableOtherPermissionBits() {
	// Umask returns the previous mask; restore it with the three
	// "other" permission bits forced on (i.e. masked out of new files).
	previous := unix.Umask(0)
	_ = unix.Umask(previous | 0o007)
}
// closeAllSockets is used in the event of a disk stall, in which case we want
// to terminate the process but may not be able to. A process stalled in disk
// I/O is in uninterruptible sleep within the kernel and cannot be terminated.
// If we can't terminate the process, the next best thing is to quarantine it by
// closing all sockets so that it appears dead to other nodes.
//
// See log.SetMakeProcessUnavailableFunc.
func closeAllSockets() {
	// Close all sockets twice. A LISTEN socket may open a new socket after we
	// list all FDs. If that's the case, the socket will be closed by the second
	// call.
	//
	// TODO(jackson,#96342): This doesn't prevent the retry mechanisms from
	// opening new outgoing connections. Consider marking the rpc.Context as
	// poisoned to prevent new outgoing connections.
	_ = closeAllSocketsOnce()
	_ = closeAllSocketsOnce()

	// It's unclear what to do with errors. We try to close all sockets in an
	// emergency where we can't exit the process but want to quarantine it by
	// removing all communication with the outside world. If we fail to close
	// all sockets, panicking is unlikely to be able to terminate the process.
	// We do nothing so that if the log sink is NOT stalled, we'll write the
	// disk stall log entry.
}

// closeAllSocketsOnce shuts down every open socket file descriptor it can
// find, returning the accumulated errors (best-effort: a failure on one FD
// does not stop the others from being closed).
func closeAllSocketsOnce() error {
	fds, err := findOpenSocketFDs()
	// NB: Intentionally ignore `err`. findOpenSocketFDs may return a non-empty
	// slice of FDs with a non-nil error. We want to close the descriptors we
	// were able to identify regardless of any error.
	for _, fd := range fds {
		// Ignore errors so that if we can't close all sockets, we close as many
		// as we can. When finished, return a combined error.
		fdErr := unix.Shutdown(fd, unix.SHUT_RDWR)
		err = errors.CombineErrors(err, fdErr)
	}
	return err
}
// findOpenSocketFDs enumerates /dev/fd and returns the file descriptors that
// are sockets. It may return a partial slice together with a non-nil combined
// error: callers should use whatever FDs were found even when err != nil.
func findOpenSocketFDs() ([]int, error) {
	f, err := os.Open("/dev/fd")
	if err != nil {
		return nil, err
	}
	defer f.Close()
	dirnames, err := f.Readdirnames(-1)
	if err != nil {
		return nil, err
	}

	var fds []int
	for _, name := range dirnames {
		// From the Linux /proc/[pid]/fd man page:
		//
		//     For file descriptors for pipes and sockets, the entries
		//     will be symbolic links whose content is the file type with
		//     the inode. A readlink(2) call on this file returns a
		//     string in the format:
		//
		//         type:[inode]
		//
		//     For example, socket:[2248868] will be a socket and its
		//     inode is 2248868. For sockets, that inode can be used to
		//     find more information in one of the files under
		//     /proc/net/.
		//
		// We `readlink` each directory entry, and check that the destination
		// has the `socket:` prefix.
		dst, readLinkErr := os.Readlink(filepath.Join("/dev/fd", name))
		if readLinkErr != nil {
			// Stumble forward.
			err = errors.CombineErrors(err, readLinkErr)
			continue
		}
		if !strings.HasPrefix(dst, "socket:") {
			continue
		}
		fd, atoiErr := strconv.Atoi(name)
		if atoiErr != nil {
			// Stumble forward.
			err = errors.CombineErrors(err, atoiErr)
			continue
		}
		fds = append(fds, fd)
	}
	return fds, err
}
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <immintrin.h>
/*
 * Subtract the activation zero point from a vector of packed 16-bit lanes.
 * In run-time quantization builds the zero point is only known at run time,
 * so the subtraction happens here; in design-time builds it was already
 * folded into the kernel's constants, so this reduces to a no-op.
 */
PYTORCH_QNNP_INLINE __m128i
sub_zero_point(const __m128i va, const __m128i vzp) {
#if PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
  /* Run-time quantization */
  return _mm_sub_epi16(va, vzp);
#else
  /* Design-time quantization (no-op) */
  return va;
#endif
}
from coalib.bearlib.abstractions.Linter import linter
from coalib.settings.Setting import typed_list
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
from dependency_management.requirements.AnyOneOfRequirements import (
AnyOneOfRequirements)
from dependency_management.requirements.ComposerRequirement import (
ComposerRequirement)
@linter(executable='phpmd',
        output_format='regex',
        output_regex=r':(?P<line>\d+)\s*(?P<message>.*)')
class PHPMessDetectorBear:
    """
    The bear takes a given PHP source code base and looks for several
    potential problems within that source. These problems can be things like:

    - Possible bugs
    - Suboptimal code
    - Overcomplicated expressions
    - Unused parameters, methods, properties
    """

    # Languages this bear supports.
    LANGUAGES = {'PHP'}
    # phpmd can come from a distribution package or from Composer.
    REQUIREMENTS = {
        AnyOneOfRequirements(
            [DistributionRequirement(apt_get='phpmd',
                                     dnf='php-phpmd-PHP-PMD',
                                     ),
             ComposerRequirement('phpmd/phpmd'),
             ],
        ),
    }
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    # Issue categories this bear can report.
    CAN_DETECT = {'Formatting', 'Complexity', 'Unused Code', 'Redundancy',
                  'Variable Misuse'}
    SEE_MORE = 'https://phpmd.org/about.html'

    @staticmethod
    def create_arguments(filename, file, config_file,
                         phpmd_rulesets: typed_list(str)):
        """
        Build the phpmd command-line arguments for one file.

        :param phpmd_rulesets:
            A list of rulesets to use for analysis.
            Available rulesets: cleancode, codesize, controversial, design,
            naming, unusedcode.
        :return:
            The argument tuple: target filename, output format ('text'),
            and the comma-joined ruleset list.
        """
        return filename, 'text', ','.join(phpmd_rulesets)
// This is a comment.
'use strict';
/**
* This is a comment.
*/ import 'client-only';
// NOTE(review): this file looks like an expected-output test fixture for a
// compiler transform; the stub component intentionally renders nothing.
export default function() {
  return null;
}
import requests
import time
from prettytable import PrettyTable
import sys, getopt
from cmdopt import *
from network import *
from collections import namedtuple
def PrintStat(array):
    """Render per-host timing statistics as an ASCII table on stdout."""
    print("\n\n")
    table = PrettyTable(['Method', 'URL', '# Request', 'MIN', 'MAX', 'AVG', 'RSD'])
    for host in array:
        # statistics() computes min/max/avg/rsd; value() yields the row.
        host.statistics()
        table.add_row(host.value())
    print(table)
def escapeIndex(str, escape):
    """Locate the first pair of *escape* delimiters in *str*.

    Returns ``[first_index + 1, second_index]``, i.e. the slice boundaries
    of the delimited placeholder (consumed by Fuzz to splice in dictionary
    entries), or ``None`` when fewer than two delimiters are present.

    Note: the parameter name ``str`` shadows the builtin; it is kept for
    backward compatibility with keyword callers.
    """
    # Bug fix: the previous loop started searching at index 1, so a
    # delimiter at position 0 was never found; it also contained a dead
    # `st == -1` comparison. Two plain find() calls cover both delimiters.
    first = str.find(escape)
    if first == -1:
        return None
    second = str.find(escape, first + 1)
    if second == -1:
        return None
    return [first + 1, second]
def Fuzz(host, fz, escape, ret):
    # Recursively expand every escape-delimited placeholder in the host's
    # URL template and body with each dictionary entry in fz, appending the
    # fully-expanded Host objects to ret.
    pt = escapeIndex(host.urlt, escape)
    if pt is not None:
        for item in fz:
            # Replace the "$...$" span (delimiters included) with item.
            newone = host.urlt[:pt[0]-1]+item+host.urlt[pt[1]+1:]
            nv = Host(host.method, newone, host.header, host.body, host.cnt, host.prec)
            # Recursion: the template may contain further placeholders.
            Fuzz(nv, fz, escape, ret)
            # TODO: fuzz headers as well.
    else:
        pt = escapeIndex(host.body, escape)
        if pt is not None:
            for item in fz:
                # The placeholder is in the request body; rewrite it there.
                newone = host.body[:pt[0]-1]+item+host.body[pt[1]+1:]
                nv = Host(host.method, host.urlt, host.header, newone, host.cnt, host.prec)
                # Recursion
                Fuzz(nv, fz, escape, ret)
        else:
            # No placeholders left: this host is fully expanded.
            ret.append(host)
def main(argv):
    # Entry point: parse CLI options, build the target host list (from
    # --url and/or an input file), optionally expand fuzz placeholders,
    # send the requests and print per-host timing statistics.
    par = ParseOpt(argv)
    print("Timing analysis v1.0.0 - A tool for timing side-channel analysis.\n\n")
    hs = []
    fz = []
    kll = None
    if par['fuzz'] == True:
        if par['dict'] is None:
            print("Error. Define a dictionary file using '--dict option'.")
            sys.exit()
        # NOTE(review): the dictionary file is never closed; a with-block
        # would be safer.
        f = open(par['dict'])
        for line in f:
            fz.append(line.strip())
    if par['url'] != None:
        hd = None
        if par['hd'] != None:
            # Parse "Name: value" header strings into a dict.
            hd = {}
            for i in par['hd']:
                d = i.split(':')
                hd[d[0].strip()] = d[1].strip()
        k = Host(par['md'], par['url'], hd, par['dt'], par['cnt'], par['pre'])
        hs.append(k)
    if par['fi'] != None:
        # Load additional hosts from the input file, if any.
        kll = ReadFile(par['fi'], par['cnt'], par['cert'], par['dl'], par['pre'])
        if kll != None:
            hs = hs + kll
    if par['fuzz'] == True:
        # Expand every placeholder combination into concrete hosts.
        tot = []
        for host in hs:
            nn = []
            Fuzz(host, fz, par['escape'], nn)
            tot += nn
    else:
        tot = hs
    for i in tot:
        # Sending Data
        t = SendData(i, par['dl'], par['cnt'], par['cert'])
        i.time = t
    PrintStat(tot)


if __name__ == "__main__":
    main(sys.argv)
"""
Current-flow betweenness centrality measures.
"""
# Copyright (C) 2010-2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import random
import networkx as nx
from networkx.algorithms.centrality.flow_matrix import *
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['current_flow_betweenness_centrality',
'approximate_current_flow_betweenness_centrality',
'edge_current_flow_betweenness_centrality']
def approximate_current_flow_betweenness_centrality(G, normalized=True,
                                                    weight='weight',
                                                    dtype=float, solver='full',
                                                    epsilon=0.5, kmax=10000):
    r"""Compute the approximate current-flow betweenness centrality for nodes.

    Approximates the current-flow betweenness centrality within absolute
    error of epsilon with high probability [1]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
      n is the number of nodes in G.

    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    epsilon: float
        Absolute error tolerance.

    kmax: int
       Maximum number of sample node pairs to use for approximation.

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    current_flow_betweenness_centrality

    Notes
    -----
    The running time is `O((1/\epsilon^2)m{\sqrt k} \log n)`
    and the space required is `O(m)` for n nodes and m edges.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm.  Unspecified weights are set to 1.

    References
    ----------
    .. [1] Ulrik Brandes and Daniel Fleischer:
       Centrality Measures Based on Current Flow.
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        # Fix: previously two arguments were passed to ImportError, which
        # made str(e) render an args tuple instead of a readable message.
        raise ImportError('current_flow_betweenness_centrality requires '
                          'NumPy http://scipy.org/')
    try:
        from scipy import sparse
        from scipy.sparse import linalg
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires '
                          'SciPy http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('current_flow_betweenness_centrality() '
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    solvername = {"full": FullInverseLaplacian,
                  "lu": SuperLUInverseLaplacian,
                  "cg": CGInverseLaplacian}
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
    L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
                                dtype=dtype, format='csc')
    C = solvername[solver](L, dtype=dtype)  # initialize solver
    betweenness = dict.fromkeys(H, 0.0)
    nb = (n - 1.0) * (n - 2.0)  # normalization factor
    cstar = n * (n - 1) / nb
    l = 1  # parameter in approximation, adjustable
    k = l * int(np.ceil((cstar / epsilon)**2 * np.log(n)))
    if k > kmax:
        raise nx.NetworkXError('Number random pairs k>kmax (%d>%d) '
                               'Increase kmax or epsilon' % (k, kmax))
    cstar2k = cstar / (2 * k)
    for i in range(k):
        # Sample a random source/target pair and solve for potentials.
        s, t = random.sample(range(n), 2)
        b = np.zeros(n, dtype=dtype)
        b[s] = 1
        b[t] = -1
        p = C.solve(b)
        for v in H:
            if v == s or v == t:
                continue
            for nbr in H[v]:
                w = H[v][nbr].get(weight, 1.0)
                betweenness[v] += w * np.abs(p[v] - p[nbr]) * cstar2k
    if normalized:
        factor = 1.0
    else:
        factor = nb / 2.0
    # remap to original node names and "unnormalize" if required
    return dict((ordering[k], float(v * factor)) for k, v in betweenness.items())
def current_flow_betweenness_centrality(G, normalized=True, weight='weight',
                                        dtype=float, solver='full'):
    r"""Compute current-flow betweenness centrality for nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
      n is the number of nodes in G.

    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    approximate_current_flow_betweenness_centrality
    betweenness_centrality
    edge_betweenness_centrality
    edge_current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
    time [1]_, where `I(n-1)` is the time needed to compute the
    inverse Laplacian.  For a full matrix this is `O(n^3)` but using
    sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
    Laplacian matrix condition number.

    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix.  Worse case is `w=n` for `O(n^2)`.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm.  Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        # Fix: a single message string instead of two ImportError args,
        # which previously rendered as a tuple in str(e).
        raise ImportError('current_flow_betweenness_centrality requires '
                          'NumPy http://scipy.org/')
    try:
        import scipy
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires '
                          'SciPy http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('current_flow_betweenness_centrality() '
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
    betweenness = dict.fromkeys(H, 0.0)  # b[v]=0 for v in H
    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype,
                                       solver=solver):
        # Rank positions of the row entries, descending.
        pos = dict(zip(row.argsort()[::-1], range(n)))
        for i in range(n):
            betweenness[s] += (i - pos[i]) * row[i]
            betweenness[t] += (n - i - 1 - pos[i]) * row[i]
    if normalized:
        nb = (n - 1.0) * (n - 2.0)  # normalization factor
    else:
        nb = 2.0
    for i, v in enumerate(H):  # map integers to nodes
        betweenness[v] = float((betweenness[v] - i) * 2.0 / nb)
    return dict((ordering[k], v) for k, v in betweenness.items())
def edge_current_flow_betweenness_centrality(G, normalized=True,
                                             weight='weight',
                                             dtype=float, solver='full'):
    r"""Compute current-flow betweenness centrality for edges.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
      n is the number of nodes in G.

    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of edge tuples with betweenness centrality as the value.

    See Also
    --------
    betweenness_centrality
    edge_betweenness_centrality
    current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
    time [1]_, where `I(n-1)` is the time needed to compute the
    inverse Laplacian.  For a full matrix this is `O(n^3)` but using
    sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
    Laplacian matrix condition number.

    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix.  Worse case is `w=n` for `O(n^2)`.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm.  Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        # Fix: a single message string instead of two ImportError args,
        # which previously rendered as a tuple in str(e).
        raise ImportError('current_flow_betweenness_centrality requires '
                          'NumPy http://scipy.org/')
    try:
        import scipy
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires '
                          'SciPy http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('edge_current_flow_betweenness_centrality '
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
    betweenness = dict.fromkeys(H.edges(), 0.0)
    if normalized:
        nb = (n - 1.0) * (n - 2.0)  # normalization factor
    else:
        nb = 2.0
    for row, e in flow_matrix_row(H, weight=weight, dtype=dtype,
                                  solver=solver):
        # Rank positions of the row entries, descending (1-based here).
        pos = dict(zip(row.argsort()[::-1], range(1, n + 1)))
        for i in range(n):
            betweenness[e] += (i + 1 - pos[i]) * row[i]
            betweenness[e] += (n - i - pos[i]) * row[i]
        betweenness[e] /= nb
    return dict(((ordering[s], ordering[t]), float(v))
                for (s, t), v in betweenness.items())
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests if NumPy/SciPy are missing."""
    from nose import SkipTest
    try:
        import numpy
        import scipy
    except ImportError:
        # Fix: the previous bare `except:` swallowed every exception
        # (including KeyboardInterrupt); only a failed import should skip.
        raise SkipTest("NumPy or SciPy not available")
import React from "react";
import {
Field,
RichText as JssRichText,
} from "@sitecore-jss/sitecore-jss-nextjs";
// Shape of the Sitecore datasource fields consumed by this component.
interface Fields {
  Text: Field<string>;
}

// Props injected by the JSS rendering pipeline: rendering parameters plus
// the datasource fields (absent when no datasource is assigned).
export type RichTextProps = {
  params: { [key: string]: string };
  fields: Fields;
};
// Default rendering variant: renders the datasource rich text, or an
// editor hint when no datasource is assigned.
export const Default = (props: RichTextProps): JSX.Element => {
  const body = props.fields ? (
    <JssRichText field={props.fields.Text} />
  ) : (
    <span className="is-empty-hint">Rich text</span>
  );
  const renderingId = props.params.RenderingIdentifier;
  return (
    <div
      className={`component rich-text ${props.params.styles.trimEnd()}`}
      id={renderingId ? renderingId : undefined}
    >
      <div className="component-content">{body}</div>
    </div>
  );
};
// Copyright 2025 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"bytes"
"context"
"errors"
"fmt"
"sync"
"time"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
clientv3 "go.etcd.io/etcd/client/v3"
)
var (
	// ErrUnsupportedRequest is returned when an option combination isn't yet
	// handled by the cache (e.g. WithPrevKV, WithProgressNotify for Watch(),
	// WithCountOnly for Get()).
	ErrUnsupportedRequest = errors.New("cache: unsupported request parameters")
	// ErrKeyRangeInvalid is returned when the requested key or key-range is
	// invalid (empty or reversed) or lies outside c.prefix.
	ErrKeyRangeInvalid = errors.New("cache: invalid or out‑of‑range key range")
)
// Cache buffers a single etcd Watch for a given key‐prefix and fan‑outs local watchers.
type Cache struct {
	prefix string // prefix is the key-prefix this shard is responsible for ("" = root).
	cfg    Config // immutable runtime configuration

	watcher clientv3.Watcher // upstream etcd watch client
	kv      clientv3.KV      // upstream etcd KV client (used for the initial snapshot)

	demux *demux // demux fans incoming events out to active watchers and manages resync.
	store *store // last‑observed snapshot
	ready *ready // gate: set once the snapshot is loaded and the watch confirmed

	// Lifecycle: stop cancels internalCtx; waitGroup tracks all goroutines
	// spawned by the cache so Close can join them.
	stop        context.CancelFunc
	waitGroup   sync.WaitGroup
	internalCtx context.Context
}
// New builds a cache shard that watches only the requested prefix.
// For the root cache pass "".
func New(client *clientv3.Client, prefix string, opts ...Option) (*Cache, error) {
	cfg := defaultConfig()
	for _, opt := range opts {
		opt(&cfg)
	}
	// Validate configuration before spawning any goroutines.
	if cfg.HistoryWindowSize <= 0 {
		return nil, fmt.Errorf("invalid HistoryWindowSize %d (must be > 0)", cfg.HistoryWindowSize)
	}
	if cfg.BTreeDegree < 2 {
		return nil, fmt.Errorf("invalid BTreeDegree %d (must be >= 2)", cfg.BTreeDegree)
	}
	internalCtx, cancel := context.WithCancel(context.Background())
	cache := &Cache{
		prefix:      prefix,
		cfg:         cfg,
		watcher:     client.Watcher,
		kv:          client.KV,
		store:       newStore(cfg.BTreeDegree, cfg.HistoryWindowSize),
		ready:       newReady(),
		stop:        cancel,
		internalCtx: internalCtx,
	}
	cache.demux = NewDemux(internalCtx, &cache.waitGroup, cfg.HistoryWindowSize, cfg.ResyncInterval)
	// Background loop: snapshot + watch with retry until Close.
	cache.waitGroup.Add(1)
	go func() {
		defer cache.waitGroup.Done()
		cache.getWatchLoop()
	}()
	return cache, nil
}
// Watch registers a cache-backed watcher for a given key or prefix.
// It returns a WatchChan that streams WatchResponses containing events.
func (c *Cache) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
	// Block until the initial snapshot + upstream watch are established; on
	// failure return an already-closed channel.
	if err := c.WaitReady(ctx); err != nil {
		emptyWatchChan := make(chan clientv3.WatchResponse)
		close(emptyWatchChan)
		return emptyWatchChan
	}
	op := clientv3.OpWatch(key, opts...)
	startRev := op.Rev()
	// Reject unsupported option combinations up front with a canceled response.
	pred, err := c.validateWatch(key, op)
	if err != nil {
		ch := make(chan clientv3.WatchResponse, 1)
		ch <- clientv3.WatchResponse{Canceled: true, CancelReason: err.Error()}
		close(ch)
		return ch
	}
	w := newWatcher(c.cfg.PerWatcherBufferSize, pred)
	c.demux.Register(w, startRev)

	// Pump goroutine: forwards demux events to the caller's channel until
	// either context is done or the demux closes the watcher.
	responseChan := make(chan clientv3.WatchResponse)
	c.waitGroup.Add(1)
	go func() {
		defer c.waitGroup.Done()
		defer close(responseChan)
		defer c.demux.Unregister(w)
		for {
			select {
			case <-ctx.Done():
				return
			case <-c.internalCtx.Done():
				return
			case resp, ok := <-w.respCh:
				if !ok {
					// Demux closed the watcher; deliver the final cancel
					// response (if any) before shutting down.
					if w.cancelResp != nil {
						select {
						case <-ctx.Done():
						case <-c.internalCtx.Done():
						case responseChan <- *w.cancelResp:
						}
					}
					return
				}
				select {
				case <-ctx.Done():
					return
				case <-c.internalCtx.Done():
					return
				case responseChan <- resp:
				}
			}
		}
	}()
	return responseChan
}
// Get serves a read from the local snapshot, at the requested revision or,
// when rev == 0, at the latest revision the cache has observed. It blocks
// for readiness only when no snapshot has been loaded yet.
func (c *Cache) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
	if c.store.LatestRev() == 0 {
		if err := c.WaitReady(ctx); err != nil {
			return nil, err
		}
	}
	op := clientv3.OpGet(key, opts...)
	// Reject option combinations the cache cannot serve.
	if _, err := c.validateGet(key, op); err != nil {
		return nil, err
	}
	startKey := []byte(key)
	endKey := op.RangeBytes()
	requestedRev := op.Rev()

	kvs, latestRev, err := c.store.Get(startKey, endKey, requestedRev)
	if err != nil {
		return nil, err
	}
	return &clientv3.GetResponse{
		Header: &pb.ResponseHeader{Revision: latestRev},
		Kvs:    kvs,
		Count:  int64(len(kvs)),
	}, nil
}
// Ready returns true if the snapshot has been loaded and the first watch has been confirmed.
func (c *Cache) Ready() bool {
	return c.ready.Ready()
}

// WaitReady blocks until the cache is ready or the ctx is cancelled.
func (c *Cache) WaitReady(ctx context.Context) error {
	return c.ready.WaitReady(ctx)
}

// WaitForRevision polls (every 10ms) until the cache has observed at least
// rev, or the context is cancelled.
func (c *Cache) WaitForRevision(ctx context.Context, rev int64) error {
	for {
		if c.store.LatestRev() >= rev {
			return nil
		}
		select {
		case <-time.After(10 * time.Millisecond):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

// Close cancels the private context and blocks until all goroutines return.
func (c *Cache) Close() {
	c.stop()
	c.waitGroup.Wait()
}
// getWatchLoop repeatedly performs a snapshot Get followed by a Watch,
// retrying after a fixed backoff whenever the sequence fails. It exits once
// the cache's internal context is cancelled.
func (c *Cache) getWatchLoop() {
	ctx := c.internalCtx
	// Fix: use the configured backoff instead of re-deriving
	// defaultConfig(), which silently discarded options applied in New.
	backoff := c.cfg.InitialBackoff
	for {
		if err := ctx.Err(); err != nil {
			return
		}
		if err := c.getWatch(); err != nil {
			fmt.Printf("getWatch failed, will retry after %v: %v\n", backoff, err)
		}
		select {
		case <-ctx.Done():
			return
		case <-time.After(backoff):
		}
	}
}
// getWatch loads a fresh snapshot of the prefix, then watches from the
// revision immediately after it.
func (c *Cache) getWatch() error {
	getResp, err := c.get(c.internalCtx)
	if err != nil {
		return err
	}
	return c.watch(getResp.Header.Revision + 1)
}

// get fetches the full prefix range from etcd and restores the local store
// from it, replacing any previous contents.
func (c *Cache) get(ctx context.Context) (*clientv3.GetResponse, error) {
	resp, err := c.kv.Get(ctx, c.prefix, clientv3.WithPrefix())
	if err != nil {
		return nil, err
	}
	c.store.Restore(resp.Kvs, resp.Header.Revision)
	return resp, nil
}
// watch runs the upstream watch starting at rev, pairing each attempt with a
// store-applier watcher so demux output is persisted into the local store.
// It only returns with an error; a closed upstream channel restarts the pair.
func (c *Cache) watch(rev int64) error {
	readyOnce := sync.Once{}
	for {
		// Internal watcher (no predicate = all events) that feeds the store.
		storeW := newWatcher(c.cfg.PerWatcherBufferSize, nil)
		c.demux.Register(storeW, rev)

		applyErr := make(chan error, 1)
		c.waitGroup.Add(1)
		go func() {
			defer c.waitGroup.Done()
			if err := c.applyStorage(storeW); err != nil {
				applyErr <- err
			}
			close(applyErr)
		}()

		err := c.watchEvents(rev, applyErr, &readyOnce)
		c.demux.Unregister(storeW)
		if err != nil {
			return err
		}
	}
}

// applyStorage drains the store watcher's channel and applies each response
// to the local store, until the channel closes or the cache shuts down.
func (c *Cache) applyStorage(storeW *watcher) error {
	for {
		select {
		case <-c.internalCtx.Done():
			return nil
		case resp, ok := <-storeW.respCh:
			if !ok {
				return nil
			}
			if err := c.store.Apply(resp); err != nil {
				return err
			}
		}
	}
}

// watchEvents consumes the upstream etcd watch stream starting at rev and
// broadcasts each response through the demux. The ready gate is set on the
// first response and reset on any failure so callers block until recovery.
func (c *Cache) watchEvents(rev int64, applyErr <-chan error, readyOnce *sync.Once) error {
	watchCh := c.watcher.Watch(
		c.internalCtx,
		c.prefix,
		clientv3.WithPrefix(),
		clientv3.WithRev(rev),
		clientv3.WithProgressNotify(),
		clientv3.WithCreatedNotify(),
	)

	for {
		select {
		case <-c.internalCtx.Done():
			return c.internalCtx.Err()
		case resp, ok := <-watchCh:
			if !ok {
				// Upstream closed without error: caller restarts the watch.
				return nil
			}
			readyOnce.Do(func() {
				c.demux.Init(rev)
				c.ready.Set()
			})
			if err := resp.Err(); err != nil {
				c.ready.Reset()
				return err
			}
			err := c.demux.Broadcast(resp)
			if err != nil {
				c.ready.Reset()
				return err
			}
		case err := <-applyErr:
			c.ready.Reset()
			return err
		}
	}
}
// validateWatch rejects watch options the cache cannot serve and returns the
// key predicate describing the requested range.
func (c *Cache) validateWatch(key string, op clientv3.Op) (pred KeyPredicate, err error) {
	if op.IsPrevKV() {
		return nil, fmt.Errorf("%w: PrevKV not supported", ErrUnsupportedRequest)
	}
	if op.IsFragment() {
		return nil, fmt.Errorf("%w: Fragment not supported", ErrUnsupportedRequest)
	}
	if op.IsProgressNotify() {
		return nil, fmt.Errorf("%w: ProgressNotify not supported", ErrUnsupportedRequest)
	}
	if op.IsCreatedNotify() {
		return nil, fmt.Errorf("%w: CreatedNotify not supported", ErrUnsupportedRequest)
	}
	if op.IsFilterPut() {
		return nil, fmt.Errorf("%w: FilterPut not supported", ErrUnsupportedRequest)
	}
	if op.IsFilterDelete() {
		return nil, fmt.Errorf("%w: FilterDelete not supported", ErrUnsupportedRequest)
	}
	startKey := []byte(key)
	endKey := op.RangeBytes() // nil = single key, {0}=FromKey, else explicit range
	if rangeErr := c.validateRange(startKey, endKey); rangeErr != nil {
		return nil, rangeErr
	}
	return KeyPredForRange(startKey, endKey), nil
}
// validateGet rejects read options the cache cannot serve and returns the
// key predicate describing the requested range.
func (c *Cache) validateGet(key string, op clientv3.Op) (KeyPredicate, error) {
	if op.IsCountOnly() {
		return nil, fmt.Errorf("%w: CountOnly not supported", ErrUnsupportedRequest)
	}
	if op.IsPrevKV() {
		return nil, fmt.Errorf("%w: PrevKV not supported", ErrUnsupportedRequest)
	}
	if op.IsSortSet() {
		return nil, fmt.Errorf("%w: SortSet not supported", ErrUnsupportedRequest)
	}
	if limit := op.Limit(); limit != 0 {
		return nil, fmt.Errorf("%w: Limit(%d) not supported", ErrUnsupportedRequest, limit)
	}
	if rev := op.MinModRev(); rev != 0 {
		return nil, fmt.Errorf("%w: MinModRev(%d) not supported", ErrUnsupportedRequest, rev)
	}
	if rev := op.MaxModRev(); rev != 0 {
		return nil, fmt.Errorf("%w: MaxModRev(%d) not supported", ErrUnsupportedRequest, rev)
	}
	if rev := op.MinCreateRev(); rev != 0 {
		return nil, fmt.Errorf("%w: MinCreateRev(%d) not supported", ErrUnsupportedRequest, rev)
	}
	if rev := op.MaxCreateRev(); rev != 0 {
		return nil, fmt.Errorf("%w: MaxCreateRev(%d) not supported", ErrUnsupportedRequest, rev)
	}
	// cache now only serves serializable reads of the latest revision (rev == 0).
	if !op.IsSerializable() {
		return nil, fmt.Errorf("%w: non-serializable request", ErrUnsupportedRequest)
	}
	startKey := []byte(key)
	endKey := op.RangeBytes()
	if rangeErr := c.validateRange(startKey, endKey); rangeErr != nil {
		return nil, rangeErr
	}
	return KeyPredForRange(startKey, endKey), nil
}
// validateRange checks that [startKey, endKey) is well-formed and falls
// inside the cache prefix. endKey nil means a single-key lookup and {0}
// means a from-key range (only valid when the cache has no prefix).
func (c *Cache) validateRange(startKey, endKey []byte) error {
	lo := []byte(c.prefix)
	hi := []byte(clientv3.GetPrefixRangeEnd(c.prefix))

	if len(endKey) == 0 { // single-key lookup
		if c.prefix == "" {
			return nil
		}
		if bytes.Compare(startKey, lo) < 0 || bytes.Compare(startKey, hi) >= 0 {
			return ErrKeyRangeInvalid
		}
		return nil
	}

	if len(endKey) == 1 && endKey[0] == 0 { // from-key range
		if c.prefix != "" {
			return ErrKeyRangeInvalid
		}
		return nil
	}

	// Explicit [startKey, endKey) range: must be non-empty and within the prefix.
	if bytes.Compare(endKey, startKey) <= 0 {
		return ErrKeyRangeInvalid
	}
	if c.prefix == "" {
		return nil
	}
	if bytes.Compare(startKey, lo) < 0 || bytes.Compare(endKey, hi) > 0 {
		return ErrKeyRangeInvalid
	}
	return nil
}
// WaitForNextResync blocks until the next resync loop iteration is complete.
// Delegates to the demux; ctx presumably bounds the wait — confirm in
// demux.WaitForNextResync.
func (c *Cache) WaitForNextResync(ctx context.Context) error {
	return c.demux.WaitForNextResync(ctx)
}
package kotlinx.coroutines.internal
import java.util.concurrent.atomic.*
/**
 * Atomic array with lock-free reads and synchronized modifications. It logically has an unbounded size,
 * is implicitly filled with nulls, and is resized on updates as needed to grow.
 */
internal class ResizableAtomicArray<T>(initialLength: Int) {
    // Replaced wholesale on grow; @Volatile makes the swap visible to readers.
    @Volatile
    private var array = AtomicReferenceArray<T>(initialLength)

    // for debug output
    public fun currentLength(): Int = array.length()

    public operator fun get(index: Int): T? {
        val array = this.array // volatile read
        // Indices beyond the current physical length read as null (implicit null filling).
        return if (index < array.length()) array[index] else null
    }

    // Must not be called concurrently, e.g. always use synchronized(this) to call this function
    fun setSynchronized(index: Int, value: T?) {
        val curArray = this.array
        val curLen = curArray.length()
        if (index < curLen) {
            curArray[index] = value
            return
        }
        // Grow to at least index + 1, at least doubling to amortize future copies.
        // It would be nice to copy array in batch instead of 1 by 1, but it seems like Java has no API for that
        val newArray = AtomicReferenceArray<T>((index + 1).coerceAtLeast(2 * curLen))
        for (i in 0 until curLen) newArray[i] = curArray[i]
        newArray[index] = value
        array = newArray // copy done
    }
}
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'chart_bar05.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'bar'})
        chart.axis_ids = [64264064, 64447232]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        # Write each data column and add one series per column.
        for cell, column in zip(('A1', 'B1', 'C1'), data):
            worksheet.write_column(cell, column)
        for col in range(3):
            chart.add_series({'values': ['Sheet1', 0, col, 4, col]})

        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
import { test } from '../../test';
// Expected compiler output for this sample: a single `css_unused_selector`
// warning flagging the `h4` rule as matching nothing in the template.
export default test({
	warnings: [
		{
			code: 'css_unused_selector',
			end: {
				character: 72,
				column: 3,
				line: 10
			},
			message: 'Unused CSS selector "h4"',
			start: {
				character: 70,
				column: 1,
				line: 10
			}
		}
	]
});
from django.test import TestCase
from django.core.management import call_command
from django.core import mail
from django.core.urlresolvers import reverse
from contactbox.models import Message, Receiver
class MainTestCase(TestCase):

    def setUp(self):
        # Two active receivers plus one inactive receiver.
        for email, active in (('foo@bar.com', True),
                              ('foo1@bar.com', True),
                              ('foo2@bar.com', False)):
            Receiver.objects.create(name='test', email=email, active=active)

    def test_sending(self):
        """A stored message is mailed out by the remind_contact command."""
        msg = Message.objects.create(email='a@a.com', message='message')
        self.assertEqual(mail.outbox, [])
        self.assertIsNone(msg.notification_date)

        call_command('remind_contact')

        msg = Message.objects.get(pk=msg.pk)
        self.assertIsNotNone(msg.notification_date)
        self.assertEqual(len(mail.outbox), 1)

    def test_www_form(self):
        """The contact form validates input and stores a message."""
        self.assertEqual(mail.outbox, [])
        self.assertEqual(Message.objects.count(), 0)

        url = reverse('contact-box-form-test')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

        # Empty submission: every field is required, nothing stored or sent.
        response = self.client.post(url, {})
        self.assertEqual(response.status_code, 200)
        for field in ('name', 'email', 'message'):
            self.assertFormError(response, 'form', field, 'This field is required.')
        self.assertEqual(mail.outbox, [])
        self.assertEqual(Message.objects.count(), 0)

        # Malformed email address is rejected.
        data = {'email': 'a', 'name': 'some name', 'message': 'm'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
        self.assertEqual(mail.outbox, [])
        self.assertEqual(Message.objects.count(), 0)

        # Valid submission stores the message; mailing happens via the command.
        data['email'] = 'a@a.com'
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200)
        self.assertIsNone(Message.objects.first().notification_date)

        call_command('remind_contact')
        self.assertEqual(len(mail.outbox), 1)
        self.assertIsNotNone(Message.objects.first().notification_date)
__author__ = 'Batchu Vishal'
import pygame
class OnBoard(pygame.sprite.Sprite):
    '''
    This class defines all inanimate objects that we need to display on our board.
    Any object that is on the board and not a person, comes under this class (ex. Coins,Ladders,Walls etc)
    Sets up the image and its position for all its child classes.
    '''

    def __init__(self, raw_image, position):
        """Scale raw_image to 15x15 and center the sprite's rect on position."""
        pygame.sprite.Sprite.__init__(self)
        self.__position = position
        self.image = raw_image
        self.image = pygame.transform.scale(self.image,
                                            (15, 15))  # Image and Rect required for the draw function on sprites
        self.rect = self.image.get_rect()
        self.rect.center = self.__position

    # Getters and Setters
    def setCenter(self, position):
        # Moves the drawn sprite; does not touch the cached __position.
        self.rect.center = position

    def getPosition(self):
        return self.__position

    def setPosition(self, position):
        # NOTE(review): updates only the cached position, not self.rect.center;
        # callers that want the sprite to move must also call setCenter —
        # confirm this split is intentional.
        self.__position = position

    # Update Image, this is an abstract method, needs to be implemented in the
    # subclass with whatever size required
    def updateImage(self, raw_image):  # Abstract Method
        raise NotImplementedError("Subclass must implement this")

    # Modify the size of the image
    def modifySize(self, raw_image, height, width):
        # pygame.transform.scale takes (width, height); note this method's
        # parameter order is (height, width).
        self.image = raw_image
        self.image = pygame.transform.scale(self.image, (width, height))
<?php
namespace Illuminate\Routing;
use Illuminate\Contracts\Routing\UrlRoutable;
use Illuminate\Database\Eloquent\ModelNotFoundException;
use Illuminate\Routing\Exceptions\BackedEnumCaseNotFoundException;
use Illuminate\Support\Reflector;
use Illuminate\Support\Str;
class ImplicitRouteBinding
{
    /**
     * Resolve the implicit route bindings for the given route.
     *
     * @param  \Illuminate\Container\Container  $container
     * @param  \Illuminate\Routing\Route  $route
     * @return void
     *
     * @throws \Illuminate\Database\Eloquent\ModelNotFoundException<\Illuminate\Database\Eloquent\Model>
     * @throws \Illuminate\Routing\Exceptions\BackedEnumCaseNotFoundException
     */
    public static function resolveForRoute($container, $route)
    {
        $parameters = $route->parameters();

        // Resolve backed-enum parameters first so the loop below only has to
        // deal with UrlRoutable (model) parameters.
        $route = static::resolveBackedEnumsForRoute($route, $parameters);

        foreach ($route->signatureParameters(['subClass' => UrlRoutable::class]) as $parameter) {
            if (! $parameterName = static::getParameterName($parameter->getName(), $parameters)) {
                continue;
            }

            $parameterValue = $parameters[$parameterName];

            // Already resolved to a routable instance (e.g. explicit binding).
            if ($parameterValue instanceof UrlRoutable) {
                continue;
            }

            $instance = $container->make(Reflector::getParameterClassName($parameter));

            $parent = $route->parentOfParameter($parameterName);

            // Soft-deletable models may be resolved including trashed rows
            // when the route allows it.
            $routeBindingMethod = $route->allowsTrashedBindings() && $instance::isSoftDeletable()
                ? 'resolveSoftDeletableRouteBinding'
                : 'resolveRouteBinding';

            if ($parent instanceof UrlRoutable &&
                ! $route->preventsScopedBindings() &&
                ($route->enforcesScopedBindings() || array_key_exists($parameterName, $route->bindingFields()))) {
                // Scoped child binding: resolve through the parent instance so
                // the child is constrained to the parent.
                $childRouteBindingMethod = $route->allowsTrashedBindings() && $instance::isSoftDeletable()
                    ? 'resolveSoftDeletableChildRouteBinding'
                    : 'resolveChildRouteBinding';

                if (! $model = $parent->{$childRouteBindingMethod}(
                    $parameterName, $parameterValue, $route->bindingFieldFor($parameterName)
                )) {
                    throw (new ModelNotFoundException)->setModel(get_class($instance), [$parameterValue]);
                }
            } elseif (! $model = $instance->{$routeBindingMethod}($parameterValue, $route->bindingFieldFor($parameterName))) {
                throw (new ModelNotFoundException)->setModel(get_class($instance), [$parameterValue]);
            }

            $route->setParameter($parameterName, $model);
        }
    }

    /**
     * Resolve the Backed Enums route bindings for the route.
     *
     * @param  \Illuminate\Routing\Route  $route
     * @param  array  $parameters
     * @return \Illuminate\Routing\Route
     *
     * @throws \Illuminate\Routing\Exceptions\BackedEnumCaseNotFoundException
     */
    protected static function resolveBackedEnumsForRoute($route, $parameters)
    {
        foreach ($route->signatureParameters(['backedEnum' => true]) as $parameter) {
            if (! $parameterName = static::getParameterName($parameter->getName(), $parameters)) {
                continue;
            }

            $parameterValue = $parameters[$parameterName];

            if ($parameterValue === null) {
                continue;
            }

            $backedEnumClass = $parameter->getType()?->getName();

            // Accept an already-resolved enum instance, otherwise map the raw
            // route segment onto a case via tryFrom.
            $backedEnum = $parameterValue instanceof $backedEnumClass
                ? $parameterValue
                : $backedEnumClass::tryFrom((string) $parameterValue);

            if (is_null($backedEnum)) {
                throw new BackedEnumCaseNotFoundException($backedEnumClass, $parameterValue);
            }

            $route->setParameter($parameterName, $backedEnum);
        }

        return $route;
    }

    /**
     * Return the parameter name if it exists in the given parameters.
     *
     * @param  string  $name
     * @param  array  $parameters
     * @return string|null
     */
    protected static function getParameterName($name, $parameters)
    {
        if (array_key_exists($name, $parameters)) {
            return $name;
        }

        // Fall back to the snake_case form of the parameter name.
        if (array_key_exists($snakedName = Str::snake($name), $parameters)) {
            return $snakedName;
        }
    }
}
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate template values for a callback interface.
Extends IdlTypeBase with property |callback_cpp_type|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
from idl_types import IdlTypeBase
from v8_globals import includes
import v8_types
import v8_utilities
# Headers every generated callback-interface .h file must include
# (used as the seed for 'header_includes' in the template context).
CALLBACK_INTERFACE_H_INCLUDES = frozenset([
    'bindings/core/v8/ActiveDOMCallback.h',
    'bindings/core/v8/DOMWrapperWorld.h',
    'bindings/core/v8/ScopedPersistent.h',
])
# Headers every generated callback-interface .cpp file must include
# (seeded into the global |includes| set).
CALLBACK_INTERFACE_CPP_INCLUDES = frozenset([
    'bindings/core/v8/ScriptController.h',
    'bindings/core/v8/V8Binding.h',
    'core/dom/ExecutionContext.h',
    'wtf/Assertions.h',
    'wtf/GetPtr.h',
    'wtf/RefPtr.h',
])
def cpp_type(idl_type):
    """Return the C++ type used to pass |idl_type| into a callback method.

    Strings and void are special-cased; containers and dictionaries are
    passed by const reference, everything else uses its raw (pointer) type.
    """
    # FIXME: remove this function by making callback types consistent
    # (always use usual v8_types.cpp_type)
    type_name = idl_type.name
    if type_name == 'String':
        return 'const String&'
    if type_name == 'void':
        return 'void'
    # Callbacks use raw pointers, so raw_type=True
    raw = idl_type.cpp_type_args(raw_type=True)
    is_container = raw.startswith(('Vector', 'HeapVector', 'WillBeHeapVector'))
    if is_container or idl_type.is_dictionary:
        # Pass containers and dictionaries by const reference rather than by value
        return 'const %s&' % raw
    return raw
# Monkey-patch: expose cpp_type() as IdlTypeBase.callback_cpp_type
# (see module docstring: "Extends IdlTypeBase with property |callback_cpp_type|").
IdlTypeBase.callback_cpp_type = property(cpp_type)
def callback_interface_context(callback_interface):
    """Build the template context for one callback interface.

    Side effect: resets the global |includes| set to the callback .cpp
    includes before the per-method contexts are built.
    """
    includes.clear()
    includes.update(CALLBACK_INTERFACE_CPP_INCLUDES)
    context = {
        'cpp_class': callback_interface.name,
        'v8_class': v8_utilities.v8_class_name(callback_interface),
        'header_includes': set(CALLBACK_INTERFACE_H_INCLUDES),
    }
    context['methods'] = [method_context(operation)
                          for operation in callback_interface.operations]
    return context
def add_includes_for_operation(operation):
    """Record includes for an operation's return type and argument types."""
    idl_types = [operation.idl_type]
    idl_types.extend(argument.idl_type for argument in operation.arguments)
    for idl_type in idl_types:
        idl_type.add_includes_for_type()
def method_context(operation):
    """Build the template context for a single callback operation.

    Raises if the operation's return type is neither boolean nor void.
    """
    extended_attributes = operation.extended_attributes
    idl_type = operation.idl_type
    idl_type_str = str(idl_type)
    if idl_type_str not in ('boolean', 'void'):
        raise Exception('We only support callbacks that return boolean or void values.')
    is_custom = 'Custom' in extended_attributes
    if not is_custom:
        # Custom methods provide their own implementation, so no includes needed.
        add_includes_for_operation(operation)
    call_with_this_handle = v8_utilities.extended_attribute_value_contains(
        extended_attributes.get('CallWith'), 'ThisValue')
    context = {
        'call_with_this_handle': call_with_this_handle,
        'cpp_type': idl_type.callback_cpp_type,
        'idl_type': idl_type_str,
        'is_custom': is_custom,
        'name': operation.name,
    }
    context.update(arguments_context(operation.arguments,
                                     call_with_this_handle))
    return context
def arguments_context(arguments, call_with_this_handle):
    """Build declarations and V8-conversion snippets for callback arguments."""
    def argument_context(argument):
        idl_type = argument.idl_type
        return {
            'handle': '%sHandle' % argument.name,
            'cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
                argument.name, isolate='m_scriptState->isolate()',
                creation_context='m_scriptState->context()->Global()'),
        }

    declarations = []
    if call_with_this_handle:
        # [CallWith=ThisValue] prepends an explicit |this| parameter.
        declarations.append('ScriptValue thisValue')
    for argument in arguments:
        declarations.append(
            '%s %s' % (argument.idl_type.callback_cpp_type, argument.name))
    return {
        'argument_declarations': declarations,
        'arguments': [argument_context(argument) for argument in arguments],
    }
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.configurationmetadata.changelog;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.jar.JarOutputStream;
import java.util.zip.ZipEntry;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Tests for {@link ChangelogGenerator}.
*
* @author Phillip Webb
*/
class ChangelogGeneratorTests {
@TempDir
File temp;
@Test
void generateChangeLog() throws IOException {
File oldJars = new File(this.temp, "1.0");
addJar(oldJars, "sample-1.0.json");
File newJars = new File(this.temp, "2.0");
addJar(newJars, "sample-2.0.json");
File out = new File(this.temp, "changes.adoc");
String[] args = new String[] { oldJars.getAbsolutePath(), newJars.getAbsolutePath(), out.getAbsolutePath() };
ChangelogGenerator.main(args);
assertThat(out).usingCharset(StandardCharsets.UTF_8)
.hasSameTextualContentAs(new File("src/test/resources/sample.adoc"));
}
private void addJar(File directory, String filename) throws IOException {
directory.mkdirs();
try (JarOutputStream out = new JarOutputStream(new FileOutputStream(new File(directory, "sample.jar")))) {
out.putNextEntry(new ZipEntry("META-INF/spring-configuration-metadata.json"));
try (InputStream in = new FileInputStream("src/test/resources/" + filename)) {
in.transferTo(out);
out.closeEntry();
}
}
}
} | java | github | https://github.com/spring-projects/spring-boot | configuration-metadata/spring-boot-configuration-metadata-changelog-generator/src/test/java/org/springframework/boot/configurationmetadata/changelog/ChangelogGeneratorTests.java |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
log util
:copyright:
Wenjie Lei (lei@princeton.edu), 2016
:license:
GNU Lesser General Public License, version 3 (LGPLv3)
(http://www.gnu.org/licenses/lgpl-3.0.en.html)
"""
from __future__ import (print_function, division, absolute_import)
import numpy as np
from . import logger
from .util import get_cmt_par
def inversion_result_table(npar, cmtsource, new_cmtsource,
                           bootstrap_flag=False, bootstrap_mean=None,
                           bootstrap_std=None,
                           bootstrap_std_over_mean=None):
    """
    Print out the inversion table

    :param npar: number of inverted parameters, must be within [6, 11]
    :param cmtsource: the original CMT source
    :param new_cmtsource: the inverted CMT source
    :param bootstrap_flag: if True, also print bootstrap mean/std columns
    :raises ValueError: if npar is outside [6, 11]
    :return:
    """
    if npar < 6 or npar > 11:
        # BUGFIX: the message was never interpolated; "%d" previously appeared
        # literally in the exception text.
        raise ValueError("npar(%d) should be within [6, 11]" % npar)
    title = "*" * 20 + " Inversion Result Table(%d npar) " % \
        npar + "*" * 20
    logger.info(title)
    mattrs = ["m_rr", "m_tt", "m_pp", "m_rt", "m_rp", "m_tp"]
    lattrs = ["depth_in_m", "longitude", "latitude", "time_shift",
              "half_duration"]
    lattr_names = {"depth_in_m": "dep", "longitude": "lon",
                   "latitude": "lat", "time_shift": "tshift",
                   "half_duration": "hdr"}
    if not bootstrap_flag:
        logger.info("PAR Old_CMT New_CMT")
        for attr in mattrs:
            logger.info("%s: %15.6e %15.6e" % (
                attr, getattr(cmtsource, attr), getattr(new_cmtsource, attr)))
        for idx in range(npar - 6):
            attr = lattrs[idx]
            logger.info("%s: %15.3f %15.3f" % (
                lattr_names[attr], getattr(cmtsource, attr),
                getattr(new_cmtsource, attr)))
    else:
        logger.info("PAR Old_CMT New_CMT "
                    "Bootstrap_Mean Bootstrap_STD STD/Mean")
        for idx, attr in enumerate(mattrs):
            logger.info(
                "%s: %15.6e %15.6e %15.6e %15.6e %10.2f%%" % (
                    attr, getattr(cmtsource, attr),
                    getattr(new_cmtsource, attr),
                    bootstrap_mean[idx], bootstrap_std[idx],
                    bootstrap_std_over_mean[idx] * 100))
        for idx in range(npar - 6):
            attr = lattrs[idx]
            # NOTE(review): unlike the moment-tensor loop above, std_over_mean
            # is not multiplied by 100 here even though it is printed with
            # "%%" -- confirm whether that is intentional.
            logger.info("%s: %15.3f %15.3f %15.3f %15.3f %10.2f%%" % (
                lattr_names[attr],
                getattr(cmtsource, attr),
                getattr(new_cmtsource, attr),
                bootstrap_mean[idx + 6],
                bootstrap_std[idx + 6],
                bootstrap_std_over_mean[idx + 6]))
def fmt_cmt_par(data):
    """Format a CMT parameter vector of length 6, 7 or 9 for logging.

    :param data: moment tensor (6 values), optionally followed by depth,
        then longitude and latitude (matching the format strings below)
    :raises ValueError: if len(data) is not 6, 7 or 9
    """
    if len(data) == 9:
        # NOTE(review): the trailing "]" has no matching "[" -- kept as-is to
        # preserve the existing log format; confirm whether it is wanted.
        return "MomentTensor=({:.4e}, {:.4e}, {:.4e}, {:.4e}, " \
            "{:.4e}, {:.4e}), Depth={:.2f} m, Longitude={:.2f}, "\
            "Latitude={:.2f}]".format(*data)
    elif len(data) == 7:
        return "MomentTensor=({:.4e}, {:.4e}, {:.4e}, {:.4e}, " \
            "{:.4e}, {:.4e}), Depth={:.2f} m".format(*data)
    elif len(data) == 6:
        return "MomentTensor=({:.4e}, {:.4e}, {:.4e}, {:.4e}, " \
            "{:.4e}, {:.4e})".format(*data)
    else:
        # BUGFIX: typo "Uknown" -> "Unknown" in the error message.
        raise ValueError("Unknown cmt par length: {}".format(len(data)))
def print_inversion_summary(npar, cmtsource, new_cmtsource,
                            bootstrap=False, bmean=None, bstd=None,
                            bstd_over_mean=None):
    """
    Log an inversion summary: old/new parameters, their difference,
    moment-tensor trace and scalar-moment change, then the result table.
    :return:
    """
    logger.info("*" * 20)
    logger.info("Invert cmt parameters Summary (%d par)" % npar)

    old_par = get_cmt_par(cmtsource)[:npar]
    new_par = get_cmt_par(new_cmtsource)[:npar]
    delta = new_par - old_par

    logger.info("Old CMT: {}".format(fmt_cmt_par(old_par)))
    logger.info("dm: [%s]" % ', '.join("%.4e" % v for v in delta))
    logger.info("New CMT: {}".format(fmt_cmt_par(new_par)))
    logger.info("Trace of Moment Tensor: %e" % (np.sum(new_par[0:3])))
    logger.info("Energy (Scalar Moment) Change: %5.2f%%" % (
        (new_cmtsource.M0 - cmtsource.M0) /
        cmtsource.M0 * 100.0))
    inversion_result_table(
        npar, cmtsource, new_cmtsource, bootstrap_flag=bootstrap,
        bootstrap_mean=bmean, bootstrap_std=bstd,
        bootstrap_std_over_mean=bstd_over_mean)
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito;
import static org.mockito.internal.progress.ThreadSafeMockingProgress.mockingProgress;
import org.mockito.internal.matchers.ArrayEquals;
import org.mockito.internal.matchers.CompareEqual;
import org.mockito.internal.matchers.EqualsWithDelta;
import org.mockito.internal.matchers.Find;
import org.mockito.internal.matchers.GreaterOrEqual;
import org.mockito.internal.matchers.GreaterThan;
import org.mockito.internal.matchers.LessOrEqual;
import org.mockito.internal.matchers.LessThan;
/**
* See {@link ArgumentMatchers} for general info about matchers.
* <p>
* AdditionalMatchers provides rarely used matchers, kept only for somewhat compatibility with EasyMock.
* Use additional matchers very judiciously because they may impact readability of a test.
* It is recommended to use matchers from {@link ArgumentMatchers} and keep stubbing and verification simple.
* <p>
* Example of using logical and(), not(), or() matchers:
*
* <pre class="code"><code class="java">
* //anything but not "ejb"
* mock.someMethod(not(eq("ejb")));
*
* //not "ejb" and not "michael jackson"
* mock.someMethod(and(not(eq("ejb")), not(eq("michael jackson"))));
*
* //1 or 10
* mock.someMethod(or(eq(1), eq(10)));
* </code></pre>
*
* Scroll down to see all methods - full list of matchers.
*/
@SuppressWarnings("ALL")
public final class AdditionalMatchers {
/**
* argument greater than or equal the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>null</code>.
*/
public static <T extends Comparable<T>> T geq(T value) {
reportMatcher(new GreaterOrEqual<T>(value));
return null;
}
/**
* byte argument greater than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static byte geq(byte value) {
reportMatcher(new GreaterOrEqual<Byte>(value));
return 0;
}
/**
* double argument greater than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static double geq(double value) {
reportMatcher(new GreaterOrEqual<Double>(value));
return 0;
}
/**
* float argument greater than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static float geq(float value) {
reportMatcher(new GreaterOrEqual<Float>(value));
return 0;
}
/**
* int argument greater than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static int geq(int value) {
reportMatcher(new GreaterOrEqual<Integer>(value));
return 0;
}
/**
* long argument greater than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static long geq(long value) {
reportMatcher(new GreaterOrEqual<Long>(value));
return 0;
}
/**
* short argument greater than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static short geq(short value) {
reportMatcher(new GreaterOrEqual<Short>(value));
return 0;
}
/**
* comparable argument less than or equal the given value details.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>null</code>.
*/
public static <T extends Comparable<T>> T leq(T value) {
reportMatcher(new LessOrEqual<T>(value));
return null;
}
/**
* byte argument less than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static byte leq(byte value) {
reportMatcher(new LessOrEqual<Byte>(value));
return 0;
}
/**
* double argument less than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static double leq(double value) {
reportMatcher(new LessOrEqual<Double>(value));
return 0;
}
/**
* float argument less than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static float leq(float value) {
reportMatcher(new LessOrEqual<Float>(value));
return 0;
}
/**
* int argument less than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static int leq(int value) {
reportMatcher(new LessOrEqual<Integer>(value));
return 0;
}
/**
* long argument less than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static long leq(long value) {
reportMatcher(new LessOrEqual<Long>(value));
return 0;
}
/**
* short argument less than or equal to the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static short leq(short value) {
reportMatcher(new LessOrEqual<Short>(value));
return 0;
}
/**
* comparable argument greater than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>null</code>.
*/
public static <T extends Comparable<T>> T gt(T value) {
reportMatcher(new GreaterThan<T>(value));
return null;
}
/**
* byte argument greater than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static byte gt(byte value) {
reportMatcher(new GreaterThan<Byte>(value));
return 0;
}
/**
* double argument greater than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static double gt(double value) {
reportMatcher(new GreaterThan<Double>(value));
return 0;
}
/**
* float argument greater than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static float gt(float value) {
reportMatcher(new GreaterThan<Float>(value));
return 0;
}
/**
* int argument greater than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static int gt(int value) {
reportMatcher(new GreaterThan<Integer>(value));
return 0;
}
/**
* long argument greater than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static long gt(long value) {
reportMatcher(new GreaterThan<Long>(value));
return 0;
}
/**
* short argument greater than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static short gt(short value) {
reportMatcher(new GreaterThan<Short>(value));
return 0;
}
/**
* comparable argument less than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>null</code>.
*/
public static <T extends Comparable<T>> T lt(T value) {
reportMatcher(new LessThan<T>(value));
return null;
}
/**
* byte argument less than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static byte lt(byte value) {
reportMatcher(new LessThan<Byte>(value));
return 0;
}
/**
* double argument less than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static double lt(double value) {
reportMatcher(new LessThan<Double>(value));
return 0;
}
/**
* float argument less than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static float lt(float value) {
reportMatcher(new LessThan<Float>(value));
return 0;
}
/**
* int argument less than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static int lt(int value) {
reportMatcher(new LessThan<Integer>(value));
return 0;
}
/**
* long argument less than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static long lt(long value) {
reportMatcher(new LessThan<Long>(value));
return 0;
}
/**
* short argument less than the given value.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>0</code>.
*/
public static short lt(short value) {
reportMatcher(new LessThan<Short>(value));
return 0;
}
/**
* comparable argument equals to the given value according to their
* compareTo method.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @return <code>null</code>.
*/
public static <T extends Comparable<T>> T cmpEq(T value) {
reportMatcher(new CompareEqual<T>(value));
return null;
}
/**
* String argument that contains a substring that matches the given regular
* expression.
*
* @param regex
* the regular expression.
* @return <code>null</code>.
*/
public static String find(String regex) {
reportMatcher(new Find(regex));
return null;
}
/**
* Object array argument that is equal to the given array, i.e. it has to
* have the same type, length, and each element has to be equal.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param <T>
* the type of the array, it is passed through to prevent casts.
* @param value
* the given array.
* @return <code>null</code>.
*/
public static <T> T[] aryEq(T[] value) {
reportMatcher(new ArrayEquals(value));
return null;
}
/**
* short array argument that is equal to the given array, i.e. it has to
* have the same length, and each element has to be equal.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given array.
* @return <code>null</code>.
*/
public static short[] aryEq(short[] value) {
reportMatcher(new ArrayEquals(value));
return null;
}
/**
* long array argument that is equal to the given array, i.e. it has to have
* the same length, and each element has to be equal.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given array.
* @return <code>null</code>.
*/
public static long[] aryEq(long[] value) {
reportMatcher(new ArrayEquals(value));
return null;
}
/**
* int array argument that is equal to the given array, i.e. it has to have
* the same length, and each element has to be equal.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given array.
* @return <code>null</code>.
*/
public static int[] aryEq(int[] value) {
reportMatcher(new ArrayEquals(value));
return null;
}
/**
* float array argument that is equal to the given array, i.e. it has to
* have the same length, and each element has to be equal.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given array.
* @return <code>null</code>.
*/
public static float[] aryEq(float[] value) {
reportMatcher(new ArrayEquals(value));
return null;
}
/**
* double array argument that is equal to the given array, i.e. it has to
* have the same length, and each element has to be equal.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given array.
* @return <code>null</code>.
*/
public static double[] aryEq(double[] value) {
reportMatcher(new ArrayEquals(value));
return null;
}
/**
* char array argument that is equal to the given array, i.e. it has to have
* the same length, and each element has to be equal.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given array.
* @return <code>null</code>.
*/
public static char[] aryEq(char[] value) {
reportMatcher(new ArrayEquals(value));
return null;
}
/**
* byte array argument that is equal to the given array, i.e. it has to have
* the same length, and each element has to be equal.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given array.
* @return <code>null</code>.
*/
public static byte[] aryEq(byte[] value) {
reportMatcher(new ArrayEquals(value));
return null;
}
/**
* boolean array argument that is equal to the given array, i.e. it has to
* have the same length, and each element has to be equal.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given array.
* @return <code>null</code>.
*/
public static boolean[] aryEq(boolean[] value) {
reportMatcher(new ArrayEquals(value));
return null;
}
/**
* boolean argument that matches both given matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>false</code>.
*/
public static boolean and(boolean first, boolean second) {
mockingProgress().getArgumentMatcherStorage().reportAnd();
return false;
}
/**
* byte argument that matches both given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static byte and(byte first, byte second) {
mockingProgress().getArgumentMatcherStorage().reportAnd();
return 0;
}
/**
* char argument that matches both given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static char and(char first, char second) {
mockingProgress().getArgumentMatcherStorage().reportAnd();
return 0;
}
/**
* double argument that matches both given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static double and(double first, double second) {
mockingProgress().getArgumentMatcherStorage().reportAnd();
return 0;
}
/**
* float argument that matches both given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static float and(float first, float second) {
mockingProgress().getArgumentMatcherStorage().reportAnd();
return 0;
}
/**
* int argument that matches both given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static int and(int first, int second) {
mockingProgress().getArgumentMatcherStorage().reportAnd();
return 0;
}
/**
* long argument that matches both given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static long and(long first, long second) {
mockingProgress().getArgumentMatcherStorage().reportAnd();
return 0;
}
/**
* short argument that matches both given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static short and(short first, short second) {
mockingProgress().getArgumentMatcherStorage().reportAnd();
return 0;
}
/**
* Object argument that matches both given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param <T>
* the type of the object, it is passed through to prevent casts.
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>null</code>.
*/
public static <T> T and(T first, T second) {
mockingProgress().getArgumentMatcherStorage().reportAnd();
return null;
}
/**
* boolean argument that matches any of the given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>false</code>.
*/
public static boolean or(boolean first, boolean second) {
mockingProgress().getArgumentMatcherStorage().reportOr();
return false;
}
/**
* Object argument that matches any of the given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param <T>
* the type of the object, it is passed through to prevent casts.
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>null</code>.
*/
public static <T> T or(T first, T second) {
mockingProgress().getArgumentMatcherStorage().reportOr();
return null;
}
/**
* short argument that matches any of the given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static short or(short first, short second) {
mockingProgress().getArgumentMatcherStorage().reportOr();
return 0;
}
/**
* long argument that matches any of the given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static long or(long first, long second) {
mockingProgress().getArgumentMatcherStorage().reportOr();
return 0;
}
/**
* int argument that matches any of the given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static int or(int first, int second) {
mockingProgress().getArgumentMatcherStorage().reportOr();
return 0;
}
/**
* float argument that matches any of the given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static float or(float first, float second) {
mockingProgress().getArgumentMatcherStorage().reportOr();
return 0;
}
/**
* double argument that matches any of the given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static double or(double first, double second) {
mockingProgress().getArgumentMatcherStorage().reportOr();
return 0;
}
/**
* char argument that matches any of the given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static char or(char first, char second) {
mockingProgress().getArgumentMatcherStorage().reportOr();
return 0;
}
/**
* byte argument that matches any of the given argument matchers.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the first argument matcher.
* @param second
* placeholder for the second argument matcher.
* @return <code>0</code>.
*/
public static byte or(byte first, byte second) {
mockingProgress().getArgumentMatcherStorage().reportOr();
return 0;
}
/**
* Object argument that does not match the given argument matcher.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param <T>
* the type of the object, it is passed through to prevent casts.
* @param first
* placeholder for the argument matcher.
* @return <code>null</code>.
*/
public static <T> T not(T first) {
mockingProgress().getArgumentMatcherStorage().reportNot();
return null;
}
/**
* short argument that does not match the given argument matcher.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the argument matcher.
* @return <code>0</code>.
*/
public static short not(short first) {
mockingProgress().getArgumentMatcherStorage().reportNot();
return 0;
}
/**
* int argument that does not match the given argument matcher.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the argument matcher.
* @return <code>0</code>.
*/
public static int not(int first) {
mockingProgress().getArgumentMatcherStorage().reportNot();
return 0;
}
/**
* long argument that does not match the given argument matcher.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the argument matcher.
* @return <code>0</code>.
*/
public static long not(long first) {
mockingProgress().getArgumentMatcherStorage().reportNot();
return 0;
}
/**
* float argument that does not match the given argument matcher.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the argument matcher.
* @return <code>0</code>.
*/
public static float not(float first) {
mockingProgress().getArgumentMatcherStorage().reportNot();
return 0;
}
/**
* double argument that does not match the given argument matcher.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the argument matcher.
* @return <code>0</code>.
*/
public static double not(double first) {
mockingProgress().getArgumentMatcherStorage().reportNot();
return 0;
}
/**
* char argument that does not match the given argument matcher.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the argument matcher.
* @return <code>0</code>.
*/
public static char not(char first) {
mockingProgress().getArgumentMatcherStorage().reportNot();
return 0;
}
/**
* boolean argument that does not match the given argument matcher.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the argument matcher.
* @return <code>false</code>.
*/
public static boolean not(boolean first) {
mockingProgress().getArgumentMatcherStorage().reportNot();
return false;
}
/**
* byte argument that does not match the given argument matcher.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param first
* placeholder for the argument matcher.
* @return <code>0</code>.
*/
public static byte not(byte first) {
mockingProgress().getArgumentMatcherStorage().reportNot();
return 0;
}
/**
* double argument that has an absolute difference to the given value that
* is less than the given delta details.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @param delta
* the given delta.
* @return <code>0</code>.
*/
public static double eq(double value, double delta) {
reportMatcher(new EqualsWithDelta(value, delta));
return 0;
}
/**
* float argument that has an absolute difference to the given value that is
* less than the given delta details.
* <p>
* See examples in javadoc for {@link AdditionalMatchers} class
*
* @param value
* the given value.
* @param delta
* the given delta.
* @return <code>0</code>.
*/
public static float eq(float value, float delta) {
reportMatcher(new EqualsWithDelta(value, delta));
return 0;
}
private static void reportMatcher(ArgumentMatcher<?> matcher) {
mockingProgress().getArgumentMatcherStorage().reportMatcher(matcher);
}
private AdditionalMatchers() {}
} | java | github | https://github.com/mockito/mockito | mockito-core/src/main/java/org/mockito/AdditionalMatchers.java |
#!/usr/bin/env python3
#
# Copyright 2015 Signal Processing Devices Sweden AB. All rights reserved.
#
# Description: ADQ14 FWDAQ streaming example
# Documentation:
#
import numpy as np
import ctypes as ct
import matplotlib.pyplot as plt
import sys
import time
import os
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__))+'/..')
from modules.example_helpers import *
# Record settings
number_of_records = 1000
samples_per_record = 512
# Plot data if set to True
plot_data = True
# Print metadata in headers
print_headers = True
# DMA transfer buffer settings
transfer_buffer_size = 65536
num_transfer_buffers = 8
# DMA flush timeout in seconds
flush_timeout = 0.5
# Load ADQAPI
ADQAPI = adqapi_load()
# Create ADQControlUnit
adq_cu = ct.c_void_p(ADQAPI.CreateADQControlUnit())
# Enable error logging from ADQAPI
ADQAPI.ADQControlUnit_EnableErrorTrace(adq_cu, 3, '.')
# Find ADQ devices
ADQAPI.ADQControlUnit_FindDevices(adq_cu)
n_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(adq_cu)
print('Number of ADQ found: {}'.format(n_of_ADQ))
# Exit if no devices were found
if n_of_ADQ < 1:
print('No ADQ connected.')
ADQAPI.DeleteADQControlUnit(adq_cu)
adqapi_unload(ADQAPI)
sys.exit(1)
# Select ADQ
if n_of_ADQ > 1:
adq_num = int(input('Select ADQ device 1-{:d}: '.format(n_of_ADQ)))
else:
adq_num = 1
print_adq_device_revisions(ADQAPI, adq_cu, adq_num)
# Set clock source
ADQ_CLOCK_INT_INTREF = 0
ADQAPI.ADQ_SetClockSource(adq_cu, adq_num, ADQ_CLOCK_INT_INTREF)
# Maximum number of channels for ADQ14 FWPD is four
max_number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)
# Setup test pattern
# 0 enables the analog input from the ADCs
# > 0 enables a specific test pattern
# Note: Default is to enable a test pattern (4) and disconnect the
# analog inputs inside the FPGA.
ADQAPI.ADQ_SetTestPatternMode(adq_cu, adq_num, 4)
# Set trig mode
SW_TRIG = 1
EXT_TRIG_1 = 2
EXT_TRIG_2 = 7
EXT_TRIG_3 = 8
LVL_TRIG = 3
INT_TRIG = 4
LVL_FALLING = 0
LVL_RISING = 1
trig_type = EXT_TRIG_1
success = ADQAPI.ADQ_SetTriggerMode(adq_cu, adq_num, trig_type)
if (success == 0):
print('ADQ_SetTriggerMode failed.')
success = ADQAPI.ADQ_SetLvlTrigLevel(adq_cu, adq_num, 0)
if (success == 0):
print('ADQ_SetLvlTrigLevel failed.')
success = ADQAPI.ADQ_SetTrigLevelResetValue(adq_cu, adq_num, 1000)
if (success == 0):
print('ADQ_SetTrigLevelResetValue failed.')
success = ADQAPI.ADQ_SetLvlTrigChannel(adq_cu, adq_num, 1)
if (success == 0):
print('ADQ_SetLvlTrigChannel failed.')
success = ADQAPI.ADQ_SetLvlTrigEdge(adq_cu, adq_num, LVL_RISING)
if (success == 0):
print('ADQ_SetLvlTrigEdge failed.')
# Setup acquisition
channels_mask = 0xf
ADQAPI.ADQ_TriggeredStreamingSetup(adq_cu, adq_num, number_of_records, samples_per_record, 0, 0, channels_mask)
ADQAPI.ADQ_SetStreamStatus(adq_cu, adq_num, 1);
# Get number of channels from device
number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)
# Setup size of transfer buffers
print('Setting up streaming...')
ADQAPI.ADQ_SetTransferBuffers(adq_cu, adq_num, num_transfer_buffers, transfer_buffer_size)
# Start streaming
print('Collecting data, please wait...')
ADQAPI.ADQ_StopStreaming(adq_cu, adq_num)
ADQAPI.ADQ_StartStreaming(adq_cu, adq_num)
# Allocate target buffers for intermediate data storage
target_buffers = (ct.POINTER(ct.c_int16*transfer_buffer_size)*number_of_channels)()
for bufp in target_buffers:
bufp.contents = (ct.c_int16*transfer_buffer_size)()
# Create some buffers for the full records
data_16bit = [np.array([], dtype=np.int16),
np.array([], dtype=np.int16),
np.array([], dtype=np.int16),
np.array([], dtype=np.int16)]
# Allocate target buffers for headers
headerbuf_list = [(HEADER*number_of_records)() for ch in range(number_of_channels)]
# Create an C array of pointers to header buffers
headerbufp_list = ((ct.POINTER(HEADER*number_of_records))*number_of_channels)()
# Initiate pointers with allocated header buffers
for ch,headerbufp in enumerate(headerbufp_list):
headerbufp.contents = headerbuf_list[ch]
# Create a second level pointer to each buffer pointer,
# these will only be used to change the bufferp_list pointer values
headerbufvp_list = [ct.cast(ct.pointer(headerbufp_list[ch]), ct.POINTER(ct.c_void_p)) for ch in range(number_of_channels)]
# Allocate length output variable
samples_added = (4*ct.c_uint)()
for ind in range(len(samples_added)):
samples_added[ind] = 0
headers_added = (4*ct.c_uint)()
for ind in range(len(headers_added)):
headers_added[ind] = 0
header_status = (4*ct.c_uint)()
for ind in range(len(header_status)):
header_status[ind] = 0
# Generate triggers if software trig is used
if (trig_type == 1):
for trig in range(number_of_records):
ADQAPI.ADQ_SWTrig(adq_cu, adq_num)
print('Waiting for data...')
# Collect data until all requested records have been recieved
records_completed = [0, 0, 0, 0]
headers_completed = [0, 0, 0, 0]
records_completed_cnt = 0
ltime = time.time()
buffers_filled = ct.c_uint(0)
# Read out data until records_completed for ch A is number_of_records
while (number_of_records > records_completed[0]):
buffers_filled.value = 0
collect_result = 1
poll_time_diff_prev = time.time()
# Wait for next data buffer
while ((buffers_filled.value == 0) and (collect_result)):
collect_result = ADQAPI.ADQ_GetTransferBufferStatus(adq_cu, adq_num,
ct.byref(buffers_filled))
poll_time_diff = time.time()
if ((poll_time_diff - poll_time_diff_prev) > flush_timeout):
# Force flush
print('No data for {}s, flushing the DMA buffer.'.format(flush_timeout))
status = ADQAPI.ADQ_FlushDMA(adq_cu, adq_num);
print('ADQAPI.ADQ_FlushDMA returned {}'.format(adq_status(status)))
poll_time_diff_prev = time.time()
# Fetch data and headers into target buffers
status = ADQAPI.ADQ_GetDataStreaming(adq_cu, adq_num,
target_buffers,
headerbufp_list,
channels_mask,
ct.byref(samples_added),
ct.byref(headers_added),
ct.byref(header_status))
if status == 0:
print('GetDataStreaming failed!')
sys.exit()
for ch in range(number_of_channels):
if (headers_added[ch] > 0):
# The last call to GetDataStreaming has generated header data
if (header_status[ch]):
headers_done = headers_added[ch]
else:
# One incomplete header
headers_done = headers_added[ch]-1
# Update counter counting completed records
headers_completed[ch] += headers_done
# Update the number of completed records if at least one header has completed
if (headers_done > 0):
records_completed[ch] = headerbuf_list[ch][headers_completed[ch]-1].RecordNumber + 1
# Update header pointer so that it points to the current header
headerbufvp_list[ch].contents.value += headers_done*ct.sizeof(headerbuf_list[ch]._type_)
if headers_done > 0 and (np.sum(records_completed)-records_completed_cnt) > 1000:
dtime = time.time()-ltime
if (dtime > 0):
print('{:d} {:.2f} MB/s'.format(np.sum(records_completed),
((samples_per_record
*2
*(np.sum(records_completed)-records_completed_cnt))
/(dtime))/(1024*1024)))
sys.stdout.flush()
records_completed_cnt = np.sum(records_completed)
ltime = time.time()
if (samples_added[ch] > 0 and plot_data):
# Copy channel data to continuous buffer
data_buf = np.frombuffer(target_buffers[ch].contents, dtype=np.int16, count=samples_added[ch])
data_16bit[ch] = np.append(data_16bit[ch], data_buf)
print(records_completed[0])
# Stop streaming
ADQAPI.ADQ_StopStreaming(adq_cu, adq_num)
# Print recieved headers
if print_headers:
for ch in range(max_number_of_channels):
if number_of_records > 0:
print('------------------')
print('Headers channel {}'.format(ch))
print('------------------')
for rec in range(number_of_records):
header = headerbuf_list[ch][rec]
print('RecordStatus: {}'.format(header.RecordNumber))
print('UserID: {}'.format(header.UserID))
print('SerialNumber: {}'.format(header.SerialNumber))
print('Channel: {}'.format(header.Channel))
print('DataFormat: {}'.format(header.DataFormat))
print('RecordNumber: {}'.format(header.RecordNumber))
print('Timestamp: {} ns'.format(header.Timestamp * 0.125))
print('RecordStart: {} ns'.format(header.RecordStart * 0.125))
print('SamplePeriod: {} ns'.format(header.SamplePeriod * 0.125))
print('RecordLength: {} ns'.format(header.RecordLength * (header.SamplePeriod* 0.125)))
print('------------------')
# Plot data
if plot_data:
for ch in range(max_number_of_channels):
if number_of_records > 0:
widths = np.array([], dtype=np.uint32)
record_end_offset = 0
# Extract record lengths from headers
for rec in range(number_of_records):
header = headerbuf_list[ch][rec]
if rec>0:
print header.Timestamp*0.125-headerbuf_list[ch][rec-1].Timestamp*0.125
widths = np.append(widths, header.RecordLength)
# Get new figure
plt.figure(ch)
plt.clf()
# Plot data
plt.plot(data_16bit[ch].T, '.-')
# Set window title
plt.gcf().canvas.set_window_title('Channel {}'.format(ch))
# Set grid mode
plt.grid(which='Major')
# Mark records in plot
alternate_background(plt.gca(), 0, widths, labels=True)
# Show plot
plt.show()
# Delete ADQ device handle
ADQAPI.ADQControlUnit_DeleteADQ(adq_cu, adq_num)
# Delete ADQControlunit
ADQAPI.DeleteADQControlUnit(adq_cu)
print('Done.') | unknown | codeparrot/codeparrot-clean | ||
from wofrysrw.beamline.optical_elements.mirrors.srw_mirror import SRWMirror, Orientation, SimulationMethod, TreatInputOutput
from syned.beamline.shape import Sphere
from oasys_srw.srwlib import SRWLOptMirSph
class SRWSphericalMirror(SRWMirror):
def __init__(self,
name = "Undefined",
optical_element_displacement = None,
tangential_size = 1.2,
sagittal_size = 0.01,
grazing_angle = 0.003,
orientation_of_reflection_plane = Orientation.UP,
invert_tangent_component = False,
radius = 1,
height_profile_data_file = "mirror.dat",
height_profile_data_file_dimension = 1,
height_amplification_coefficient = 1.0):
super().__init__(name=name,
optical_element_displacement=optical_element_displacement,
tangential_size=tangential_size,
sagittal_size=sagittal_size,
grazing_angle=grazing_angle,
orientation_of_reflection_plane=orientation_of_reflection_plane,
invert_tangent_component=invert_tangent_component,
height_profile_data_file=height_profile_data_file,
height_profile_data_file_dimension=height_profile_data_file_dimension,
height_amplification_coefficient=height_amplification_coefficient)
self.radius = radius
def get_shape(self):
return Sphere()
def get_SRWLOptMir(self, nvx, nvy, nvz, tvx, tvy, x, y, ap_shape):
return SRWLOptMirSph(_size_tang=self.tangential_size,
_size_sag=self.sagittal_size,
_r=self.radius,
_ap_shape=ap_shape,
_sim_meth=SimulationMethod.THICK,
_treat_in_out=TreatInputOutput.WAVEFRONT_INPUT_CENTER_OUTPUT_CENTER,
_nvx=nvx,
_nvy=nvy,
_nvz=nvz,
_tvx=tvx,
_tvy=tvy,
_x=x,
_y=y)
def fromSRWLOpt(self, srwlopt=SRWLOptMirSph()):
if not isinstance(srwlopt, SRWLOptMirSph):
raise ValueError("SRW object is not a SRWLOptMirEl object")
super().fromSRWLOpt(srwlopt)
self.radius = srwlopt.rad
def to_python_code_aux(self, nvx, nvy, nvz, tvx, tvy, x, y, ap_shape):
text_code = "SRWLOptMirSph(_size_tang=" + str(self.tangential_size) +"," + "\n"
text_code += " _size_sag=" + str(self.sagittal_size) +"," + "\n"
text_code += " _r=" + str(self.radius) +"," + "\n"
text_code += " _ap_shape='" + str(ap_shape) +"'," + "\n"
text_code += " _sim_meth=" + str(SimulationMethod.THICK) +"," + "\n"
text_code += " _treat_in_out=" + str(TreatInputOutput.WAVEFRONT_INPUT_CENTER_OUTPUT_CENTER) +"," + "\n"
text_code += " _nvx=" + str(nvx) +"," + "\n"
text_code += " _nvy=" + str(nvy) +"," + "\n"
text_code += " _nvz=" + str(nvz) +"," + "\n"
text_code += " _tvx=" + str(tvx) +"," + "\n"
text_code += " _tvy=" + str(tvy) +"," + "\n"
text_code += " _x=" + str(x) +"," + "\n"
text_code += " _y=" + str(y) +")" + "\n"
return text_code | unknown | codeparrot/codeparrot-clean | ||
# Require the OptionParser code.
require 'optparse'
# Create an OptionParser object.
parser = OptionParser.new
# Define one or more options.
parser.on('-x', 'Whether to X') do |value|
p ['x', value]
end
parser.on('-y', 'Whether to Y') do |value|
p ['y', value]
end
parser.on('-z', 'Whether to Z') do |value|
p ['z', value]
end
# Parse the command line and return pared-down ARGV.
p parser.parse! | ruby | github | https://github.com/ruby/ruby | doc/optparse/ruby/basic.rb |
# frozen_string_literal: true
# :markup: markdown
module ActionDispatch
# # Action Dispatch PublicExceptions
#
# When called, this middleware renders an error page. By default if an HTML
# response is expected it will render static error pages from the `/public`
# directory. For example when this middleware receives a 500 response it will
# render the template found in `/public/500.html`. If an internationalized
# locale is set, this middleware will attempt to render the template in
# `/public/500.<locale>.html`. If an internationalized template is not found it
# will fall back on `/public/500.html`.
#
# When a request with a content type other than HTML is made, this middleware
# will attempt to convert error information into the appropriate response type.
class PublicExceptions
attr_accessor :public_path
def initialize(public_path)
@public_path = public_path
end
def call(env)
request = ActionDispatch::Request.new(env)
status = request.path_info[1..-1].to_i
content_type = request.formats.first
body = { status: status, error: Rack::Utils::HTTP_STATUS_CODES.fetch(status, Rack::Utils::HTTP_STATUS_CODES[500]) }
if env["action_dispatch.original_request_method"] == "HEAD"
render_format(status, content_type, "")
else
render(status, content_type, body)
end
end
private
def render(status, content_type, body)
format = "to_#{content_type.to_sym}" if content_type
if format && body.respond_to?(format)
render_format(status, content_type, body.public_send(format))
else
render_html(status)
end
end
def render_format(status, content_type, body)
[status, { Rack::CONTENT_TYPE => "#{content_type}; charset=#{ActionDispatch::Response.default_charset}",
Rack::CONTENT_LENGTH => body.bytesize.to_s }, [body]]
end
def render_html(status)
path = "#{public_path}/#{status}.#{I18n.locale}.html"
path = "#{public_path}/#{status}.html" unless (found = File.exist?(path))
if found || File.exist?(path)
render_format(status, "text/html", File.read(path))
else
[404, { Constants::X_CASCADE => "pass" }, []]
end
end
end
end | ruby | github | https://github.com/rails/rails | actionpack/lib/action_dispatch/middleware/public_exceptions.rb |
import io
import random
import docker
import six
from . import api_test
BUSYBOX = api_test.BUSYBOX
class TestRegressions(api_test.BaseTestCase):
    """Integration regression tests for docker-py, one per fixed GitHub
    issue (the issue number is in each test name).  Requires a running
    Docker daemon via the inherited test client."""

    def test_443_handle_nonchunked_response_in_stream(self):
        # An empty build context must surface the daemon's 500 error
        # instead of hanging on the (non-chunked) response stream.
        dfile = io.BytesIO()
        with self.assertRaises(docker.errors.APIError) as exc:
            for line in self.client.build(fileobj=dfile, tag="a/b/c"):
                pass
        self.assertEqual(exc.exception.response.status_code, 500)
        dfile.close()

    def test_542_truncate_ids_client_side(self):
        # trunc=True must shorten container ids to 12 chars client-side.
        self.client.start(
            self.client.create_container(BUSYBOX, ['true'])
        )
        result = self.client.containers(all=True, trunc=True)
        self.assertEqual(len(result[0]['Id']), 12)

    def test_647_support_doubleslash_in_image_names(self):
        # '//' in an image name must reach the daemon (APIError), not
        # break client-side URL building.
        with self.assertRaises(docker.errors.APIError):
            self.client.inspect_image('gensokyo.jp//kirisame')

    def test_649_handle_timeout_value_none(self):
        # timeout=None means "wait forever" and must not raise.
        self.client.timeout = None
        ctnr = self.client.create_container(BUSYBOX, ['sleep', '2'])
        self.client.start(ctnr)
        self.client.stop(ctnr)

    def test_715_handle_user_param_as_int_value(self):
        # user may be an int uid, not only a string.
        ctnr = self.client.create_container(BUSYBOX, ['id', '-u'], user=1000)
        self.client.start(ctnr)
        self.client.wait(ctnr)
        logs = self.client.logs(ctnr)
        if six.PY3:
            logs = logs.decode('utf-8')
        assert logs == '1000\n'

    def test_792_explicit_port_protocol(self):
        # '2000/tcp' and '2000/udp' bindings must resolve independently;
        # a bare port defaults to tcp.
        tcp_port, udp_port = random.sample(range(9999, 32000), 2)
        ctnr = self.client.create_container(
            BUSYBOX, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
            host_config=self.client.create_host_config(
                port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
            )
        )
        self.tmp_containers.append(ctnr)
        self.client.start(ctnr)
        self.assertEqual(
            self.client.port(ctnr, 2000)[0]['HostPort'],
            six.text_type(tcp_port)
        )
        self.assertEqual(
            self.client.port(ctnr, '2000/tcp')[0]['HostPort'],
            six.text_type(tcp_port)
        )
        self.assertEqual(
            self.client.port(ctnr, '2000/udp')[0]['HostPort'],
            six.text_type(udp_port)
        )
# -*- coding: utf-8 -*-
import subprocess
import sys
import botocore
import click
from jungle.session import create_session
def format_output(instances, flag):
    """Return a list of display lines, one per EC2 instance.

    :param instances: iterable of EC2 instance objects.  It is iterated
        twice: once to size the Name column, once to render the rows.
    :param flag: when truthy, render fixed-width columns instead of
        tab-separated fields.
    :return: list of formatted strings.
    """
    out = []
    line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
    name_len = _get_max_name_len(instances) + 3
    if flag:
        line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
    for i in instances:
        tag_name = get_tag_value(i.tags, 'Name')
        # str() both addresses: either may be None (e.g. stopped or
        # terminated instances), and None does not support the '<16'
        # format spec used in the fixed-width layout.
        out.append(line_format.format(
            tag_name, i.state['Name'], i.id, str(i.private_ip_address),
            str(i.public_ip_address)))
    return out
def _get_instance_ip_address(instance, use_private_ip=False):
if use_private_ip:
return instance.private_ip_address
elif instance.public_ip_address is not None:
return instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
return instance.private_ip_address
def _get_max_name_len(instances):
    """Return the length of the longest Name tag, or 0 when empty.

    The original used a `for … return` trick to guard against empty
    collections (ec2.instanceCollection has no __len__); an explicit
    list with a conditional max expresses the same behavior directly.
    """
    # FIXME: ec2.instanceCollection doesn't have __len__
    lengths = [len(get_tag_value(i.tags, 'Name')) for i in instances]
    return max(lengths) if lengths else 0
def get_tag_value(x, key):
    """Return the value of the first tag whose Key equals *key*.

    :param x: EC2 tag list ([{'Key': ..., 'Value': ...}, ...]) or None.
    :param key: tag key to look up.
    :return: the matching tag's Value, or '' when absent or x is None.
    """
    if x is None:
        return ''
    return next((tag['Value'] for tag in x if tag['Key'] == key), '')
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
    """EC2 CLI group"""
    # Stash the profile name so every subcommand can build its own session.
    ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, name, list_formatted):
    """List EC2 instances"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    ec2 = session.resource('ec2')
    # '*' lists everything; any other value matches the Name tag exactly.
    if name == '*':
        instances = ec2.instances.filter()
    else:
        condition = {'Name': 'tag:Name', 'Values': [name]}
        instances = ec2.instances.filter(Filters=[condition])
    out = format_output(instances, list_formatted)
    click.echo('\n'.join(out))
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def up(ctx, instance_id):
    """Start EC2 instance"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    ec2 = session.resource('ec2')
    try:
        instance = ec2.Instance(instance_id)
        instance.start()
    except botocore.exceptions.ClientError as e:
        # Malformed or unknown instance id; exit non-zero for scripting.
        click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
        sys.exit(2)
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def down(ctx, instance_id):
    """Stop EC2 instance"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    ec2 = session.resource('ec2')
    try:
        instance = ec2.Instance(instance_id)
        instance.stop()
    except botocore.exceptions.ClientError as e:
        # Malformed or unknown instance id; exit non-zero for scripting.
        click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
        sys.exit(2)
def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
                       use_private_ip, gateway_instance_id, gateway_username):
    """Create SSH Login command string.

    Resolves the target host either by instance id or by Name tag (with
    an interactive picker when several running instances match), then
    assembles the ssh command line, optionally hopping through a gateway
    (bastion) instance.
    """
    ec2 = session.resource('ec2')
    if instance_id is not None:
        try:
            instance = ec2.Instance(instance_id)
            hostname = _get_instance_ip_address(instance, use_private_ip)
        except botocore.exceptions.ClientError as e:
            click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
            sys.exit(2)
    elif instance_name is not None:
        try:
            # Only running instances are eligible ssh targets.
            conditions = [
                {'Name': 'tag:Name', 'Values': [instance_name]},
                {'Name': 'instance-state-name', 'Values': ['running']},
            ]
            instances = ec2.instances.filter(Filters=conditions)
            target_instances = []
            for idx, i in enumerate(instances):
                target_instances.append(i)
            if len(target_instances) == 1:
                instance = target_instances[0]
                hostname = _get_instance_ip_address(instance, use_private_ip)
            else:
                # Several (or zero) matches: list them and prompt for one.
                # NOTE(review): an empty match list still reaches the prompt
                # and is rejected as an invalid selection - confirm intended.
                for idx, i in enumerate(instances):
                    tag_name = get_tag_value(i.tags, 'Name')
                    click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
                        idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
                selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
                if len(target_instances) - 1 < selected_idx or selected_idx < 0:
                    click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
                    sys.exit(2)
                click.echo("{0} is selected.".format(selected_idx))
                instance = target_instances[selected_idx]
                hostname = _get_instance_ip_address(instance, use_private_ip)
        except botocore.exceptions.ClientError as e:
            # NOTE(review): instance_id is None in this branch, so the
            # message prints "None" - the exception text carries details.
            click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
            sys.exit(2)
    # TODO: need to refactor and make it testable
    if key_file is None:
        key_file_option = ''
    else:
        key_file_option = ' -i {0}'.format(key_file)
    gateway_username_option = build_option_username(gateway_username)
    username_option = build_option_username(username)
    if ssh_options is None:
        ssh_options = ''
    else:
        ssh_options = ' {0}'.format(ssh_options)
    if gateway_instance_id is not None:
        # Hop through the gateway: ssh to its public IP, then ssh on to
        # the target's private IP from there (-tt forces a tty).
        gateway_instance = ec2.Instance(gateway_instance_id)
        gateway_public_ip = gateway_instance.public_ip_address
        hostname = instance.private_ip_address
        cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
            gateway_username_option, gateway_public_ip, key_file_option,
            port, ssh_options, username_option, hostname)
    else:
        cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
    return cmd
def build_option_username(username):
    """Return the ssh '-l <user>' fragment, or '' when no user is set."""
    return '' if username is None else ' -l {0}'.format(username)
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default=None, help='Login username')
@click.option('--key-file', '-k', help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--private-ip', '-e', help='Use instance private ip', is_flag=True, default=False)
@click.option('--ssh-options', '-s', help='Additional SSH options', default=None)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--gateway-username', '-x', default=None, help='Gateway username')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH Login command and exist')
@click.pass_context
def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
        gateway_instance_id, gateway_username, dry_run):
    """SSH to EC2 instance"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    # Exactly one of --instance-id / --instance-name must be supplied.
    if instance_id is None and instance_name is None:
        click.echo(
            "One of --instance-id/-i or --instance-name/-n"
            " has to be specified.", err=True)
        sys.exit(1)
    elif instance_id is not None and instance_name is not None:
        click.echo(
            "Both --instance-id/-i and --instance-name/-n "
            "can't to be specified at the same time.", err=True)
        sys.exit(1)
    cmd = create_ssh_command(
        session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
        gateway_instance_id, gateway_username)
    # --dry-run prints the command instead of executing it.
    if not dry_run:
        subprocess.call(cmd, shell=True)
    else:
        click.echo(cmd)
##
# Copyright (C) 2014 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django import http
from django.contrib import messages
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from inboxen import models
from website import forms
from website.views import base
__all__ = ["RestoreSelectView"]
class RestoreSelectView(base.CommonContextMixin, base.LoginRequiredMixin, generic.FormView):
    """Form view letting a logged-in user pick a deleted inbox to restore;
    on success, redirects to the restore confirmation page for that inbox."""
    form_class = forms.RestoreSelectForm
    headline = _("Choose An Inbox To Restore")
    template_name = "user/account/restore.html"

    def get_success_url(self):
        # self.inbox is set by form_valid() before redirecting.
        return urlresolvers.reverse("user-restore", kwargs={"inbox":self.inbox.inbox, "domain":self.inbox.domain.domain})

    def get_form_kwargs(self, *args, **kwargs):
        # The form needs the request (e.g. for the current user).
        kwargs = super(RestoreSelectView, self).get_form_kwargs(*args, **kwargs)
        kwargs.setdefault("request", self.request)
        return kwargs

    def form_valid(self, form, *args, **kwargs):
        # form.save() returns the selected inbox record.
        self.inbox = form.save()
        return super(RestoreSelectView, self).form_valid(form=form, *args, **kwargs)
#include "pyconfig.h" // Py_GIL_DISABLED
// Need limited C API version 3.5 for PyCodec_NameReplaceErrors()
#if !defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API)
# define Py_LIMITED_API 0x03050000
#endif
#include "parts.h"
/* Thin wrapper exposing PyCodec_NameReplaceErrors() to the test suite:
 * takes a codec exception object and returns the handler's result. */
static PyObject *
codec_namereplace_errors(PyObject *Py_UNUSED(module), PyObject *exc)
{
    assert(exc != NULL);
    return PyCodec_NameReplaceErrors(exc);
}
/* Method table for this sub-module of the limited-C-API test extension. */
static PyMethodDef test_methods[] = {
    {"codec_namereplace_errors", codec_namereplace_errors, METH_O},
    {NULL},
};

/* Register this file's test methods on the shared test module.
 * Returns 0 on success, -1 (with an exception set) on failure. */
int
_PyTestLimitedCAPI_Init_Codec(PyObject *module)
{
    if (PyModule_AddFunctions(module, test_methods) < 0) {
        return -1;
    }
    return 0;
}
# Python 2 script: export Dota 2 matches from MongoDB into train/test
# matrices for win prediction.  Each row of X is one match; features are
# a bit vector of hero picks (radiant heroes first, then dire), and Y is
# 1 when radiant won, else 0.
from pymongo import MongoClient
from progressbar import ProgressBar, Bar, Percentage, FormatLabel, ETA
import numpy as np

# NOTE(review): newer NumPy rejects threshold=np.nan (must be an int) -
# confirm the pinned NumPy version before upgrading.
np.set_printoptions(threshold=np.nan)

client = MongoClient()
db = client.dotabot
matches = db.matches

# We're going to create a training matrix, X, where each
# row is a different match and each column is a feature
# The features are bit vectors indicating whether heroes
# were picked (1) or not picked (0). The first N features
# correspond to radiant, and the last N features are
# for dire.
NUM_HEROES = 108
NUM_FEATURES = NUM_HEROES * 2

# Our training label vector, Y, is a bit vector indicating
# whether radiant won (1) or lost (-1)
NUM_MATCHES = matches.count()

# Initialize training matrix
X = np.zeros((NUM_MATCHES, NUM_FEATURES), dtype=np.int8)
# Initialize training label vector
Y = np.zeros(NUM_MATCHES, dtype=np.int8)

widgets = [FormatLabel('Processed: %(value)d/%(max)d matches. '), ETA(), Percentage(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets, maxval=NUM_MATCHES).start()

for i, record in enumerate(matches.find()):
    pbar.update(i)
    Y[i] = 1 if record['radiant_win'] else 0
    players = record['players']
    for player in players:
        # hero_id is 1-based in the API; shift to a 0-based column index.
        hero_id = player['hero_id'] - 1
        # If the left-most bit of player_slot is set,
        # this player is on dire, so push the index accordingly
        player_slot = player['player_slot']
        if player_slot >= 128:
            hero_id += NUM_HEROES
        X[i, hero_id] = 1
pbar.finish()

print "Permuting, generating train and test sets."
# 10% test / 90% train split (Python 2 integer division).
indices = np.random.permutation(NUM_MATCHES)
test_indices = indices[0:NUM_MATCHES/10]
train_indices = indices[NUM_MATCHES/10:NUM_MATCHES]
X_test = X[test_indices]
Y_test = Y[test_indices]
X_train = X[train_indices]
Y_train = Y[train_indices]

print "Saving output file now..."
np.savez_compressed('test_%d.npz' % len(test_indices), X=X_test, Y=Y_test)
np.savez_compressed('train_%d.npz' % len(train_indices), X=X_train, Y=Y_train)
# -*- coding: utf-8 -*-
###############################################################################
#
# Module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com).
# Copyright (C) 2010-2013 Akretion LDTA (<http://www.akretion.com>)
# @author Sébastien BEAU <sebastien.beau@akretion.com>
# @author Benoît GUILLOT <benoit.guillot@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import api, fields, models
from datetime import date
class SaleOrder(models.Model):
    """Extends sale.order with a link to the project.project record behind
    the order's analytic account, plus an action to create one."""
    _inherit = "sale.order"

    @api.one
    @api.depends('project_id')
    def _compute_related_project_id(self):
        # Resolve the project.project whose analytic account is the
        # order's project_id; empty recordset when tasks are disabled.
        self.related_project_id = (
            self.project_id.use_tasks and
            self.env['project.project'].search(
                [('analytic_account_id', '=', self.project_id.id)],
                limit=1)[:1])

    related_project_id = fields.Many2one(
        comodel_name='project.project', string='Project',
        compute='_compute_related_project_id')

    @api.model
    def _prepare_project_vals(self, order):
        """Build the values dict used to create a project for *order*."""
        # Project name pattern: " <partner> - <year> - <order name>"
        name = u" %s - %s - %s" % (
            order.partner_id.name,
            date.today().year,
            order.name)
        return {
            'user_id': order.user_id.id,
            'name': name,
            'partner_id': order.partner_id.id,
        }

    @api.multi
    def action_create_project(self):
        """Create a project per order and attach its analytic account
        back onto the order; always returns True."""
        project_obj = self.env['project.project']
        for order in self:
            vals = self._prepare_project_vals(order)
            project = project_obj.create(vals)
            order.write({
                'project_id': project.analytic_account_id.id
            })
        return True
"""
Index images on a tensor database
"""
import concurrent.futures
from loader.reader import Importer
from tensors.tensor_database import TensorDatabase
from features.extractors import FeatureExtractor
from features.extractors import Features
class Indexer:
    """Index a directory of images into a tensor database.

    Each image is loaded, its SIFT descriptors are extracted, and the
    result is stored under a database-safe version of the image name.
    """

    def __init__(self, images_path, database_path):
        self._images_path = images_path
        self._database_path = database_path

    @staticmethod
    def name_normalize(name):
        """Make an image path safe for use as a database key."""
        return name.replace('/', '|')

    @staticmethod
    def name_denormalize(name):
        """Invert :meth:`name_normalize`, restoring the original path."""
        return name.replace('|', '/')

    def index(self):
        """
        Do the index process
        """
        importer = Importer(self._images_path)
        tensors = TensorDatabase(self._database_path)
        extract = Features(FeatureExtractor.SIFT)
        with concurrent.futures.ThreadPoolExecutor() as executor:
            # Descriptors are extracted here (main thread); only the
            # database insert is handed to the pool.
            pending = {}
            for name, image in importer.load():
                future = executor.submit(
                    tensors.add,
                    self.name_normalize(name),
                    extract.descriptors(image))
                pending[future] = name
            # Drain the pool; result() re-raises any worker exception.
            for done in concurrent.futures.as_completed(pending):
                done.result()
# -*- encoding: utf-8 -*-
import os
import base64
from openerp.tools.safe_eval import safe_eval
from openerp import models, fields, api, _
from openerp.exceptions import except_orm
from openerp.tools import config
from openerp import SUPERUSER_ID
import core
from java_oe import JAVA_MAPPING, check_java_list, PARAM_VALUES
ADDONS_PATHS = config['addons_path'].split(",")
class report_xml(models.Model):
    """Extends ir.actions.report.xml with Pentaho (.prpt) report support:
    prpt file storage, output-format selection, optional auto-generated
    menu entries, and parameter validation for server-side invocation.
    Python 2 / old-API OpenERP code (cr/uid signatures, ``except X, e``).
    """
    _inherit = 'ir.actions.report.xml'

    def __init__(self, pool, cr):
        # Register the 'pentaho' report type exactly once; the selection
        # list is shared, so guard against duplicate registration.
        if not('pentaho','Pentaho Report') in self._columns['report_type'].selection:
            self._columns['report_type'].selection.append(('pentaho', 'Pentaho Report'))
        super(report_xml, self).__init__(pool, cr)

    pentaho_report_output_type = fields.Selection([("pdf", "PDF"), ("html", "HTML"), ("csv", "CSV"), ("xls", "Excel"), ("xlsx", "Excel 2007"), ("rtf", "RTF"), ("txt", "Plain text")],
                                                  string = 'Output format')
    pentaho_report_model_id = fields.Many2one('ir.model', string='Model')
    pentaho_file = fields.Binary(string='File', filters='*.prpt')
    pentaho_filename = fields.Char(string='Filename', size=256, required=False)
    # 'is_pentaho_report': fields.boolean('Is this a Pentaho report?'),
    linked_menu_id = fields.Many2one('ir.ui.menu', string='Linked menu item', select=True)
    created_menu_id = fields.Many2one('ir.ui.menu', string='Created menu item')
    # This is not displayed on the client - it is a trigger to indicate that
    # a prpt file needs to be loaded - normally it is loaded by the client interface
    # In this case, the filename should be specified with a module path.
    pentaho_load_file = fields.Boolean(string='Load prpt file from filename')

    @api.onchange('report_type')
    def _onchange_report_type(self):
        # Selecting the Pentaho type forces defaults and keeps the
        # model / pentaho_report_model_id pair in sync both ways.
        if self.report_type == 'pentaho':
            self.auto = False
            self.pentaho_report_output_type = 'pdf'
            if self.model:
                if not self.pentaho_report_model_id or self.pentaho_report_model_id.model != self.model:
                    self.pentaho_report_model_id = self.env['ir.model'].search([('model', '=', self.model)])[0]
            else:
                if self.pentaho_report_model_id:
                    self.model = self.pentaho_report_model_id.model

    @api.onchange('pentaho_report_model_id')
    def _onchange_model_id(self):
        # Mirror the selected ir.model onto the legacy 'model' char field.
        if self.pentaho_report_model_id:
            self.model = self.pentaho_report_model_id.model
        else:
            self.model = False

    def copy(self, cr, uid, id, default=None, context=None):
        # A duplicate must never share the generated menu item.
        if default is None:
            default = {}
        if context is None:
            context = {}
        default = default.copy()
        default.update({'created_menu_id': 0})
        return super(report_xml, self).copy(cr, uid, id, default, context=context)

    def create_menu(self, cr, uid, vals, context=None):
        """Create a menu entry (and its window action) that opens the
        prompt wizard for this report; return the new menu id."""
        view_ids = self.pool.get('ir.ui.view').search(cr, uid, [('model', '=', 'ir.actions.report.promptwizard'), ('type', '=', 'form')], context=context)
        action_vals = {'name': vals.get('name', 'Pentaho Report'),
                       'res_model': 'ir.actions.report.promptwizard',
                       'type' : 'ir.actions.act_window',
                       'view_type': 'form',
                       'view_mode': 'tree,form',
                       'view_id' : view_ids and view_ids[0] or 0,
                       'context' : "{'service_name': '%s'}" % vals.get('report_name', ''),
                       'target' : 'new',
                       }
        action_id = self.pool.get('ir.actions.act_window').create(cr, uid, action_vals, context=context)
        # Menu is created as SUPERUSER so group restrictions can be set.
        result = self.pool.get('ir.ui.menu').create(cr, SUPERUSER_ID, {
            'name': vals.get('name' ,'Pentaho Report'),
            'sequence': 10,
            'parent_id': vals['linked_menu_id'],
            'groups_id': vals.get('groups_id', []),
            'icon': 'STOCK_PRINT',
            'action': 'ir.actions.act_window,%d' % (action_id,),
        }, context=context)
        return result

    def delete_menu(self, cr, uid, menu_id, context=None):
        """Remove a generated menu entry together with its window action."""
        action = self.pool.get('ir.ui.menu').browse(cr, uid, menu_id, context=context).action
        if action and action._model._name == 'ir.actions.act_window':
            self.pool.get('ir.actions.act_window').unlink(cr, uid, [action.id], context=context)
        result = self.pool.get('ir.ui.menu').unlink(cr, SUPERUSER_ID, [menu_id], context=context)
        return result

    def update_menu(self, cr, uid, action_report, context=None):
        """Create/update/delete the report's menu so it matches the
        current linked_menu_id; return the menu id (0 when none)."""
        if action_report.created_menu_id and not action_report.linked_menu_id:
            self.delete_menu(cr, uid, action_report.created_menu_id.id, context=context)
        if action_report.report_type == 'pentaho' and action_report.linked_menu_id:
            groups_id = [(6, 0, map(lambda x: x.id, action_report.groups_id))]
            if not action_report.created_menu_id:
                result = self.create_menu(cr, uid, {'name': action_report.name,
                                                    'linked_menu_id': action_report.linked_menu_id.id,
                                                    'report_name': action_report.report_name,
                                                    'groups_id': groups_id,
                                                    }, context=context)
            else:
                # Refresh the existing action's name and context, keeping
                # whatever other context keys it already carries.
                action = action_report.created_menu_id.action
                if action and action._model._name == 'ir.actions.act_window':
                    existing_context = safe_eval(self.pool.get('ir.actions.act_window').browse(cr, uid, action.id, context=context).context)
                    new_context = existing_context if type(existing_context) == dict else {}
                    new_context['service_name'] = action_report.report_name or ''
                    self.pool.get('ir.actions.act_window').write(cr, uid, [action.id], {'name': action_report.name or 'Pentaho Report',
                                                                                        'context': str(new_context),
                                                                                        }, context=context)
                self.pool.get('ir.ui.menu').write(cr, SUPERUSER_ID, [action_report.created_menu_id.id], {'name': action_report.name or 'Pentaho Report',
                                                                                                         'parent_id': action_report.linked_menu_id.id,
                                                                                                         'groups_id': groups_id,
                                                                                                         }, context=context)
                result = action_report.created_menu_id.id
        else:
            result = 0
        return result

    def create(self, cr, uid, vals, context = None):
        # Pentaho reports are never auto-parsed RML; optionally create a
        # linked menu before the record itself exists.
        if vals.get('report_type','') == 'pentaho':
            vals.update({
                'auto': False,
            })
            if vals.get('linked_menu_id', False):
                vals['created_menu_id'] = self.create_menu(cr, uid, vals, context=context)
        res = super(report_xml, self).create(cr, uid, vals, context=context)
        self.update_pentaho(cr, uid, [res], context=context)
        return res

    def write(self, cr, uid, ids, vals, context = None):
        if vals.get('report_type','') == 'pentaho':
            vals.update({
                'type': 'ir.actions.report.xml',
                'auto': False,
            })
        res = super(report_xml, self).write(cr, uid, ids, vals, context=context)
        # Re-sync menus and prpt/ir.values state after every write.
        for r in self.browse(cr, uid, ids if isinstance(ids, list) else [ids], context=context):
            created_menu_id = self.update_menu(cr, uid, r, context=context)
            if created_menu_id != r.created_menu_id:
                super(report_xml, self).write(cr, uid, [r.id], {'created_menu_id': created_menu_id}, context=context)
        self.update_pentaho(cr, uid, ids if isinstance(ids, list) else [ids], context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        # Clean up generated menus and the ir.values bindings that put
        # the report on the model's Print menu.
        values_obj = self.pool.get('ir.values')
        for r in self.browse(cr, uid, ids, context=context):
            if r.created_menu_id:
                self.delete_menu(cr, uid, r.created_menu_id.id, context=context)
            values_obj.unlink(cr, SUPERUSER_ID, values_obj.search(cr, uid, [('value', '=', 'ir.actions.report.xml,%s' % r.id)]), context=context)
        return super(report_xml, self).unlink(cr, uid, ids, context=context)

    def update_pentaho(self, cr, uid, ids, context = None):
        """Synchronise stored prpt content and the ir.values binding for
        each report in *ids*; always returns True."""
        values_obj = self.pool.get('ir.values')
        for report in self.browse(cr, uid, ids):
            values_ids = values_obj.search(cr, uid, [('value', '=', 'ir.actions.report.xml,%s' % report.id)])
            if report.report_type == 'pentaho':
                if report.pentaho_filename:
                    if report.pentaho_load_file:
                        # if we receive a filename and no content, this has probably been loaded by a process other than the standard client, such as a data import
                        # in this case, we expect the filename to be a fully specified file within a module from which we load the file data
                        super(report_xml, self).write(cr, uid, [report.id], {'pentaho_filename': os.path.basename(report.pentaho_filename),
                                                                             'pentaho_file': self.read_content_from_file(report.pentaho_filename),
                                                                             'pentaho_load_file': False
                                                                             })
                        report = self.browse(cr, uid, report.id)
                    # path = self.save_content_to_file(report.pentaho_filename, report.pentaho_file)
                    # super(report_xml, self).write(cr, uid, [report.id], {'report_rml': path})
                    # we are no longer relying on report_rml to contain a name at all - for clarity, though, still store it...
                    super(report_xml, self).write(cr, uid, [report.id], {'report_rml': report.pentaho_filename})
                    # Reports without a menu get bound to the model's
                    # Print menu through ir.values instead.
                    if not report.linked_menu_id and report.pentaho_filename.endswith('.prpt'):
                        data = {
                            'name': report.name,
                            'model': report.model,
                            'key': 'action',
                            'object': True,
                            'key2': 'client_print_multi',
                            'value': 'ir.actions.report.xml,%s' % report.id,
                        }
                        if not values_ids:
                            values_obj.create(cr, SUPERUSER_ID, data, context=context)
                        else:
                            values_obj.write(cr, SUPERUSER_ID, values_ids, data, context=context)
                        values_ids = []
                        # core.register_pentaho_report(report.report_name)
                elif report.pentaho_file:
                    super(report_xml, self).write(cr, uid, [report.id], {'pentaho_file': False})
                # If this is a pentaho report and there are still "values_ids", it means that
                # the action is not considered valid - get rid of the values_ids...
                if values_ids:
                    values_obj.unlink(cr, SUPERUSER_ID, values_ids, context=context)
            # If this is not a pentaho report, then the action should always have a row in values
            else:
                if not values_ids:
                    values_obj.create(cr, SUPERUSER_ID, {
                        'name': report.name,
                        'model': report.model,
                        'key': 'action',
                        'object': True,
                        'key2': 'client_print_multi',
                        'value': 'ir.actions.report.xml,%s' % report.id,
                    },
                    context = context)
        return True

    def read_content_from_file(self, name):
        """Locate *name* under one of the configured addons paths and
        return its content base64-encoded; raise when not found."""
        path_found = False
        for addons_path in ADDONS_PATHS:
            try:
                os.stat(addons_path + os.sep + name)
                path_found = True
                break
            except:
                pass
        if not path_found:
            raise except_orm(_('Error'), _('Could not locate path for file %s') % name)
        # addons_path still holds the path that matched above.
        path = addons_path + os.sep + name
        with open(path, "rb") as report_file:
            data = base64.encodestring(report_file.read())
        return data

    def pentaho_validate_params(self, cr, uid, report, param_vals, context=None):
        """Validate a list of passed parameters against the defined params for
        a Pentaho report.
        Raises an exception if any of the params are invalid.
        @param report: Browse object on the ir.actions.report.xml record for the report.
        @param param_vals: Dict with parameter values to pass to the report. These are python
                           data types prior to conversion for passing to the Pentaho server.
        """
        param_defs = core.fetch_report_parameters(cr, uid, report.report_name, context=context)
        val_names = param_vals.keys()
        for pdef in param_defs:
            pname = pdef.get('name', '')
            if not pname:
                continue
            if pname in val_names:
                val_names.remove(pname)
            else:
                # Not supplied by the caller: fill in the report default
                # (first element when the default is a list), or fail for
                # mandatory parameters; defaults skip type checking.
                if pdef.get('default_value', False):
                    if type(pdef['default_value']) in (list, tuple):
                        param_vals[pname] = pdef['default_value'][0]
                    else:
                        param_vals[pname] = pdef['default_value']
                else:
                    if pdef.get('is_mandatory', False):
                        raise except_orm(_('Error'), _("Report '%s'. No value passed for mandatory report parameter '%s'.") % (report.report_name, pname))
                continue
            # Make sure data types match
            value_type = pdef.get('value_type', '')
            java_list, value_type = check_java_list(value_type)
            if not value_type in JAVA_MAPPING:
                raise except_orm(_('Error'), _("Report '%s', parameter '%s'. Type '%s' not supported.") % (report.report_name, pname, pdef.get('value_type', '')))
            local_type = JAVA_MAPPING[value_type](pdef.get('attributes', {}).get('data-format', False))
            param_val = param_vals[pname]
            if not local_type in PARAM_VALUES:
                raise except_orm(_('Error'), _("Report '%s', parameter '%s'. Local type '%s' not supported.") % (report.report_name, pname, local_type))
            if not isinstance(param_val, PARAM_VALUES[local_type]['py_types']):
                raise except_orm(_('Error'), _("Report '%s', parameter '%s'. Passed value is '%s' but must be one of '%s'.") % (report.report_name, pname, param_val.__class__.__name__, PARAM_VALUES[local_type]['py_types']))
            # Dry-run the conversion so failures surface here, not server-side.
            converter = PARAM_VALUES[local_type].get('convert')
            if converter:
                try:
                    converter(param_val)
                except Exception, e:
                    raise except_orm(_('Error'), _("Report '%s', parameter '%s'. Passed value '%s' failed data conversion to type '%s'.\n%s") % (report.report_name, pname, param_val, local_type, str(e)))
        # Make sure all passed values have a param to go to on the report.
        # This wouldn't raise an error on the Pentaho side but flagging it here
        # might save a lot of development time if a param is misnamed.
        if val_names:
            raise except_orm(_('Error'), _("Report '%s'. Parameter values not required by report: %s") % (report.report_name, val_names))

    def pentaho_report_action(self, cr, uid, service_name, active_ids=None, param_values=None, context=None):
        """Return the action definition to run a Pentaho report.
        The action definition is returned as a dict which can be returned
        to the OpenERP client from a wizard button or server action to
        cause the client to request the report.
        @param service_name: The report service name (without leading 'report.').
        @param active_ids: List of ids on the report model to pass.
        @param param_values: Dict with parameter values for the report.
                             The keys are the parameter names as defined by the Pentaho report.
        """
        report = False
        report_ids = self.search(cr, uid, [('report_name', '=', service_name)], context=context)
        if report_ids:
            report = self.browse(cr, uid, report_ids[0], context=context)
        if not report or report.report_type != 'pentaho':
            raise except_orm(_('Error'), _("Report '%s' is not a Pentaho report.") % service_name)
        if not active_ids and not param_values:
            raise except_orm(_('Error'), _("Report '%s' must be passed active ids or parameter values.") % service_name)
        datas = {'model': report.model,
                 'output_type': report.report_type,
                 }
        if active_ids:
            datas['ids'] = active_ids
        if param_values:
            self.pentaho_validate_params(cr, uid, report, param_values, context=context)
            datas['variables'] = param_values
        return {
            'type': 'ir.actions.report.xml',
            'report_name': report.report_name,
            'datas': datas,
        }
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
from lib.policy import PolicySet
from lib.subcommand import SubCommand
LOGGER = logging.getLogger('dmprof')
class ExpandCommand(SubCommand):
def __init__(self):
super(ExpandCommand, self).__init__(
'Usage: %prog expand <dump> <policy> <component> <depth>')
self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
metavar='/path/on/target@/path/on/host[:...]',
help='Read files in /path/on/host/ instead of '
'files in /path/on/target/.')
def do(self, sys_argv):
options, args = self._parse_args(sys_argv, 4)
dump_path = args[1]
target_policy = args[2]
component_name = args[3]
depth = args[4]
alternative_dirs_dict = {}
policy_set = PolicySet.load(SubCommand._parse_policy_list(target_policy))
if not policy_set[target_policy].find_rule(component_name):
sys.stderr.write("ERROR: Component %s not found in policy %s\n"
% (component_name, target_policy))
return 1
if options.alternative_dirs:
for alternative_dir_pair in options.alternative_dirs.split(':'):
target_path, host_path = alternative_dir_pair.split('@', 1)
alternative_dirs_dict[target_path] = host_path
(bucket_set, dump) = SubCommand.load_basic_files(
dump_path, False, alternative_dirs=alternative_dirs_dict)
ExpandCommand._output(dump, policy_set[target_policy], bucket_set,
component_name, int(depth), sys.stdout)
return 0
@staticmethod
def _output(dump, policy, bucket_set, component_name, depth, out):
"""Prints all stacktraces in a given component of given depth.
Args:
dump: A Dump object.
policy: A Policy object.
bucket_set: A BucketSet object.
component_name: A name of component for filtering.
depth: An integer representing depth to be printed.
out: An IO object to output.
"""
sizes = {}
ExpandCommand._accumulate(
dump, policy, bucket_set, component_name, depth, sizes)
sorted_sizes_list = sorted(
sizes.iteritems(), key=(lambda x: x[1]), reverse=True)
total = 0
# TODO(dmikurube): Better formatting.
for size_pair in sorted_sizes_list:
out.write('%10d %s\n' % (size_pair[1], size_pair[0]))
total += size_pair[1]
LOGGER.info('total: %d\n' % total)
@staticmethod
def _add_size(precedence, bucket, depth, committed, sizes):
stacktrace_sequence = precedence
for function, sourcefile in zip(
bucket.symbolized_stackfunction[
0 : min(len(bucket.symbolized_stackfunction), 1 + depth)],
bucket.symbolized_stacksourcefile[
0 : min(len(bucket.symbolized_stacksourcefile), 1 + depth)]):
stacktrace_sequence += '%s(@%s) ' % (function, sourcefile)
if not stacktrace_sequence in sizes:
sizes[stacktrace_sequence] = 0
sizes[stacktrace_sequence] += committed
@staticmethod
def _accumulate(dump, policy, bucket_set, component_name, depth, sizes):
    """Accumulates committed sizes into |sizes|, keyed by stacktrace strings.

    Dispatches on the allocator type of the policy rule for
    |component_name| ('malloc', 'mmap' or 'unhooked') and adds the
    committed bytes of every matching allocation via _add_size.

    Args:
        dump: A Dump object.
        policy: A Policy object.
        bucket_set: A BucketSet object.
        component_name: A name of component for filtering.
        depth: An integer representing depth to be printed.
        sizes: A dict mapping stacktrace strings to accumulated bytes,
            updated in place.
    """
    rule = policy.find_rule(component_name)
    if not rule:
        # Unknown component: nothing to accumulate.
        pass
    elif rule.allocator_type == 'malloc':
        for bucket_id, _, committed, allocs, frees in dump.iter_stacktrace:
            bucket = bucket_set.get(bucket_id)
            if not bucket or bucket.allocator_type == 'malloc':
                # NOTE(review): find_malloc is presumably prepared for a
                # None bucket -- confirm; bucket.typeinfo below would raise
                # AttributeError if a None bucket ever matched.
                component_match = policy.find_malloc(bucket)
            elif bucket.allocator_type == 'mmap':
                continue
            else:
                assert False
            if component_match == component_name:
                precedence = ''
                precedence += '(alloc=%d) ' % allocs
                precedence += '(free=%d) ' % frees
                if bucket.typeinfo:
                    precedence += '(type=%s) ' % bucket.symbolized_typeinfo
                    precedence += '(type.name=%s) ' % bucket.typeinfo_name
                ExpandCommand._add_size(precedence, bucket, depth, committed, sizes)
    elif rule.allocator_type == 'mmap':
        for _, region in dump.iter_map:
            # Only hooked (profiler-tracked) mmap regions are counted here.
            if region[0] != 'hooked':
                continue
            component_match, bucket = policy.find_mmap(region, bucket_set)
            if component_match == component_name:
                ExpandCommand._add_size('', bucket, depth,
                                        region[1]['committed'], sizes)
    elif rule.allocator_type == 'unhooked':
        for addr, region in dump.iter_map:
            if region[0] != 'unhooked':
                continue
            component_match = policy.find_unhooked(region)
            if component_match == component_name:
                # Build a /proc/<pid>/maps-like key:
                # "start-end perms offset major:minor inode name".
                precedence = ''
                precedence += '%s-' % hex(addr[0])[2:]
                precedence += '%s' % hex(addr[1])[2:]
                precedence += ' %s' % region[1]['vma']['readable']
                precedence += '%s' % region[1]['vma']['writable']
                precedence += '%s' % region[1]['vma']['executable']
                precedence += '%s' % region[1]['vma']['private']
                precedence += ' %s' % region[1]['vma']['offset']
                precedence += ' %s:' % region[1]['vma']['major']
                precedence += '%s' % region[1]['vma']['minor']
                precedence += ' %s' % region[1]['vma']['inode']
                precedence += ' %s' % region[1]['vma']['name']
                if not precedence in sizes:
                    sizes[precedence] = 0
                sizes[precedence] += region[1]['committed']
"""Deals with creating the normal mode representation arrays.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Classes:
InputNormalModes: Deals with creating the normal mode objects.
"""
import numpy as np
from copy import copy
from ipi.engine.normalmodes import *
from ipi.utils.inputvalue import *
from ipi.utils.units import *
__all__ = ['InputNormalModes']
class InputNormalModes(InputArray):
    """ Storage class for NormalModes engine.

    Describes how normal-modes transformation and integration should be
    performed.

    Attributes:
        mode: Specifies the method by which the dynamical masses are created.
        transform: Specifies whether the normal mode calculation will be
            done using a FFT transform or a matrix multiplication.
    """

    # Start from InputArray's attributes and add the normal-mode options.
    attribs = copy(InputArray.attribs)
    attribs["mode"] = (InputAttribute, {"dtype"   : str,
                                        "default" : "rpmd",
                                        "help"    : "Specifies the technique to be used to calculate the dynamical masses. 'rpmd' simply assigns the bead masses the physical mass. 'manual' sets all the normal mode frequencies except the centroid normal mode manually. 'pa-cmd' takes an argument giving the frequency to set all the non-centroid normal modes to. 'wmax-cmd' is similar to 'pa-cmd', except instead of taking one argument it takes two ([wmax,wtarget]). The lowest-lying normal mode will be set to wtarget for a free particle, and all the normal modes will coincide at frequency wmax. ",
                                        "options" : ['pa-cmd', 'wmax-cmd', 'manual', 'rpmd']})
    attribs["transform"] = (InputValue, {"dtype" : str,
                                         "default" : "fft",
                                         "help" : "Specifies whether to calculate the normal mode transform using a fast Fourier transform or a matrix multiplication. For small numbers of beads the matrix multiplication may be faster.",
                                         "options" : ['fft', 'matrix']})

    default_help = "Deals with the normal mode transformations, including the adjustment of bead masses to give the desired ring polymer normal mode frequencies if appropriate. Takes as arguments frequencies, of which different numbers must be specified and which are used to scale the normal mode frequencies in different ways depending on which 'mode' is specified."
    default_label = "NORMALMODES"

    def __init__(self, help=None, dimension=None, default=None, dtype=None):
        """ Initializes InputNormalModes.

        Just calls the parent initialization function with appropriate
        arguments.  The stored array holds normal mode frequencies, so the
        dtype is forced to float and the dimension to "frequency"; the
        dimension and dtype parameters are accepted only for interface
        compatibility with InputArray and are not forwarded.
        """
        super(InputNormalModes, self).__init__(help=help, default=default, dtype=float, dimension="frequency")

    def store(self, nm):
        """Takes a normal modes instance and stores a minimal representation
        of it.

        Args:
            nm: A normal modes object.
        """
        # The array payload is the list of normal mode frequencies.
        super(InputNormalModes, self).store(nm.nm_freqs)
        self.mode.store(nm.mode)
        self.transform.store(nm.transform_method)

    def fetch(self):
        """Creates a normal modes object.

        Returns:
            A normal modes object.
        """
        # Validate the stored frequency array before constructing the object.
        super(InputNormalModes, self).check()
        return NormalModes(self.mode.fetch(), self.transform.fetch(), super(InputNormalModes, self).fetch())
from zope.interface import implements
from twisted.internet import reactor
from twisted.internet import defer
from webut.skin import iskin
from ldaptor.protocols import pureldap
from ldaptor.protocols.ldap import ldapsyntax, distinguishedname
from ldaptor import generate_password, interfaces
from ldaptor.apps.webui.uriquote import uriUnquote
from ldaptor import weave
from ldaptor.apps.webui.i18n import _
from ldaptor.apps.webui import i18n
import os
from nevow import rend, inevow, loaders, url, tags
from formless import annotate, webform, iformless, configurable
def getEntry(ctx, dn):
    """Return an LDAPEntry for dn, bound to the logged-in user's client."""
    session = ctx.locate(inevow.ISession)
    user = session.getLoggedInRoot().loggedIn
    return ldapsyntax.LDAPEntry(client=user.client, dn=dn)
def getEntryWithAttributes(ctx, dn, *attributes):
    """Fetch the given attributes of dn; returns a Deferred firing the entry."""
    entry = getEntry(ctx, dn)
    return entry.fetch(*attributes)
def getServiceName(ctx, dn):
    """Return a Deferred firing the cn of the service password entry at dn.

    The RuntimeError for a missing cn attribute is delivered
    asynchronously, through the Deferred's errback chain.
    """
    d = getEntryWithAttributes(ctx, dn, 'cn')
    def _cb(e):
        # Return the first cn value; a service entry is expected to have one.
        for cn in e.get('cn', []):
            return cn
        # Call-style raise: valid in both Python 2 and 3 (the old
        # ``raise RuntimeError, msg`` statement form is Python 2 only).
        raise RuntimeError(
            _("Service password entry has no attribute cn: %r") % e)
    d.addCallback(_cb)
    return d
def checkPasswordTypos(newPassword, again):
    """Raise a form validation error unless the two password entries match."""
    if newPassword == again:
        return
    raise annotate.ValidateError(
        {},
        formErrorMessage=_('Passwords do not match.'))
class RemoveServicePassword(configurable.Configurable):
    """Form that deletes the service password entry at a given dn."""

    def __init__(self, dn):
        super(RemoveServicePassword, self).__init__(None)
        self.dn = dn

    def getBindingNames(self, ctx):
        # Only one action is exposed on this form.
        return ['remove']

    def bind_remove(self, ctx):
        # Describe the "remove" action for the formless form renderer.
        return annotate.MethodBinding(
            'remove',
            annotate.Method(arguments=[
                annotate.Argument('ctx', annotate.Context()),
                ],
                label=_('Remove')),
            action=_('Remove'))

    def remove(self, ctx):
        """Delete the entry; returns a Deferred firing a status message."""
        e = getEntry(ctx, self.dn)
        # Fetch the service name first so it can be reported after deletion.
        d = getServiceName(ctx, self.dn)
        def _delete(name, e):
            d = e.delete()
            # Carry the name through to the report callback.
            d.addCallback(lambda _: name)
            return d
        d.addCallback(_delete, e)
        def _report(name):
            return _('Removed service %r') % name
        d.addCallback(_report)
        return d
class SetServicePassword(configurable.Configurable):
    """Form that sets an explicit password on the service entry at dn."""

    def __init__(self, dn):
        super(SetServicePassword, self).__init__(None)
        self.dn = dn

    def getBindingNames(self, ctx):
        return ['setServicePassword']

    def bind_setServicePassword(self, ctx):
        # Form description: two required password fields plus the context.
        return annotate.MethodBinding(
            'setServicePassword',
            annotate.Method(arguments=[
                annotate.Argument('ctx', annotate.Context()),
                annotate.Argument('newPassword', annotate.PasswordEntry(required=True,
                                                                        label=_('New password'))),
                annotate.Argument('again', annotate.PasswordEntry(required=True,
                                                                  label=_('Again'))),
                ],
                label=_('Set password')),
            action=_('Set password'))

    def _isPasswordAcceptable(self, ctx, newPassword, again):
        # Currently only checks that the two entries match; hook point for
        # stricter password policies.
        return checkPasswordTypos(newPassword, again)

    def setServicePassword(self, ctx, newPassword, again):
        """Validate and set the password; Deferred fires a status message."""
        d = defer.maybeDeferred(self._isPasswordAcceptable, ctx, newPassword, again)
        def _setPassword(ctx, dn, newPassword):
            e = getEntry(ctx, dn)
            d = defer.maybeDeferred(e.setPassword, newPasswd=newPassword)
            return d
        d.addCallback(lambda _: _setPassword(ctx, self.dn, newPassword))
        def _getName(_, ctx):
            # Fetch the service name so the report can mention it.
            d = getServiceName(ctx, self.dn)
            return d
        d.addCallback(_getName, ctx)
        def _report(name):
            return _('Set password for service %r') % name
        d.addCallback(_report)
        return d
class SetRandomServicePassword(configurable.Configurable):
    """Form that sets a freshly generated random password on the entry."""

    def __init__(self, dn):
        super(SetRandomServicePassword, self).__init__(None)
        self.dn = dn

    def getBindingNames(self, ctx):
        return ['generateRandom']

    def bind_generateRandom(self, ctx):
        # Single-button form: no user input besides the context.
        return annotate.MethodBinding(
            'generateRandom',
            annotate.Method(arguments=[
                annotate.Argument('ctx', annotate.Context()),
                ],
                label=_('Generate random')),
            action=_('Generate random'))

    def generateRandom(self, ctx):
        """Generate one random password, set it, and report it to the user."""
        d = generate_password.generate(reactor)
        def _first(passwords):
            # generate() fires with a list; exactly one password is expected.
            assert len(passwords) == 1
            return passwords[0]
        d.addCallback(_first)
        def _setPass(newPassword, ctx):
            e = getEntry(ctx, self.dn)
            d = e.setPassword(newPassword)
            def _getName(_, ctx):
                d = getServiceName(ctx, self.dn)
                return d
            d.addCallback(_getName, ctx)
            def _report(name, newPassword):
                # NOTE(review): the plaintext password is included in the
                # status message shown to the user -- presumably intentional
                # so they can record it; confirm it is never logged.
                return _('Service %r password set to %s') % (name, newPassword)
            d.addCallback(_report, newPassword)
            return d
        d.addCallback(_setPass, ctx)
        return d
class AddService(configurable.Configurable):
    """Form that creates a new service password entry under self.dn.

    The entry is created as a serviceSecurityObject child named after the
    service and owned by self.dn; its password is either supplied by the
    user or generated randomly when the password fields are left empty.
    """

    def __init__(self, dn):
        super(AddService, self).__init__(None)
        self.dn = dn

    def getBindingNames(self, ctx):
        return ['add']

    def bind_add(self, ctx):
        # Form description: service name is required, passwords optional.
        return annotate.MethodBinding(
            'add',
            annotate.Method(arguments=[
                annotate.Argument('ctx', annotate.Context()),
                annotate.Argument('serviceName', annotate.String(required=True,
                                                                 label=_('Service name'))),
                annotate.Argument('newPassword', annotate.PasswordEntry(required=False,
                                                                        label=_('New password'),
                                                                        description=_("Leave empty to generate random password."))),
                annotate.Argument('again', annotate.PasswordEntry(required=False,
                                                                  label=_('Again'))),
                ],
                label=_('Add')),
            action=_('Add'))

    def add(self, ctx, serviceName, newPassword, again):
        """Validate the form and create the entry; Deferred fires a message."""
        if newPassword or again:
            # Only check for typos when the user actually typed something.
            checkPasswordTypos(newPassword, again)
        if not newPassword:
            d = self._generate(ctx, serviceName)
        else:
            d = self._add(ctx, newPassword, serviceName)
        return d

    def _cbSetPassword(self, ctx, newPassword, serviceName):
        # Create the child entry with a locked password ('{crypt}!'), then
        # set the real password on it.
        e = getEntry(ctx, self.dn)
        rdn = distinguishedname.RelativeDistinguishedName(
            attributeTypesAndValues=[
                distinguishedname.LDAPAttributeTypeAndValue(
                    attributeType='cn', value=serviceName),
                distinguishedname.LDAPAttributeTypeAndValue(
                    attributeType='owner', value=str(self.dn))
                ])
        d = e.addChild(rdn, {
            'objectClass': ['serviceSecurityObject'],
            'cn': [serviceName],
            'owner': [str(self.dn)],
            'userPassword': ['{crypt}!'],
            })
        def _setPass(e, newPassword):
            d = e.setPassword(newPassword)
            return d
        d.addCallback(_setPass, newPassword)
        return d

    def _generate(self, ctx, serviceName):
        # Random-password path: the generated password is echoed back to
        # the user in the status message.
        d = generate_password.generate(reactor)
        def _first(passwords):
            assert len(passwords) == 1
            return passwords[0]
        d.addCallback(_first)
        def _cb(newPassword, serviceName):
            d = self._cbSetPassword(ctx, newPassword, serviceName)
            d.addCallback(lambda dummy: _('Added service %r with password %s') % (serviceName, newPassword))
            return d
        d.addCallback(_cb, serviceName)
        return d

    def _add(self, ctx, newPassword, serviceName):
        # User-supplied-password path.
        d = self._cbSetPassword(ctx, newPassword, serviceName)
        def _report(dummy, name):
            return _('Added service %r') % name
        d.addCallback(_report, serviceName)
        return d
class ServicePasswordChangeMixin(object):
    """Mixin providing per-service password-change sub-forms.

    Subclasses declare actions as class attributes named
    ``servicePasswordAction_<priority>_<name>`` whose value is a
    configurable factory taking a dn.  The mixin renders one sub-form per
    service entry owned by self.dn and routes formless configurable
    lookups of the form ``service_<action>_<dn>`` to the right factory.
    """

    def __init__(self, dn):
        super(ServicePasswordChangeMixin, self).__init__()
        self.dn = dn

    def listServicePasswordActions(self):
        """Yield action names sorted by their numeric priority."""
        l = [(int(pri), name)
             for x, pri, name in [name.split('_', 2) for name in dir(self)
                                  if name.startswith('servicePasswordAction_')]]
        l.sort()
        for pri, name in l:
            yield name

    def getServicePasswordAction(self, name):
        """Return the factory registered for action |name|, or None."""
        for attrName in dir(self):
            if not attrName.startswith('servicePasswordAction_'):
                continue
            x, pri, actionName = attrName.split('_', 2)
            if actionName == name:
                return getattr(self, attrName)
        return None

    def render_servicePasswords(self, ctx, data):
        # Pull the 'main' pattern out of the service-passwords template
        # that lives next to this module.
        docFactory = loaders.xmlfile(
            'change_service_passwords.xhtml',
            templateDir=os.path.split(os.path.abspath(__file__))[0])
        r = inevow.IQ(docFactory).onePattern('main')
        return r

    def render_hideIfNot(self, ctx, data):
        # Render the tag only when there is data to show.
        if data:
            return ctx.tag
        else:
            return tags.invisible()

    def data_servicePasswords(self, ctx, data):
        """Search for serviceSecurityObject entries owned by self.dn."""
        user = ctx.locate(inevow.ISession).getLoggedInRoot().loggedIn
        config = interfaces.ILDAPConfig(ctx)
        e = ldapsyntax.LDAPEntry(client=user.client, dn=config.getBaseDN())
        d = e.search(filterObject=pureldap.LDAPFilter_and([
            pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('objectClass'),
                                              assertionValue=pureldap.LDAPAssertionValue('serviceSecurityObject')),
            pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('owner'),
                                              assertionValue=pureldap.LDAPAssertionValue(str(self.dn))),
            pureldap.LDAPFilter_present('cn'),
            ]),
            attributes=['cn'])
        return d

    def render_form_service(self, ctx, data):
        # TODO error messages for one password change form display in
        # all of them.
        e = inevow.IData(ctx)
        for name in self.listServicePasswordActions():
            yield webform.renderForms('service_%s_%s' % (name, e.dn))[ctx.tag()]

    def locateConfigurable(self, ctx, name):
        """Route service_<action>_<dn> names to the matching action factory."""
        try:
            return super(ServicePasswordChangeMixin, self).locateConfigurable(ctx, name)
        except AttributeError:
            if name.startswith('service_'):
                pass
            else:
                raise
        rest = name[len('service_'):]
        l = rest.split('_', 1)
        if len(l) != 2:
            # Call-style raise: valid in both Python 2 and 3 (the old
            # ``raise AttributeError, name`` statement form is Python 2 only).
            raise AttributeError(name)
        c = self.getServicePasswordAction(l[0])
        if c is None:
            raise AttributeError(name)
        return iformless.IConfigurable(c(l[1]))

    render_zebra = weave.zebra()

    render_i18n = i18n.render()
class ConfirmChange(ServicePasswordChangeMixin, rend.Page):
    """Password change page for the entry at self.dn.

    Combines a set-password form, a random password generator and the
    per-service password sub-forms contributed by
    ServicePasswordChangeMixin.
    """
    implements(iskin.ISkinnable)

    title = _('Ldaptor Password Change Page')

    addSlash = True

    # Page template lives next to this module.
    docFactory = loaders.xmlfile(
        'change_password.xhtml',
        templateDir=os.path.split(os.path.abspath(__file__))[0])

    def getBindingNames(self, ctx):
        return ['setPassword', 'generateRandom']

    def bind_setPassword(self, ctx):
        # Form description: two required password fields plus the context.
        return annotate.MethodBinding(
            'setPassword',
            annotate.Method(arguments=[
                annotate.Argument('ctx', annotate.Context()),
                annotate.Argument('newPassword', annotate.PasswordEntry(required=True,
                                                                        label=_('New password'))),
                annotate.Argument('again', annotate.PasswordEntry(required=True,
                                                                  label=_('Again'))),
                ],
                label=_('Set password')),
            action=_('Set password'))

    def bind_generateRandom(self, ctx):
        # Single-button form: no user input besides the context.
        return annotate.MethodBinding(
            'generateRandom',
            annotate.Method(arguments=[
                annotate.Argument('ctx', annotate.Context()),
                ],
                label=_('Generate random')),
            action=_('Generate random'))

    # Service password actions, ordered by the numeric priority in the name.
    servicePasswordAction_10_remove = RemoveServicePassword
    servicePasswordAction_20_set = SetServicePassword
    servicePasswordAction_30_random = SetRandomServicePassword

    def _setPassword(self, ctx, password):
        # Set the password on the entry itself (not on a service child).
        e = getEntry(ctx, self.dn)
        d = defer.maybeDeferred(e.setPassword, newPasswd=password)
        return d

    def setPassword(self, ctx, newPassword, again):
        """Validate and set a user-chosen password; fires a status string."""
        d = defer.maybeDeferred(checkPasswordTypos, newPassword, again)
        d.addCallback(lambda dummy: self._setPassword(ctx, newPassword))
        d.addCallback(lambda dummy: _('Password set.'))
        def eb(fail):
            # Turn any failure into a user-visible status message.
            return _("Failed: %s") % fail.getErrorMessage()
        d.addErrback(eb)
        return d

    def generateRandom(self, ctx):
        """Generate a random password, set it, and report it to the user."""
        d = generate_password.generate(reactor)
        def _first(passwords):
            # Exactly one password was requested.
            assert len(passwords) == 1
            return passwords[0]
        d.addCallback(_first)
        def _status(newPassword, ctx):
            d = self._setPassword(ctx, newPassword)
            d.addCallback(lambda dummy: _('Password set to %s') % newPassword)
            return d
        d.addCallback(_status, ctx)
        def eb(fail):
            return _("Failed: %s") % fail.getErrorMessage()
        d.addErrback(eb)
        return d

    def data_status(self, ctx, data):
        # Status message from a previous form post, if any.
        try:
            return ctx.locate(inevow.IStatusMessage)
        except KeyError:
            return ''

    def data_dn(self, ctx, data):
        return self.dn

    def render_form(self, ctx, data):
        return webform.renderForms()

    def render_passthrough(self, ctx, data):
        return ctx.tag.clear()[data]

    def data_header(self, ctx, data):
        # Navigation links, built relative to two directories up.
        u = url.URL.fromContext(ctx)
        u = u.parentdir().parentdir().clear()
        l = []
        l.append(tags.a(href=u.sibling("search"))[_("Search")])
        l.append(tags.a(href=u.sibling("add"))[_("add new entry")])
        l.append(tags.a(href=u.sibling("edit").child(str(self.dn)))[_("edit")])
        l.append(tags.a(href=u.sibling("delete").child(str(self.dn)))[_("delete")])
        return l

    def render_add(self, ctx, data):
        return webform.renderForms('add')

    def configurable_add(self, ctx):
        return AddService(self.dn)

    render_i18n = i18n.render()

    def render_data(self, ctx, data):
        return ctx.tag.clear()[data]
class GetDN(rend.Page):
    """Resource that maps a DN in the URL to its password-change page."""

    addSlash = True

    def child_(self, ctx):
        # No DN in the URL: redirect to the logged-in user's own entry.
        entry = inevow.ISession(ctx).getLoggedInRoot().loggedIn
        u = inevow.IRequest(ctx).URLPath()
        return u.child(str(entry.dn))

    def childFactory(self, ctx, name):
        """Return a ConfirmChange page for the DN in the URL, or None."""
        unquoted = uriUnquote(name)
        try:
            dn = distinguishedname.DistinguishedName(stringValue=unquoted)
        # Modern except syntax (the old ``except X, e`` form is Python 2
        # only); the exception value was unused, so it is not bound.
        except distinguishedname.InvalidRelativeDistinguishedName:
            # TODO There's no way to throw a FormException at this stage.
            return None
        return ConfirmChange(dn=dn)
def getResource():
    """Entry point used by the site setup to obtain the root resource."""
    page = GetDN()
    return page
# جولة سريعة
[[open-in-colab]]
ابدأ رحلتك مع مكتبة 🤗 Transformers! سواء كنت مطورًا أو مستخدمًا عاديًا، ستساعدك هذه الجولة السريعة على البدء وستُظهر لك كيفية استخدام [`pipeline`] للاستنتاج، وتحميل نموذج مُدرب مسبقًا ومعالج مُسبق مع [AutoClass](./model_doc/auto)، وتدريب نموذج بسرعة باستخدام PyTorch أو TensorFlow. إذا كنت مبتدئًا، نوصي بالاطلاع على دروسنا أو [الدورة](https://huggingface.co/course/chapter1/1) للحصول على شرح أكثر تعمقًا للمفاهيم المقدمة هنا.
قبل البدء، تأكد من تثبيت جميع المكتبات الضرورية:
```bash
!pip install transformers datasets evaluate accelerate
```
ستحتاج أيضًا إلى تثبيت إطار عمل التعلم الآلي المفضل لديك:
```bash
pip install torch
```
## خط الأنابيب
<Youtube id="tiZFewofSLM"/>
يمثل [`pipeline`] أسهل وأسرع طريقة لاستخدام نموذج مُدرب مسبقًا للاستنتاج. يمكنك استخدام [`pipeline`] جاهزًا للعديد من المهام عبر طرق مختلفة، والتي يظهر بعضها في الجدول أدناه:
<Tip>
للاطلاع على القائمة الكاملة للمهام المتاحة، راجع [مرجع واجهة برمجة التطبيقات الخاصة بخط الأنابيب](./main_classes/pipelines).
</Tip>
<div dir="rtl">
| **المهمة** | **الوصف** | **الطريقة** | **معرف خط الأنابيب** |
|------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|-----------------------------------------------|
| تصنيف النص | تعيين تسمية إلى تسلسل نص معين | NLP | pipeline(task="sentiment-analysis") |
| توليد النص | توليد نص بناءً على موجه معين | NLP | pipeline(task="text-generation") |
| تلخيص | توليد ملخص لتسلسل نص أو مستند | NLP | pipeline(task="summarization") |
| تصنيف الصور | تعيين تسمية لصورة معينة | رؤية حاسوبية | pipeline(task="image-classification") |
| تجزئة الصورة | تعيين تسمية لكل بكسل فردي في الصورة (يدعم التجزئة الدلالية، والمجملة، وتجزئة مثيلات) | رؤية حاسوبية | pipeline(task="image-segmentation") |
| اكتشاف الأشياء | التنبؤ بحدود الأشياء وفئاتها في صورة معينة | رؤية حاسوبية | pipeline(task="object-detection") |
| تصنيف الصوت | تعيين تسمية لبيانات صوتية معينة | صوتي | pipeline(task="audio-classification") |
| التعرف على الكلام التلقائي | نسخ الكلام إلى نص | صوتي | pipeline(task="automatic-speech-recognition") |
| الإجابة على الأسئلة البصرية | الإجابة على سؤال حول الصورة، مع إعطاء صورة وسؤال | متعدد الوسائط | pipeline(task="vqa") |
| الإجابة على أسئلة المستندات | الإجابة على سؤال حول المستند، مع إعطاء مستند وسؤال | متعدد الوسائط | pipeline(task="document-question-answering") |
| كتابة تعليق على الصورة | إنشاء تعليق على صورة معينة | متعدد الوسائط | pipeline(task="image-to-text") |
</div>
ابدأ بإنشاء مثيل من [`pipeline`] وتحديد المهمة التي تريد استخدامه لها. في هذا الدليل، ستستخدم خط الأنابيب للتحليل النصي كنموذج:
```py
>>> from transformers import pipeline
>>> classifier = pipeline("sentiment-analysis")
```
يقوم [`pipeline`] بتنزيل وتخزين نسخة احتياطية من نموذج افتراضي [مُدرب مسبقًا](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) ومعالج للتحليل النصي. الآن يمكنك استخدام `classifier` على النص المستهدف:
```py
>>> classifier("We are very happy to show you the 🤗 Transformers library.")
[{'label': 'POSITIVE', 'score': 0.9998}]
```
إذا كان لديك أكثر من إدخال واحد، قم بتمرير إدخالاتك كقائمة إلى [`pipeline`] لإرجاع قائمة من القواميس:
```py
>>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."])
>>> for result in results:
... print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
label: POSITIVE, with score: 0.9998
label: NEGATIVE, with score: 0.5309
```
يمكن لخط الأنابيب أيضًا أن يتنقل خلال مجموعة بيانات كاملة لأي مهمة تريدها. كمثال على ذلك، دعنا نختار التعرف على الكلام التلقائي كمهمة لنا:
```py
>>> import torch
>>> from transformers import pipeline
>>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
```
قم بتحميل مجموعة بيانات صوتية (راجع دليل البدء السريع لـ 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart#audio) للحصول على مزيد من التفاصيل) التي تريد التنقل خلالها. على سبيل المثال، قم بتحميل مجموعة بيانات [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14):
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT
```
يجب التأكد من أن نفس الجودة الصوتية (معدل أخذ العينات) لمجموعة البيانات يتطابق مع معدل أخذ العينات الذي تم تدريب [`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h) عليه:
```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))
```
يتم تحميل الملفات الصوتية وإعادة تشكيلها تلقائيًا عند استدعاء العمود "audio".
استخرج المصفوفات الموجية الخام من أول 4 عينات ومررها كقائمة إلى خط الأنابيب:
```py
>>> result = speech_recognizer(dataset[:4]["audio"])
>>> print([d["text"] for d in result])
['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I FURN A JOINA COUT']
```
بالنسبة لمجموعات البيانات الكبيرة التي تحتوي على مدخلات ضخمة (كما هو الحال في البيانات الصوتية أو المرئية)، يفضل تمرير مولد (generator) بدلاً من قائمة لتحميل جميع المدخلات في الذاكرة دفعة واحدة. راجع [مرجع واجهة برمجة التطبيقات الخاصة بخط الأنابيب](./main_classes/pipelines) للحصول على مزيد من المعلومات.
### استخدم نموذجًا ومجزئًا آخرين في خط الأنابيب
يمكن لخط الأنابيب [`pipeline`] استيعاب أي نموذج من [Hub](https://huggingface.co/models)، مما يسهل التكيف مع حالات الاستخدام الأخرى. على سبيل المثال، إذا كنت تريد نموذجًا قادرًا على التعامل مع النص الفرنسي، فاستخدم العلامات على Hub لتصفية النماذج والعثور على نموذج مناسب. وتكون النتيجة الأولى المرشحة نموذج [BERT متعدد اللغات](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) الذي تم ضبطه مسبقًا لتحليل المشاعر والذي يمكنك استخدامه للنص الفرنسي:
```py
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
```
استخدم [`AutoModelForSequenceClassification`] و [`AutoTokenizer`] لتحميل النموذج المُدرب مسبقًا ومعالجته المرتبط به (مزيد من المعلومات حول `AutoClass` في القسم التالي):
```py
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
حدد النموذج والمعالج في [`pipeline`]. الآن يمكنك تطبيق `classifier` على النص الفرنسي:
```py
>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
>>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.")
[{'label': '5 stars', 'score': 0.7273}]
```
إذا لم تجد نموذجًا جاهزًا يناسب مهمتك، فستحتاج إلى ضبط نموذج مُدرب مسبقًا على بياناتك. اطلع على [دليل الضبط الدقيق](./training) للتعرف على كيفية القيام بذلك. وبعد ضبط نموذجك المُدرب مسبقًا، يرجى مراعاة [المشاركة](./model_sharing) النموذج مع المجتمع على Hub لمساعدة الجميع في مجال التعلم الآلي! 🤗
## AutoClass
<Youtube id="AhChOFRegn4"/>
في الخلفية، تعمل فئتا [`AutoModelForSequenceClassification`] و [`AutoTokenizer`] معًا لتشغيل دالة pipeline() الذي استخدمتها أعلاه. تعتبر [AutoClass](./model_doc/auto) اختصارًا يقوم تلقائيًا باسترداد بنية نموذج مُدرب مسبقًا من اسمه أو مساره. كل ما عليك فعله هو تحديد فئة `AutoClass` المناسبة لمهمتك وفئة المعالجة المرتبطة بها.
لنعد إلى المثال من القسم السابق ولنرى كيف يمكنك استخدام `AutoClass` لتكرار نتائج خط الأنابيب.
### المجزئ التلقائي (AutoTokenizer)
يتولى المجزئ مسؤولية تحويل النص إلى مصفوفة من الأرقام (رموز) يمكن للنموذج فهمها ومعالجتها. هناك قواعد متعددة تحكم عملية التجزئة، بما في ذلك كيفية تقسيم كلمة وما هو المستوى الذي يجب أن تقسيم الكلمات عنده (تعرف على المزيد حول المعالجة في [ملخص المجزئ](./tokenizer_summary)). أهم شيء يجب تذكره هو أنك تحتاج إلى إنشاء مثيل للمجزئ بنفس اسم النموذج لضمان استخدامك لقواعد التجزئة نفسها التي تم تدريب النموذج عليها.
قم بتحميل المجزئ باستخدام [`AutoTokenizer`]:
```py
>>> from transformers import AutoTokenizer
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
مرر نصك إلى المجزئ:
```py
>>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.")
>>> print(encoding)
{'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
يعيد المجزئ قاموسًا يحتوي على:
* [input_ids](./glossary#input-ids): التمثيلات الرقمية لرموزك.
* [attention_mask](./glossary#attention-mask): تشير إلى الرموز التي يجب الانتباه بها.
يمكن للمجزئ أيضًا قبول قائمة من المدخلات، ويقوم بـ "حشو" و"تقصير" النص لإرجاعها كدفعة بطول موحد:
```py
>>> pt_batch = tokenizer(
... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."],
... padding=True,
... truncation=True,
... max_length=512,
... return_tensors="pt",
... )
```
<Tip>
اطلع على [الدليل التمهيدي للمعالجة المسبقة](./preprocessing) للحصول على مزيد من التفاصيل حول المعالجة، وكيفية استخدام [`AutoImageProcessor`] و [`AutoFeatureExtractor`] و [`AutoProcessor`] لمعالجة الصور والصوت والإدخالات متعددة الوسائط.
</Tip>
### AutoModel
تقدم مكتبة 🤗 Transformers طريقة بسيطة وموحدة لتحميل نماذج مدربة مسبقًا. وهذا يعني أنه يمكنك تحميل [`AutoModel`] كما لو كنت تقوم بتحميل [`AutoTokenizer`]. الفرق الوحيد هو اختيار فئة [`AutoModel`] المناسبة للمهمة. بالنسبة لتصنيف النص (أو التسلسل)، يجب عليك تحميل [`AutoModelForSequenceClassification`]:
```py
>>> from transformers import AutoModelForSequenceClassification
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)
```
<Tip>
راجع [ملخص المهمة](./task_summary) للاطلاع على المهام التي تدعمها فئة [`AutoModel`].
</Tip>
الآن قم بتمرير دفعة المدخلات المُعالجة مسبقًا مباشرة إلى النموذج. عليك فقط فك تعبئة القاموس عن طريق إضافة `**`:
```py
>>> pt_outputs = pt_model(**pt_batch)
```
يُخرج النموذج التنشيطات النهائية في سمة `logits`. طبق دالة softmax على `logits` للحصول على الاحتمالات:
```py
>>> from torch import nn
>>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
>>> print(pt_predictions)
tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
[0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>)
```
<Tip>
تخرج جميع نماذج 🤗 Transformers (PyTorch أو TensorFlow) المصفوفات *قبل* دالة التنشيط النهائية (مثل softmax) لأن دالة التنشيط النهائية غالبًا ما تكون مدمجة مع دالة الخسارة. نواتج النموذج عبارة عن فئات بيانات خاصة، لذلك يتم استكمال سماتها تلقائيًا في IDE. وتتصرف مخرجات النموذج مثل زوج مرتب أو قاموس (يمكنك الفهرسة باستخدام عدد صحيح ، شريحة، أو سلسلة)، وفي هذه الحالة، يتم تجاهل السمات التي تساوي None.
</Tip>
### حفظ النموذج
بمجرد ضبط نموذجك، يمكنك حفظه مع برنامج الترميز الخاص به باستخدام [`PreTrainedModel.save_pretrained`]:
```py
>>> pt_save_directory = "./pt_save_pretrained"
>>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT
>>> pt_model.save_pretrained(pt_save_directory)
```
عندما تكون مستعدًا لاستخدام النموذج مرة أخرى، أعد تحميله باستخدام [`PreTrainedModel.from_pretrained`]:
```py
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")
```
من الميزات الرائعة في 🤗 Transformers القدرة على حفظ نموذج وإعادة تحميله كنموذج PyTorch أو TensorFlow. يمكن أن يحول معامل `from_pt` أو `from_tf` النموذج من إطار عمل إلى آخر:
```py
>>> from transformers import AutoModel
>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```
## إنشاء نماذج مخصصة
يمكنك تعديل فئة تكوين النموذج لتغيير كيفية بناء النموذج. يحدد التكوين سمات النموذج، مثل عدد الطبقات المخفية أو رؤوس الاهتمام. تبدأ من الصفر عند تهيئة نموذج من فئة تكوين مخصصة. يتم تهيئة سمات النموذج بشكل عشوائي، ويجب تدريب النموذج قبل استخدامه للحصول على نتائج ذات معنى.
ابدأ باستيراد [`AutoConfig`]. ثم قم بتحميل النموذج المُدرب مسبقًا الذي تريد تعديله. ضمن [`AutoConfig.from_pretrained`]. يمكنك تحديد السمة التي تريد تغييرها، مثل عدد رؤوس الاهتمام:
```py
>>> from transformers import AutoConfig
>>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12)
```
قم بإنشاء نموذج من تكوينك المخصص باستخدام [`AutoModel.from_config`]:
```py
>>> from transformers import AutoModel
>>> my_model = AutoModel.from_config(my_config)
```
الق نظرة على دليل [إنشاء بنية مخصصة](./create_a_model) لمزيد من المعلومات حول بناء التكوينات المخصصة.
## المدرب - حلقة تدريب محسنة لـ PyTorch
جميع النماذج عبارة عن [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) قياسية، لذا يمكنك استخدامها في أي حلقة تدريب نموذجية. في حين يمكنك كتابة حلقة التدريب الخاصة بك، يوفر 🤗 Transformers فئة [`Trainer`] لـ PyTorch، والتي تحتوي على حلقة التدريب الأساسية وتضيف وظائف إضافية لميزات مثل التدريب الموزع، والدقة المختلطة، والمزيد.
وفقًا لمهمتك، ستقوم عادةً بتمرير المعلمات التالية إلى [`Trainer`]:
1. ستبدأ بـ [`PreTrainedModel`] أو [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module):
```py
>>> from transformers import AutoModelForSequenceClassification
>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```
2. تحتوي [`TrainingArguments`] على فرط معلمات النموذج التي يمكنك تغييرها مثل معدل التعلم، وحجم الدفعة، وعدد العصور التي يجب التدريب عليها. يتم استخدام القيم الافتراضية إذا لم تحدد أي حجج تدريب:
```py
>>> from transformers import TrainingArguments
>>> training_args = TrainingArguments(
... output_dir="path/to/save/folder/",
... learning_rate=2e-5,
... per_device_train_batch_size=8,
... per_device_eval_batch_size=8,
... num_train_epochs=2,
... )
```
3. قم بتحميل فئة معالجة مسبقة مثل برنامج الترميز، أو معالج الصور، أو مستخرج الميزات، أو المعالج:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
```
4. قم بتحميل مجموعة بيانات:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT
```
5. قم بإنشاء دالة لترميز مجموعة البيانات:
```py
>>> def tokenize_dataset(dataset):
... return tokenizer(dataset["text"])
```
ثم قم بتطبيقه على مجموعة البيانات بأكملها باستخدام [`~datasets.Dataset.map`]:
```py
>>> dataset = dataset.map(tokenize_dataset, batched=True)
```
6. قم بتحميل [`DataCollatorWithPadding`] لإنشاء دفعة من الأمثلة من مجموعة البيانات الخاصة بك:
```py
>>> from transformers import DataCollatorWithPadding
>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
```
الآن قم بتجميع جميع هذه الفئات في [`Trainer`]:
```py
>>> from transformers import Trainer
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=dataset["train"],
... eval_dataset=dataset["test"],
... tokenizer=tokenizer,
... data_collator=data_collator,
... ) # doctest: +SKIP
```
عندما تكون مستعدًا، استدعِ [`~Trainer.train`] لبدء التدريب:
```py
>>> trainer.train() # doctest: +SKIP
```
<Tip>
بالنسبة للمهام - مثل الترجمة أو التلخيص - التي تستخدم نموذج تسلسل إلى تسلسل، استخدم فئات [`Seq2SeqTrainer`] و [`Seq2SeqTrainingArguments`] بدلاً من ذلك.
</Tip>
يمكنك تخصيص سلوك حلقة التدريب عن طريق إنشاء فئة فرعية من الطرق داخل [`Trainer`]. يسمح لك ذلك بتخصيص ميزات مثل دالة الخسارة، والمحسن، والمجدول. راجع مرجع [`Trainer`] للتعرف على الطرق التي يمكن إنشاء فئات فرعية منها.
والطريقة الأخرى لتخصيص حلقة التدريب هي باستخدام [المستدعيات](./main_classes/callback). يمكنك استخدام المستدعيات للتكامل مع المكتبات الأخرى ومراقبة حلقة التدريب للإبلاغ عن التقدم أو إيقاف التدريب مبكرًا. لا تعدل المستدعيات أي شيء في حلقة التدريب نفسها. لتخصيص شيء مثل دالة الخسارة، تحتاج إلى إنشاء فئة فرعية من [`Trainer`] بدلاً من ذلك.
## التدريب باستخدام TensorFlow
جميع النماذج عبارة عن [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) قياسية، لذا يمكن تدريبها في TensorFlow باستخدام واجهة برمجة تطبيقات Keras. يوفر 🤗 Transformers طريقة [`~TFPreTrainedModel.prepare_tf_dataset`] لتحميل مجموعة البيانات الخاصة بك بسهولة كـ `tf.data.Dataset` حتى تتمكن من البدء في التدريب على الفور باستخدام دالتي `compile` و`fit` في Keras.
1. ستبدأ بـ [`TFPreTrainedModel`] أو [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model):
```py
>>> from transformers import TFAutoModelForSequenceClassification
>>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```
2. قم بتحميل فئة معالجة مسبقة مثل برنامج الترميز، أو معالج الصور، أو مستخرج الميزات، أو المعالج:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
```
3. قم بإنشاء دالة لترميز مجموعة البيانات:
```py
>>> def tokenize_dataset(dataset):
... return tokenizer(dataset["text"]) # doctest: +SKIP
```
4. قم بتطبيق برنامج الترميز على مجموعة البيانات بأكملها باستخدام [`~datasets.Dataset.map`] ثم مرر مجموعة البيانات وبرنامج الترميز إلى [`~TFPreTrainedModel.prepare_tf_dataset`]. يمكنك أيضًا تغيير حجم الدفعة وخلط مجموعة البيانات هنا إذا أردت:
```py
>>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP
>>> tf_dataset = model.prepare_tf_dataset(
... dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer
... ) # doctest: +SKIP
```
5. عندما تكون مستعدًا، يمكنك استدعاء `compile` و`fit` لبدء التدريب. لاحظ أن جميع نماذج Transformers لديها دالة خسارة ذات صلة بالمهمة بشكل افتراضي، لذا فأنت لست بحاجة إلى تحديد واحدة ما لم ترغب في ذلك:
```py
>>> from tensorflow.keras.optimizers import Adam
>>> model.compile(optimizer='adam') # لا توجد وسيطة دالة الخسارة!
>>> model.fit(tf_dataset) # doctest: +SKIP
```
## ماذا بعد؟
الآن بعد أن أكملت الجولة السريعة في 🤗 Transformers، راجع أدلتنا لمعرفة كيفية القيام بأشياء أكثر تحديدًا مثل كتابة نموذج مخصص، وضبط نموذج مسبق التدريب لمهمة معينة، وكيفية تدريب نموذج باستخدام نص برمجي. إذا كنت مهتمًا بمعرفة المزيد عن المفاهيم الأساسية لـ 🤗 Transformers، فاحصل على فنجان من القهوة واطلع على أدلة المفاهيم الخاصة بنا! | unknown | github | https://github.com/huggingface/transformers | docs/source/ar/quicktour.md |
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <iterator>
#include "spdlog/fmt/fmt.h"
#include "logging.hpp"
#include "frozen.hpp"
#include "fmt_formatter.hpp"
#include "LIEF/Visitor.hpp"
#include "LIEF/BinaryStream/SpanStream.hpp"
#include "LIEF/MachO/Section.hpp"
#include "LIEF/MachO/Relocation.hpp"
#include "LIEF/MachO/SegmentCommand.hpp"
#include "MachO/Structures.hpp"
// Teach {fmt} how to print the Section enums by routing them through the
// LIEF::MachO::to_string overloads defined at the bottom of this file.
FMT_FORMATTER(LIEF::MachO::Section::FLAGS, LIEF::MachO::to_string);
FMT_FORMATTER(LIEF::MachO::Section::TYPE, LIEF::MachO::to_string);
namespace LIEF {
namespace MachO {
// Every individual flag bit that flags_list() knows how to report.
// Keep this list in sync with to_string(Section::FLAGS) below.
static constexpr auto ARRAY_FLAGS = {
  Section::FLAGS::PURE_INSTRUCTIONS,
  Section::FLAGS::NO_TOC,
  Section::FLAGS::STRIP_STATIC_SYMS,
  Section::FLAGS::NO_DEAD_STRIP,
  Section::FLAGS::LIVE_SUPPORT,
  Section::FLAGS::SELF_MODIFYING_CODE,
  Section::FLAGS::DEBUG_INFO,
  Section::FLAGS::SOME_INSTRUCTIONS,
  Section::FLAGS::EXT_RELOC,
  Section::FLAGS::LOC_RELOC,
};
Section::Section() = default;
Section::~Section() = default;

// Copy-and-swap assignment: `other` is taken by value, so the copy has
// already happened by the time the body runs; swapping transfers the new
// state in and lets `other`'s destructor release the old state.
Section& Section::operator=(Section other) {
  swap(other);
  return *this;
}
// Build a named, empty section (no content, no owning segment).
Section::Section(std::string name) {
  this->name(std::move(name));
}

// Build a named section that owns its raw content directly (not backed
// by a SegmentCommand).
Section::Section(std::string name, content_t content) {
  this->name(std::move(name));
  this->content(std::move(content));
}
// Copy constructor. Note that the links back into the owning binary
// (segment_ and relocations_) are deliberately NOT copied: a copied
// section starts detached and must be re-attached by the caller.
Section::Section(const Section& other) :
  LIEF::Section{other},
  segment_name_{other.segment_name_},
  original_size_{other.original_size_},
  align_{other.align_},
  relocations_offset_{other.relocations_offset_},
  nbof_relocations_{other.nbof_relocations_},
  flags_{other.flags_},
  reserved1_{other.reserved1_},
  reserved2_{other.reserved2_},
  reserved3_{other.reserved3_},
  content_{other.content_}
{}
// Build a Section from the raw 32-bit Mach-O `section` structure.
//
// Fix: the segment-name length previously used sizeof(sec.sectname). Both
// fields are 16-byte char arrays in Mach-O, so the value was the same, but
// sizing `segname` with `sectname`'s size is wrong in intent and diverged
// from the section_64 constructor below; use sizeof(sec.segname).
Section::Section(const details::section_32& sec) :
  segment_name_{sec.segname, sizeof(sec.segname)},
  original_size_{sec.size},
  align_{sec.align},
  relocations_offset_{sec.reloff},
  nbof_relocations_{sec.nreloc},
  flags_{sec.flags},
  reserved1_{sec.reserved1},
  reserved2_{sec.reserved2}
{
  name_            = {sec.sectname, sizeof(sec.sectname)};
  size_            = sec.size;
  offset_          = sec.offset;
  virtual_address_ = sec.addr;

  // The raw fields are fixed-width and may contain trailing NUL padding;
  // re-assigning through c_str() trims the string at the first NUL.
  name_         = name_.c_str();
  segment_name_ = segment_name_.c_str();
}
// Build a Section from the raw 64-bit Mach-O `section_64` structure.
Section::Section(const details::section_64& sec) :
  segment_name_{sec.segname, sizeof(sec.segname)},
  original_size_{sec.size},
  align_{sec.align},
  relocations_offset_{sec.reloff},
  nbof_relocations_{sec.nreloc},
  flags_{sec.flags},
  reserved1_{sec.reserved1},
  reserved2_{sec.reserved2},
  reserved3_{sec.reserved3}
{
  name_            = {sec.sectname, sizeof(sec.sectname)};
  size_            = sec.size;
  offset_          = sec.offset;
  virtual_address_ = sec.addr;

  // The raw fields are fixed-width char[16] and may carry trailing NUL
  // padding; re-assigning through c_str() trims at the first NUL.
  name_         = name_.c_str();
  segment_name_ = segment_name_.c_str();
}
// Member-wise swap; noexcept so it can back the copy-and-swap operator=.
// Also swaps the base-class fields (name_, virtual_address_, size_, offset_)
// and the binary links (segment_, relocations_).
void Section::swap(Section& other) noexcept {
  std::swap(name_,            other.name_);
  std::swap(virtual_address_, other.virtual_address_);
  std::swap(size_,            other.size_);
  std::swap(offset_,          other.offset_);

  std::swap(segment_name_,       other.segment_name_);
  std::swap(original_size_,      other.original_size_);
  std::swap(align_,              other.align_);
  std::swap(relocations_offset_, other.relocations_offset_);
  std::swap(nbof_relocations_,   other.nbof_relocations_);
  std::swap(flags_,              other.flags_);
  std::swap(reserved1_,          other.reserved1_);
  std::swap(reserved2_,          other.reserved2_);
  std::swap(reserved3_,          other.reserved3_);
  std::swap(content_,            other.content_);
  std::swap(segment_,            other.segment_);
  std::swap(relocations_,        other.relocations_);
}
// Return a (read-only, zero-copy) view of the section's bytes.
//
// A detached section serves its own content_ buffer; a section attached to
// a segment serves a sub-span of the segment's content. Returns an empty
// span for sections with no file-backed data (e.g. bss) or when the offsets
// are inconsistent with the segment's bounds.
span<const uint8_t> Section::content() const {
  if (segment_ == nullptr) {
    return content_;
  }

  if (size_ == 0 || offset_ == 0) { // bss section for instance
    return {};
  }

  // Defensive: reject values that would wrap when treated as signed.
  if (int64_t(size_) < 0 || int64_t(offset_) < 0) {
    return {};
  }

  int64_t relative_offset = offset_ - segment_->file_offset();
  if (relative_offset < 0) {
    // File offset precedes the segment (e.g. rewritten binaries): fall back
    // to locating the section by virtual address within the segment.
    relative_offset = virtual_address_ - segment_->virtual_address();
  }

  span<const uint8_t> content = segment_->content();
  if (relative_offset > (int64_t)content.size() || (relative_offset + size_) > content.size()) {
    LIEF_ERR("Section's size is bigger than segment's size");
    return {};
  }
  return content.subspan(relative_offset, size_);
}
void Section::content(const content_t& data) {
if (segment_ == nullptr) {
content_ = data;
return;
}
if (size_ == 0 || offset_ == 0) { // bss section for instance
LIEF_ERR("Offset or size is null");
return;
}
uint64_t relative_offset = offset_ - segment_->file_offset();
span<uint8_t> content = segment_->writable_content();
if (relative_offset > content.size() || (relative_offset + data.size()) > content.size()) {
LIEF_ERR("New data are bigger than the original one");
return;
}
std::move(std::begin(data), std::end(data),
content.data() + relative_offset);
}
// Name of the Mach-O segment this section belongs to. Prefer the live
// segment's name when the section is attached and the segment is named;
// otherwise fall back to the name cached at parse time.
const std::string& Section::segment_name() const {
  if (segment_ != nullptr) {
    const std::string& live_name = segment_->name();
    if (!live_name.empty()) {
      return live_name;
    }
  }
  return segment_name_;
}
std::vector<Section::FLAGS> Section::flags_list() const {
std::vector<FLAGS> flags;
std::copy_if(
std::begin(ARRAY_FLAGS), std::end(ARRAY_FLAGS),
std::inserter(flags, std::begin(flags)),
[this] (FLAGS f) { return has(f); });
return flags;
}
// Update the cached segment name and, when the section is attached to a
// segment that already has a (non-empty) name, rename that segment too.
void Section::segment_name(const std::string& name) {
  segment_name_ = name;
  if (segment_ == nullptr) {
    return;
  }
  if (segment_->name().empty()) {
    return;
  }
  segment_->name(name);
}
// Whether the given flag bit is set in this section's flags.
bool Section::has(FLAGS flag) const {
  const uint32_t raw = uint32_t(flags());
  return (raw & static_cast<uint32_t>(flag)) != 0;
}
// Set the given flag bit on top of the existing raw flags.
void Section::add(FLAGS flag) {
  const uint32_t updated = raw_flags() | static_cast<uint32_t>(flag);
  flags(updated);
}
// Clear the given flag bit from the raw flags.
void Section::remove(FLAGS flag) {
  const uint32_t mask = ~static_cast<uint32_t>(flag);
  flags(raw_flags() & mask);
}
void Section::clear(uint8_t v) {
content_t clear(size(), v);
content(std::move(clear));
}
// Visitor-pattern entry point: dispatch to the visitor's Section overload.
void Section::accept(Visitor& visitor) const {
  visitor.visit(*this);
}
// Expose the section's content as a readable stream (a zero-copy view over
// the bytes returned by content()).
std::unique_ptr<SpanStream> Section::stream() const {
  return std::make_unique<SpanStream>(content());
}
// Human-readable one-line dump of all Section attributes (for logging /
// Python __str__).
std::ostream& operator<<(std::ostream& os, const Section& section) {
  const auto& flags = section.flags_list();
  os << fmt::format(
    "name={}, segment={}, address=0x{:06x}, size=0x{:04x} "
    "offset=0x{:06x}, align={}, type={}, reloc_offset={}, nb_reloc={} "
    "reserved1={}, reserved2={}, reserved3={}, flags={}",
    section.name(), section.segment_name(), section.address(),
    section.size(), section.offset(), section.alignment(), section.type(),
    section.relocation_offset(), section.numberof_relocations(),
    section.reserved1(), section.reserved2(), section.reserved3(),
    flags
  );
  return os;
}
// Name of a single section flag; "UNKNOWN" for any value not listed.
// Keep in sync with ARRAY_FLAGS above.
const char* to_string(Section::FLAGS e) {
  switch (e) {
    case Section::FLAGS::PURE_INSTRUCTIONS:   return "PURE_INSTRUCTIONS";
    case Section::FLAGS::NO_TOC:              return "NO_TOC";
    case Section::FLAGS::STRIP_STATIC_SYMS:   return "STRIP_STATIC_SYMS";
    case Section::FLAGS::NO_DEAD_STRIP:       return "NO_DEAD_STRIP";
    case Section::FLAGS::LIVE_SUPPORT:        return "LIVE_SUPPORT";
    case Section::FLAGS::SELF_MODIFYING_CODE: return "SELF_MODIFYING_CODE";
    case Section::FLAGS::DEBUG_INFO:          return "DEBUG_INFO";
    case Section::FLAGS::SOME_INSTRUCTIONS:   return "SOME_INSTRUCTIONS";
    case Section::FLAGS::EXT_RELOC:           return "EXT_RELOC";
    case Section::FLAGS::LOC_RELOC:           return "LOC_RELOC";
    default:                                  return "UNKNOWN";
  }
}
// Name of a section type; "UNKNOWN" for any value not listed.
const char* to_string(Section::TYPE e) {
  switch (e) {
    case Section::TYPE::REGULAR:                             return "REGULAR";
    case Section::TYPE::ZEROFILL:                            return "ZEROFILL";
    case Section::TYPE::CSTRING_LITERALS:                    return "CSTRING_LITERALS";
    case Section::TYPE::IS_4BYTE_LITERALS:                   return "IS_4BYTE_LITERALS";
    case Section::TYPE::IS_8BYTE_LITERALS:                   return "IS_8BYTE_LITERALS";
    case Section::TYPE::LITERAL_POINTERS:                    return "LITERAL_POINTERS";
    case Section::TYPE::NON_LAZY_SYMBOL_POINTERS:            return "NON_LAZY_SYMBOL_POINTERS";
    case Section::TYPE::LAZY_SYMBOL_POINTERS:                return "LAZY_SYMBOL_POINTERS";
    case Section::TYPE::SYMBOL_STUBS:                        return "SYMBOL_STUBS";
    case Section::TYPE::MOD_INIT_FUNC_POINTERS:              return "MOD_INIT_FUNC_POINTERS";
    case Section::TYPE::MOD_TERM_FUNC_POINTERS:              return "MOD_TERM_FUNC_POINTERS";
    case Section::TYPE::COALESCED:                           return "COALESCED";
    case Section::TYPE::GB_ZEROFILL:                         return "GB_ZEROFILL";
    case Section::TYPE::INTERPOSING:                         return "INTERPOSING";
    case Section::TYPE::IS_16BYTE_LITERALS:                  return "IS_16BYTE_LITERALS";
    case Section::TYPE::DTRACE_DOF:                          return "DTRACE_DOF";
    case Section::TYPE::LAZY_DYLIB_SYMBOL_POINTERS:          return "LAZY_DYLIB_SYMBOL_POINTERS";
    case Section::TYPE::THREAD_LOCAL_REGULAR:                return "THREAD_LOCAL_REGULAR";
    case Section::TYPE::THREAD_LOCAL_ZEROFILL:               return "THREAD_LOCAL_ZEROFILL";
    case Section::TYPE::THREAD_LOCAL_VARIABLES:              return "THREAD_LOCAL_VARIABLES";
    case Section::TYPE::THREAD_LOCAL_VARIABLE_POINTERS:      return "THREAD_LOCAL_VARIABLE_POINTERS";
    case Section::TYPE::THREAD_LOCAL_INIT_FUNCTION_POINTERS: return "THREAD_LOCAL_INIT_FUNCTION_POINTERS";
    case Section::TYPE::INIT_FUNC_OFFSETS:                   return "INIT_FUNC_OFFSETS";
    default:                                                 return "UNKNOWN";
  }
}
} // namespace MachO
} // namespace LIEF | cpp | github | https://github.com/nodejs/node | deps/LIEF/src/MachO/Section.cpp |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/norms-conorms.py
# How to use t-norms and s-norms (norms and conorms)
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
from peach.fuzzy.norms import *
# The standard operations with sets -- and thus fuzzy sets -- are intersection,
# union and complement. Fuzzy sets, however, are an extension to classical sets,
# and there are infinite ways to extend those operations. Thus the existence of
# norms, conorms and negations. We show here how to use them in Peach.
# First, remember that we must create the sets. A FuzzySet instance is returned
# when you apply a membership function over a domain. It is, in fact, a
# standard array, but making it a new class allow us to redefine operations.
# Here we create the sets:
# Build the universe of discourse and two overlapping triangular fuzzy sets.
# Applying a membership function over a domain returns a FuzzySet instance,
# which behaves as a standard array but redefines the set operators.
x = numpy.linspace(-5.0, 5.0, 500)
a = Triangle(-3.0, -1.0, 1.0)(x)
b = Triangle(-1.0, 1.0, 3.0)(x)

# To set norms, conorms and negations, we use, respectively, the methods
# set_norm, set_conorm and set_negation. Notice that those are class methods,
# so if you change the norm for one instance of a set, you change it for them
# all! So it is better to use the class name to select the methods. Here, we
# will use Zadeh norms (min/max), which are already defined in Peach. Notice
# that we use the standard operators for and, or and not operations
# (respectively, &, | and ~):
FuzzySet.set_norm(ZadehAnd)
FuzzySet.set_conorm(ZadehOr)
aandb_zadeh = a & b             # A and B
aorb_zadeh = a | b              # A or B

# Probabilistic norms are based on the corresponding operations in
# probability (product t-norm, probabilistic-sum s-norm). Here we use them:
FuzzySet.set_norm(ProbabilisticAnd)
FuzzySet.set_conorm(ProbabilisticOr)
aandb_prob = a & b
aorb_prob = a | b

# There are other norms that we could use. Please, check the documentation
# for a complete list. Here are some of them:
#   Norms:   ZadehAnd, ProbabilisticAnd, DrasticProduct, EinsteinProduct
#   Conorms: ZadehOr, ProbabilisticOr, DrasticSum, EinsteinSum

# We will use the matplotlib module to plot these functions. We save the plot
# in a figure called 'norms-conorms.png'. The whole plotting step is optional:
# if matplotlib is not installed, the script still runs the computations above.
try:
    from matplotlib import *
    from matplotlib.pylab import *

    figure(1).set_size_inches(8, 6)
    a1 = axes([ 0.125, 0.555, 0.775, 0.40 ])
    a2 = axes([ 0.125, 0.125, 0.775, 0.40 ])

    # NOTE(review): Axes.hold() was removed in matplotlib 2.x/3.x — this
    # tutorial assumes an old matplotlib version; confirm before running.
    a1.hold(True)
    a1.plot(x, a, 'k:')
    a1.plot(x, b, 'k:')
    a1.plot(x, aandb_zadeh, 'k')
    a1.plot(x, aandb_prob, 'k-.')
    a1.set_xlim([ -5, 5 ])
    a1.set_ylim([ -0.1, 1.1 ])
    a1.set_xticks([])
    a1.set_yticks([ 0.0, 1.0 ])
    a1.legend((r'$A$', r'$B$', 'Zadeh AND', 'Prob. AND'))

    a2.hold(True)
    a2.plot(x, a, 'k:')
    a2.plot(x, b, 'k:')
    a2.plot(x, aorb_zadeh, 'k')
    a2.plot(x, aorb_prob, 'k-.')
    a2.set_xlim([ -5, 5 ])
    a2.set_ylim([ -0.1, 1.1 ])
    a2.set_xticks([])
    a2.set_yticks([ 0.0, 1.0 ])
    a2.legend((r'$A$', r'$B$', 'Zadeh OR', 'Prob. OR'))

    savefig("norms-conorms.png")
except ImportError:
    # Best-effort plotting only: silently skip if matplotlib is unavailable.
    pass
"""
Integration Tests for LMS instructor-initiated background tasks.
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
import csv
import logging
import json
from mock import patch
import textwrap
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
CustomResponseXMLFactory)
from user_api.tests.factories import UserCourseTagFactory
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.partitions.partitions import Group, UserPartition
from courseware.model_data import StudentModule
from instructor_task.api import (submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students)
from instructor_task.models import InstructorTask, ReportStore
from instructor_task.tasks_helper import upload_grades_csv
from instructor_task.tests.test_base import (InstructorTaskModuleTestCase, TestReportMixin, TEST_COURSE_ORG,
TEST_COURSE_NUMBER, OPTION_1, OPTION_2)
from capa.responsetypes import StudentInputError
from lms.lib.xblock.runtime import quote_slashes
log = logging.getLogger(__name__)
class TestIntegrationTask(InstructorTaskModuleTestCase):
    """
    Base class to provide general methods used for "integration" testing of particular tasks.
    """
    def submit_student_answer(self, username, problem_url_name, responses):
        """
        Use ajax interface to submit a student answer.

        Assumes the input list of responses has two values.
        Returns the raw Django test-client response.
        """
        def get_input_id(response_id):
            """Creates input id using information about the test course and the current problem."""
            # Note that this is a capa-specific convention.  The form is a version of the problem's
            # URL, modified so that it can be easily stored in html, prepended with "input-" and
            # appended with a sequence identifier for the particular response the input goes to.
            return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(),
                                                              TEST_COURSE_NUMBER.replace('.', '_'),
                                                              problem_url_name, response_id)

        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('xblock_handler', kwargs={
            'course_id': self.course.id.to_deprecated_string(),
            'usage_id': quote_slashes(InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string()),
            'handler': 'xmodule_handler',
            'suffix': 'problem_check',
        })

        # we assume we have two responses, so assign them the correct identifiers.
        resp = self.client.post(modx_url, {
            get_input_id('2_1'): responses[0],
            get_input_id('3_1'): responses[1],
        })
        return resp

    def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
        """
        Confirm that expected values are stored in InstructorTask on task failure.

        Checks both the persisted InstructorTask row (state, requester, type,
        input and output payloads) and the status dict returned by the
        task-status API.
        """
        instructor_task = InstructorTask.objects.get(id=entry_id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, task_type)
        task_input = json.loads(instructor_task.task_input)
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'ZeroDivisionError')
        self.assertEqual(status['message'], expected_message)

        # check status returned:
        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], expected_message)
class TestRescoringTask(TestIntegrationTask):
    """
    Integration-style tests for rescoring problems in a background task.
    Exercises real problems with a minimum of patching.
    """

    def setUp(self):
        # Build the test course, an instructor, and four students whose
        # answers will be rescored; each test logs in per-user as needed.
        self.initialize_course()
        self.create_instructor('instructor')
        self.create_student('u1')
        self.create_student('u2')
        self.create_student('u3')
        self.create_student('u4')
        self.logout()

        # set up test user for performing test operations
        self.setup_user()

    def render_problem(self, username, problem_url_name):
        """
        Use ajax interface to request html for a problem.
        """
        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('xblock_handler', kwargs={
            'course_id': self.course.id.to_deprecated_string(),
            'usage_id': quote_slashes(InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string()),
            'handler': 'xmodule_handler',
            'suffix': 'problem_get',
        })
        resp = self.client.post(modx_url, {})
        return resp

    def check_state(self, username, descriptor, expected_score, expected_max_score, expected_attempts):
        """
        Check that the StudentModule state contains the expected values.

        The student module is found for the test course, given the `username` and problem `descriptor`.

        Values checked include the number of attempts, the score, and the max score for a problem.
        """
        module = self.get_student_module(username, descriptor)
        self.assertEqual(module.grade, expected_score)
        self.assertEqual(module.max_grade, expected_max_score)
        state = json.loads(module.state)
        attempts = state['attempts']
        self.assertEqual(attempts, expected_attempts)
        if attempts > 0:
            # a graded attempt must have recorded both the grading result and
            # the answers that produced it:
            self.assertTrue('correct_map' in state)
            self.assertTrue('student_answers' in state)
            self.assertGreater(len(state['correct_map']), 0)
            self.assertGreater(len(state['student_answers']), 0)

    def submit_rescore_all_student_answers(self, instructor, problem_url_name):
        """Submits the particular problem for rescoring"""
        return submit_rescore_problem_for_all_students(self.create_task_request(instructor),
                                                       InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def submit_rescore_one_student_answer(self, instructor, problem_url_name, student):
        """Submits the particular problem for rescoring for a particular student"""
        return submit_rescore_problem_for_student(self.create_task_request(instructor),
                                                  InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                                  student)

    def test_rescoring_option_problem(self):
        """Run rescore scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name, [OPTION_1, OPTION_2])
        self.submit_student_answer('u3', problem_url_name, [OPTION_2, OPTION_1])
        self.submit_student_answer('u4', problem_url_name, [OPTION_2, OPTION_2])

        self.check_state('u1', descriptor, 2, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)

        # update the data in the problem definition so the correct answer flips
        self.redefine_option_problem(problem_url_name)

        # confirm that simply rendering the problem again does not result in a change
        # in the grade:
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 2, 2, 1)

        # rescore the problem for only one student -- only that student's grade should change:
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 2, 2, 1)

    def test_rescoring_failure(self):
        """Simulate a failure in rescoring a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)

    def test_rescoring_bad_unicode_input(self):
        """Generate a real failure in rescoring a problem, with an answer including unicode"""
        # At one point, the student answers that resulted in StudentInputErrors were being
        # persisted (even though they were not counted as an attempt).  That is not possible
        # now, so it's harder to generate a test for how such input is handled.
        problem_url_name = 'H1P1'
        # set up an option problem -- doesn't matter really what problem it is, but we need
        # it to have an answer.
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        # return an input error as if it were a numerical response, with an embedded unicode character:
        expected_message = u"Could not interpret '2/3\u03a9' as a number"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = StudentInputError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # check instructor_task returned: a StudentInputError counts as an
        # attempted-but-not-succeeded student, not a task-level failure.
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, 'SUCCESS')
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, 'rescore_problem')
        task_input = json.loads(instructor_task.task_input)
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['attempted'], 1)
        self.assertEqual(status['succeeded'], 0)
        self.assertEqual(status['total'], 1)

    def define_code_response_problem(self, problem_url_name):
        """
        Define an arbitrary code-response problem.

        We'll end up mocking its evaluation later.
        """
        factory = CodeResponseXMLFactory()
        grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
        problem_xml = factory.build_xml(initial_display="def square(x):",
                                        answer_display="answer",
                                        grader_payload=grader_payload,
                                        num_responses=2)
        ItemFactory.create(parent_location=self.problem_section.location,
                           category="problem",
                           display_name=str(problem_url_name),
                           data=problem_xml)

    def test_rescoring_code_problem(self):
        """Run rescore scenario on problem with code submission"""
        problem_url_name = 'H1P2'
        self.define_code_response_problem(problem_url_name)
        # we fully create the CodeResponse problem, but just pretend that we're queuing it:
        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")
            self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])

        instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # code-response problems cannot be rescored in-process, so the task
        # is expected to fail with NotImplementedError:
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'NotImplementedError')
        self.assertEqual(status['message'], "Problem's definition does not support rescoring.")

        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], "Problem's definition does not support rescoring.")

    def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
        """
        Defines a custom response problem that uses a random value to determine correctness.

        Generated answer is also returned as the `msg`, so that the value can be used as a
        correct answer by a test.

        If the `redefine` flag is set, then change the definition of correctness (from equals
        to not-equals).
        """
        factory = CustomResponseXMLFactory()
        script = textwrap.dedent("""
                def check_func(expect, answer_given):
                    expected = str(random.randint(0, 100))
                    return {'ok': answer_given %s expected, 'msg': expected}
            """ % ('!=' if redefine else '=='))
        problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
        if redefine:
            descriptor = self.module_store.get_item(
                InstructorTaskModuleTestCase.problem_location(problem_url_name)
            )
            descriptor.data = problem_xml
            with self.module_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, descriptor.location.course_key):
                self.module_store.update_item(descriptor, self.user.id)
                self.module_store.publish(descriptor.location, self.user.id)
        else:
            # Use "per-student" rerandomization so that check-problem can be called more than once.
            # Using "always" means we cannot check a problem twice, but we want to call once to get the
            # correct answer, and call a second time with that answer to confirm it's graded as correct.
            # Per-student rerandomization will at least generate different seeds for different users, so
            # we get a little more test coverage.
            ItemFactory.create(parent_location=self.problem_section.location,
                               category="problem",
                               display_name=str(problem_url_name),
                               data=problem_xml,
                               metadata={"rerandomize": "per_student"})

    def test_rescoring_randomized_problem(self):
        """Run rescore scenario on custom problem that uses randomize"""
        # First define the custom response problem:
        problem_url_name = 'H1P1'
        self.define_randomized_custom_response_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        # run with more than one user
        userlist = ['u1', 'u2', 'u3', 'u4']
        for username in userlist:
            # first render the problem, so that a seed will be created for this user
            self.render_problem(username, problem_url_name)
            # submit a bogus answer, in order to get the problem to tell us its real answer
            dummy_answer = "1000"
            self.submit_student_answer(username, problem_url_name, [dummy_answer, dummy_answer])
            # we should have gotten the problem wrong, since we're way out of range:
            self.check_state(username, descriptor, 0, 1, 1)
            # dig the correct answer out of the problem's message
            module = self.get_student_module(username, descriptor)
            state = json.loads(module.state)
            correct_map = state['correct_map']
            log.info("Correct Map: %s", correct_map)
            # only one response, so pull it out:
            # NOTE(review): indexing dict.values() works on Python 2 only; a
            # Python 3 port needs list(correct_map.values())[0] — confirm.
            answer = correct_map.values()[0]['msg']
            self.submit_student_answer(username, problem_url_name, [answer, answer])
            # we should now get the problem right, with a second attempt:
            self.check_state(username, descriptor, 1, 1, 2)

        # redefine the problem (as stored in Mongo) so that the definition of correct changes
        self.define_randomized_custom_response_problem(problem_url_name, redefine=True)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade (or the attempts):
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 1, 1, 2)

        # rescore the problem for only one student -- only that student's grade should change
        # (and none of the attempts):
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        for username in userlist:
            self.check_state(username, descriptor, 0 if username == 'u1' else 1, 1, 2)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # all grades should change to being wrong (with no change in attempts)
        for username in userlist:
            self.check_state(username, descriptor, 0, 1, 2)
class TestResetAttemptsTask(TestIntegrationTask):
    """
    Integration-style tests for resetting problem attempts in a background task.
    Exercises real problems with a minimum of patching.
    """
    # students created in setUp and exercised by each test
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def get_num_attempts(self, username, descriptor):
        """returns number of attempts stored for `username` on problem `descriptor` for test course"""
        module = self.get_student_module(username, descriptor)
        state = json.loads(module.state)
        return state['attempts']

    def reset_problem_attempts(self, instructor, location):
        """Submits the current problem for resetting"""
        return submit_reset_problem_attempts_for_all_students(self.create_task_request(instructor),
                                                              location)

    def test_reset_attempts_on_problem(self):
        """Run reset-attempts scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        num_attempts = 3
        # first store answers for each of the separate users:
        for _ in range(num_attempts):
            for username in self.userlist:
                self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])

        for username in self.userlist:
            self.assertEquals(self.get_num_attempts(username, descriptor), num_attempts)

        # the reset task should zero the attempt counter for every student:
        self.reset_problem_attempts('instructor', location)

        for username in self.userlist:
            self.assertEquals(self.get_num_attempts(username, descriptor), 0)

    def test_reset_failure(self):
        """Simulate a failure in resetting attempts on a problem"""
        problem_url_name = 'H1P1'
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.save') as mock_save:
            mock_save.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.reset_problem_attempts('instructor', location)
        self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)

    def test_reset_non_problem(self):
        """confirm that a non-problem can still be successfully reset"""
        location = self.problem_section.location
        instructor_task = self.reset_problem_attempts('instructor', location)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)
class TestDeleteProblemTask(TestIntegrationTask):
"""
Integration-style tests for deleting problem state in a background task.
Exercises real problems with a minimum of patching.
"""
userlist = ['u1', 'u2', 'u3', 'u4']
def setUp(self):
self.initialize_course()
self.create_instructor('instructor')
for username in self.userlist:
self.create_student(username)
self.logout()
def delete_problem_state(self, instructor, location):
"""Submits the current problem for deletion"""
return submit_delete_problem_state_for_all_students(self.create_task_request(instructor), location)
def test_delete_problem_state(self):
"""Run delete-state scenario on option problem"""
# get descriptor:
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
descriptor = self.module_store.get_item(location)
# first store answers for each of the separate users:
for username in self.userlist:
self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
# confirm that state exists:
for username in self.userlist:
self.assertTrue(self.get_student_module(username, descriptor) is not None)
# run delete task:
self.delete_problem_state('instructor', location)
# confirm that no state can be found:
for username in self.userlist:
with self.assertRaises(StudentModule.DoesNotExist):
self.get_student_module(username, descriptor)
def test_delete_failure(self):
"""Simulate a failure in deleting state of a problem"""
problem_url_name = 'H1P1'
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
self.define_option_problem(problem_url_name)
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
expected_message = "bad things happened"
with patch('courseware.models.StudentModule.delete') as mock_delete:
mock_delete.side_effect = ZeroDivisionError(expected_message)
instructor_task = self.delete_problem_state('instructor', location)
self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)
def test_delete_non_problem(self):
"""confirm that a non-problem can still be successfully deleted"""
location = self.problem_section.location
instructor_task = self.delete_problem_state('instructor', location)
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
self.assertEqual(instructor_task.task_state, SUCCESS)
class TestGradeReportConditionalContent(TestReportMixin, TestIntegrationTask):
"""
Check that grade export works when graded content exists within
split modules.
"""
def setUp(self):
"""
Set up a course with graded problems within a split test.
Course hierarchy is as follows (modeled after how split tests
are created in studio):
-> course
-> chapter
-> sequential (graded)
-> vertical
-> split_test
-> vertical (Group A)
-> problem
-> vertical (Group B)
-> problem
"""
super(TestGradeReportConditionalContent, self).setUp()
# Create user partitions
self.user_partition_group_a = 0
self.user_partition_group_b = 1
self.partition = UserPartition(
0,
'first_partition',
'First Partition',
[
Group(self.user_partition_group_a, 'Group A'),
Group(self.user_partition_group_b, 'Group B')
]
)
# Create course with group configurations and grading policy
self.initialize_course(
course_factory_kwargs={
'user_partitions': [self.partition],
'grading_policy': {
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}]
}
}
)
# Create users and partition them
self.student_a = self.create_student('student_a')
self.student_b = self.create_student('student_b')
UserCourseTagFactory(
user=self.student_a,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member
value=str(self.user_partition_group_a)
)
UserCourseTagFactory(
user=self.student_b,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member
value=str(self.user_partition_group_b)
)
# Create a vertical to contain our split test
problem_vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
display_name='Problem Unit'
)
# Create the split test and child vertical containers
vertical_a_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_a')
vertical_b_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_b')
self.split_test = ItemFactory.create(
parent_location=problem_vertical.location,
category='split_test',
display_name='Split Test',
user_partition_id=self.partition.id, # pylint: disable=no-member
group_id_to_child={str(index): url for index, url in enumerate([vertical_a_url, vertical_b_url])}
)
self.vertical_a = ItemFactory.create(
parent_location=self.split_test.location,
category='vertical',
display_name='Group A problem container',
location=vertical_a_url
)
self.vertical_b = ItemFactory.create(
parent_location=self.split_test.location,
category='vertical',
display_name='Group B problem container',
location=vertical_b_url
)
def verify_csv_task_success(self, task_result):
"""
Verify that all students were successfully graded by
`upload_grades_csv`.
Arguments:
task_result (dict): Return value of `upload_grades_csv`.
"""
self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, task_result)
def verify_rows_in_csv(self, expected_rows):
"""
Verify that the grades CSV contains the expected content.
Arguments:
expected_rows (iterable): An iterable of dictionaries, where
each dict represents a row of data in the grades
report CSV. Each dict maps keys from the CSV header
to values in that row's corresponding cell.
"""
report_store = ReportStore.from_config()
report_csv_filename = report_store.links_for(self.course.id)[0][0]
with open(report_store.path_to(self.course.id, report_csv_filename)) as csv_file:
# Expand the dict reader generator so we don't lose it's content
csv_rows = [row for row in csv.DictReader(csv_file)]
self.assertEqual(csv_rows, expected_rows)
def verify_grades_in_csv(self, students_grades):
"""
Verify that the grades CSV contains the expected grades data.
Arguments:
students_grades (iterable): An iterable of dictionaries,
where each dict maps a student to another dict
representing their grades we expect to see in the CSV.
For example: [student_a: {'grade': 1.0, 'HW': 1.0}]
"""
def merge_dicts(dict_1, dict_2):
"""Return the union of dict_1 and dict_2"""
return dict(dict_1.items() + dict_2.items())
self.verify_rows_in_csv(
[
merge_dicts(
{'id': str(student.id), 'username': student.username, 'email': student.email},
grades
)
for student_grades in students_grades for student, grades in student_grades.iteritems()
]
)
def test_both_groups_problems(self):
"""
Verify that grade export works when each user partition
receives (different) problems. Each user's grade on their
particular problem should show up in the grade report.
"""
problem_a_url = 'problem_a_url'
problem_b_url = 'problem_b_url'
self.define_option_problem(problem_a_url, parent=self.vertical_a)
self.define_option_problem(problem_b_url, parent=self.vertical_b)
# student A will get 100%, student B will get 50% because
# OPTION_1 is the correct option, and OPTION_2 is the
# incorrect option
self.submit_student_answer(self.student_a.username, problem_a_url, [OPTION_1, OPTION_1])
self.submit_student_answer(self.student_b.username, problem_b_url, [OPTION_1, OPTION_2])
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.verify_csv_task_success(result)
self.verify_grades_in_csv(
[
{self.student_a: {'grade': '1.0', 'HW': '1.0'}},
{self.student_b: {'grade': '0.5', 'HW': '0.5'}}
]
)
def test_one_group_problem(self):
"""
Verify that grade export works when only the Group A user
partition receives a problem. We expect to see a column for
the homework where student_a's entry includes their grade, and
student b's entry shows a 0.
"""
problem_a_url = 'problem_a_url'
self.define_option_problem(problem_a_url, parent=self.vertical_a)
self.submit_student_answer(self.student_a.username, problem_a_url, [OPTION_1, OPTION_1])
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.verify_csv_task_success(result)
self.verify_grades_in_csv(
[
{self.student_a: {'grade': '1.0', 'HW': '1.0'}},
{self.student_b: {'grade': '0.0', 'HW': '0.0'}}
]
) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# This software is Copyright (c) 2014, Sanju Kholia <sanju.kholia at gmail.com>
# and it is hereby released to the general public under the following terms:
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted.
import sys
import os
import struct
from binascii import hexlify
KWMAGIC = "KWALLET\n\r\0\r\n"
KWMAGIC_LEN = 12
KWALLET_VERSION_MAJOR = 0
KWALLET_VERSION_MINOR = 0
KWALLET_CIPHER_BLOWFISH_CBC = 0
KWALLET_CIPHER_3DES_CBC = 1
KWALLET_HASH_SHA1 = 0
KWALLET_HASH_MD5 = 1
N = 128
def process_file(filename):
offset = 0
try:
fd = open(filename, "rb")
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("%s\n" % str(e))
return
# TOCTOU but who cares, right? ;)
size = os.stat(filename).st_size
buf = fd.read(KWMAGIC_LEN)
if buf != KWMAGIC:
sys.stderr.write("%s : Not a KDE KWallet file!\n" % filename)
return
offset += KWMAGIC_LEN
buf = bytearray(fd.read(4))
offset += 4
# First byte is major version, second byte is minor version
if buf[0] != KWALLET_VERSION_MAJOR:
sys.stderr.write("%s : Unknown version!\n" % filename)
return
if buf[1] != KWALLET_VERSION_MINOR:
sys.stderr.write("%s : Unknown version!\n" % filename)
return
if buf[2] != KWALLET_CIPHER_BLOWFISH_CBC:
sys.stderr.write("%s : Unsupported cipher\n" % filename)
return
if buf[3] != KWALLET_HASH_SHA1:
sys.stderr.write("%s : Unsupported hash\n" % filename)
return
# Read in the hashes
buf = fd.read(4)
n = struct.unpack("> I", buf)[0]
if n > 0xffff:
sys.stderr.write("%s : sanity check failed!\n" % filename)
sys.exit(6)
offset += 4
for i in range(0, n):
buf = fd.read(16)
offset += 16
buf = fd.read(4) # read 4 bytes more
fsz = struct.unpack("> I", buf)[0]
offset += 4
for j in range(0, fsz):
fd.read(16)
offset += 16
# Read in the rest of the file
encrypted_size = size - offset
encrypted = fd.read(encrypted_size)
encrypted_size = len(encrypted)
if encrypted_size % 8 != 0:
sys.stderr.write("%s : invalid file structure!\n", filename)
sys.exit(7)
sys.stdout.write("%s:$kwallet$%ld$%s" % (os.path.basename(filename), encrypted_size, hexlify(encrypted)))
sys.stdout.write(":::::%s\n" % filename)
fd.close()
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s <.kwl file(s)>\n" % sys.argv[0])
sys.exit(-1)
for i in range(1, len(sys.argv)):
process_file(sys.argv[i]) | unknown | codeparrot/codeparrot-clean | ||
# Serde   [![Build Status]][actions] [![Latest Version]][crates.io] [![serde msrv]][Rust 1.56] [![serde_derive msrv]][Rust 1.71]
[Build Status]: https://img.shields.io/github/actions/workflow/status/serde-rs/serde/ci.yml?branch=master
[actions]: https://github.com/serde-rs/serde/actions?query=branch%3Amaster
[Latest Version]: https://img.shields.io/crates/v/serde.svg
[crates.io]: https://crates.io/crates/serde
[serde msrv]: https://img.shields.io/crates/msrv/serde.svg?label=serde%20msrv&color=lightgray
[serde_derive msrv]: https://img.shields.io/crates/msrv/serde_derive.svg?label=serde_derive%20msrv&color=lightgray
[Rust 1.56]: https://blog.rust-lang.org/2021/10/21/Rust-1.56.0/
[Rust 1.71]: https://blog.rust-lang.org/2023/07/13/Rust-1.71.0/
**Serde is a framework for *ser*ializing and *de*serializing Rust data structures efficiently and generically.**
---
You may be looking for:
- [An overview of Serde](https://serde.rs)
- [Data formats supported by Serde](https://serde.rs/#data-formats)
- [Setting up `#[derive(Serialize, Deserialize)]`](https://serde.rs/derive.html)
- [Examples](https://serde.rs/examples.html)
- [API documentation](https://docs.rs/serde)
- [Release notes](https://github.com/serde-rs/serde/releases)
## Serde in action
<details>
<summary>
Click to show Cargo.toml.
<a href="https://play.rust-lang.org/?edition=2021&gist=72755f28f99afc95e01d63174b28c1f5" target="_blank">Run this code in the playground.</a>
</summary>
```toml
[dependencies]
# The core APIs, including the Serialize and Deserialize traits. Always
# required when using Serde. The "derive" feature is only required when
# using #[derive(Serialize, Deserialize)] to make Serde work with structs
# and enums defined in your crate.
serde = { version = "1.0", features = ["derive"] }
# Each data format lives in its own crate; the sample code below uses JSON
# but you may be using a different one.
serde_json = "1.0"
```
</details>
<p></p>
```rust
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
struct Point {
x: i32,
y: i32,
}
fn main() {
let point = Point { x: 1, y: 2 };
// Convert the Point to a JSON string.
let serialized = serde_json::to_string(&point).unwrap();
// Prints serialized = {"x":1,"y":2}
println!("serialized = {}", serialized);
// Convert the JSON string back to a Point.
let deserialized: Point = serde_json::from_str(&serialized).unwrap();
// Prints deserialized = Point { x: 1, y: 2 }
println!("deserialized = {:?}", deserialized);
}
```
## Getting help
Serde is one of the most widely used Rust libraries so any place that Rustaceans
congregate will be able to help you out. For chat, consider trying the
[#rust-questions] or [#rust-beginners] channels of the unofficial community
Discord (invite: <https://discord.gg/rust-lang-community>), the [#rust-usage] or
[#beginners] channels of the official Rust Project Discord (invite:
<https://discord.gg/rust-lang>), or the [#general][zulip] stream in Zulip. For
asynchronous, consider the [\[rust\] tag on StackOverflow][stackoverflow], the
[/r/rust] subreddit which has a pinned weekly easy questions post, or the Rust
[Discourse forum][discourse]. It's acceptable to file a support issue in this
repo but they tend not to get as many eyes as any of the above and may get
closed without a response after some time.
[#rust-questions]: https://discord.com/channels/273534239310479360/274215136414400513
[#rust-beginners]: https://discord.com/channels/273534239310479360/273541522815713281
[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848
[#beginners]: https://discord.com/channels/442252698964721669/448238009733742612
[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general
[stackoverflow]: https://stackoverflow.com/questions/tagged/rust
[/r/rust]: https://www.reddit.com/r/rust
[discourse]: https://users.rust-lang.org
<br>
#### License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
</sup>
<br>
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.
</sub> | unknown | github | https://github.com/serde-rs/serde | README.md |
########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from stat import S_IRUSR, S_IWUSR
import yaml
from ansible.utils.display import Display
display = Display()
class GalaxyToken(object):
''' Class to storing and retrieving token in ~/.ansible_galaxy '''
def __init__(self):
self.file = os.path.expanduser("~") + '/.ansible_galaxy'
self.config = yaml.safe_load(self.__open_config_for_read())
if not self.config:
self.config = {}
def __open_config_for_read(self):
if os.path.isfile(self.file):
display.vvv('Opened %s' % self.file)
return open(self.file, 'r')
# config.yml not found, create and chomd u+rw
f = open(self.file, 'w')
f.close()
os.chmod(self.file, S_IRUSR | S_IWUSR) # owner has +rw
display.vvv('Created %s' % self.file)
return open(self.file, 'r')
def set(self, token):
self.config['token'] = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.file, 'w') as f:
yaml.safe_dump(self.config, f, default_flow_style=False) | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VRRP state machine implementation
VRRPManager creates/deletes VRRPRounter instances dynamically.
"""
import abc
import six
from ryu.base import app_manager
from ryu.controller import event
from ryu.controller import handler
from ryu.lib import hub
from ryu.lib.packet import vrrp
from ryu.services.protocols.vrrp import event as vrrp_event
from ryu.services.protocols.vrrp import api as vrrp_api
# TODO: improve Timer service and move it into framework
class Timer(object):
def __init__(self, handler_):
assert callable(handler_)
super(Timer, self).__init__()
self._handler = handler_
self._event = hub.Event()
self._thread = None
def start(self, interval):
"""interval is in seconds"""
if self._thread:
self.cancel()
self._event.clear()
self._thread = hub.spawn(self._timer, interval)
def cancel(self):
if self._thread is None:
return
self._event.set()
hub.joinall([self._thread])
self._thread = None
def is_running(self):
return self._thread is not None
def _timer(self, interval):
# Avoid cancellation during execution of self._callable()
cancel = self._event.wait(interval)
if cancel:
return
self._handler()
class TimerEventSender(Timer):
# timeout handler is called by timer thread context.
# So in order to actual execution context to application's event thread,
# post the event to the application
def __init__(self, app, ev_cls):
super(TimerEventSender, self).__init__(self._timeout)
self._app = app
self._ev_cls = ev_cls
def _timeout(self):
self._app.send_event(self._app.name, self._ev_cls())
class VRRPParams(object):
def __init__(self, config):
self.config = config
self.master_adver_interval = None # In seconds
@property
def skew_time(self):
# In seconds
config = self.config
version = config.version
priority = config.priority
if config.version == vrrp.VRRP_VERSION_V2:
return (256.0 - priority) / 256.0
if config.version == vrrp.VRRP_VERSION_V3:
return (((256.0 - priority) * self.master_adver_interval) / 256.0)
raise ValueError('unknown vrrp version %d' % version)
@property
def master_down_interval(self):
# In seconds
return (3.0 * self.master_adver_interval) + self.skew_time
@six.add_metaclass(abc.ABCMeta)
class VRRPState(object):
def __init__(self, vrrp_router):
super(VRRPState, self).__init__()
self.vrrp_router = vrrp_router
@abc.abstractmethod
def master_down(self, ev):
pass
@abc.abstractmethod
def adver(self, ev):
pass
@abc.abstractmethod
def preempt_delay(self, ev):
pass
@abc.abstractmethod
def vrrp_received(self, ev):
pass
@abc.abstractmethod
def vrrp_shutdown_request(self, ev):
pass
@abc.abstractmethod
def vrrp_config_change_request(self, ev):
pass
class VRRPRouter(app_manager.RyuApp):
_EVENTS = [vrrp_event.EventVRRPStateChanged]
_CONSTRUCTORS = {}
_STATE_MAP = {} # should be overrided by concrete class
@staticmethod
def register(version):
def _register(cls):
VRRPRouter._CONSTRUCTORS[version] = cls
return cls
return _register
@staticmethod
def factory(name, monitor_name, interface, config, statistics, *args,
**kwargs):
cls = VRRPRouter._CONSTRUCTORS[config.version]
app_mgr = app_manager.AppManager.get_instance()
kwargs = kwargs.copy()
kwargs['name'] = name
kwargs['monitor_name'] = monitor_name
kwargs['vrrp_interface'] = interface
kwargs['vrrp_config'] = config
kwargs['vrrp_statistics'] = statistics
return app_mgr.instantiate(cls, *args, **kwargs)
class _EventMasterDown(event.EventBase):
pass
class _EventAdver(event.EventBase):
pass
class _EventPreemptDelay(event.EventBase):
pass
class _EventStatisticsOut(event.EventBase):
pass
def __init__(self, *args, **kwargs):
super(VRRPRouter, self).__init__(*args, **kwargs)
self.name = kwargs['name']
self.monitor_name = kwargs['monitor_name']
self.interface = kwargs['vrrp_interface']
self.config = kwargs['vrrp_config']
self.statistics = kwargs['vrrp_statistics']
self.params = VRRPParams(self.config)
self.state = None
self.state_impl = None
self.vrrp = None
self.master_down_timer = TimerEventSender(self, self._EventMasterDown)
self.adver_timer = TimerEventSender(self, self._EventAdver)
self.preempt_delay_timer = TimerEventSender(self,
self._EventPreemptDelay)
self.register_observer(self._EventMasterDown, self.name)
self.register_observer(self._EventAdver, self.name)
self.stats_out_timer = TimerEventSender(self,
self._EventStatisticsOut)
self.register_observer(self._EventStatisticsOut, self.name)
def send_advertisement(self, release=False):
if self.vrrp is None:
config = self.config
max_adver_int = vrrp.vrrp.sec_to_max_adver_int(
config.version, config.advertisement_interval)
self.vrrp = vrrp.vrrp.create_version(
config.version, vrrp.VRRP_TYPE_ADVERTISEMENT, config.vrid,
config.priority, max_adver_int, config.ip_addresses)
vrrp_ = self.vrrp
if release:
vrrp_ = vrrp_.create(vrrp_.type, vrrp_.vrid,
vrrp.VRRP_PRIORITY_RELEASE_RESPONSIBILITY,
vrrp_.max_adver_int, vrrp_.ip_addresses)
if self.vrrp.priority == 0:
self.statistics.tx_vrrp_zero_prio_packets += 1
# create packet frame each time to generate new ip identity
interface = self.interface
packet_ = vrrp_.create_packet(interface.primary_ip_address,
interface.vlan_id)
packet_.serialize()
vrrp_api.vrrp_transmit(self, self.monitor_name, packet_.data)
self.statistics.tx_vrrp_packets += 1
def state_change(self, new_state):
old_state = self.state
self.state = new_state
self.state_impl = self._STATE_MAP[new_state](self)
state_changed = vrrp_event.EventVRRPStateChanged(
self.name, self.monitor_name, self.interface, self.config,
old_state, new_state)
self.send_event_to_observers(state_changed)
@handler.set_ev_handler(_EventMasterDown)
def master_down_handler(self, ev):
self.state_impl.master_down(ev)
@handler.set_ev_handler(_EventAdver)
def adver_handler(self, ev):
self.state_impl.adver(ev)
@handler.set_ev_handler(_EventPreemptDelay)
def preempt_delay_handler(self, ev):
self.state_impl.preempt_delay(ev)
@handler.set_ev_handler(vrrp_event.EventVRRPReceived)
def vrrp_received_handler(self, ev):
self.state_impl.vrrp_received(ev)
@handler.set_ev_handler(vrrp_event.EventVRRPShutdownRequest)
def vrrp_shutdown_request_handler(self, ev):
assert ev.instance_name == self.name
self.state_impl.vrrp_shutdown_request(ev)
@handler.set_ev_handler(vrrp_event.EventVRRPConfigChangeRequest)
def vrrp_config_change_request_handler(self, ev):
config = self.config
if ev.priority is not None:
config.priority = ev.priority
if ev.advertisement_interval is not None:
config.advertisement_interval = ev.advertisement_interval
if ev.preempt_mode is not None:
config.preempt_mode = ev.preempt_mode
if ev.preempt_delay is not None:
config.preempt_delay = ev.preempt_delay
if ev.accept_mode is not None:
config.accept_mode = ev.accept_mode
# force to recreate cached vrrp packet
self.vrrp = None
self.state_impl.vrrp_config_change_request(ev)
@handler.set_ev_handler(_EventStatisticsOut)
def statistics_handler(self, ev):
# sends stats to somewhere here
# print self.statistics.get_stats()
self.stats_out_timer.start(self.statistics.statistics_interval)
# RFC defines that start timer, then change the state.
# This causes the race between state change and event dispatching.
# So our implementation does, state change, then start timer
class VRRPV2StateInitialize(VRRPState):
# In theory this shouldn't be called.
def master_down(self, ev):
self.vrrp_router.logger.warn('%s master_down', self.__class__.__name__)
def adver(self, ev):
self.vrrp_router.logger.warn('%s adver', self.__class__.__name__)
def preempt_delay(self, ev):
self.vrrp_router.logger.warn('%s preempt_delay',
self.__class__.__name__)
def vrrp_received(self, ev):
self.vrrp_router.logger.warn('%s vrrp_received',
self.__class__.__name__)
def vrrp_shutdown_request(self, ev):
self.vrrp_router.logger.warn('%s vrrp_shutdown_request',
self.__class__.__name__)
def vrrp_config_change_request(self, ev):
self.vrrp_router.logger.warn('%s vrrp_config_change_request',
self.__class__.__name__)
class VRRPV2StateMaster(VRRPState):
def master_down(self, ev):
# should not reach here.
# In fact this can be happned due to event scheduling
vrrp_router = self.vrrp_router
vrrp_router.logger.debug('%s master_down %s %s' % (
self.__class__.__name__, ev.__class__.__name__, vrrp_router.state))
def _adver(self):
vrrp_router = self.vrrp_router
vrrp_router.send_advertisement()
vrrp_router.adver_timer.start(
vrrp_router.config.advertisement_interval)
def adver(self, ev):
self.vrrp_router.logger.debug('%s adver', self.__class__.__name__)
self._adver()
def preempt_delay(self, ev):
self.vrrp_router.logger.warn('%s preempt_delay',
self.__class__.__name__)
def vrrp_received(self, ev):
vrrp_router = self.vrrp_router
vrrp_router.logger.debug('%s vrrp_received', self.__class__.__name__)
ip, vrrp_ = vrrp.vrrp.get_payload(ev.packet)
config = vrrp_router.config
if vrrp_.priority == 0:
vrrp_router.send_advertisement()
vrrp_router.adver_timer.start(config.advertisement_interval)
else:
params = vrrp_router.params
if (config.priority < vrrp_.priority or
(config.priority == vrrp_.priority and
vrrp.ip_address_lt(vrrp_router.interface.primary_ip_address,
ip.src))):
vrrp_router.adver_timer.cancel()
vrrp_router.state_change(vrrp_event.VRRP_STATE_BACKUP)
vrrp_router.master_down_timer.start(
params.master_down_interval)
def vrrp_shutdown_request(self, ev):
vrrp_router = self.vrrp_router
vrrp_router.logger.debug('%s vrrp_shutdown_request',
self.__class__.__name__)
vrrp_router.adver_timer.cancel()
vrrp_router.send_advertisement(True)
vrrp_router.state_change(vrrp_event.VRRP_STATE_INITIALIZE)
def vrrp_config_change_request(self, ev):
vrrp_router = self.vrrp_router
vrrp_router.logger.warn('%s vrrp_config_change_request',
self.__class__.__name__)
if ev.priority is not None or ev.advertisement_interval is not None:
vrrp_router.adver_timer.cancel()
self._adver()
class VRRPV2StateBackup(VRRPState):
def _master_down(self):
vrrp_router = self.vrrp_router
vrrp_router.send_advertisement()
# This action should be done router on
# EventVRRPStateChanged(VRRP_STATE_BACKUP->VRRP_STATE_MASTER)
#
# RFC3768 6.4.2 Backup
# o Broadcast a gratuitous ARP request containing the virtual
# router MAC address for each IP address associated with the
# virtual router
# RACE: actual router has the responsiblity to send garp.
# so due to thread scheduling there is a race between
# actual router sending GARP and VRRPRouter becoming
# master/backup
vrrp_router.preempt_delay_timer.cancel()
vrrp_router.state_change(vrrp_event.VRRP_STATE_MASTER)
vrrp_router.adver_timer.start(
vrrp_router.config.advertisement_interval)
def master_down(self, ev):
self.vrrp_router.logger.debug('%s master_down',
self.__class__.__name__)
self._master_down()
def adver(self, ev):
# should not reach here
# In fact this can be happned due to event scheduling
vrrp_router = self.vrrp_router
vrrp_router.logger.debug('%s adver %s %s' % (
self.__class__.__name__, ev.__class__.__name__, vrrp_router.state))
def preempt_delay(self, ev):
self.vrrp_router.logger.warn('%s preempt_delay',
self.__class__.__name__)
self._master_down()
def vrrp_received(self, ev):
vrrp_router = self.vrrp_router
vrrp_router.logger.debug('%s vrrp_received', self.__class__.__name__)
_ip, vrrp_ = vrrp.vrrp.get_payload(ev.packet)
if vrrp_.priority == 0:
vrrp_router.master_down_timer.start(vrrp_router.params.skew_time)
else:
config = vrrp_router.config
params = vrrp_router.params
if (not config.preempt_mode or config.priority <= vrrp_.priority):
vrrp_router.preempt_delay_timer.cancel()
vrrp_router.master_down_timer.start(
params.master_down_interval)
elif (config.preempt_mode and config.preempt_delay > 0 and
config.priority > vrrp_.priority):
if not vrrp_router.preempt_delay_timer.is_running():
vrrp_router.preempt_delay_timer.start(config.preempt_delay)
vrrp_router.master_down_timer.start(
params.master_down_interval)
def vrrp_shutdown_request(self, ev):
vrrp_router = self.vrrp_router
vrrp_router.logger.debug('%s vrrp_shutdown_request',
self.__class__.__name__)
vrrp_router.master_down_timer.cancel()
vrrp_router.preempt_delay_timer.cancel()
vrrp_router.state_change(vrrp_event.VRRP_STATE_INITIALIZE)
def vrrp_config_change_request(self, ev):
vrrp_router = self.vrrp_router
vrrp_router.logger.warn('%s vrrp_config_change_request',
self.__class__.__name__)
if ev.priority is not None and vrrp_router.config.address_owner:
vrrp_router.master_down_timer.cancel()
self._master_down()
if ev.preempt_mode is not None or ev.preempt_delay is not None:
vrrp_router.preempt_delay_timer.cancel()
@VRRPRouter.register(vrrp.VRRP_VERSION_V2)
class VRRPRouterV2(VRRPRouter):
    """VRRP version 2 (RFC 3768) router state machine."""

    # State name -> state-handler class for this protocol version.
    _STATE_MAP = {
        vrrp_event.VRRP_STATE_INITIALIZE: VRRPV2StateInitialize,
        vrrp_event.VRRP_STATE_MASTER: VRRPV2StateMaster,
        vrrp_event.VRRP_STATE_BACKUP: VRRPV2StateBackup,
    }

    def __init__(self, *args, **kwargs):
        super(VRRPRouterV2, self).__init__(*args, **kwargs)

    def start(self):
        """Enter the state machine: MASTER if address owner, else BACKUP."""
        params = self.params
        params.master_adver_interval = self.config.advertisement_interval
        self.state_change(vrrp_event.VRRP_STATE_INITIALIZE)
        if self.config.address_owner:
            self.send_advertisement()
            # This action should be done by the router on
            # EventVRRPStateChanged(None->VRRP_STATE_MASTER)
            #
            # RFC3768 6.4.1
            # o  Broadcast a gratuitous ARP request containing the virtual
            #    router MAC address for each IP address associated with the
            #    virtual router.
            self.state_change(vrrp_event.VRRP_STATE_MASTER)
            self.adver_timer.start(self.config.advertisement_interval)
        else:
            self.state_change(vrrp_event.VRRP_STATE_BACKUP)
            self.master_down_timer.start(params.master_down_interval)
        super(VRRPRouterV2, self).start()
class VRRPV3StateInitialize(VRRPState):
    """VRRPv3 INITIALIZE state: no event should normally arrive here,
    so every handler only records the event and takes no action."""

    def master_down(self, ev):
        log = self.vrrp_router.logger
        log.debug('%s master_down', self.__class__.__name__)

    def adver(self, ev):
        log = self.vrrp_router.logger
        log.debug('%s adver', self.__class__.__name__)

    def preempt_delay(self, ev):
        log = self.vrrp_router.logger
        log.warn('%s preempt_delay', self.__class__.__name__)

    def vrrp_received(self, ev):
        log = self.vrrp_router.logger
        log.debug('%s vrrp_received', self.__class__.__name__)

    def vrrp_shutdown_request(self, ev):
        log = self.vrrp_router.logger
        log.debug('%s vrrp_shutdown_request', self.__class__.__name__)

    def vrrp_config_change_request(self, ev):
        log = self.vrrp_router.logger
        log.warn('%s vrrp_config_change_request', self.__class__.__name__)
class VRRPV3StateMaster(VRRPState):
    """MASTER-state event handlers for a VRRPv3 router."""

    def master_down(self, ev):
        # Should not be reachable while MASTER; in practice a stale
        # master-down timer event can arrive due to event scheduling,
        # so just log it.
        vrrp_router = self.vrrp_router
        vrrp_router.logger.debug('%s master_down %s %s' % (
            self.__class__.__name__, ev.__class__.__name__, vrrp_router.state))

    def _adver(self):
        # Send one advertisement and re-arm the periodic adver timer.
        vrrp_router = self.vrrp_router
        vrrp_router.send_advertisement()
        vrrp_router.adver_timer.start(
            vrrp_router.config.advertisement_interval)

    def adver(self, ev):
        """Advertisement timer fired: advertise and restart the timer."""
        self.vrrp_router.logger.debug('%s adver', self.__class__.__name__)
        self._adver()

    def preempt_delay(self, ev):
        # Unexpected while MASTER; log at warning level and ignore.
        self.vrrp_router.logger.warn('%s preempt_delay',
                                     self.__class__.__name__)

    def vrrp_received(self, ev):
        """Handle an advertisement received while MASTER.

        Priority 0 (a resigning peer) triggers an immediate
        advertisement.  Otherwise a higher-priority sender - or an
        equal-priority sender with a greater primary IP address - wins
        and this router steps down to BACKUP.
        """
        vrrp_router = self.vrrp_router
        vrrp_router.logger.debug('%s vrrp_received', self.__class__.__name__)
        ip, vrrp_ = vrrp.vrrp.get_payload(ev.packet)
        config = vrrp_router.config
        if vrrp_.priority == 0:
            vrrp_router.send_advertisement()
            vrrp_router.adver_timer.start(config.advertisement_interval)
        else:
            params = vrrp_router.params
            if (config.priority < vrrp_.priority or
                    (config.priority == vrrp_.priority and
                     vrrp.ip_address_lt(vrrp_router.interface.primary_ip_address,
                                        ip.src))):
                # Sender wins: adopt its advertised interval for the
                # master-down computation and become BACKUP.
                vrrp_router.adver_timer.cancel()
                params.master_adver_interval = vrrp_.max_adver_int_in_sec
                vrrp_router.state_change(vrrp_event.VRRP_STATE_BACKUP)
                vrrp_router.master_down_timer.start(
                    params.master_down_interval)

    def vrrp_shutdown_request(self, ev):
        # Administrative shutdown: send a final advertisement
        # (send_advertisement(True) - presumably a priority-0
        # resignation; confirm against send_advertisement) and return
        # to INITIALIZE.
        vrrp_router = self.vrrp_router
        vrrp_router.logger.debug('%s vrrp_shutdown_request',
                                 self.__class__.__name__)
        vrrp_router.adver_timer.cancel()
        vrrp_router.send_advertisement(True)
        vrrp_router.state_change(vrrp_event.VRRP_STATE_INITIALIZE)

    def vrrp_config_change_request(self, ev):
        # On priority/interval changes, advertise immediately with the
        # new values and re-arm the timer.
        vrrp_router = self.vrrp_router
        vrrp_router.logger.warn('%s vrrp_config_change_request',
                                self.__class__.__name__)
        if ev.priority is not None or ev.advertisement_interval is not None:
            vrrp_router.adver_timer.cancel()
            self._adver()
class VRRPV3StateBackup(VRRPState):
    """BACKUP-state event handlers for a VRRPv3 router."""

    def _master_down(self):
        # Transition BACKUP -> MASTER: advertise, then start the
        # periodic advertisement timer.
        vrrp_router = self.vrrp_router
        vrrp_router.send_advertisement()
        # This action should be done by the router on
        # EventStateChange(VRRP_STATE_BACKUP -> VRRP_STATE_MASTER)
        #
        # RFC 5798 6.4.2
        # (375) + If the protected IPvX address is an IPv4 address, then:
        # (380)   * Broadcast a gratuitous ARP request on that interface
        #           containing the virtual router MAC address for each IPv4
        #           address associated with the virtual router.
        # (385) + else // ipv6
        # (390)   * Compute and join the Solicited-Node multicast
        #           address [RFC4291] for the IPv6 address(es) associated with
        #           the virtual router.
        # (395)   * For each IPv6 address associated with the virtual
        #           router, send an unsolicited ND Neighbor Advertisement with
        #           the Router Flag (R) set, the Solicited Flag (S) unset, the
        #           Override flag (O) set, the target address set to the IPv6
        #           address of the virtual router, and the target link-layer
        #           address set to the virtual router MAC address.
        # RACE: the actual router has the responsibility to send GARP,
        # so due to thread scheduling there is a race between the
        # actual router sending GARP and VRRPRouter becoming
        # master/backup.
        vrrp_router.preempt_delay_timer.cancel()
        vrrp_router.state_change(vrrp_event.VRRP_STATE_MASTER)
        vrrp_router.adver_timer.start(
            vrrp_router.config.advertisement_interval)

    def master_down(self, ev):
        """Master-down timer expired: take over as master."""
        self.vrrp_router.logger.debug('%s master_down',
                                      self.__class__.__name__)
        self._master_down()

    def adver(self, ev):
        # Should not be reachable in BACKUP; a stale adver timer event
        # can still arrive due to event scheduling, so just log it.
        vrrp_router = self.vrrp_router
        vrrp_router.logger.debug('adver %s %s %s' % (
            self.__class__.__name__, ev.__class__.__name__, vrrp_router.state))

    def preempt_delay(self, ev):
        """Preempt-delay timer expired: promote this backup to master."""
        self.vrrp_router.logger.warn('%s preempt_delay',
                                     self.__class__.__name__)
        self._master_down()

    def vrrp_received(self, ev):
        """Handle an advertisement received while in BACKUP.

        Priority 0 means the master resigned: schedule takeover after
        skew_time.  Otherwise either defer to the sender or, in preempt
        mode with a positive delay, arm the preempt-delay timer when we
        outrank the sender.
        """
        vrrp_router = self.vrrp_router
        vrrp_router.logger.debug('%s vrrp_received', self.__class__.__name__)
        _ip, vrrp_ = vrrp.vrrp.get_payload(ev.packet)
        if vrrp_.priority == 0:
            vrrp_router.master_down_timer.start(vrrp_router.params.skew_time)
        else:
            params = vrrp_router.params
            config = vrrp_router.config
            if (not config.preempt_mode or config.priority <= vrrp_.priority):
                # v3 difference from v2: track the sender's advertised
                # interval so the master-down interval follows the
                # actual master.
                params.master_adver_interval = vrrp_.max_adver_int_in_sec
                vrrp_router.master_down_timer.start(
                    params.master_down_interval)
            elif (config.preempt_mode and config.preempt_delay > 0 and
                  config.priority > vrrp_.priority):
                # We outrank the sender: start the preempt delay (once)
                # before taking over.
                if not vrrp_router.preempt_delay_timer.is_running():
                    vrrp_router.preempt_delay_timer.start(config.preempt_delay)
                vrrp_router.master_down_timer.start(
                    params.master_down_interval)

    def vrrp_shutdown_request(self, ev):
        """Administrative shutdown: cancel timers, go to INITIALIZE."""
        vrrp_router = self.vrrp_router
        vrrp_router.logger.debug('%s vrrp_shutdown_request',
                                 self.__class__.__name__)
        vrrp_router.preempt_delay_timer.cancel()
        vrrp_router.master_down_timer.cancel()
        vrrp_router.state_change(vrrp_event.VRRP_STATE_INITIALIZE)

    def vrrp_config_change_request(self, ev):
        # Becoming address owner forces an immediate takeover; any
        # preempt-related change drops a pending preempt takeover.
        vrrp_router = self.vrrp_router
        vrrp_router.logger.warn('%s vrrp_config_change_request',
                                self.__class__.__name__)
        if ev.priority is not None and vrrp_router.config.address_owner:
            vrrp_router.master_down_timer.cancel()
            self._master_down()
        if ev.preempt_mode is not None or ev.preempt_delay is not None:
            vrrp_router.preempt_delay_timer.cancel()
@VRRPRouter.register(vrrp.VRRP_VERSION_V3)
class VRRPRouterV3(VRRPRouter):
    """VRRP version 3 (RFC 5798) router state machine."""

    # State name -> state-handler class for this protocol version.
    _STATE_MAP = {
        vrrp_event.VRRP_STATE_INITIALIZE: VRRPV3StateInitialize,
        vrrp_event.VRRP_STATE_MASTER: VRRPV3StateMaster,
        vrrp_event.VRRP_STATE_BACKUP: VRRPV3StateBackup,
    }

    def __init__(self, *args, **kwargs):
        super(VRRPRouterV3, self).__init__(*args, **kwargs)

    def start(self):
        """Enter the state machine and start the statistics timer.

        Becomes MASTER immediately when this router owns the protected
        address or is administratively configured as master; otherwise
        starts as BACKUP waiting on the master-down timer.
        """
        self.state_change(vrrp_event.VRRP_STATE_INITIALIZE)
        # Check role here and change accordingly
        # Check config.admin_state
        if self.config.address_owner or self.config.admin_state == 'master':
            self.send_advertisement()
            # This action should be done by the router on
            # EventVRRPStateChanged(None->VRRP_STATE_MASTER)
            #
            # RFC 5798 6.4.1
            # (115) + If the protected IPvX address is an IPv4 address, then:
            # (120)   * Broadcast a gratuitous ARP request containing the
            #           virtual router MAC address for each IP address
            #           associated with the virtual router.
            # (125) + else // IPv6
            # (130)   * For each IPv6 address associated with the virtual
            #           router, send an unsolicited ND Neighbor Advertisement
            #           with the Router Flag (R) set, the Solicited Flag (S)
            #           unset, the Override flag (O) set, the target address
            #           set to the IPv6 address of the virtual router, and the
            #           target link-layer address set to the virtual router
            #           MAC address.
            self.state_change(vrrp_event.VRRP_STATE_MASTER)
            self.adver_timer.start(self.config.advertisement_interval)
        else:
            params = self.params
            params.master_adver_interval = self.config.advertisement_interval
            self.state_change(vrrp_event.VRRP_STATE_BACKUP)
            self.master_down_timer.start(params.master_down_interval)
        self.stats_out_timer.start(self.statistics.statistics_interval)
        super(VRRPRouterV3, self).start()
#!/usr/bin/env python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# DummySR: an example dummy SR for the SDK
import SR, VDI, SRCommand, util, lvutil
import errno
import os, sys, time
import xml.dom.minidom
import xmlrpclib
import xs_errors
# SMAPI operations this driver advertises to xapi.
CAPABILITIES = [
    "SR_PROBE", "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
    "VDI_ACTIVATE", "VDI_DEACTIVATE", "VDI_CLONE", "VDI_SNAPSHOT",
    "VDI_RESIZE", "VDI_INTRODUCE",
]

# The dummy driver requires no device-config keys.
CONFIGURATION = []

# Metadata reported to xapi when the driver registers itself.
DRIVER_INFO = {
    'name': 'dummy',
    'description': 'SR plugin which manages fake data',
    'vendor': 'Citrix Systems Inc',
    'copyright': '(C) 2008 Citrix Systems Inc',
    'driver_version': '1.0',
    'required_api_version': '1.1',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION,
}

# SR type string matched by DummySR.handles().
TYPE = 'dummy'
class DummySR(SR.SR):
    """Dummy storage repository (Python 2): validates and logs the SMAPI
    calls xapi makes, but stores no real data."""

    def handles(type):
        # Claim only the 'dummy' SR type.
        if type == TYPE:
            return True
        return False
    handles = staticmethod(handles)

    def load(self, sr_uuid):
        # VDIs are exposed with the raw/physical VDI type.
        self.sr_vditype = 'phy'

    def content_type(self, sr_uuid):
        return super(DummySR, self).content_type(sr_uuid)

    def create(self, sr_uuid, size):
        # Only validates the parameters xapi passed; nothing is created.
        self._assertValues(['sr_uuid','args','host_ref','session_ref','device_config','command','sr_ref'])
        assert(len(self.srcmd.params['args'])==1)

    def delete(self, sr_uuid):
        self._assertValues(['sr_uuid','args','host_ref','session_ref','device_config','command','sr_ref'])
        assert(len(self.srcmd.params['args'])==0)

    def attach(self, sr_uuid):
        self._assertValues(['sr_uuid','args','host_ref','session_ref','device_config','command','sr_ref'])
        assert(len(self.srcmd.params['args'])==0)

    def detach(self, sr_uuid):
        self._assertValues(['sr_uuid','args','host_ref','session_ref','device_config','command','sr_ref'])
        assert(len(self.srcmd.params['args'])==0)

    def probe(self):
        """Return probe XML describing one fabricated SR."""
        # N.B. There are no SR references
        self._assertValues(['args','host_ref','session_ref','device_config','command'])
        assert(len(self.srcmd.params['args'])==0)
        # Create some Dummy SR records
        entry = {}
        entry['size'] = 1024
        SRlist = {}
        SRlist[util.gen_uuid()] = entry
        # Return the Probe XML
        return util.SRtoXML(SRlist)

    def vdi(self, uuid):
        # VDI factory used by the generic SR machinery.
        return DummyVDI(self, uuid)

    def scan(self, sr_uuid):
        """Rebuild self.vdis from the XenAPI DB and report fake capacity."""
        self._assertValues(['sr_uuid','args','host_ref','session_ref','device_config','command','sr_ref'])
        assert(len(self.srcmd.params['args'])==0)
        # The list of VDIs comes from the XenAPI - we have no state
        for v in self._getallVDIrecords():
            x = DummyVDI(self, v['uuid'])
            x.size = v['virtual_size']
            x.utilisation = v['physical_utilisation']
            self.vdis[x.uuid] = x
        # Fabricated capacity figures (Python 2 long literals).
        self.physical_size = 2000000000000L
        self.physical_utilisation = 0L
        self.virtual_allocation = 0L
        return super(DummySR, self).scan(sr_uuid)

    def _assertValues(self, vals):
        # Assert each expected key exists in the SMAPI params and log
        # everything for debugging (Python 2 dict APIs: has_key/iterkeys).
        for attr in vals:
            assert(self.srcmd.params.has_key(attr))
            util.SMlog("%s param %s: [%s]" % (self.cmd,attr,self.srcmd.params[attr]))
        # Iterate through the device_config dictionary
        for key in self.dconf.iterkeys():
            util.SMlog("\tdevice_config: [%s:%s]" % (key,self.dconf[key]))
        # Query the sm_config; parameters can be set at Create time. Iterate through keys
        self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        for key in self.sm_config.iterkeys():
            util.SMlog("\tsm_config: [%s:%s]" % (key,self.sm_config[key]))

    def _getallVDIrecords(self):
        """Helper function which returns a list of all VDI records for this SR
        stored in the XenAPI server"""
        # Returns a list of (reference, record) pairs: we only need the records
        vdis = self.session.VDI.get_all_records(self.session_ref)['Value'].values()
        # We only need the VDIs corresponding to this SR
        return filter(lambda v: v['SR'] == self.sr_ref, vdis)
class DummyVDI(VDI.VDI):
    """VDI object for the dummy SR: exercises SMAPI plumbing and the
    LVHDRT serialization fistpoint, without touching real storage."""

    def load(self, vdi_uuid):
        # Default in-memory state; nothing is read from disk.
        self.path = "/dev/null" # returned on attach
        self.uuid = vdi_uuid
        self.size = 0
        self.utilisation = 0
        self.location = vdi_uuid
        self.sm_config = {}

    def create(self, sr_uuid, vdi_uuid, size):
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref','vdi_sm_config'])
        assert(len(self.sr.srcmd.params['args']) == 8)
        self.vdi_sm_config = self.sr.srcmd.params['vdi_sm_config']
        for key in self.vdi_sm_config.iterkeys():
            util.SMlog("\tvdi_sm_config: [%s:%s]" % (key,self.vdi_sm_config[key]))
        # Refuse duplicate UUIDs.
        for v in self.sr._getallVDIrecords():
            if v['uuid'] == vdi_uuid:
                raise xs_errors.XenError('VDIExists')
        self.size = size
        self.utilisation = size
        self.sm_config['samplekey'] = "This is a dummy SR VDI"
        self._db_introduce()
        self.run_corner_cases_tests()
        return self.get_params()

    def delete(self, sr_uuid, vdi_uuid):
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref','vdi_ref','vdi_location','vdi_uuid'])
        assert(len(self.sr.srcmd.params['args'])==0)
        # Assert that the VDI record exists
        # NOTE(review): only the session ref is passed here; VDI.get_record
        # normally also takes the VDI ref - confirm against the XenAPI
        # bindings in use.
        self.session.VDI.get_record(self.sr.session_ref)
        self.run_corner_cases_tests()
        self._db_forget()

    def introduce(self, sr_uuid, vdi_uuid):
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref','vdi_sm_config','new_uuid'])
        assert(len(self.sr.srcmd.params['args'])==0)
        self.vdi_sm_config = self.sr.srcmd.params['vdi_sm_config']
        for key in self.vdi_sm_config.iterkeys():
            util.SMlog("\tvdi_sm_config: [%s:%s]" % (key,self.vdi_sm_config[key]))
        # Refuse to introduce a UUID that already exists.
        for v in self.sr._getallVDIrecords():
            if v['uuid'] == vdi_uuid:
                raise xs_errors.XenError('VDIExists')
        self.uuid = vdi_uuid
        self.location = self.sr.srcmd.params['vdi_location']
        self._db_introduce()
        self.run_corner_cases_tests()
        return super(DummyVDI, self).get_params()

    def attach(self, sr_uuid, vdi_uuid):
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref','vdi_ref','vdi_location','vdi_uuid'])
        assert(len(self.sr.srcmd.params['args'])==1)
        vdi = super(DummyVDI, self).attach(sr_uuid, vdi_uuid)
        self.run_corner_cases_tests()
        return vdi

    def detach(self, sr_uuid, vdi_uuid):
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref','vdi_ref','vdi_location','vdi_uuid'])
        self.run_corner_cases_tests()
        assert(len(self.sr.srcmd.params['args'])==0)

    def activate(self, sr_uuid, vdi_uuid):
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref','vdi_ref','vdi_location','vdi_uuid'])
        assert(len(self.sr.srcmd.params['args'])==1)
        self.vdi_ref = self.sr.srcmd.params['vdi_ref']
        self.other_config = self.session.xenapi.VDI.get_other_config(self.vdi_ref)
        self.run_corner_cases_tests()
        # Log the VDI's other-config for debugging.
        for key in self.other_config.iterkeys():
            util.SMlog("\tvdi_other_config: [%s:%s]" % (key,self.other_config[key]))

    def deactivate(self, sr_uuid, vdi_uuid):
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref','vdi_ref','vdi_location','vdi_uuid'])
        self.run_corner_cases_tests()
        assert(len(self.sr.srcmd.params['args'])==0)

    def resize(self, sr_uuid, vdi_uuid, size):
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref','vdi_ref','vdi_location','vdi_uuid'])
        assert(len(self.sr.srcmd.params['args'])==1)
        self.size = size
        self.utilisation = size
        self._db_update()
        self.run_corner_cases_tests()
        return super(DummyVDI, self).get_params()

    def snapshot(self, sr_uuid, vdi_uuid):
        # Snapshot: introduce a fresh read-only zero-sized VDI record.
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref'])
        assert(len(self.sr.srcmd.params['args'])==0)
        dest = util.gen_uuid()
        vdi = VDI.VDI(self.sr, dest)
        vdi.read_only = True
        vdi.location = dest
        vdi.size = 0
        vdi.utilisation = 0
        vdi._db_introduce()
        self.run_corner_cases_tests()
        return vdi.get_params()

    def clone(self, sr_uuid, vdi_uuid):
        # Clone: same as snapshot but the new VDI is writable.
        self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref'])
        assert(len(self.sr.srcmd.params['args'])==0)
        dest = util.gen_uuid()
        vdi = VDI.VDI(self.sr, dest)
        vdi.read_only = False
        vdi.location = dest
        vdi.size = 0
        vdi.utilisation = 0
        vdi._db_introduce()
        self.run_corner_cases_tests()
        return vdi.get_params()

    def check_no_other_vdi_operation_in_progress(self):
        # Fail if any *other* VDI in this SR has an operation in flight.
        vdis = util.list_VDI_records_in_sr(self.sr)
        vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
        del vdis[vdi_ref]
        active_vdis = filter(lambda v: v['current_operations'] != {}, vdis.values())
        if len(active_vdis) != 0:
            msg = "LVHDRT: found other operations in progress for VDI: %s" % active_vdis[0]['uuid']
            util.SMlog(msg)
            raise xs_errors.XenError('OtherVDIOperationInProgress')

    def get_attached_vbds(self):
        # All VBDs of this VDI currently attached (XenAPI encodes the
        # boolean as the string "true").
        vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
        vbds = self.session.xenapi.VBD.get_all_records_where("field \"VDI\" = \"%s\"" % vdi_ref)
        return filter(lambda v: v['currently_attached'] == "true", vbds.values())

    def check_vbd_list_is_stable(self, attached_vbds):
        # The set of attached VBDs must not change during the test window.
        newly_attached_vbds = self.get_attached_vbds()
        old_set = set(attached_vbds)
        new_set = set(newly_attached_vbds)
        diff_set = old_set.difference(new_set) | new_set.difference(old_set)
        if len(diff_set) != 0:
            msg = "LVHDRT: found a non-stable VBD: %s" % (diff_set.pop())
            util.SMlog(msg)
            raise xs_errors.XenError('VBDListNotStable')

    def run_corner_cases_tests(self):
        # Serialization stress hook: when the LVHDRT fistpoint is armed,
        # poll for ~20s asserting no concurrent VDI operations and a
        # stable VBD list; otherwise this is a no-op.
        def fn():
            attached_vbds = self.get_attached_vbds()
            for i in range(0,10):
                time.sleep(2)
                self.check_no_other_vdi_operation_in_progress()
                self.check_vbd_list_is_stable(attached_vbds)
        util.fistpoint.activate_custom_fn("LVHDRT_xapiSM_serialization_tests", fn)
# Entry point: executed directly by xapi's SMAPI command dispatcher;
# when imported instead, register the driver with the SR factory.
if __name__ == '__main__':
    SRCommand.run(DummySR, DRIVER_INFO)
else:
    SR.registerSR(DummySR)
# Global defaults: scrape native histograms everywhere, but negotiate only
# the classic text exposition format.
global:
  scrape_native_histograms: true
  scrape_protocols: ['PrometheusText0.0.4']
scrape_configs:
  - job_name: prometheus
    # Per-job override: this job opts out of native-histogram scraping.
    scrape_native_histograms: false
    static_configs:
      - targets: ['localhost:8080']
#! /usr/bin/env python
from __future__ import print_function
import openturns as ot
# Build a Gamma distribution via its (mu, sigma, gamma) parametrization and
# wrap it so the native parameters are recomputed from (mu, sigma, gamma).
parameters = ot.GammaMuSigma(0.1, 0.489898, -0.5)
distribution = ot.ParametrizedDistribution(parameters)
print("Distribution ", distribution)

# Is this distribution elliptical ?
print("Elliptical = ", distribution.isElliptical())

# Is this distribution continuous ?
print("Continuous = ", distribution.isContinuous())

# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", oneRealization)

# Test for sampling
size = 10000
oneSample = distribution.getSample(size)
print("oneSample first=", oneSample[0], " last=", oneSample[size - 1])
print("mean=", oneSample.computeMean())
print("covariance=", oneSample.computeCovariance())

# Define a point
point = ot.NumericalPoint(distribution.getDimension(), 1.0)
print("Point= ", point)

# Show PDF and CDF of point
eps = 1e-5
# DDF, cross-checked by a centered finite difference of the PDF.
DDF = distribution.computeDDF(point)
print("ddf =", DDF)
print("ddf (FD)= %.6g" % ((distribution.computePDF(point + ot.NumericalPoint(1, eps)) -
                           distribution.computePDF(point + ot.NumericalPoint(1, -eps))) / (2.0 * eps)))
LPDF = distribution.computeLogPDF(point)
print("log pdf= %.6g" % LPDF)
PDF = distribution.computePDF(point)
print("pdf =%.6g" % PDF)
# PDF cross-checked as the finite-difference derivative of the CDF.
print("pdf (FD)=%.6g" % ((distribution.computeCDF(point + ot.NumericalPoint(1, eps)) -
                          distribution.computeCDF(point + ot.NumericalPoint(1, -eps))) / (2.0 * eps)))
CDF = distribution.computeCDF(point)
print("cdf= %.6g" % CDF)
CCDF = distribution.computeComplementaryCDF(point)
print("ccdf= %.6g" % CCDF)
Survival = distribution.computeSurvivalFunction(point)
print("survival= %.6g" % Survival)
# Characteristic function and its logarithm at a scalar point.
CF = distribution.computeCharacteristicFunction(point[0])
print("characteristic function=(%.6g, %.6g)" % (CF.real, CF.imag))
LCF = distribution.computeLogCharacteristicFunction(point[0])
print("log characteristic function=(%.6g, %.6g)" % (LCF.real, LCF.imag))
PDFgr = distribution.computePDFGradient(point)
quantile = distribution.computeQuantile(0.95)
print("quantile=", quantile)
print("cdf(quantile)=", distribution.computeCDF(quantile))

# Get 95% survival function
inverseSurvival = ot.NumericalPoint(distribution.computeInverseSurvivalFunction(0.95))
print("InverseSurvival=", repr(inverseSurvival))
print("Survival(inverseSurvival)=%.6f" % distribution.computeSurvivalFunction(inverseSurvival))

# Confidence regions
interval, threshold = distribution.computeMinimumVolumeIntervalWithMarginalProbability(0.95)
print("Minimum volume interval=", interval)
print("threshold=", ot.NumericalPoint(1, threshold))
levelSet, beta = distribution.computeMinimumVolumeLevelSetWithThreshold(0.95)
print("Minimum volume level set=", levelSet)
print("beta=", ot.NumericalPoint(1, beta))
interval, beta = distribution.computeBilateralConfidenceIntervalWithMarginalProbability(0.95)
print("Bilateral confidence interval=", interval)
print("beta=", ot.NumericalPoint(1, beta))
interval, beta = distribution.computeUnilateralConfidenceIntervalWithMarginalProbability(0.95, False)
print("Unilateral confidence interval (lower tail)=", interval)
print("beta=", ot.NumericalPoint(1, beta))
interval, beta = distribution.computeUnilateralConfidenceIntervalWithMarginalProbability(0.95, True)
print("Unilateral confidence interval (upper tail)=", interval)
print("beta=", ot.NumericalPoint(1, beta))

# Moments and dependence measures.
mean = distribution.getMean()
print("mean=", mean)
covariance = distribution.getCovariance()
print("covariance=", covariance)
correlation = distribution.getCorrelation()
print("correlation=", correlation)
spearman = distribution.getSpearmanCorrelation()
print("spearman=", spearman)
kendall = distribution.getKendallTau()
print("kendall=", kendall)
parameters = distribution.getParametersCollection()
print("parameters=", parameters)
for i in range(6):
    print("standard moment n=", i, ", value=",
          distribution.getStandardMoment(i))
print("Standard representative=", distribution.getStandardRepresentative())
# Owner(s): ["module: dataloader"]
# ruff: noqa: F841
import ctypes
import errno
import faulthandler
import functools
import gc
import itertools
import math
import operator
import os
import signal
import sys
import tempfile
import time
import unittest
import warnings
import torch
import torch.utils.data.datapipes as dp
from torch import multiprocessing as mp
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import (
IS_CI,
IS_JETSON,
IS_MACOS,
IS_S390X,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
parametrize,
run_tests,
skipIfNoDill,
skipIfXpu,
slowTest,
TEST_CUDA,
TEST_NUMPY,
TEST_WITH_ASAN,
TEST_WITH_TSAN,
TestCase,
xfailIfLinux,
)
from torch.utils.data import (
_utils,
ChainDataset,
ConcatDataset,
DataLoader,
dataloader,
Dataset,
IterableDataset,
IterDataPipe,
StackDataset,
Subset,
TensorDataset,
)
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.dataset import random_split
try:
    import psutil

    HAS_PSUTIL = True
except ModuleNotFoundError:
    HAS_PSUTIL = False
    psutil = None
    err_msg = (
        "psutil not found. Some critical data loader tests relying on it "
        "(e.g., TestDataLoader.test_proper_exit) will not run."
    )
    # Missing psutil is a hard failure on CI so the affected tests cannot
    # be silently skipped; locally it only produces a warning.
    if IS_CI:
        raise ModuleNotFoundError(err_msg) from None
    else:
        warnings.warn(err_msg)

try:
    import numpy as np

    HAS_NUMPY = True
except ModuleNotFoundError:
    HAS_NUMPY = False
    np = None
skipIfNoNumpy = unittest.skipIf(not HAS_NUMPY, "no NumPy")

# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests  # noqa: PLW0127

# CUDA IPC (multi-process CUDA tensor sharing) is unavailable on macOS,
# Windows, and Jetson platforms.
TEST_CUDA_IPC = (
    torch.cuda.is_available()
    and sys.platform != "darwin"
    and sys.platform != "win32"
    and not IS_JETSON
    # and not TEST_WITH_ROCM
)  # https://github.com/pytorch/pytorch/issues/90940

TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1

# We want to use `spawn` if able because some of our tests check that the
# data loader terminates gracefully. To prevent hanging in the testing
# process, such data loaders are run in a separate subprocess.
#
# We also want to test the `pin_memory=True` configuration, thus `spawn` is
# required to launch such processes and they initialize the CUDA context.
#
# Mixing different start method is a recipe for disaster (e.g., using a fork
# `mp.Event` with a spawn `mp.Process` segfaults). So we set this globally
# to avoid bugs.
#
# Get a multiprocessing context because some test / third party library will
# set start_method when imported, and setting again triggers `RuntimeError`.
mp = mp.get_context(method="spawn")

# 60s of timeout?
# Yes, in environments where physical CPU resources are shared, e.g., CI, the
# time for a inter-process communication can be highly varying. With 15~17s of
# timeout, we have observed flakiness in some CI builds (see
# pytorch/pytorch#14501, pytorch/pytorch#16608). We follow the CPython
# multiprocessing setup and set the timeout to 60s here:
#
# https://github.com/python/cpython/blob/e8113f51a8bdf33188ee30a1c038a298329e7bfa/Lib/test/_test_multiprocessing.py#L73
JOIN_TIMEOUT = 60.0  # seconds

# `None` stands for DataLoader's platform-default start method.
supported_multiprocessing_contexts = [None] + list(
    torch.multiprocessing.get_all_start_methods()
)
# The following collate functions are defined globally here for pickle purposes.
# collate_fn that returns the batch cloned
def _clone_collate(b):
return [x.clone() for x in b]
# collate_fn that returns the batch of sparse coo tensors cloned
def _sparse_coo_collate(b):
lst = []
for x in b:
t = x.clone()
lst.append(t)
# Force sparse tensor invariants checks. check_pinning=True
# reproduces gh-153143.
torch._validate_sparse_coo_tensor_args(
t._indices(), t._values(), t.size(), t.is_coalesced(), check_pinning=False
)
return lst
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestDatasetRandomSplit(TestCase):
    """Tests for torch.utils.data.dataset.random_split."""

    def test_lengths_must_equal_dataset_size(self):
        # Absolute lengths that do not sum to len(dataset) are rejected.
        with self.assertRaises(ValueError):
            random_split([1, 2, 3, 4], [1, 2])

    def test_splits_have_correct_size(self):
        # Absolute lengths.
        splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
        self.assertEqual(len(splits), 2)
        self.assertEqual(len(splits[0]), 2)
        self.assertEqual(len(splits[1]), 4)

        # Fractional lengths.
        splits = random_split([1, 2, 3, 4, 5, 6], [0.5, 0.5])
        self.assertEqual(len(splits), 2)
        self.assertEqual(len(splits[0]), 3)
        self.assertEqual(len(splits[1]), 3)

        # Odd size splits
        self.assertEqual(
            len(
                random_split(
                    range(3), [0.5, 0.5], generator=torch.Generator().manual_seed(1)
                )
            ),
            2,
        )

        # Odd sized round-robin splits
        splits = random_split(
            range(106), [0.1, 0.2, 0.3, 0.4], generator=torch.Generator().manual_seed(1)
        )
        self.assertEqual(len(splits[0]), 11)
        self.assertEqual(len(splits[1]), 22)
        self.assertEqual(len(splits[2]), 31)
        self.assertEqual(len(splits[3]), 42)

    def test_splits_are_mutually_exclusive(self):
        # The union of the split parts must be exactly the original data.
        data = [5, 2, 3, 4, 1, 6]
        splits = random_split(data, [2, 4])
        all_values = []
        all_values.extend(list(splits[0]))
        all_values.extend(list(splits[1]))
        data.sort()
        all_values.sort()
        self.assertListEqual(data, all_values)

        splits = random_split(data, [0.33, 0.67])
        all_values = []
        all_values.extend(list(splits[0]))
        all_values.extend(list(splits[1]))
        data.sort()
        all_values.sort()
        self.assertListEqual(data, all_values)

        data = [1, 2, 3, 4]
        splits = random_split(data, [0.25, 0.75])
        all_values = []
        all_values.extend(list(splits[0]))
        all_values.extend(list(splits[1]))
        data.sort()
        all_values.sort()
        self.assertListEqual(data, all_values)

    def test_splits_indexing_type(self):
        r"""Indices generated by random_split
        should be of integer type
        """

        class CustomDataset:
            # __getitem__ asserts that every key random_split/DataLoader
            # passes through is a plain int.
            def __init__(self, test_object, custom_list):
                self.data = custom_list
                self.test_object = test_object

            def __getitem__(self, key):
                self.test_object.assertEqual(type(key), int)
                return self.data[key]

            def __len__(self):
                return len(self.data)

        x = [1, 2, 3, 4, 5]
        dataset = CustomDataset(self, x)
        dataset = random_split(dataset, [5])[0]
        data_loader = DataLoader(dataset)
        for _batch in data_loader:
            pass

        # fractional splitting
        dataset = CustomDataset(self, x)
        dataset = random_split(dataset, [1.0])[0]
        data_loader = DataLoader(dataset)
        for _batch in data_loader:
            pass

    def test_splits_reproducibility(self):
        # A fixed generator seed must always yield the same partition.
        self.assertEqual(
            [
                list(x)
                for x in random_split(
                    range(10), [3, 7], generator=torch.Generator().manual_seed(1)
                )
            ],
            [[5, 6, 1], [2, 0, 8, 9, 3, 7, 4]],
        )
        self.assertEqual(
            random_split(
                range(100), [60, 40], generator=torch.Generator().manual_seed(42)
            ),
            random_split(
                range(100), [60, 40], generator=torch.Generator().manual_seed(42)
            ),
        )
        self.assertEqual(
            random_split(
                range(100), [0.5, 0.5], generator=torch.Generator().manual_seed(42)
            ),
            random_split(
                range(100), [0.5, 0.5], generator=torch.Generator().manual_seed(42)
            ),
        )
        self.assertEqual(
            random_split(
                range(100),
                [0.33, 0.33, 0.34],
                generator=torch.Generator().manual_seed(42),
            ),
            random_split(
                range(100),
                [0.33, 0.33, 0.34],
                generator=torch.Generator().manual_seed(42),
            ),
        )

    def test_incomplete_fractional_splits(self):
        with self.assertRaises(ValueError):
            # should raise since the sum of fractions is not 1
            random_split([1, 2, 3, 4], [0.1])

        with self.assertRaises(ValueError):
            # should raise since fraction > 1
            random_split([1, 2, 3, 4], [1.1])

    def test_splits_generator(self):
        # NOTE: these checks compare RNG state consumption, so the exact
        # statement order matters.
        # A random_split without a specific generator should affect the default one
        state = torch.get_rng_state()
        a = torch.rand(10)
        torch.set_rng_state(state)
        random_split(range(10), [5, 5])
        b = torch.rand(10)
        self.assertNotEqual(a, b)

        # A random_split with a specific generator should not affect the default one
        state = torch.get_rng_state()
        a = torch.rand(10)
        torch.set_rng_state(state)
        random_split(range(10), [5, 5], generator=torch.Generator().manual_seed(42))
        b = torch.rand(10)
        self.assertEqual(a, b)

    def test_slicing_of_subset_of_dataset(self):
        # Testing slicing a subset initialized with a dataset
        dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
        subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
        self.assertEqual(subset_of_dataset[:], dataset[:])
        self.assertEqual(subset_of_dataset[1:2], dataset[1:2])
        self.assertEqual(subset_of_dataset[0:-1:2], dataset[0:-1:2])
        # Testing slicing of subset from random split
        subset1, subset2 = random_split(dataset, [3, 2])
        self.assertEqual(subset1[:], dataset[subset1.indices[:]])
        self.assertEqual(subset1[0:2], dataset[subset1.indices[0:2]])
        self.assertEqual(subset1[0:-1:2], dataset[subset1.indices[0:-1:2]])

    def test_slicing_of_subset_of_subset(self):
        # Testing slicing a subset initialized with a subset
        dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
        subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
        subset_of_subset = Subset(subset_of_dataset, [0, 1, 2, 3, 4])
        self.assertEqual(subset_of_subset[:], dataset[:])
        self.assertEqual(subset_of_subset[0:2], dataset[0:2])
        self.assertEqual(subset_of_subset[0:-1:2], dataset[0:-1:2])
        # Testing slicing of subset of subset from random split
        subset1, subset2 = random_split(dataset, [4, 1])
        subset_of_subset1, subset_of_subset2 = random_split(subset1, [3, 1])
        idx = [subset1.indices[i] for i in subset_of_subset1.indices]
        self.assertEqual(subset_of_subset1[:], dataset[idx.copy()])
        self.assertEqual(subset_of_subset1[0:2], dataset[idx[0:2]])
        self.assertEqual(subset_of_subset1[0:-1:2], dataset[idx[0:-1:2]])
class CUDACountingDataset(Dataset):
    """Map-style dataset of length ``n``; item ``i`` is ``tensor(i)`` on CUDA."""

    def __init__(self, n):
        super().__init__()
        self.n = n

    def __len__(self):
        return self.n

    def __getitem__(self, i):
        # Materialize the index directly on the GPU.
        return torch.as_tensor(i, device="cuda")
class CountingDataset(Dataset):
    """Map-style dataset of length ``n`` whose item ``i`` is simply ``i``."""

    def __init__(self, n):
        super().__init__()
        self.n = n

    def __len__(self):
        return self.n

    def __getitem__(self, i):
        return i
class CountingIterableDataset(IterableDataset):
    """Iterable dataset yielding ``0 .. n-1`` with a known length."""

    def __init__(self, n):
        super().__init__()
        self.n = n

    def __len__(self):
        return self.n

    def __iter__(self):
        return iter(range(self.n))
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestTensorDataset(TestCase):
    """Tests for torch.utils.data.TensorDataset: length, indexing, validation."""

    def test_len(self):
        # len() reports the shared first-dimension size of the wrapped tensors.
        source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
        self.assertEqual(len(source), 15)

    def test_getitem(self):
        # Indexing returns a tuple with one entry per wrapped tensor.
        t = torch.randn(15, 10, 2, 3, 4, 5)
        l = torch.randn(15, 10)
        source = TensorDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])

    def test_getitem_1d(self):
        # Same contract when the wrapped tensors are 1-D (items are scalars).
        t = torch.randn(15)
        l = torch.randn(15)
        source = TensorDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])

    def test_single_tensor(self):
        # A single wrapped tensor still yields 1-tuples.
        t = torch.randn(5, 10)
        source = TensorDataset(t)
        self.assertEqual(len(source), 5)
        for i in range(5):
            self.assertEqual(t[i], source[i][0])

    def test_many_tensors(self):
        # Arbitrarily many tensors are supported, returned in positional order.
        t0 = torch.randn(5, 10, 2, 3, 4, 5)
        t1 = torch.randn(5, 10)
        t2 = torch.randn(5, 10, 2, 5)
        t3 = torch.randn(5, 10, 3, 7)
        source = TensorDataset(t0, t1, t2, t3)
        self.assertEqual(len(source), 5)
        for i in range(5):
            self.assertEqual(t0[i], source[i][0])
            self.assertEqual(t1[i], source[i][1])
            self.assertEqual(t2[i], source[i][2])
            self.assertEqual(t3[i], source[i][3])

    def test_size_mismatch(self):
        # The constructor must reject tensors whose first dimensions differ.
        tensor1 = torch.randn(10, 5)
        tensor2 = torch.randn(10, 3)
        tensor3 = torch.randn(20, 7)
        with self.assertRaisesRegex(AssertionError, "Size mismatch between tensors"):
            TensorDataset(tensor1, tensor2, tensor3)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestStackDataset(TestCase):
    """Tests for torch.utils.data.StackDataset (tuple and dict modes)."""

    def test_empty(self):
        # At least one child dataset is required.
        with self.assertRaisesRegex(
            ValueError, "At least one dataset should be passed"
        ):
            StackDataset()

    def test_mixed(self):
        # Positional and keyword children cannot be mixed in one StackDataset.
        with self.assertRaisesRegex(ValueError, "Supported either"):
            StackDataset(
                TensorDataset(torch.randn(15, 10)), a=TensorDataset(torch.randn(10, 15))
            )

    def test_size_mismatch(self):
        # All children must have the same length, in both tuple and dict modes.
        with self.assertRaisesRegex(ValueError, "Size mismatch between datasets"):
            StackDataset(
                TensorDataset(torch.randn(15, 10)), TensorDataset(torch.randn(10, 15))
            )
        with self.assertRaisesRegex(ValueError, "Size mismatch between datasets"):
            StackDataset(
                a=TensorDataset(torch.randn(15, 10)),
                b=TensorDataset(torch.randn(10, 15)),
            )

    def test_len(self):
        # len() equals the (shared) length of the children, for 1+ children
        # and for both tuple and dict modes.
        source = StackDataset(
            TensorDataset(torch.randn(15, 10)), TensorDataset(torch.randn(15))
        )
        self.assertEqual(len(source), 15)
        source = StackDataset(TensorDataset(torch.randn(15, 10)))
        self.assertEqual(len(source), 15)
        source = StackDataset(
            a=TensorDataset(torch.randn(15, 10)), b=TensorDataset(torch.randn(15))
        )
        self.assertEqual(len(source), 15)
        source = StackDataset(a=TensorDataset(torch.randn(15, 10)))
        self.assertEqual(len(source), 15)

    def test_single(self):
        # Single child: tuple mode yields 1-tuples, dict mode yields 1-key dicts.
        t = TensorDataset(torch.randn(15, 10))
        source = StackDataset(t)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
        source = StackDataset(a=t)
        for i in range(15):
            self.assertEqual(t[i], source[i]["a"])

    def test_getitem(self):
        # Items combine the children's items positionally or by keyword name.
        t = TensorDataset(torch.randn(15, 10))
        l = TensorDataset(torch.randn(15, 5, 4))
        source = StackDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])
        source = StackDataset(a=t, b=l)
        for i in range(15):
            self.assertEqual(t[i], source[i]["a"])
            self.assertEqual(l[i], source[i]["b"])

    def test_getitems(self):
        # StackDataset forwards batched __getitems__ to children that have it.
        class GetItemsDataset(Dataset):
            def __init__(self) -> None:
                self.data = torch.randn(4)

            def __getitem__(self, item):
                return self.data[item]

            def __getitems__(self, items):
                return self.data[items]

            def __len__(self):
                return 4

        t = GetItemsDataset()
        l = [1, 2, 3, 4]
        source = StackDataset(t, l)
        batch = source.__getitems__([0, 1, 2, 3])
        for i in range(4):
            self.assertEqual(t[i], batch[i][0])
            self.assertEqual(l[i], batch[i][1])
        source = StackDataset(t=t, l=l)
        batch = source.__getitems__([0, 1, 2, 3])
        for i in range(4):
            self.assertEqual(t[i], batch[i]["t"])
            self.assertEqual(l[i], batch[i]["l"])

    def test_getitems_raises_index_error(self):
        # Out-of-range indices in a batched fetch propagate IndexError.
        class GetItemsDataset(Dataset):
            def __init__(self) -> None:
                self.data = torch.randn(4)

            def __getitem__(self, item):
                return self.data[item]

            def __getitems__(self, items):
                return self.data[items]

            def __len__(self):
                return 4

        t = GetItemsDataset()
        l = [1, 2, 3, 4]
        source = StackDataset(t, l)
        with self.assertRaises(IndexError):
            source.__getitems__([0, 4])

    def test_getitems_value_error(self):
        # A child returning the wrong number of items must be detected.
        class GetItemsDataset(Dataset):
            def __init__(self) -> None:
                self.data = torch.randn(4)

            def __getitem__(self, item):
                return self.data[item]

            def __getitems__(self, items):
                return self.data[items][:-1]  # return less

            def __len__(self):
                return 4

        t = GetItemsDataset()
        l = [1, 2, 3, 4]
        source = StackDataset(t, l)
        with self.assertRaisesRegex(
            ValueError, "Nested dataset's output size mismatch. Expected 4, got 3"
        ):
            source.__getitems__([0, 1, 2, 3])
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestConcatDataset(TestCase):
    """Tests for torch.utils.data.ConcatDataset."""

    def test_concat_two_singletons(self):
        # Two length-1 datasets concatenate to length 2, in order.
        result = ConcatDataset([[0], [1]])
        self.assertEqual(2, len(result))
        self.assertEqual(0, result[0])
        self.assertEqual(1, result[1])

    def test_concat_two_non_singletons(self):
        # Indices past the first child fall through into the second.
        result = ConcatDataset([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
        self.assertEqual(10, len(result))
        self.assertEqual(0, result[0])
        self.assertEqual(5, result[5])

    def test_concat_two_non_singletons_with_empty(self):
        # Adding an empty dataset somewhere is correctly handled
        result = ConcatDataset([[0, 1, 2, 3, 4], [], [5, 6, 7, 8, 9]])
        self.assertEqual(10, len(result))
        self.assertEqual(0, result[0])
        self.assertEqual(5, result[5])

    def test_concat_raises_index_error(self):
        # Indexing past the combined length raises IndexError.
        result = ConcatDataset([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
        with self.assertRaises(IndexError):
            # this one goes to 11
            result[11]

    def test_add_dataset(self):
        # Dataset.__add__ chains into a ConcatDataset preserving order.
        d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        d2 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        d3 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        result = d1 + d2 + d3
        self.assertEqual(21, len(result))
        self.assertEqual(0, (d1[0][0] - result[0][0]).abs().sum())
        self.assertEqual(0, (d2[0][0] - result[7][0]).abs().sum())
        self.assertEqual(0, (d3[0][0] - result[14][0]).abs().sum())

    def test_iterable_dataset_err(self):
        # IterableDataset children are rejected wherever they appear.
        d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        it1 = CountingIterableDataset(5)
        it2 = CountingIterableDataset(10)
        with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
            ConcatDataset([d1, it2, it1])
        with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
            ConcatDataset([it2])
        with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
            ConcatDataset([it1, d1])
# takes in dummy var so this can also be used as a `worker_init_fn`
def set_faulthander_if_available(_=None):
    """Enable faulthandler on the real stderr; register SIGUSR1 where available."""
    faulthandler.enable(sys.__stderr__)
    if IS_WINDOWS:
        # windows does not have faulthandler.register
        return
    # chain=False prevents the default behavior of killing the process
    faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)
# Install the fault handlers in the main (parent) process at import time.
set_faulthander_if_available()
# Process `pid` must have called `set_faulthander_if_available`
def print_traces_of_all_threads(pid):
    """Ask process `pid` to dump the stack traces of all its threads."""
    if IS_WINDOWS:
        # No custom signal was registered on Windows; fall back to the handler
        # installed by faulthandler.enable(), at the cost of killing the process.
        os.kill(pid, signal.SIGSEGV)
    else:
        # Use the SIGUSR1 handler registered by set_faulthander_if_available.
        os.kill(pid, signal.SIGUSR1)
    # wait in parent process to give subprocess some time to print
    time.sleep(5)
# The following `ErrorTrackingProcess` stores the first encountered exception in
# its `.exception` attribute.
# Inspired by https://stackoverflow.com/a/33599967
class ErrorTrackingProcess(mp.Process):
    # Why no *args?
    # py2 doesn't support def fn(x, *args, key=val, **kwargs)
    # Setting disable_stderr=True may generate a lot of unrelated error outputs
    # but could be helpful for debugging.
    def __init__(self, disable_stderr=True, **kwargs):
        """Create the process plus a pipe used to ship exceptions to the parent."""
        super().__init__(**kwargs)
        # Parent end / child end of the exception-reporting pipe.
        self._pconn, self._cconn = mp.Pipe()
        self._exception = None
        self.disable_stderr = disable_stderr

    def run(self):
        # Runs in the child: install fault handlers, optionally silence stderr,
        # then report success (None) or the wrapped exception over the pipe.
        set_faulthander_if_available()
        if self.disable_stderr:
            # Disable polluting stderr with errors that are supposed to happen.
            with open(os.devnull, "w") as devnull:
                os.dup2(devnull.fileno(), sys.stderr.fileno())
        try:
            super().run()
            self._cconn.send(None)
        except Exception:
            self._cconn.send(ExceptionWrapper(sys.exc_info()))
            raise

    def print_traces_of_all_threads(self):
        # Only valid while the child is alive and printing to a real stderr.
        if not self.is_alive():
            raise AssertionError(
                "can only use print_traces_of_all_threads if the process is alive"
            )
        if self.disable_stderr:
            raise AssertionError(
                "do not disable stderr if you use print_traces_of_all_threads"
            )
        # On platforms without `SIGUSR1`, `set_faulthander_if_available` sets
        # `faulthandler.enable()`, and `print_traces_of_all_threads` may kill
        # the process. So let's poll the exception first
        _ = self.exception
        print_traces_of_all_threads(self.pid)

    @property
    def exception(self):
        """First exception raised in the child, re-created in the parent, or None."""
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        if self._exception is None:
            return None
        else:
            # Re-instantiate the exception type with the original message.
            return self._exception.exc_type(self._exception.exc_msg)

    # ESRCH from os.kill means the target process no longer exists.
    def send_signal(self, signum, ignore_ESRCH=False):
        try:
            os.kill(self.pid, signum)
        except OSError as e:
            if not ignore_ESRCH or e.errno != errno.ESRCH:
                raise
class ErrorDataset(Dataset):
    """Dataset that reports a length but deliberately defines no __getitem__."""

    def __init__(self, size):
        self.size = size

    def __len__(self):
        return self.size
class SegfaultDataset(Dataset):
    """Dataset whose __getitem__ dereferences address 0 to crash the worker."""

    def __init__(self, size):
        self.size = size

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        # Reading from the NULL address reliably segfaults the process.
        return ctypes.string_at(0)
class SleepDataset(Dataset):
    """Dataset that sleeps once, on its first __getitem__, then returns indices."""

    def __init__(self, size, sleep_sec):
        self.size = size
        self.sleep_sec = sleep_sec
        # Flag so only the very first fetch pays the sleep.
        self.slept = False

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        if not self.slept:
            time.sleep(self.sleep_sec)
            self.slept = True
        return idx
class SeedDataset(Dataset):
    """Dataset whose every item is the process's current torch initial seed."""

    def __init__(self, size):
        self.size = size

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        return torch.initial_seed()
class WorkerSpecificIterableDataset(IterableDataset):
    """Iterable dataset whose per-worker stream length is looked up by worker id."""

    def __init__(self, sizes_for_all_workers):
        self.sizes_for_all_workers = sizes_for_all_workers

    def __len__(self):
        # Total across all workers.
        return sum(self.sizes_for_all_workers)

    def __iter__(self):
        info = torch.utils.data.get_worker_info()
        if info is None:
            raise AssertionError("Expected worker_info to be available")
        return iter(range(self.sizes_for_all_workers[info.id]))
# Inspired by https://stackoverflow.com/a/26703365
# If all workers will call `sync_once`, they will be blocked until all workers
# reach the call (i.e., acting like a barrier).
# This can be used to ensure that each worker at least processes one data.
class SynchronizedDataset(Dataset):
    """Abstract dataset whose sync_once() acts as a one-shot barrier across workers."""

    def __init__(self, size, batch_size, num_workers):
        # Every worker must have at least one full batch to fetch, otherwise
        # some workers would never reach sync_once and the barrier deadlocks.
        if size < num_workers * batch_size:
            raise AssertionError(
                f"Expected size >= num_workers * batch_size, got size={size}, "
                f"num_workers={num_workers}, batch_size={batch_size}"
            )
        # Shared arrival counter, plus a semaphore released by the last arriver.
        self.count = mp.Value("i", 0, lock=True)
        self.barrier = mp.Semaphore(0)
        self.num_workers = num_workers
        self.size = size

    def sync_once(self):
        with self.count.get_lock():
            self.count.value += 1
            if self.count.value == self.num_workers:
                self.barrier.release()
        # Each waiter takes the single token and immediately puts it back,
        # so every blocked worker is released in turn.
        self.barrier.acquire()
        self.barrier.release()

    def __getitem__(self, idx):
        # Subclasses must implement the actual item fetch.
        raise NotImplementedError

    def __len__(self):
        return self.size
class EmptyTensorDataset(torch.utils.data.Dataset):
    """Dataset of ``len`` items, each of which is an empty (0-element) tensor."""

    def __init__(self, len):
        self.len = len

    def __getitem__(self, any):
        # Every index maps to the same shape-(0,) tensor.
        return torch.empty(0)

    def __len__(self):
        return self.len
class SynchronizedSeedDataset(SynchronizedDataset):
    """After all workers hit the barrier once, each item is the worker's seed."""

    def __getitem__(self, idx):
        self.sync_once()
        return torch.initial_seed()
def _test_timeout(persistent_workers):
    """Subprocess target: a slow first fetch must trip the DataLoader timeout."""
    loader = DataLoader(
        SleepDataset(10, 3),
        batch_size=2,
        num_workers=2,
        timeout=1,
        persistent_workers=persistent_workers,
    )
    next(iter(loader))
def _test_timeout_pin_memory(persistent_workers):
    """Subprocess target: timeout must also fire with the pin-memory thread on."""
    loader = DataLoader(
        SleepDataset(10, 3),
        batch_size=2,
        num_workers=2,
        timeout=1,
        pin_memory=True,
        persistent_workers=persistent_workers,
    )
    next(iter(loader))
def _test_large_sampler_indices(persistent_workers):
    # See
    # test_large_sampler_indices
    # https://github.com/pytorch/pytorch/issues/48666
    #
    # Subprocess target: iterate a loader whose huge sampler output keeps the
    # _index_queues putters busy, then raise deliberately — the parent test
    # asserts that "My Error" propagates and the loader shuts down cleanly.
    dataloader = torch.utils.data.DataLoader(
        EmptyTensorDataset(10000000),
        batch_size=40960,
        persistent_workers=persistent_workers,
        num_workers=1,
    )
    it = iter(dataloader)
    for x in it:
        if x.numel() != 0:
            raise AssertionError(f"Expected empty tensor, got numel={x.numel()}")
        # Raise while the iterator (and its worker) is still live on purpose.
        raise RuntimeError("My Error")
def disable_stderr(worker_id):
    r"""
    Avoids printing "ERROR: Unexpected segmentation fault encountered in worker."
    from workers. Since worker signal handler prints with low-level write(),
    this has to be done on OS level via dup.
    This is used as worker_init_fn for test_segfault.
    """
    sys.stderr.flush()  # flush library buffers that dup2 knows nothing about
    # Closing `devnull` when the with-block exits is fine: dup2 copies its fd
    # onto stderr's slot, and that duplicate stays open independently.
    with open(os.devnull, "w") as devnull:
        os.dup2(devnull.fileno(), sys.stderr.fileno())
def _test_segfault():
    """Subprocess target: fetching from SegfaultDataset crashes a worker."""
    loader = DataLoader(
        SegfaultDataset(10), batch_size=2, num_workers=2, worker_init_fn=disable_stderr
    )
    next(iter(loader))
def _test_no_segfault():
    """Subprocess target: forked worker must not segfault after set_num_threads.

    Regression helper for https://github.com/pytorch/pytorch/issues/54752:
    ensure the parent has called set_num_threads with >= 4 threads before
    forking a DataLoader worker.
    """
    dataset = [1, 2, 3]
    # The original branched on get_num_threads(), but its else-arm
    # (set_num_threads(current)) was a no-op; max() states the intent —
    # "at least 4 threads" — directly, with identical behavior.
    torch.set_num_threads(max(torch.get_num_threads(), 4))
    mp_ctx = torch.multiprocessing.get_context(method="fork")
    dataloader = DataLoader(
        dataset,
        num_workers=1,
        worker_init_fn=disable_stderr,
        multiprocessing_context=mp_ctx,
    )
    _ = next(iter(dataloader))
class TestProperExitDataset(Dataset):
    """Dataset that raises in the last worker once `error_event` is set."""

    def __init__(self, size, error_event):
        self.size = size
        self.error_event = error_event

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        worker_info = torch.utils.data.get_worker_info()
        # Short-circuits before touching worker_info when no event was given.
        should_fail = (
            self.error_event is not None
            and self.error_event.is_set()
            and worker_info.id == worker_info.num_workers - 1
        )
        if should_fail:
            # only error in the last worker
            raise RuntimeError("Worker error")
        return torch.tensor([idx])
class TestProperExitIterableDataset(IterableDataset):
    """Iterable dataset that raises in the last worker once `error_event` is set."""

    def __init__(self, size, error_event):
        self.error_event = error_event
        self.size = size
        # Countdown of items still to be produced.
        self.remaining = size

    def __len__(self):
        return self.size

    def __iter__(self):
        return self

    def __next__(self):
        worker_info = torch.utils.data.get_worker_info()
        # Short-circuits before touching worker_info when no event was given.
        should_fail = (
            self.error_event is not None
            and self.error_event.is_set()
            and worker_info.id == worker_info.num_workers - 1
        )
        if should_fail:
            # only error in the last worker
            raise RuntimeError("Worker error")
        self.remaining -= 1
        if self.remaining < 0:
            raise StopIteration
        return torch.tensor(-1000)
# See TestDataLoader.test_proper_exit for usage
def _test_proper_exit(
    is_iterable_dataset,
    use_workers,
    pin_memory,
    exit_method,
    hold_iter_reference,
    loader_setup_event,
    tester_setup_event,
    persistent_workers,
):
    """Subprocess target exercising DataLoader shutdown paths.

    Builds a loader, starts iterating, signals readiness via
    `loader_setup_event`, waits on `tester_setup_event`, then triggers the
    failure named by `exit_method` ("loader_error", "loader_kill",
    "worker_error" or "worker_kill") at iteration `error_it`.
    """
    num_workers = 2 if use_workers else 0
    if exit_method == "worker_error" or exit_method == "worker_kill":
        if use_workers is not True:
            raise AssertionError("Expected use_workers=True for worker exit methods")
    if exit_method == "worker_error":
        worker_error_event = mp.Event()
    else:
        worker_error_event = None
    if is_iterable_dataset:
        ds = TestProperExitIterableDataset(7, worker_error_event)
    else:
        ds = TestProperExitDataset(12, worker_error_event)
    loader = DataLoader(
        ds,
        batch_size=1,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin_memory,
        worker_init_fn=set_faulthander_if_available,
        persistent_workers=persistent_workers,
    )
    error_it = 2
    # Sanity-check the dataset is long enough that iteration actually reaches
    # `error_it` before data runs out, accounting for worker prefetching.
    if use_workers:
        # 2 is the magical per-worker prefetch number...
        # FIXME: change this after the number becomes configurable.
        if is_iterable_dataset:
            if len(ds) * num_workers <= (error_it + 2 + 1):
                raise AssertionError(
                    "Expected iterable dataset size to exceed error threshold"
                )
        else:
            if len(loader) <= (error_it + 2 + 1) * num_workers:
                raise AssertionError("Expected loader length to exceed error threshold")
    else:
        if is_iterable_dataset:
            if len(ds) <= error_it + 1:
                raise AssertionError(
                    "Expected iterable dataset length to exceed error threshold"
                )
        else:
            if len(loader) <= error_it + 1:
                raise AssertionError("Expected loader length to exceed error threshold")
    it = iter(loader)
    if use_workers:
        # Keep handles to the worker processes so we can inspect/kill them
        # even after `it` itself may have been deleted below.
        workers = it._workers

    def kill_pid(pid):
        # Hard-kill `pid` and block until it is really gone.
        psutil_p = psutil.Process(pid)
        psutil_p.kill()
        psutil_p.wait(JOIN_TIMEOUT)
        if psutil_p.is_running():
            raise AssertionError("Expected process to be terminated")

    for i, _ in enumerate(it):
        if i == 0:
            if not hold_iter_reference:
                del it
                del loader
            # Handshake with the tester process before provoking the failure.
            loader_setup_event.set()
            tester_setup_event.wait()
            # ensure that the workers are still alive
            if use_workers:
                for w in workers:
                    if not w.is_alive():
                        raise AssertionError("Expected worker process to be alive")
            if worker_error_event is not None:
                worker_error_event.set()
        if i == error_it:
            if exit_method == "loader_error":
                raise RuntimeError("Loader error")
            elif exit_method == "loader_kill":
                kill_pid(os.getpid())
            elif exit_method == "worker_kill":
                kill_pid(workers[-1].pid)  # kill last worker
    if not hold_iter_reference:
        # Tries to trigger the __del__ clean-up rather than the automatic
        # exiting of daemonic children. Technically it should be automatically
        # triggered, but I don't want to rely on the implementation detail of
        # Python gc.
        gc.collect()
class TestWorkerInfoDataset(SynchronizedDataset):
    def __getitem__(self, idx):
        # Wait for all workers, then return the `value` attribute that
        # `_test_worker_info_init_fn` attached to each worker's dataset copy.
        self.sync_once()
        return torch.tensor(self.value)
# Should be used as worker_init_fn with TestWorkerInfoDataset.
# See _test_get_worker_info below for usage.
def _test_worker_info_init_fn(worker_id):
    """Validate the WorkerInfo contract inside a worker, then tag the dataset."""
    worker_info = torch.utils.data.get_worker_info()
    if worker_id != worker_info.id:
        raise AssertionError("worker_init_fn and worker_info should have consistent id")
    if worker_id >= worker_info.num_workers:
        raise AssertionError("worker_init_fn and worker_info should have valid id")
    if worker_info.seed != torch.initial_seed():
        raise AssertionError(
            "worker_init_fn and worker_info should have consistent seed"
        )
    dataset = worker_info.dataset
    if not isinstance(dataset, TestWorkerInfoDataset):
        raise AssertionError("worker_info should have correct dataset copy")
    # The worker's dataset is a fresh copy: the `value` set at the end of this
    # function in other workers must not be visible here yet.
    if hasattr(dataset, "value"):
        raise AssertionError("worker_info should have correct dataset copy")
    # test that WorkerInfo attributes are read-only
    try:
        worker_info.id = 3999
    except RuntimeError as e:
        if str(e) != "Cannot assign attributes to WorkerInfo objects":
            raise AssertionError(
                "Expected RuntimeError for WorkerInfo attribute assignment"
            ) from None
    try:
        worker_info.a = 3
    except RuntimeError as e:
        if str(e) != "Cannot assign attributes to WorkerInfo objects":
            raise AssertionError(
                "Expected RuntimeError for WorkerInfo attribute assignment"
            ) from None
    # repr must surface every documented field.
    for k in ["id", "num_workers", "seed", "dataset"]:
        if f"{k}=" not in repr(worker_info):
            raise AssertionError(f"Expected {k} in worker_info repr")
    # Tag the worker-local dataset copy; TestWorkerInfoDataset.__getitem__
    # returns this so the parent can match worker ids to pids.
    dataset.value = [worker_id, os.getpid()]
def _test_get_worker_info():
    """Subprocess target checking get_worker_info() in and out of workers."""
    # get_worker_info returns None in main proc
    if torch.utils.data.get_worker_info() is not None:
        raise AssertionError("Expected get_worker_info() to return None in main proc")
    num_workers = 2
    batch_size = 2
    dataset = TestWorkerInfoDataset(6, batch_size, num_workers)
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        worker_init_fn=_test_worker_info_init_fn,
    )
    it = iter(dataloader)
    data = []
    for d in it:
        data.append(d)  # noqa: PERF402
    worker_pids = [w.pid for w in it._workers]
    data = torch.cat(data, 0)
    for d in data:
        # each `d` is a [worker_id, worker_pid] pair, which is set in
        # _test_worker_info_init_fn
        if d[1] != worker_pids[d[0]]:
            raise AssertionError(f"Expected worker pid {worker_pids[d[0]]}, got {d[1]}")
    # get_worker_info returns None in main proc after data loading
    if torch.utils.data.get_worker_info() is not None:
        raise AssertionError(
            "Expected get_worker_info() to return None after data loading"
        )
    # main proc dataset was never assigned this attribute
    if hasattr(dataset, "value"):
        raise AssertionError("Expected main dataset to not have 'value' attribute")
    # Fetching from the parent's copy must fail: `value` only exists on the
    # workers' dataset copies.
    try:
        _ = dataset[0]
    except AttributeError:
        return
    raise RuntimeError("Expected AttributeError")
# test custom init function
def init_fn(worker_id):
    """Worker init hook that pins the RNG seed regardless of worker id."""
    torch.manual_seed(12345)
# used with test_error_in_init
class ErrorIterableDataset(IterableDataset):
    """Iterable dataset whose __iter__ always fails immediately."""

    def __iter__(self):
        raise RuntimeError("Error in __iter__")
# used with test_error_in_init
def error_worker_init_fn(_):
    """Worker init hook that always fails, to test error propagation."""
    raise RuntimeError("Error in worker_init_fn")
class BulkLoadingDataset(Dataset):
    """Dataset that fetches a whole batch of indices per __getitem__ call."""

    def __init__(self, length):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, indices):
        # Only batched access is supported: a bare int index is a bug.
        if not isinstance(indices, (list, tuple)):
            raise AssertionError(
                f"Expected indices to be list or tuple, got {type(indices)}"
            )
        return torch.as_tensor(indices)
class BulkLoadingSampler(torch.utils.data.Sampler):
    """Sampler yielding whole batches (lists of indices) in shuffled order."""

    def __init__(self, dataset, batch_size):
        self.dataset = dataset
        self.batch_size = batch_size

    def __len__(self):
        # Number of batches, with a partial final batch counted.
        return int(math.ceil(len(self.dataset) / float(self.batch_size)))

    def __iter__(self):
        permutation = torch.randperm(len(self.dataset))
        for chunk in permutation.split(self.batch_size):
            yield chunk.tolist()
class TestMultiEpochDataset(IterableDataset):
    """Iterable dataset: each worker yields its own id, length//num_workers times."""

    def __init__(self, length):
        self.length = length

    def __len__(self):
        return self.length

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is None:
            raise AssertionError("Expected worker_info to be available")
        share = self.length // worker_info.num_workers
        for _ in range(share):
            yield worker_info.id
# Trivial `list` subclass: a distinct container type for tests elsewhere
# in this file (e.g. checking how collation treats list subclasses).
class CustomList(list):
    pass
# Trivial `dict` subclass: a distinct mapping type for tests elsewhere
# in this file (e.g. checking how collation treats dict subclasses).
class CustomDict(dict):
    pass
def row_processor(row):
    """Return ``row`` with 1 added elementwise (NumPy broadcasting)."""
    return np.add(row, 1)
def filter_len(row):
    """Predicate: keep only rows whose length is exactly 4."""
    return len(row) == 4
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)",
)
@unittest.skipIf(
TEST_WITH_ASAN,
"DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223",
)
class TestDataLoader(TestCase):
    def setUp(self):
        super().setUp()
        # 100 samples of shape (2, 3, 5); 50 distinct labels, each appearing twice.
        self.data = torch.randn(100, 2, 3, 5)
        self.labels = torch.randperm(50).repeat(2)
        self.dataset = TensorDataset(self.data, self.labels)
        # Default for _get_data_loader; NOTE(review): presumably overridden by a
        # persistent-workers variant of this suite — confirm.
        self.persistent_workers = False
def _get_data_loader(self, dataset, **kwargs):
persistent_workers = kwargs.get("persistent_workers", self.persistent_workers)
if persistent_workers and kwargs.get("num_workers", 0) == 0:
persistent_workers = False
kwargs["persistent_workers"] = persistent_workers
return DataLoader(dataset, **kwargs)
    def _test_sequential(self, loader):
        """Assert `loader` yields self.dataset's samples in order, fully."""
        batch_size = loader.batch_size
        if batch_size is None:
            # Auto-batching disabled: one sample per iteration.
            for idx, (sample, target) in enumerate(loader):
                self.assertEqual(sample, self.data[idx])
                self.assertEqual(target, self.labels[idx])
            self.assertEqual(idx, len(self.dataset) - 1)
        else:
            # Batched: each batch is a contiguous slice of the dataset.
            for i, (sample, target) in enumerate(loader):
                idx = i * batch_size
                self.assertEqual(sample, self.data[idx : idx + batch_size])
                self.assertEqual(target, self.labels[idx : idx + batch_size])
            self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
    def _test_shuffle(self, loader):
        """Assert `loader` yields every sample exactly once with matching label."""
        # Per-index counters of how often each sample/label was seen.
        found_data = dict.fromkeys(range(self.data.size(0)), 0)
        found_labels = dict.fromkeys(range(self.labels.size(0)), 0)
        batch_size = loader.batch_size
        if batch_size is None:
            for i, (batch_samples, batch_targets) in enumerate(loader):
                sample, target = (batch_samples, batch_targets)
                # Locate which dataset row this sample is; must be first sighting.
                for data_point_idx, data_point in enumerate(self.data):
                    if data_point.eq(sample).all():
                        self.assertFalse(found_data[data_point_idx])
                        found_data[data_point_idx] += 1
                        break
                # The label must travel with its sample through the shuffle.
                self.assertEqual(target, self.labels[data_point_idx])
                found_labels[data_point_idx] += 1
                self.assertEqual(sum(found_data.values()), (i + 1))
                self.assertEqual(sum(found_labels.values()), (i + 1))
            self.assertEqual(i, (len(self.dataset) - 1))
        else:
            for i, (batch_samples, batch_targets) in enumerate(loader):
                for sample, target in zip(batch_samples, batch_targets):
                    for data_point_idx, data_point in enumerate(self.data):
                        if data_point.eq(sample).all():
                            self.assertFalse(found_data[data_point_idx])
                            found_data[data_point_idx] += 1
                            break
                    self.assertEqual(target, self.labels[data_point_idx])
                    found_labels[data_point_idx] += 1
                self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
                self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
            self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
    def _test_error(self, loader):
        """Assert every batch fetch raises NotImplementedError until exhaustion."""
        it = iter(loader)
        errors = 0
        while True:
            try:
                next(it)
            except NotImplementedError:
                # Each failed batch counts once; iteration continues past it.
                errors += 1
            except StopIteration:
                # Every batch must have errored before the loader ran out.
                self.assertEqual(
                    errors, math.ceil(float(len(loader.dataset)) / loader.batch_size)
                )
                return
def test_error_in_init(self):
for num_workers in [0, 2]:
loader = self._get_data_loader(
ErrorIterableDataset(), num_workers=num_workers
)
with self.assertRaisesRegex(RuntimeError, "Error in __iter__"):
list(iter(loader))
loader = self._get_data_loader(
self.dataset, num_workers=2, worker_init_fn=error_worker_init_fn
)
with self.assertRaisesRegex(RuntimeError, "Error in worker_init_fn"):
list(iter(loader))
    def test_typing(self):
        # Make sure there is no TypeError
        # Subscripting Dataset/DataLoader with a type parameter must be legal
        # at class-definition and annotation time.
        class SomeDatasetClass(Dataset[list[torch.Tensor]]):
            pass

        def _create_dataloader(is_train: bool) -> DataLoader[list[torch.Tensor]]:
            pass
    @unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
    @unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
    def test_fd_limit_exceeded(self):
        # See NOTE [ DataLoader on Linux and open files limit ]
        # Runs a throwaway interpreter with RLIMIT_NOFILE clamped to 100 and
        # checks that, if the fd limit is hit, the DataLoader raises its
        # actionable RuntimeError (mentioning ulimit/set_sharing_strategy)
        # rather than failing obscurely.
        import subprocess

        subprocess.check_output(
            [
                sys.executable,
                "-c",
                """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset

class RandomDataset(IterableDataset):
    def __init__(self, len, size):
        super(RandomDataset).__init__()
        self.len = len
        self.size = size

    def __iter__(self):
        return self

    def __next__(self):
        if self.len <= 0:
            raise StopIteration
        self.len -= 1
        return torch.randn(self.size)

try:
    keep_fds_alive = []
    resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
    for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
                               num_workers=1):
        random_t.max(dim=0)
        keep_fds_alive.append(random_t)
except RuntimeError as e:
    assert "ulimit -n" in str(e)
    assert "set_sharing_strategy" in str(e)
""",
            ]
        )
def test_invalid_assign_after_init(self):
dl = self._get_data_loader(self.dataset)
for attr in ("batch_size", "sampler", "batch_sampler", "drop_last", "dataset"):
def fn():
setattr(dl, attr, {})
self.assertRaises(ValueError, fn)
def test_sequential_nonbatch(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=None))
def test_sequential_batch(self):
self._test_sequential(self._get_data_loader(self.dataset))
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2))
    def test_bulk_loading_nobatch(self):
        """A sampler that yields index lists feeds batched __getitem__ directly."""
        n = 35
        bs = 4
        ds = BulkLoadingDataset(n)
        sampler = BulkLoadingSampler(ds, batch_size=4)
        for num_workers in [0, 4]:
            dl = self._get_data_loader(
                ds,
                num_workers=num_workers,
                batch_size=None,
                sampler=sampler,
                pin_memory=TEST_CUDA,
            )
            # batch_size=None means the loader must not collate on its own.
            self.assertFalse(dl._auto_collation)
            samples = list(dl)
            self.assertEqual(samples[0].is_pinned(), TEST_CUDA)
            # Every index must appear exactly once across all batches.
            self.assertEqual(set(torch.cat(samples, 0).tolist()), set(range(n)))
def test_growing_dataset(self):
dataset = [torch.ones(4) for _ in range(4)]
dataloader_seq = self._get_data_loader(dataset, shuffle=False)
dataloader_shuffle = self._get_data_loader(dataset, shuffle=True)
dataset.append(torch.ones(4))
self.assertEqual(len(dataloader_seq), 5)
self.assertEqual(len(dataloader_shuffle), 5)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_sequential_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
    @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
    def test_multiple_dataloaders(self):
        """Two live multi-worker iterators must coexist across start methods."""
        for multiprocessing_context in supported_multiprocessing_contexts:
            loader1_it = iter(self._get_data_loader(self.dataset, num_workers=1))
            loader2_it = iter(
                self._get_data_loader(
                    self.dataset,
                    num_workers=2,
                    multiprocessing_context=multiprocessing_context,
                )
            )
            # Interleave fetches from both iterators to exercise concurrent
            # worker pools.
            next(loader1_it)
            next(loader1_it)
            next(loader2_it)
            next(loader2_it)
            next(loader1_it)
            next(loader2_it)
            del loader1_it
            del loader2_it
    # This case pass on Intel GPU, but currently expected failure on other device,
    # please don't forget to remove this skip when remove the xfailIfLinux.
    @skipIfXpu
    # This case passes on s390x too.
    # please don't forget to remove this skip when remove the xfailIfLinux.
    @unittest.skipIf(IS_S390X, "Unexpectedly succeeds on s390x")
    # https://github.com/pytorch/pytorch/issues/128551
    @xfailIfLinux
    def test_segfault(self):
        """A worker segfault must surface as a descriptive parent-side error."""
        p = ErrorTrackingProcess(target=_test_segfault)
        p.start()
        p.join(JOIN_TIMEOUT)
        try:
            self.assertFalse(p.is_alive())
            self.assertNotEqual(p.exitcode, 0)
            if IS_WINDOWS:
                # Windows reports the crash as an access-violation OSError.
                self.assertIsInstance(p.exception, OSError)
                self.assertRegex(str(p.exception), r"access violation reading ")
            else:
                # POSIX: the DataLoader wraps the worker's death signal.
                self.assertIsInstance(p.exception, RuntimeError)
                self.assertRegex(
                    str(p.exception),
                    r"DataLoader worker \(pid \d+\) is killed by signal: ",
                )
        finally:
            p.terminate()
    # Tests if the child process forked by the DataLoader segfaults due to having more than 3 threads
    # in the parent process after at least one set_num_threads invocation in the parent process.
    # After forking, set_num_threads(1) in the child process entails handling some inherited data-structures
    # of the Caffe2 thread-pool of the parent process, culminating in a segfault.
    # Reference: https://github.com/pytorch/pytorch/issues/54752
    @unittest.skipIf(IS_WINDOWS, "Needs fork")
    def test_no_segfault(self):
        """Regression test: fork-started worker must not die by signal."""
        p = ErrorTrackingProcess(target=_test_no_segfault)
        p.start()
        p.join(JOIN_TIMEOUT)
        try:
            self.assertFalse(p.is_alive())
            if p.exception:
                # Any exception here should only ever be the worker-killed
                # RuntimeError; in that case the regression has reappeared.
                self.assertIsInstance(p.exception, RuntimeError)
                self.assertRegex(
                    str(p.exception),
                    r"DataLoader worker \(pid \d+\) is killed by signal: ",
                )
                self.fail("Segfault occurred in worker process after fork")
        finally:
            p.terminate()
    def test_timeout(self):
        """A stalled worker must produce the DataLoader timeout RuntimeError."""
        if TEST_CUDA:
            # This test runs in a subprocess, which can only initialize CUDA with spawn.
            # _test_timeout_pin_memory with pin_memory=True initializes CUDA when the iterator is
            # constructed.
            targets = (_test_timeout, _test_timeout_pin_memory)
        else:
            targets = (_test_timeout,)
        for target in targets:
            p = ErrorTrackingProcess(target=target, args=(self.persistent_workers,))
            p.start()
            p.join(JOIN_TIMEOUT)
            try:
                self.assertFalse(p.is_alive())
                self.assertNotEqual(p.exitcode, 0)
                self.assertIsInstance(p.exception, RuntimeError)
                self.assertRegex(
                    str(p.exception), r"DataLoader timed out after \d+ seconds"
                )
            finally:
                p.terminate()
    def test_large_sampler_indices(self):
        # Test that the data loader cleanly exit when the process errors
        #   1. having an reference to the iterator
        #   2. using a sampler that yields big elements s.t. _index_queues putters block
        #
        # More context: https://github.com/pytorch/pytorch/issues/48666
        p = ErrorTrackingProcess(
            target=_test_large_sampler_indices, args=(self.persistent_workers,)
        )
        p.start()
        p.join(JOIN_TIMEOUT)
        try:
            self.assertFalse(p.is_alive())
            self.assertNotEqual(p.exitcode, 0)
            # The deliberate error from the subprocess target must propagate
            # (i.e. the process was not wedged on a blocked index queue).
            self.assertIsInstance(p.exception, RuntimeError)
            self.assertRegex(str(p.exception), r"My Error")
        finally:
            p.terminate()
def test_invalid_ctor_args_combinations(self):
    """Every invalid DataLoader constructor combination raises with a clear message."""
    # general
    with self.assertRaisesRegex(
        ValueError, "num_workers option should be non-negative"
    ):
        self._get_data_loader(self.dataset, num_workers=-1)
    with self.assertRaisesRegex(
        ValueError, "timeout option should be non-negative"
    ):
        self._get_data_loader(self.dataset, timeout=-1)
    # disable auto-batching
    with self.assertRaisesRegex(
        ValueError,
        "batch_size=None option disables auto-batching and is mutually exclusive",
    ):
        self._get_data_loader(self.dataset, batch_size=None, drop_last=True)
    # pick an arbitrary valid start-method name to pair with num_workers=0
    valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
    with self.assertRaisesRegex(
        ValueError, r"multi-process loading \(num_workers > 0\), but got"
    ):
        self._get_data_loader(
            self.dataset, num_workers=0, multiprocessing_context=valid_ctx
        )
    with self.assertRaisesRegex(
        ValueError, "should specify a valid start method in"
    ):
        self._get_data_loader(
            self.dataset, num_workers=1, multiprocessing_context="bad"
        )
    with self.assertRaisesRegex(
        TypeError, "multiprocessing_context option should be a valid context "
    ):
        self._get_data_loader(
            self.dataset, num_workers=1, multiprocessing_context=object()
        )
    # map-style
    sampler = torch.utils.data.SequentialSampler(self.dataset)
    batch_sampler = torch.utils.data.BatchSampler(sampler, 3, False)
    with self.assertRaisesRegex(
        ValueError, "sampler option is mutually exclusive with shuffle"
    ):
        self._get_data_loader(
            self.dataset, batch_size=11, sampler=sampler, shuffle=True
        )
    with self.assertRaisesRegex(
        ValueError, "sampler option is mutually exclusive with shuffle"
    ):
        self._get_data_loader(
            self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=True
        )
    # shuffle=3 is truthy, so it must be rejected the same way as shuffle=True
    with self.assertRaisesRegex(
        ValueError, "sampler option is mutually exclusive with shuffle"
    ):
        self._get_data_loader(
            self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=3
        )
    with self.assertRaisesRegex(
        ValueError, "batch_sampler option is mutually exclusive with"
    ):
        self._get_data_loader(
            self.dataset, batch_size=11, batch_sampler=batch_sampler
        )
    with self.assertRaisesRegex(
        ValueError, "batch_sampler option is mutually exclusive with"
    ):
        self._get_data_loader(
            self.dataset, shuffle=True, batch_sampler=batch_sampler
        )
    with self.assertRaisesRegex(
        ValueError, "batch_sampler option is mutually exclusive with"
    ):
        self._get_data_loader(
            self.dataset, drop_last=True, batch_sampler=batch_sampler
        )
    # drop_last=3 is truthy, so it conflicts with batch_sampler as well
    with self.assertRaisesRegex(
        ValueError, "batch_sampler option is mutually exclusive with"
    ):
        self._get_data_loader(
            self.dataset, drop_last=3, batch_sampler=batch_sampler
        )
    # iterable-style
    dataset = CountingIterableDataset(20)
    with self.assertRaisesRegex(
        ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"
    ):
        self._get_data_loader(dataset, shuffle=True)
    with self.assertRaisesRegex(
        ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"
    ):
        self._get_data_loader(dataset, shuffle=3)
    with self.assertRaisesRegex(
        ValueError, "DataLoader with IterableDataset: expected unspecified sampler"
    ):
        self._get_data_loader(
            dataset, sampler=torch.utils.data.SequentialSampler(dataset)
        )
    with self.assertRaisesRegex(
        ValueError, "DataLoader with IterableDataset: expected unspecified sampler"
    ):
        self._get_data_loader(dataset, sampler=3)
    with self.assertRaisesRegex(
        ValueError,
        "DataLoader with IterableDataset: expected unspecified batch_sampler",
    ):
        self._get_data_loader(
            dataset,
            batch_sampler=torch.utils.data.BatchSampler(
                torch.utils.data.SequentialSampler(dataset), 3, False
            ),
        )
    with self.assertRaisesRegex(
        ValueError,
        "DataLoader with IterableDataset: expected unspecified batch_sampler",
    ):
        self._get_data_loader(dataset, batch_sampler=3)
def test_builtin_collection_conversion(self):
    """DataLoader output converts cleanly to list/tuple for both dataset styles."""
    for collection_type in (list, tuple):
        for workers in (0, 1):
            # --- map-style dataset ---
            map_ds = CountingDataset(20)
            # Without auto-batching, raw ints come straight through.
            self.assertEqual(
                collection_type(
                    self._get_data_loader(
                        map_ds, batch_size=None, num_workers=workers
                    )
                ),
                collection_type(range(20)),
            )
            # With auto-batching, each element is a stacked tensor of 2.
            self.assertEqual(
                collection_type(
                    self._get_data_loader(
                        map_ds, batch_size=2, num_workers=workers
                    )
                ),
                collection_type(torch.tensor([i, i + 1]) for i in range(0, 20, 2)),
            )
            # --- iterable-style dataset ---
            iter_ds = CountingIterableDataset(20)
            self.assertEqual(
                collection_type(
                    self._get_data_loader(
                        iter_ds, batch_size=None, num_workers=workers
                    )
                ),
                collection_type(range(20)),
            )
            # this IterableDataset isn't configured for each worker, so for
            # the equality test below to be valid, we cannot have more than 1 workers.
            if workers not in (0, 1):
                raise AssertionError("invalid test")
            self.assertEqual(
                collection_type(
                    self._get_data_loader(
                        iter_ds, batch_size=2, num_workers=workers
                    )
                ),
                collection_type(torch.tensor([i, i + 1]) for i in range(0, 20, 2)),
            )
def test_iterable_style_dataset(self):
    """End-to-end IterableDataset checks: single/multi-process loading, with and
    without auto-batching and drop_last, the over-length warning, and graceful
    worker shutdown."""
    # [no auto-batching] single process loading
    dataset = CountingIterableDataset(20)
    dataloader = self._get_data_loader(dataset, batch_size=None)
    fetched = list(dataloader)
    self.assertEqual(len(fetched), 20)
    for i, d in enumerate(fetched):
        # non-batched should not convert ints into tensors
        self.assertIsInstance(d, int)
        self.assertEqual(d, i)
    # DataLoader should match len of the iterable-style dataset (if implemented)
    self.assertEqual(len(dataloader), len(dataset))
    # [no auto-batching] multiprocessing loading
    num_workers = 3
    sizes_for_all_workers = [0, 4, 20]
    expected = sorted(
        functools.reduce(
            operator.iadd, (list(range(s)) for s in sizes_for_all_workers), []
        )
    )
    if len(sizes_for_all_workers) != num_workers:
        raise AssertionError("invalid test case")
    for prefetch_factor in [2, 3, 4]:
        dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
        dataloader = self._get_data_loader(
            dataset,
            num_workers=num_workers,
            batch_size=None,
            worker_init_fn=set_faulthander_if_available,
            prefetch_factor=prefetch_factor,
        )
        dataloader_iter = iter(dataloader)
        # Worker output order is nondeterministic; sort before comparing.
        fetched = sorted(dataloader_iter)
        for a, b in zip(fetched, expected):
            # non-batched should not convert ints into tensors
            self.assertIsInstance(a, int)
            self.assertEqual(a, b)
        # DataLoader should match len of the iterable-style dataset (if implemented)
        self.assertEqual(len(dataloader), len(dataset))
        # When loading more than len(dataset) data, after accessing len(dataloader),
        # we should get a warning. See NOTE [ IterableDataset and __len__ ].
        dataset = CountingIterableDataset(20)
        dataloader = self._get_data_loader(
            dataset,
            num_workers=num_workers,
            worker_init_fn=set_faulthander_if_available,
            prefetch_factor=prefetch_factor,
        )
        it = iter(dataloader)
        for _ in range(40):
            self.assertNotWarn(
                lambda: next(it), "Should not warn before accessing len(dataloader)"
            )
        self.assertEqual(len(dataloader), len(dataset))
        self.assertEqual(len(dataloader), 20)
        it = iter(dataloader)
        for _ in range(20):
            self.assertNotWarn(
                lambda: next(it), "Should not warn before exceeding length"
            )
        for _ in range(3):
            with self.assertWarnsRegex(
                UserWarning,
                r"but [0-9]+ samples have been fetched\. For multiprocessing data-loading, this",
                msg="Should always warn after exceeding length",
            ):
                next(it)
        # [no auto-batching] test that workers exit gracefully
        workers = dataloader_iter._workers
        del dataloader_iter
        del dataloader
        try:
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive())
                self.assertEqual(w.exitcode, 0)
        finally:
            # Force-kill any worker that failed to exit on its own.
            for w in workers:
                w.terminate()
    # [auto-batching] single process loading
    dataset = CountingIterableDataset(20)
    fetched = list(self._get_data_loader(dataset, batch_size=7))
    self.assertEqual(len(fetched), 3)
    self.assertEqual(fetched[0].tolist(), list(range(7)))
    self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
    self.assertEqual(fetched[2].tolist(), list(range(14, 20)))
    # [auto-batching] multiprocessing loading
    num_workers = 3
    sizes_for_all_workers = [0, 4, 20]
    expected = sorted(
        functools.reduce(
            operator.iadd, (list(range(s)) for s in sizes_for_all_workers), []
        )
    )
    if len(sizes_for_all_workers) != num_workers:
        raise AssertionError("invalid test case")
    for prefetch_factor in [2, 3, 4]:
        dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
        # worker 0 should return 0 batches
        # worker 1 should return 1 batches
        # worker 2 should return 3 batches
        dataloader = self._get_data_loader(
            dataset,
            num_workers=num_workers,
            batch_size=7,
            prefetch_factor=prefetch_factor,
        )
        dataloader_iter = iter(dataloader)
        fetched = list(dataloader_iter)
        self.assertEqual(len(fetched), 4)
        # Compare as a set of tuples: batch arrival order is nondeterministic.
        fetched = {tuple(t.tolist()) for t in fetched}
        self.assertEqual(
            fetched,
            {
                tuple(range(4)),
                tuple(range(7)),
                tuple(range(7, 14)),
                tuple(range(14, 20)),
            },
        )
        # [auto-batching] test that workers exit gracefully
        workers = dataloader_iter._workers
        del dataloader_iter
        del dataloader
        try:
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive())
                self.assertEqual(w.exitcode, 0)
        finally:
            for w in workers:
                w.terminate()
    # [auto-batching & drop_last] single process loading
    dataset = CountingIterableDataset(20)
    fetched = list(self._get_data_loader(dataset, batch_size=7, drop_last=True))
    self.assertEqual(len(fetched), 2)
    self.assertEqual(fetched[0].tolist(), list(range(7)))
    self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
    # [auto-batching & drop_last] multiprocessing loading
    num_workers = 3
    sizes_for_all_workers = [0, 4, 20]
    expected = sorted(
        functools.reduce(
            operator.iadd, (list(range(s)) for s in sizes_for_all_workers), []
        )
    )
    if len(sizes_for_all_workers) != num_workers:
        raise AssertionError("invalid test case")
    for prefetch_factor in [2, 3, 4]:
        dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
        # worker 0 should return 0 batches
        # worker 1 should return 1 batches
        # worker 2 should return 3 batches
        dataloader = self._get_data_loader(
            dataset,
            num_workers=num_workers,
            batch_size=7,
            drop_last=True,
            worker_init_fn=set_faulthander_if_available,
            prefetch_factor=prefetch_factor,
        )
        dataloader_iter = iter(dataloader)
        fetched = list(dataloader_iter)
        self.assertEqual(len(fetched), 2)
        fetched = {tuple(t.tolist()) for t in fetched}
        self.assertEqual(fetched, {tuple(range(7)), tuple(range(7, 14))})
        # [auto-batching & drop_last] test that workers exit gracefully
        workers = dataloader_iter._workers
        del dataloader_iter
        del dataloader
        try:
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive())
                self.assertEqual(w.exitcode, 0)
        finally:
            for w in workers:
                w.terminate()
def test_chain_iterable_style_dataset(self):
    """Chaining IterableDatasets (via `+` or ChainDataset) concatenates them."""
    # chaining (concatenation)
    ds_a = CountingIterableDataset(20)
    ds_b = CountingIterableDataset(15)
    expected = list(range(20)) + list(range(15))
    for workers in (0, 1):
        for chained in (ds_a + ds_b, ChainDataset([ds_a, ds_b])):
            fetched = list(self._get_data_loader(chained, num_workers=workers))
            self.assertEqual(len(fetched), len(expected))
            for want, got in zip(expected, fetched):
                self.assertIsInstance(got, torch.Tensor)
                self.assertEqual(want, got)
    # Mixing in a map-style dataset is rejected at iteration time.
    with self.assertRaisesRegex(
        AssertionError, "ChainDataset only supports IterableDataset"
    ):
        list(iter(ds_a + self.dataset))
    with self.assertRaisesRegex(
        AssertionError, "ChainDataset only supports IterableDataset"
    ):
        list(iter(ChainDataset([ds_a, self.dataset])))
@unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_multiprocessing_contexts(self):
    """The loader yields identical data under every supported start method,
    passed both as a name string and as a context object."""
    reference = [
        torch.arange(3),
        torch.arange(3, 6),
        torch.arange(6, 9),
        torch.arange(9, 11),
    ]
    counting_ds_n = 11
    dl_common_args = dict(num_workers=3, batch_size=3, pin_memory=(not TEST_CUDA))
    for ctx in supported_multiprocessing_contexts:
        # windows and jetson devices don't support sharing cuda tensor; ROCm does not yet fully support IPC
        if (
            ctx in ["spawn", "forkserver"]
            and TEST_CUDA
            and not IS_WINDOWS
            and not IS_JETSON
        ):
            ds_cls = CUDACountingDataset
        else:
            ds_cls = CountingDataset
        self.assertEqual(
            reference,
            list(
                self._get_data_loader(
                    ds_cls(counting_ds_n),
                    multiprocessing_context=ctx,
                    **dl_common_args,
                )
            ),
        )
        if ctx is not None:
            # test ctx object
            ctx = mp.get_context(ctx)
            self.assertEqual(
                reference,
                list(
                    self._get_data_loader(
                        ds_cls(counting_ds_n),
                        multiprocessing_context=ctx,
                        **dl_common_args,
                    )
                ),
            )
def _test_multiprocessing_iterdatapipe(self, with_dill):
    # Testing to make sure that function from global scope (e.g. imported from library) can be serialized
    # and used with multiprocess DataLoader
    reference = [
        torch.as_tensor([[2, 3, 4, 5]], dtype=torch.int64),
        torch.as_tensor([[2, 3, 4, 5]], dtype=torch.int64),
    ]
    datapipe: IterDataPipe = IterableWrapper([[1, 2, 3, 4], [1, 2, 3, 4, 5, 6]])
    datapipe = datapipe.map(row_processor)
    # A lambda filter only pickles with dill; `filter_len` is a module-level
    # function and works with the stdlib pickler.
    datapipe = (
        datapipe.filter(lambda row: len(row) == 4)
        if with_dill
        else datapipe.filter(filter_len)
    )
    dl_common_args = dict(
        num_workers=2, batch_size=2, shuffle=True, pin_memory=(not TEST_CUDA)
    )
    for ctx in supported_multiprocessing_contexts:
        self.assertEqual(
            reference,
            [
                t.type(torch.int64)
                for t in self._get_data_loader(
                    datapipe, multiprocessing_context=ctx, **dl_common_args
                )
            ],
        )
        if ctx is not None:
            # test ctx object
            ctx = mp.get_context(ctx)
            self.assertEqual(
                reference,
                [
                    t.type(torch.int64)
                    for t in self._get_data_loader(
                        datapipe, multiprocessing_context=ctx, **dl_common_args
                    )
                ],
            )
@skipIfNoNumpy
@unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_multiprocessing_iterdatapipe(self):
    # Exercise the shared helper with the stdlib-picklable filter function.
    self._test_multiprocessing_iterdatapipe(with_dill=False)
@unittest.expectedFailure
@skipIfNoNumpy
@unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
@skipIfNoDill
def test_multiprocessing_iterdatapipe_with_dill(self):
    # Lambda-based filter requires dill for pickling; currently expected to fail.
    self._test_multiprocessing_iterdatapipe(with_dill=True)
def test_worker_seed(self):
    """Each of the worker processes must receive a distinct seed."""
    workers = 6
    batch_size = 1
    dataset = SynchronizedSeedDataset(workers, batch_size, workers)
    loader = self._get_data_loader(
        dataset, batch_size=batch_size, num_workers=workers
    )
    observed_seeds = {batch[0] for batch in loader}
    # All seeds distinct => one unique seed per worker.
    self.assertEqual(len(observed_seeds), workers)
def test_worker_seed_reproducibility(self):
    """Identically seeded generators produce identical worker seeds."""
    workers = 6
    batch_size = 1
    dataset = SynchronizedSeedDataset(workers, batch_size, workers)

    def make_loader():
        # Fresh generator each time, seeded the same way.
        return DataLoader(
            dataset,
            batch_size=batch_size,
            num_workers=workers,
            generator=torch.Generator().manual_seed(42),
        )

    self.assertEqual(
        {int(batch) for batch in make_loader()},
        {int(batch) for batch in make_loader()},
    )
def test_multi_epochs_reproducibility(self):
    """Worker-to-batch assignment stays stable from epoch to epoch."""
    workers = 2
    batch_size = 10
    epochs = 3
    dataset = TestMultiEpochDataset(batch_size * workers)
    loader = self._get_data_loader(
        dataset, batch_size=batch_size, shuffle=False, num_workers=workers
    )
    for _epoch in range(epochs):
        for batch_idx, sample in enumerate(loader):
            # Batch k is produced entirely by worker (k mod num_workers).
            self.assertEqual(
                sample.tolist(), [batch_idx % workers] * batch_size
            )
def test_worker_init_fn(self):
    """`worker_init_fn` must run in every worker before data is produced."""
    loader = self._get_data_loader(
        SeedDataset(4), batch_size=2, num_workers=2, worker_init_fn=init_fn
    )
    for batch in loader:
        # init_fn seeds each worker with 12345, so every item reflects it.
        self.assertEqual(12345, batch[0])
        self.assertEqual(12345, batch[1])
@unittest.skipIf(
    IS_WINDOWS or IS_MACOS,
    "`ValueError: cannot find context for 'forkserver'` in Windows",
)
def test_worker_init_fn_forkserver(self):
    """Switching the global start method inside worker_init_fn must surface as
    a pickle-related warning plus an exception on the first fetch."""

    def local_init_fn(worker_id):
        torch.manual_seed(12345)
        import multiprocessing as py_mp

        # Changing the start method from inside a worker breaks the loader's
        # inter-process communication.
        py_mp.set_start_method("forkserver", force=True)

    dataset = SeedDataset(4)
    dataloader = self._get_data_loader(
        dataset, batch_size=2, num_workers=2, worker_init_fn=local_init_fn
    )
    with self.assertWarnsRegex(UserWarning, "Got pickle error when"):
        with self.assertRaises(Exception):
            next(iter(dataloader))
def test_get_worker_info(self):
    """Run the get_worker_info checks in a subprocess; expect a clean exit."""
    p = ErrorTrackingProcess(target=_test_get_worker_info)
    p.start()
    p.join(JOIN_TIMEOUT)
    try:
        self.assertFalse(p.is_alive())
        # Exit code 0 means every assertion inside the subprocess passed.
        self.assertEqual(p.exitcode, 0)
    finally:
        p.terminate()
def test_shuffle(self):
    # Shuffling with the loader's default batching.
    self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True))
def test_shuffle_batch_none(self):
    # batch_size=None disables auto-batching; shuffle must still work.
    self._test_shuffle(DataLoader(self.dataset, batch_size=None, shuffle=True))
def test_shuffle_batch(self):
    # Shuffling combined with auto-batching (batch_size=2).
    self._test_shuffle(
        self._get_data_loader(self.dataset, batch_size=2, shuffle=True)
    )
def test_shuffle_reproducibility(self):
    """Identically seeded generators give identical shuffle orders, both
    single-process and with workers."""
    for workers in (0, 2):

        def make_loader(n=workers):  # bind now to avoid late-binding surprises
            return DataLoader(
                self.dataset,
                shuffle=True,
                num_workers=n,
                generator=torch.Generator().manual_seed(42),
            )

        self.assertEqual(list(make_loader()), list(make_loader()))
def test_sequential_workers(self):
    # Sequential order must be preserved across 4 worker processes.
    self._test_sequential(self._get_data_loader(self.dataset, num_workers=4))
def test_seqential_batch_workers(self):
    # NOTE(review): "seqential" is a typo for "sequential"; kept as-is to avoid
    # changing the discovered test name.
    self._test_sequential(
        self._get_data_loader(self.dataset, batch_size=2, num_workers=4)
    )
def test_seqential_batch_workers_prefetch(self):
    # NOTE(review): "seqential" is a typo for "sequential"; kept as-is to avoid
    # changing the discovered test name. Adds a deeper prefetch queue.
    self._test_sequential(
        DataLoader(self.dataset, batch_size=2, num_workers=4, prefetch_factor=3)
    )
def test_shuffle_workers(self):
    # Shuffling with 4 worker processes.
    self._test_shuffle(
        self._get_data_loader(self.dataset, shuffle=True, num_workers=4)
    )
def test_shuffle_batch_workers(self):
    # Shuffling + auto-batching with 4 worker processes.
    self._test_shuffle(
        self._get_data_loader(
            self.dataset, batch_size=2, shuffle=True, num_workers=4
        )
    )
def test_shuffle_batch_workers_prefetch(self):
    # Shuffling + auto-batching + a deeper prefetch queue.
    self._test_shuffle(
        DataLoader(
            self.dataset,
            batch_size=2,
            shuffle=True,
            num_workers=4,
            prefetch_factor=3,
        )
    )
def test_random_sampler(self):
    """RandomSampler draw statistics with/without replacement and num_samples."""
    from collections import Counter

    from torch.utils.data import RandomSampler

    def sample_stat(sampler):
        """Return (#indices drawn more than once, min index, max index, total draws)."""
        # NOTE(review): dropped the previous unused `num_samples` parameter.
        counts = Counter(sampler)
        count_repeated = sum(val > 1 for val in counts.values())
        return (
            count_repeated,
            min(counts.keys()),
            max(counts.keys()),
            sum(counts.values()),
        )

    # test sample with replacement
    n = len(self.dataset) + 1  # ensure at least one sample is drawn more than once
    sampler_with_replacement = RandomSampler(
        self.dataset, replacement=True, num_samples=n
    )
    count_repeated, minval, maxval, count_total = sample_stat(
        sampler_with_replacement
    )
    # Pigeonhole: n > len(dataset) forces at least one repeat.
    self.assertGreater(count_repeated, 0)
    self.assertGreaterEqual(minval, 0)
    self.assertLess(maxval, len(self.dataset))
    self.assertEqual(count_total, n)

    # test sample without replacement and without specified num_samples
    sampler_without_replacement = RandomSampler(self.dataset)
    count_repeated, minval, maxval, count_total = sample_stat(
        sampler_without_replacement
    )
    # A plain permutation: every index exactly once.
    self.assertEqual(count_repeated, 0)
    self.assertEqual(minval, 0)
    self.assertEqual(maxval, len(self.dataset) - 1)
    self.assertEqual(count_total, len(self.dataset))

    # test sample without replacement and with specified num_samples
    n = len(self.dataset) * 2
    sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
    count_repeated, minval, maxval, count_total = sample_stat(
        sampler_without_replacement
    )
    # Two full permutations back to back: every index appears twice.
    self.assertEqual(count_repeated, len(self.dataset))
    self.assertEqual(minval, 0)
    self.assertEqual(maxval, len(self.dataset) - 1)
    self.assertEqual(count_total, n)

    n = len(self.dataset) - 1
    sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
    count_repeated, minval, maxval, count_total = sample_stat(
        sampler_without_replacement
    )
    # A truncated permutation: no repeats, one index left out.
    self.assertEqual(count_repeated, 0)
    self.assertGreaterEqual(minval, 0)
    self.assertLess(maxval, len(self.dataset))
    self.assertEqual(count_total, n)

    n = len(self.dataset) + 1
    sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
    count_repeated, minval, maxval, count_total = sample_stat(
        sampler_without_replacement
    )
    # One full permutation plus a single extra draw: exactly one repeat.
    self.assertEqual(count_repeated, 1)
    self.assertEqual(minval, 0)
    self.assertEqual(maxval, len(self.dataset) - 1)
    self.assertEqual(count_total, n)

    # raise error when replacement is non-boolean
    with self.assertRaisesRegex(
        TypeError, "replacement should be a boolean value, but got replacement=0"
    ):
        RandomSampler(self.dataset, replacement=0)
def test_random_sampler_len_with_replacement(self):
    """len(RandomSampler) honours num_samples when sampling with replacement."""
    from torch.utils.data import RandomSampler

    # add 5 extra samples
    num_samples = len(self.dataset) + 5
    sampler = RandomSampler(self.dataset, replacement=True, num_samples=num_samples)
    # __len__ reports the requested number of samples...
    self.assertEqual(num_samples, len(sampler))
    # ...and iteration actually yields that many.
    self.assertEqual(num_samples, sum(1 for _ in sampler))
    # batch_size=1: loader length equals the sample count.
    batch_size = 1
    loader_len = len(
        self._get_data_loader(self.dataset, batch_size=batch_size, sampler=sampler)
    )
    self.assertEqual(num_samples, loader_len)
    # batch_size=6: loader length is ceil(num_samples / batch_size).
    batch_size = 6
    loader_len = len(
        self._get_data_loader(self.dataset, batch_size=batch_size, sampler=sampler)
    )
    self.assertEqual(
        int(math.ceil(float(num_samples) / batch_size)), loader_len
    )
def test_random_sampler_len_without_replacement(self):
    """len(RandomSampler) honours num_samples when sampling without replacement."""
    from torch.utils.data import RandomSampler

    # add 5 extra samples
    num_samples = len(self.dataset) + 5
    sampler = RandomSampler(
        self.dataset, replacement=False, num_samples=num_samples
    )
    # __len__ reports the requested number of samples...
    self.assertEqual(num_samples, len(sampler))
    # ...and iteration actually yields that many.
    self.assertEqual(num_samples, sum(1 for _ in sampler))
    # batch_size=1: loader length equals the sample count.
    batch_size = 1
    loader_len = len(
        self._get_data_loader(self.dataset, batch_size=batch_size, sampler=sampler)
    )
    self.assertEqual(num_samples, loader_len)
    # batch_size=6: loader length is ceil(num_samples / batch_size).
    batch_size = 6
    loader_len = len(
        self._get_data_loader(self.dataset, batch_size=batch_size, sampler=sampler)
    )
    self.assertEqual((num_samples + batch_size - 1) // batch_size, loader_len)
def test_distributed_sampler_invalid_rank(self):
    """DistributedSampler rejects a rank outside [0, num_replicas)."""
    from torch.utils.data.distributed import DistributedSampler

    dataset = torch.IntTensor(range(10))
    # rank == num_replicas is one past the valid range.
    # NOTE(review): dropped the dead `sampler = ...` bindings (flake8 F841);
    # the constructor call itself is what must raise.
    with self.assertRaisesRegex(ValueError, "Invalid rank"):
        DistributedSampler(dataset, 3, 3)
    # Negative ranks are invalid as well.
    with self.assertRaisesRegex(ValueError, "Invalid rank"):
        DistributedSampler(dataset, 3, -1)
def test_duplicating_data_with_drop_last(self):
    """With drop_last, DistributedSampler shards must not overlap across ranks."""
    from torch.utils.data.distributed import DistributedSampler

    world_size = 4
    total_batches = 9
    data_set = torch.IntTensor(range(total_batches))
    scanned_data = torch.IntTensor([])
    for rank in range(world_size):
        shard_sampler = DistributedSampler(data_set, world_size, rank)
        d_loader = self._get_data_loader(
            data_set,
            batch_size=int(total_batches / world_size),
            drop_last=True,
            sampler=shard_sampler,
        )
        for batch in d_loader:
            scanned_data = torch.cat((scanned_data, batch), 0)
    # Every scanned element is unique => no rank received duplicated data.
    self.assertEqual(scanned_data.size(), scanned_data.unique().size())
def test_sampler_reproducibility(self):
    """Samplers replay identically for equal explicit-generator seeds and for
    equal global seeds, including with interleaved iterators."""
    from torch.utils.data import (
        RandomSampler,
        SubsetRandomSampler,
        WeightedRandomSampler,
    )

    weights = [0.1, 0.9, 0.4, 0.7, 3.0, 0.6]
    # Explicit generator: two fresh sampler instances must agree.
    for fn in (
        lambda: RandomSampler(
            self.dataset,
            num_samples=5,
            replacement=True,
            generator=torch.Generator().manual_seed(42),
        ),
        lambda: RandomSampler(
            self.dataset,
            replacement=False,
            generator=torch.Generator().manual_seed(42),
        ),
        lambda: WeightedRandomSampler(
            weights,
            num_samples=5,
            replacement=True,
            generator=torch.Generator().manual_seed(42),
        ),
        lambda: WeightedRandomSampler(
            weights,
            num_samples=5,
            replacement=False,
            generator=torch.Generator().manual_seed(42),
        ),
        lambda: SubsetRandomSampler(
            range(10), generator=torch.Generator().manual_seed(42)
        ),
    ):
        self.assertEqual(list(fn()), list(fn()))
    # Global seed: the same sampler iterated twice after re-seeding must agree.
    for sampler in (
        RandomSampler(self.dataset, num_samples=5, replacement=True),
        RandomSampler(self.dataset, replacement=False),
        WeightedRandomSampler(weights, num_samples=5, replacement=True),
        WeightedRandomSampler(weights, num_samples=5, replacement=False),
        SubsetRandomSampler(range(10)),
    ):
        torch.manual_seed(0)
        l1 = list(sampler) + list(sampler)
        torch.manual_seed(0)
        l2 = list(sampler) + list(sampler)
        self.assertEqual(l1, l2)
        # Two interleaved iterators over the same sampler must stay in lockstep.
        its = (iter(sampler), iter(sampler))
        ls = ([], [])
        for idx in range(len(sampler)):
            for i in range(2):
                if idx == 0:
                    # re-seed just before each iterator draws its first element
                    torch.manual_seed(0)
                ls[i].append(next(its[i]))
        self.assertEqual(ls[0], ls[1])
def _test_sampler(self, **kwargs):
    """Drive the loader with a plain iterable of indices as `sampler`."""
    indices = range(2, 12)  # using a regular iterable
    dl = self._get_data_loader(
        self.dataset, sampler=indices, batch_size=2, **kwargs
    )
    self.assertEqual(len(dl), 5)
    for batch_idx, (input, _target) in enumerate(dl):
        self.assertEqual(len(input), 2)
        start = batch_idx * 2 + 2
        self.assertEqual(input, self.data[start : start + 2])
def test_sampler(self):
    """Run the plain-sampler checks serially, with workers, and with spawn."""
    self._test_sampler()
    self._test_sampler(num_workers=4)
    # NOTE(review): fixed copy-paste bug — this previously called
    # _test_batch_sampler, duplicating test_batch_sampler's spawn case and
    # leaving the plain sampler untested under the spawn start method.
    self._test_sampler(num_workers=4, multiprocessing_context="spawn")
def _test_batch_sampler(self, **kwargs):
    """Drive the loader with a pre-built iterable of index batches.

    Pattern: [(0, 1), (2, 3, 4), (5, 6), (7, 8, 9), ...] — alternating
    batches of size 2 and 3 covering indices 0..19.
    """
    batches = []  # using a regular iterable
    for start in range(0, 20, 5):
        batches.append(tuple(range(start, start + 2)))
        batches.append(tuple(range(start + 2, start + 5)))
    dl = self._get_data_loader(self.dataset, batch_sampler=batches, **kwargs)
    self.assertEqual(len(dl), 8)
    for i, (input, _target) in enumerate(dl):
        offset = i * 5 // 2
        # Even-indexed batches hold 2 items, odd-indexed ones hold 3.
        expected_len = 2 if i % 2 == 0 else 3
        self.assertEqual(len(input), expected_len)
        self.assertEqual(input, self.data[offset : offset + expected_len])
def test_batch_sampler(self):
    # Run the batch_sampler checks serially, with workers, and with spawn.
    self._test_batch_sampler()
    self._test_batch_sampler(num_workers=4)
    self._test_batch_sampler(num_workers=4, multiprocessing_context="spawn")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
    """pin_memory=True must hand back pinned input and target tensors."""
    loader = self._get_data_loader(
        self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True
    )
    for sample, label in loader:
        self.assertTrue(sample.is_pinned())
        self.assertTrue(label.is_pinned())
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy(self):
    """Default collate stacks float64 numpy arrays into a DoubleTensor batch."""
    import numpy as np

    class TestDataset(torch.utils.data.Dataset):
        def __getitem__(self, i):
            # float64 array, so the collated batch should be a DoubleTensor
            return np.ones((2, 3, 4)) * i

        def __len__(self):
            return 1000

    loader = self._get_data_loader(TestDataset(), batch_size=12)
    batch = next(iter(loader))
    self.assertIsInstance(batch, torch.DoubleTensor)
    self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_gen_state(self):
    """`_generate_state` must reproduce NumPy's seed-sequence-derived states."""
    from torch.utils.data._utils.worker import _generate_state

    # Using NumPy generated states as the reference to test `_generate_state`
    # having the same result.
    # Test case: ((worker_id, base_seed), expected_state)
    test_cases = [
        (
            (4, 13434589827475259383),
            (2884386318, 1088094898, 3523808998, 3860348662),
        ),
        ((1, 15014285634777110771), (1934848465, 763213760, 2959016433, 179751970)),
        (
            (10, 978296274032934101),
            (1759791917, 3550927336, 1225977135, 1036538043),
        ),
        (
            (12, 11868770762134256968),
            (3974661794, 3331131333, 3630387033, 2885815368),
        ),
        (
            (9, 15378787925219019706),
            (3815056996, 3162224466, 2735102421, 3190253477),
        ),
        ((5, 9055612723125076328), (3522565701, 3368424109, 959377806, 621878693)),
        (
            (15, 14617792358407278405),
            (3402479508, 1588702753, 1169536393, 3675067356),
        ),
        (
            (9, 17363320784006640087),
            (957989458, 2518334477, 1421725660, 3086155459),
        ),
        (
            (12, 480002904169484764),
            (2732851467, 1762620729, 4055801988, 1277640511),
        ),
        (
            (15, 16803975943592702950),
            (3479415043, 4022359553, 295994005, 3358606349),
        ),
        (
            (9, 11704776406047813044),
            (1968928009, 710113752, 2442656196, 1587420279),
        ),
        (
            (10, 16357891985431864516),
            (1271733898, 4197047399, 3727213786, 2338547348),
        ),
        (
            (2, 17423369006318065007),
            (544294336, 1911284083, 3299147734, 3231058347),
        ),
        ((2, 2889492011444113593), (3721591783, 2595811276, 2212881745, 977682627)),
        ((0, 8979703111668486195), (4276723937, 2556068849, 2962827292, 233130238)),
        (
            (6, 6269787272229682235),
            (2548857855, 1216457374, 1012973562, 2999759647),
        ),
    ]
    for (worker_id, base_seed), exp in test_cases:
        self.assertEqual(exp, _generate_state(base_seed, worker_id))
def test_error(self):
    # ErrorDataset raises inside __getitem__; single-process path.
    self._test_error(
        self._get_data_loader(ErrorDataset(100), batch_size=2, shuffle=True)
    )
def test_error_workers(self):
    # Same as test_error, but the exception is raised inside worker processes
    # and must propagate back to the main process.
    self._test_error(
        self._get_data_loader(
            ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4
        )
    )
def test_subset_custom_getitem(self):
    """
    Regression test for issue #163184 where DataLoader ignores custom
    transformations in Subset subclasses that override __getitem__.
    """

    class SimpleDataset(Dataset):
        def __init__(self):
            self.data = torch.arange(20)

        def __len__(self):
            return 20

        def __getitem__(self, idx):
            return self.data[idx]

    class TransformSubset(Subset):
        def __getitem__(self, idx):
            # transform: multiply by 2
            return self.dataset[self.indices[idx]] * 2

        def __getitems__(self, indices):
            return [self[idx] for idx in indices]

    subset = TransformSubset(SimpleDataset(), [0, 1, 2, 3])
    # Direct indexing applies the doubling transform.
    for pos, expected in ((0, 0), (1, 2), (2, 4)):
        self.assertEqual(subset[pos].item(), expected)
    # The DataLoader must route through the override as well.
    first, second = list(self._get_data_loader(subset, batch_size=2, shuffle=False))
    self.assertTrue(torch.equal(first, torch.tensor([0, 2])))
    self.assertTrue(torch.equal(second, torch.tensor([4, 6])))
def test_subset_custom_getitem_with_tuple_data(self):
    """Test Subset custom __getitem__ with tuple data (like the original issue)."""

    class TupleDataset(Dataset):
        def __init__(self):
            self.a = torch.arange(10)
            self.b = torch.arange(100, 110)

        def __len__(self):
            return 10

        def __getitem__(self, idx):
            return self.a[idx], self.b[idx]

    class SumSubset(Subset):
        """Subset that returns sum instead of tuple"""

        def __getitem__(self, idx):
            first, second = self.dataset[self.indices[idx]]
            return first + second

        def __getitems__(self, indices):
            return [self[idx] for idx in indices]

    subset = SumSubset(TupleDataset(), [0, 1, 2, 3])
    # Direct indexing returns a[i] + b[i].
    self.assertEqual(subset[0].item(), 100)
    self.assertEqual(subset[1].item(), 102)
    # The DataLoader must route through the override as well.
    first_batch, second_batch = list(
        self._get_data_loader(subset, batch_size=2, shuffle=False)
    )
    self.assertTrue(torch.equal(first_batch, torch.tensor([100, 102])))
    self.assertTrue(torch.equal(second_batch, torch.tensor([104, 106])))
def test_subset_override_getitem_requires_getitems(self):
    """
    Test that subclassing Subset and overriding only __getitem__ without
    __getitems__ raises a NotImplementedError at instantiation time.
    """

    class SimpleDataset(Dataset):
        def __init__(self):
            self.data = torch.arange(10)

        def __len__(self):
            return 10

        def __getitem__(self, idx):
            return self.data[idx]

    class IncompleteSubset(Subset):
        # Deliberately overrides __getitem__ but not __getitems__.
        def __getitem__(self, idx):
            original_idx = self.indices[idx]
            return self.dataset[original_idx] * 2

    dataset = SimpleDataset()
    with self.assertRaises(NotImplementedError) as cm:
        # NOTE(review): dropped the dead `subset = ...` binding (flake8 F841);
        # instantiation itself is what must raise.
        IncompleteSubset(dataset, [0, 1, 2, 3])
    error_message = str(cm.exception)
    # The error should name the subclass and both dunder methods involved.
    self.assertIn("IncompleteSubset", error_message)
    self.assertIn("__getitem__", error_message)
    self.assertIn("__getitems__", error_message)
@unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
def test_partial_workers(self):
    r"""Check that workers exit even if the iterator is not exhausted."""
    if TEST_CUDA:
        pin_memory_configs = (True, False)
    else:
        pin_memory_configs = (False,)
    for pin_memory in pin_memory_configs:
        loader = iter(
            self._get_data_loader(
                self.dataset, batch_size=2, num_workers=4, pin_memory=pin_memory
            )
        )
        workers = loader._workers
        if pin_memory:
            pin_memory_thread = loader._pin_memory_thread
        for i, _ in enumerate(loader):
            # Abandon iteration part-way through the dataset.
            if i == 10:
                break
        if i != 10:
            raise AssertionError(f"Expected to stop at i=10, got i={i}")
        # Dropping the only reference should shut down workers and threads.
        del loader
        for w in workers:
            w.join(JOIN_TIMEOUT)
            self.assertFalse(w.is_alive(), "subprocess not terminated")
        if pin_memory:
            pin_memory_thread.join(JOIN_TIMEOUT)
            self.assertFalse(pin_memory_thread.is_alive())
# Takes 2.5min to finish, see https://github.com/pytorch/pytorch/issues/46065
@unittest.skipIf(not HAS_PSUTIL, "psutil not found")
@slowTest
def test_proper_exit(self):
"""There might be ConnectionResetError or leaked semaphore warning
(due to dirty process exit), but they are all safe to ignore"""
# TODO: test the case where the pin_memory_thread triggers an
# error/fatal signal. I haven't found out how to properly do that.
for (
is_iterable_dataset,
use_workers,
pin_memory,
hold_iter_reference,
) in itertools.product([True, False], repeat=4):
# `hold_iter_reference` specifies whether we hold a reference to the
# iterator. This is interesting because Python3 error traces holds a
# reference to the frames, which hold references to all the local
# variables including the iterator, and then the iterator dtor may
# not be called before process end. It is important to see that the
# processes still exit in both cases.
if pin_memory and (not TEST_CUDA or IS_WINDOWS):
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# DataLoader with pin_memory=True initializes CUDA when its iterator is constructed.
# For windows, pin_memory sometimes causes CUDA oom.
continue
# `exit_method` controls the way the loader process ends.
# - `*_kill` means that `*` is killed by OS.
# - `*_error` means that `*` raises an error.
# - `None` means that no error happens.
# In all cases, all processes should end properly.
if use_workers:
# TODO: Fix test for 'loader_kill' that would cause running out of shared memory.
# Killing loader process would prevent DataLoader iterator clean up all queues
# and worker processes
exit_methods = [None, "loader_error", "worker_error", "worker_kill"]
persistent_workers = self.persistent_workers
else:
exit_methods = [None, "loader_error", "loader_kill"]
persistent_workers = False
for exit_method in exit_methods:
if exit_method == "worker_kill":
# FIXME: This sometimes hangs. See #16608.
continue
desc = []
desc.append(f"is_iterable_dataset={is_iterable_dataset}")
desc.append(f"use_workers={use_workers}")
desc.append(f"pin_memory={pin_memory}")
desc.append(f"hold_iter_reference={hold_iter_reference}")
desc.append(f"exit_method={exit_method}")
desc = "test_proper_exit with " + ", ".join(desc)
# Event that the loader process uses to signal testing process
# that various things are setup, including that the worker pids
# are specified in `worker_pids` array.
loader_setup_event = mp.Event()
# Event that this process has finished setting up, and the
# loader process can now proceed to trigger error events or
# finish normally.
tester_setup_event = mp.Event()
loader_p = ErrorTrackingProcess(
target=_test_proper_exit,
args=(
is_iterable_dataset,
use_workers,
pin_memory,
exit_method,
hold_iter_reference,
loader_setup_event,
tester_setup_event,
persistent_workers,
),
disable_stderr=False,
)
loader_p.start()
loader_psutil_p = psutil.Process(loader_p.pid)
# Wait for loader process to set everything up, e.g., starting
# workers.
loader_setup_event.wait(timeout=JOIN_TIMEOUT)
if not loader_setup_event.is_set():
fail_msg = (
desc + ": loader process failed to setup within given time"
)
if loader_p.exception is not None:
fail_msg += f", and had exception {loader_p.exception}"
elif not loader_p.is_alive():
fail_msg += f", and exited with code {loader_p.exitcode} but had no exception"
else:
fail_msg += ", and is still alive."
if loader_p.is_alive():
# this may kill the process, needs to run after the above lines
loader_p.print_traces_of_all_threads()
self.fail(fail_msg)
# We are certain that the workers have started now.
worker_psutil_ps = loader_psutil_p.children()
def fail(reason):
report_psutil_attrs = [
"pid",
"name",
"cpu_times",
"io_counters",
"memory_full_info",
"num_ctx_switches",
"open_files",
"threads",
"status",
"nice",
"ionice",
]
if reason is None:
err_msg = desc
else:
err_msg = f"{desc}: {reason}"
err_msg += "\nLoader info:\n\t"
if loader_psutil_p.is_running():
err_msg += str(
loader_psutil_p.as_dict(attrs=report_psutil_attrs)
)
# this may kill the process, needs to run after the above line
loader_p.print_traces_of_all_threads()
else:
err_msg += f"exited with code {loader_p.exitcode}"
if use_workers:
err_msg += "\nWorker(s) info:"
for idx, worker_psutil_p in enumerate(worker_psutil_ps):
err_msg += f"\n\tWorker {idx}:\n\t\t"
if worker_psutil_p.is_running():
err_msg += str(
worker_psutil_p.as_dict(attrs=report_psutil_attrs)
)
# this may kill the process, needs to run after the above line
print_traces_of_all_threads(worker_psutil_p.pid)
else:
err_msg += "exited with unknown code"
self.fail(err_msg)
tester_setup_event.set()
try:
loader_p.join(JOIN_TIMEOUT + MP_STATUS_CHECK_INTERVAL)
if loader_p.is_alive():
fail_reason = "loader process did not terminate"
if loader_p.exception is not None:
fail(
fail_reason
+ f", and had exception {loader_p.exception}"
)
else:
fail(fail_reason + ", and had no exception")
_, alive = psutil.wait_procs(
worker_psutil_ps,
timeout=(MP_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT),
)
if len(alive) > 0:
fail(
"worker process (pid(s) {}) did not terminate".format(
", ".join(str(p.pid) for p in alive)
)
)
if exit_method is None:
if loader_p.exitcode != 0:
fail(
f"loader process had nonzero exitcode {loader_p.exitcode}"
)
else:
if loader_p.exitcode == 0:
fail("loader process had zero exitcode")
if exit_method == "loader_error":
if not isinstance(
loader_p.exception, RuntimeError
) or "Loader error" not in str(loader_p.exception):
fail(
f"loader process did not raise expected exception, but had {loader_p.exception}"
)
elif exit_method == "worker_kill":
if isinstance(loader_p.exception, RuntimeError):
if "DataLoader worker (pid" not in str(
loader_p.exception
):
fail(
f"loader process did not raise expected exception, but had {loader_p.exception}"
)
elif isinstance(loader_p.exception, ConnectionRefusedError):
# Sometimes, when the worker is being killed and is freeing its
# resources, the unpickling in loader process will be met an
# a `ConnectionRefusedError` as it can not open a socket to receive
# resource. In such cases, the worker may not have fully exited,
# and the loader can't know this via `is_alive` check or `SIGCHLD`
# handler. So we permit this as an allowed error as well.
# After all, we are happy as long as it terminates.
pass
else:
fail(
f"loader process did not raise expected exception, but had {loader_p.exception}"
)
elif exit_method == "worker_error":
if not isinstance(
loader_p.exception, RuntimeError
) or "Worker error" not in str(loader_p.exception):
fail(
f"loader process did not raise expected exception, but had {loader_p.exception}"
)
finally:
loader_p.terminate()
def test_len(self):
def check_len(dl, expected):
self.assertEqual(len(dl), expected)
n = 0
for _ in dl:
n += 1
self.assertEqual(n, expected)
check_len(self.dataset, 100)
check_len(self._get_data_loader(self.dataset, batch_size=2), 50)
check_len(self._get_data_loader(self.dataset, batch_size=3), 34)
def test_iterabledataset_len(self):
class IterableDataset(torch.utils.data.IterableDataset):
def __len__(self):
return 10
def __iter__(self):
return iter(range(10))
iterable_loader = DataLoader(IterableDataset(), batch_size=1)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=1, drop_last=True)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=2)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=2, drop_last=True)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=3)
self.assertEqual(len(iterable_loader), 4)
iterable_loader = DataLoader(IterableDataset(), batch_size=3, drop_last=True)
self.assertEqual(len(iterable_loader), 3)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_scalars(self):
import numpy as np
class ScalarDataset(torch.utils.data.Dataset):
def __init__(self, dtype):
self.dtype = dtype
def __getitem__(self, i):
return self.dtype()
def __len__(self):
return 4
dtypes = {
np.float64: torch.DoubleTensor,
np.float32: torch.FloatTensor,
np.float16: torch.HalfTensor,
np.int64: torch.LongTensor,
np.int32: torch.IntTensor,
np.int16: torch.ShortTensor,
np.int8: torch.CharTensor,
np.uint8: torch.ByteTensor,
}
for dt, tt in dtypes.items():
dset = ScalarDataset(dt)
loader = self._get_data_loader(dset, batch_size=2)
batch = next(iter(loader))
self.assertIsInstance(batch, tt)
def test_default_convert_mapping_keep_type(self):
data = CustomDict({"a": 1, "b": 2})
converted = dataloader.default_convert(data)
self.assertEqual(converted, data)
def test_default_convert_sequence_keep_type(self):
data = CustomList([1, 2, 3])
converted = dataloader.default_convert(data)
self.assertEqual(converted, data)
def test_default_convert_sequence_dont_keep_type(self):
data = range(2)
converted = dataloader.default_convert(data)
self.assertEqual(converted, [0, 1])
def test_default_collate_dtype(self):
arr = [1, 2, -1]
collated = dataloader.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.int64)
arr = [1.1, 2.3, -0.9]
collated = dataloader.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr, dtype=torch.float64))
arr = [True, False]
collated = dataloader.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.bool)
# Should be a no-op
arr = ["a", "b", "c"]
self.assertEqual(arr, dataloader.default_collate(arr))
def test_default_collate_mapping_keep_type(self):
batch = [CustomDict({"a": 1, "b": 2}), CustomDict({"a": 3, "b": 4})]
collated = dataloader.default_collate(batch)
expected = CustomDict({"a": torch.tensor([1, 3]), "b": torch.tensor([2, 4])})
self.assertEqual(collated, expected)
def test_default_collate_sequence_keep_type(self):
batch = [CustomList([1, 2, 3]), CustomList([4, 5, 6])]
collated = dataloader.default_collate(batch)
expected = CustomList(
[
torch.tensor([1, 4]),
torch.tensor([2, 5]),
torch.tensor([3, 6]),
]
)
self.assertEqual(collated, expected)
def test_default_collate_sequence_dont_keep_type(self):
batch = [range(2), range(2)]
collated = dataloader.default_collate(batch)
self.assertEqual(collated, [torch.tensor([0, 0]), torch.tensor([1, 1])])
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_bad_numpy_types(self):
import numpy as np
# Should be a no-op
arr = np.array(["a", "b", "c"])
self.assertEqual(arr, dataloader.default_collate(arr))
arr = np.array([[["a", "b", "c"]]])
self.assertRaises(TypeError, lambda: dataloader.default_collate(arr))
arr = np.array([object(), object(), object()])
self.assertRaises(TypeError, lambda: dataloader.default_collate(arr))
arr = np.array([[[object(), object(), object()]]])
self.assertRaises(TypeError, lambda: dataloader.default_collate(arr))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_numpy_memmap(self):
import numpy as np
with tempfile.TemporaryFile() as f:
arr = np.array([[0, 1], [2, 3], [4, 5], [6, 7]])
arr_memmap = np.memmap(f, dtype=arr.dtype, mode="w+", shape=arr.shape)
arr_memmap[:] = arr[:]
arr_new = np.memmap(f, dtype=arr.dtype, mode="r", shape=arr.shape)
tensor = dataloader.default_collate(list(arr_new))
self.assertTrue(
(tensor == tensor.new_tensor([[0, 1], [2, 3], [4, 5], [6, 7]])).all().item()
)
def test_default_collate_bad_sequence_type(self):
batch = [["X"], ["X", "X"]]
self.assertRaises(RuntimeError, lambda: dataloader.default_collate(batch))
self.assertRaises(RuntimeError, lambda: dataloader.default_collate(batch[::-1]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_shared_tensor(self):
import numpy as np
t_in = torch.zeros(1)
n_in = np.zeros(1)
self.assertEqual(t_in.is_shared(), False)
self.assertEqual(dataloader.default_collate([t_in]).is_shared(), False)
self.assertEqual(dataloader.default_collate([n_in]).is_shared(), False)
# FIXME: fix the following hack that makes `default_collate` believe
# that it is in a worker process (since it tests
# `get_worker_info() != None`), even though it is not.
old = _utils.worker._worker_info
try:
_utils.worker._worker_info = "x"
self.assertEqual(dataloader.default_collate([t_in]).is_shared(), True)
self.assertEqual(dataloader.default_collate([n_in]).is_shared(), True)
finally:
_utils.worker._worker_info = old
def test_excessive_thread_creation_warning(self):
with self.assertWarnsRegex(
UserWarning,
r"excessive worker creation might get DataLoader running slow or even freeze",
):
dataloader = DataLoader(self.dataset, batch_size=2, num_workers=1000)
class TestDataLoaderDeviceType(TestCase):
@parametrize(
"context",
[ctx for ctx in supported_multiprocessing_contexts if ctx is not None],
)
@unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_nested_tensor_multiprocessing(self, device, context):
# The 'fork' multiprocessing context doesn't work for CUDA so skip it
if "cuda" in device and context == "fork":
self.skipTest(
f"{context} multiprocessing context not supported for {device}"
)
dataset = [
torch.nested.nested_tensor([torch.randn(5)], device=device)
for _ in range(10)
]
pin_memory_settings = [False]
if device == "cpu" and torch.cuda.is_available():
pin_memory_settings.append(True)
for pin_memory in pin_memory_settings:
loader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
num_workers=4,
collate_fn=_clone_collate,
pin_memory=pin_memory,
multiprocessing_context=context,
)
for i, batch in enumerate(loader):
self.assertEqual(batch[0], dataset[i])
# Error case: default collate_fn doesn't currently support batches of nested tensors.
# Following the current semantics, we'd need to stack them, which isn't possible atm.
with self.assertRaisesRegex(
RuntimeError, "not currently supported by the default collate_fn"
):
loader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
num_workers=4,
multiprocessing_context=context,
)
next(iter(loader))
@parametrize(
"context",
[ctx for ctx in supported_multiprocessing_contexts if ctx is not None],
)
@unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_sparse_tensor_multiprocessing(self, device, context):
# The 'fork' multiprocessing context doesn't work for CUDA so skip it
if "cuda" in device and context == "fork":
self.skipTest(
f"{context} multiprocessing context not supported for {device}"
)
dataset = [torch.randn(5, 5).to_sparse().to(device) for _ in range(10)]
pin_memory_settings = [False]
if device == "cpu" and torch.cuda.is_available():
pin_memory_settings.append(True)
for pin_memory in pin_memory_settings:
loader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
num_workers=4,
collate_fn=_sparse_coo_collate,
pin_memory=pin_memory,
multiprocessing_context=context,
)
for i, batch in enumerate(loader):
self.assertEqual(batch[0], dataset[i])
class IntegrationTestDataLoaderDataPipe(TestCase):
r"""
Verify the behavior of a certain ``DataPipes`` with ``DataLoader``
"""
def test_shuffler_iterdatapipe(self):
r"""
Verify ``IterDataPipe.shuffle`` is controlled by ``DataLoader``
to generate different seeds deterministically per epoch.
"""
exp = list(range(100))
def _create_dp(buffer_size):
input_ds = dp.iter.IterableWrapper(exp)
return input_ds.shuffle(buffer_size=buffer_size).sharding_filter()
for bs in (5, 20, 33):
# Test Deterministic
for num_workers, pw in itertools.product((0, 1, 2), (True, False)):
if num_workers == 0 and pw:
continue
shuffle_dp = _create_dp(bs)
mp_ctx = "spawn" if num_workers > 0 else None
dl = DataLoader(
shuffle_dp,
num_workers=num_workers,
shuffle=True,
multiprocessing_context=mp_ctx,
persistent_workers=pw,
)
# No seed
dl_res_ns = list(dl)
self.assertEqual(sorted(dl_res_ns), exp)
# Same seeds
dl_res = []
for _epoch in range(2):
torch.manual_seed(123)
dl_res.append(list(dl))
self.assertEqual(dl_res[0], dl_res[1])
self.assertEqual(sorted(dl_res[0]), exp)
# Different seeds
torch.manual_seed(321)
dl_res.append(list(dl))
self.assertEqual(len(dl_res[0]), len(dl_res[2]))
self.assertNotEqual(dl_res[0], dl_res[2])
self.assertEqual(sorted(dl_res[0]), sorted(dl_res[2]))
if dl._iterator is not None:
dl._iterator._shutdown_workers()
dl._iterator = None
del dl
class StringDataset(Dataset):
def __init__(self) -> None:
self.s = "12345"
def __len__(self):
return len(self.s)
def __getitem__(self, ndx):
return (self.s[ndx], ndx)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestStringDataLoader(TestCase):
def setUp(self):
super().setUp()
self.dataset = StringDataset()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
loader = DataLoader(
self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True
)
for s, n in loader:
self.assertIsInstance(s[0], str)
self.assertTrue(n.is_pinned())
class DictDataset(Dataset):
def __len__(self):
return 4
def __getitem__(self, ndx):
return {
"a_tensor": torch.empty(4, 2).fill_(ndx),
"another_dict": {"a_number": ndx},
}
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestDictDataLoader(TestCase):
def setUp(self):
super().setUp()
self.dataset = DictDataset()
def test_sequential_batch(self):
for persistent_workers in (False, True):
if persistent_workers:
loader = DataLoader(
self.dataset,
batch_size=2,
shuffle=False,
persistent_workers=persistent_workers,
num_workers=1,
)
else:
loader = DataLoader(
self.dataset,
batch_size=2,
shuffle=False,
persistent_workers=persistent_workers,
)
batch_size = loader.batch_size
for i, sample in enumerate(loader):
idx = i * batch_size
self.assertEqual(set(sample.keys()), {"a_tensor", "another_dict"})
self.assertEqual(set(sample["another_dict"].keys()), {"a_number"})
t = sample["a_tensor"]
self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
self.assertTrue((t[0] == idx).all())
self.assertTrue((t[1] == idx + 1).all())
n = sample["another_dict"]["a_number"]
self.assertEqual(n.size(), torch.Size([batch_size]))
self.assertEqual(n[0], idx)
self.assertEqual(n[1], idx + 1)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for sample in loader:
self.assertTrue(sample["a_tensor"].is_pinned())
self.assertTrue(sample["another_dict"]["a_number"].is_pinned())
@skipIfXpu
@unittest.skipIf(TEST_CUDA, "Test for when CUDA is not available")
def test_pin_memory_no_cuda(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for sample in loader:
self.assertFalse(sample["a_tensor"].is_pinned())
self.assertFalse(sample["another_dict"]["a_number"].is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory_device(self):
loader = DataLoader(
self.dataset, batch_size=2, pin_memory=True, pin_memory_device="cuda"
)
for sample in loader:
self.assertTrue(sample["a_tensor"].is_pinned())
self.assertTrue(sample["another_dict"]["a_number"].is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory_with_only_device(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory_device="cuda")
for sample in loader:
self.assertFalse(sample["a_tensor"].is_pinned())
self.assertFalse(sample["another_dict"]["a_number"].is_pinned())
class DummyDataset(torch.utils.data.Dataset):
def __init__(self) -> None:
self.data = list(range(10))
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# The persistent workers always maintain the original
# dataset through the dataloader lifetime
# so the attributes will remain the same as the
# first time the workers where spawned (dataloader iteration)
if self.start != 0:
raise AssertionError(f"Expected start=0, got {self.start}")
return self.data[idx]
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestDataLoaderPersistentWorkers(TestDataLoader):
def setUp(self):
super().setUp()
self.persistent_workers = True
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output(
[
sys.executable,
"-c",
"""\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super(RandomDataset).__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
num_workers=1, persistent_workers=True):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
""",
]
)
def test_dataset_not_reset(self):
dataset = DummyDataset()
pin_memory_configs = [False]
if TEST_CUDA:
pin_memory_configs.append(True)
for pin_memory in pin_memory_configs:
dataloader = self._get_data_loader(
dataset, num_workers=2, pin_memory=pin_memory
)
dataset.start = 0
for i in range(10):
for _ in dataloader:
pass
# Changing the start value here doesn't have any effect in the dataset
# cached by the workers. since they are not recreated between epochs
# and can cache values safely
dataset.start = i
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_early_exit(self):
import subprocess
proc = subprocess.check_output(
[
sys.executable,
"-c",
"""\
import torch
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super(RandomDataset).__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
if __name__ == '__main__':
dl = DataLoader(
RandomDataset(64, (28, 28)),
batch_size=16,
num_workers=2,
pin_memory=True,
persistent_workers=True,
multiprocessing_context="fork",
)
for _ in dl:
break
""",
]
)
class NamedTupleDataset(Dataset):
from collections import namedtuple
Batch = namedtuple("Batch", ["data", "label", "random_tensor"])
Data = namedtuple("Data", ["positive", "negative"])
def __len__(self):
return 4
def __getitem__(self, ndx):
return self.Batch(
data=self.Data(positive=ndx, negative=-ndx),
label=str(ndx),
random_tensor=torch.randn(3),
)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestNamedTupleDataLoader(TestCase):
def setUp(self):
super().setUp()
self.dataset = NamedTupleDataset()
def test_dataloader_with_namedtuple(self):
# auto-collation
loader = DataLoader(self.dataset, batch_size=2, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertIsInstance(batch.data.positive, torch.Tensor)
self.assertEqual(batch.data.positive.is_pinned(), TEST_CUDA)
# no auto-collation
loader = DataLoader(self.dataset, batch_size=None, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertNotIsInstance(batch.data.positive, torch.Tensor)
class SimpleCustomBatch:
def __init__(self, data):
transposed_data = list(zip(*data))
self.inp = torch.stack(transposed_data[0], 0)
self.tgt = torch.stack(transposed_data[1], 0)
def pin_memory(self):
self.inp = self.inp.pin_memory()
self.tgt = self.tgt.pin_memory()
return self
def is_pinned(self):
return self.inp.is_pinned() and self.tgt.is_pinned()
# Workaround for https://github.com/pytorch/pytorch/issues/50661
# Classes from `__main__` can not be correctly unpickled from spawned module
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
self_module = __import__(os.path.splitext(os.path.basename(__file__))[0])
def collate_wrapper(batch):
return self_module.SimpleCustomBatch(batch)
def collate_into_packed_sequence(batch):
data = torch.stack([sample[0] for sample in batch], 1)
t, b = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, enforce_sorted=False)
def collate_into_packed_sequence_batch_first(batch):
data = torch.stack([sample[0] for sample in batch], 0)
b, t = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(
data, lengths, batch_first=True, enforce_sorted=False
)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestCustomPinFn(TestCase):
def setUp(self):
super().setUp()
inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
self.dataset = TensorDataset(inps, tgts)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(
collate_into_packed_sequence_batch_first,
torch.nn.utils.rnn.PackedSequence,
),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(
self.dataset, batch_size=2, collate_fn=collate_fn, pin_memory=True
)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin_worker(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(
collate_into_packed_sequence_batch_first,
torch.nn.utils.rnn.PackedSequence,
),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(
self.dataset,
batch_size=2,
collate_fn=collate_fn,
pin_memory=True,
num_workers=1,
)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
class TestWorkerQueueDataset(Dataset):
def __init__(self, data):
self.data = data
self.worker_id = None
def worker_init_fn(self, worker_id):
self.worker_id = worker_id
def __getitem__(self, item):
return self.worker_id, self.data[item]
def __len__(self):
return len(self.data)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)",
)
class TestIndividualWorkerQueue(TestCase):
def setUp(self):
super().setUp()
self.dataset = TestWorkerQueueDataset(list(range(128)))
def _run_ind_worker_queue_test(self, batch_size, num_workers):
loader = DataLoader(
self.dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
timeout=JOIN_TIMEOUT,
worker_init_fn=self.dataset.worker_init_fn,
)
current_worker_idx = 0
for i, (worker_ids, sample) in enumerate(loader):
self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
self.assertEqual(
sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size))
)
current_worker_idx += 1
if current_worker_idx == num_workers:
current_worker_idx = 0
@unittest.skipIf(
IS_WINDOWS or IS_MACOS,
"Flaky on Windows and MacOS https://github.com/pytorch/pytorch/issues/68643",
)
def test_ind_worker_queue(self):
for batch_size in (8, 32, 64):
for num_workers in range(1, 6):
self._run_ind_worker_queue_test(
batch_size=batch_size, num_workers=num_workers
)
class SetAffinityDataset(IterableDataset):
def __init__(self, expected_affinity=None):
self.expected_affinity = expected_affinity
def __iter__(self):
affinity_mask = os.sched_getaffinity(0)
return iter(affinity_mask)
def _worker_set_affinity_init(worker_id):
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
dataset = worker_info.dataset
if (
isinstance(dataset, SetAffinityDataset)
and dataset.expected_affinity is not None
):
os.sched_setaffinity(0, [dataset.expected_affinity])
@unittest.skipIf(
not hasattr(os, "sched_setaffinity"), "os.sched_setaffinity is not available"
)
class TestSetAffinity(TestCase):
def test_set_affinity_in_worker_init(self):
# Query the current affinity mask to avoid setting a disallowed one
old_affinity = os.sched_getaffinity(0)
if not old_affinity:
self.skipTest("No affinity information")
# Choose any
expected_affinity = list(old_affinity)[-1]
# Pass expected affinity through the dataset
dataset = SetAffinityDataset(expected_affinity=expected_affinity)
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=2,
worker_init_fn=_worker_set_affinity_init,
)
for sample in dataloader:
self.assertEqual(sample, [expected_affinity])
class ConvDataset(Dataset):
def __init__(self) -> None:
self.x = torch.ones(1, 1, 24000)
# Call convolution on parent process
self[0]
def __len__(self):
return 1
def __getitem__(self, index):
return torch.nn.functional.conv1d(self.x, torch.ones(1, 1, 2))
@unittest.skipIf(IS_WINDOWS, "Needs fork")
class TestConvAfterFork(TestCase):
# Tests crash reported in https://github.com/pytorch/pytorch/issues/53565
def test_conv_after_fork(self):
loader = DataLoader(ConvDataset(), num_workers=1)
for x in loader:
self.assertEqual(x.shape, (1, 1, 1, 23999))
class TestSlowIndexDataset(Dataset):
def __init__(self, end: int, slow_index: int):
self.end = end
self.slow_index = slow_index
self._worker_id = None
def __getitem__(self, idx):
if not self._worker_id:
worker_info = torch.utils.data.get_worker_info()
self._worker_id = worker_info.id
if idx == self.slow_index:
time.sleep(1.0)
return (self._worker_id, idx)
def __len__(self):
return self.end
class TestSlowIterableDataset(IterableDataset):
def __init__(self, start: int, end: int):
self.start = start
self.end = end
self.mid = math.ceil((self.end - self.start) / 2)
def give_data(self, worker_id, iter_start, iter_end):
for i in range(iter_start, iter_end):
if i == self.mid:
time.sleep(1.0)
yield (worker_id, i)
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
per_worker = int(
math.ceil((self.end - self.start) / float(worker_info.num_workers))
)
worker_id = worker_info.id
iter_start = self.start + worker_id * per_worker
iter_end = min(iter_start + per_worker, self.end)
return self.give_data(worker_id, iter_start, iter_end)
class TestOutOfOrderDataLoader(TestCase):
    """Tests for the DataLoader ``in_order`` flag.

    With ``in_order=True`` results must come back in dataset order even
    when one worker stalls; with ``in_order=False`` the fast worker's
    results may arrive first.  Determinism relies on the 1-second sleep
    baked into TestSlowIndexDataset / TestSlowIterableDataset.
    """
    def test_in_order_index_ds(self):
        dataset = TestSlowIndexDataset(end=10, slow_index=0)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            num_workers=2,
            in_order=True,
        )
        # Order is preserved despite worker 0 stalling on index 0;
        # workers alternate indices per the expected lists below.
        expected_worker_ids = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
        expected_data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        outputs = list(dataloader)
        worker_ids = [o[0] for o in outputs]
        data = [o[1] for o in outputs]
        self.assertEqual(expected_worker_ids, worker_ids)
        self.assertEqual(expected_data, data)
    def test_out_of_order_index_ds(self):
        dataset = TestSlowIndexDataset(end=10, slow_index=0)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            num_workers=2,
            prefetch_factor=2,
            in_order=False,
        )
        # worker_id = 0 gets 'stuck' on 0 and also has 2 in it's queue
        # due to prefetch_factor being 2
        # this makes the test more deterministic as [0, 2] will be the last elements
        expected_worker_ids = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]
        expected_data = [1, 3, 4, 5, 6, 7, 8, 9, 0, 2]
        outputs = list(dataloader)
        worker_ids = [o[0].item() for o in outputs]
        data = [o[1].item() for o in outputs]
        self.assertEqual(expected_worker_ids, worker_ids)
        self.assertNotEqual(data, list(range(10)))
        self.assertEqual(expected_data, data)
    def test_in_order_iterable_ds(self):
        dataset = TestSlowIterableDataset(start=0, end=10)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            num_workers=2,
            in_order=True,
        )
        # Each worker iterates its own half of the range (0-4 / 5-9),
        # so in-order results interleave between workers.
        expected_worker_ids = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
        expected_data = [0, 5, 1, 6, 2, 7, 3, 8, 4, 9]
        outputs = list(dataloader)
        worker_ids = [o[0] for o in outputs]
        data = [o[1] for o in outputs]
        self.assertEqual(expected_worker_ids, worker_ids)
        self.assertEqual(expected_data, data)
    def test_out_of_order_iterable_ds(self):
        dataset = TestSlowIterableDataset(start=0, end=10)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            num_workers=2,
            in_order=False,
        )
        # worker 0 has [0, 1, 2, 3, 4], worker 1 has [5, 6, 7, 8, 9]
        # index 5 is slow, so expect all of worker 0 before worker 1
        expected_worker_ids = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
        expected_data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        outputs = list(dataloader)
        worker_ids = [o[0] for o in outputs]
        data = [o[1] for o in outputs]
        self.assertEqual(expected_worker_ids, worker_ids)
        self.assertEqual(sum(worker_ids), 5)
        self.assertNotEqual(data, [0, 5, 1, 6, 2, 7, 3, 8, 4, 9])
        self.assertEqual(expected_data, data)
# Generate per-device (CPU/CUDA/...) variants of the device-type tests.
instantiate_device_type_tests(TestDataLoaderDeviceType, globals())
# Standard PyTorch test-file entry point.
if __name__ == "__main__":
    run_tests()
relative_included | html | github | https://github.com/jekyll/jekyll | test/source/_posts/include_relative/rel_include.html |
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
#[cfg(feature = "alloc")]
use crate::preferences::extensions::unicode::enum_keyword;
#[cfg(feature = "alloc")]
// Invocation of the shared `enum_keyword!` macro: declares the
// `CurrencyFormatStyle` keyword with the key string "cf".
enum_keyword!(
/// A Unicode Currency Format Identifier defines a style for currency formatting.
///
/// The valid values are listed in [LDML](https://unicode.org/reports/tr35/#UnicodeCurrencyFormatIdentifier).
[Default]
CurrencyFormatStyle {
/// Negative numbers use the minusSign symbol (the default)
[default]
("standard" => Standard),
/// Negative numbers use parentheses or equivalent
("account" => Account)
}, "cf");
<?php
namespace Illuminate\Contracts\Concurrency;
use Closure;
use Illuminate\Support\Defer\DeferredCallback;
interface Driver
{
    /**
     * Run the given tasks concurrently and return an array containing the results.
     *
     * @param  \Closure|array  $tasks  a single task or an array of tasks to execute
     * @return array  the results of the tasks
     */
    public function run(Closure|array $tasks): array;
    /**
     * Defer the execution of the given tasks.
     *
     * @param  \Closure|array  $tasks  a single task or an array of tasks to defer
     * @return \Illuminate\Support\Defer\DeferredCallback  handle for the deferred work
     */
    public function defer(Closure|array $tasks): DeferredCallback;
}
{
"extends": [
"stylelint-config-twbs-bootstrap"
],
"reportInvalidScopeDisables": true,
"reportNeedlessDisables": true,
"overrides": [
{
"files": "**/*.scss",
"rules": {
"declaration-property-value-disallowed-list": {
"border": "none",
"outline": "none"
},
"function-disallowed-list": [
"calc",
"lighten",
"darken"
],
"property-disallowed-list": [
"border-radius",
"border-top-left-radius",
"border-top-right-radius",
"border-bottom-right-radius",
"border-bottom-left-radius",
"transition"
],
"scss/dollar-variable-default": [
true,
{
"ignore": "local"
}
],
"scss/selector-no-union-class-name": true
}
},
{
"files": "scss/**/*.{test,spec}.scss",
"rules": {
"scss/dollar-variable-default": null,
"declaration-no-important": null
}
},
{
"files": "site/**/*.scss",
"rules": {
"scss/dollar-variable-default": null
}
},
{
"files": "site/**/examples/**/*.css",
"rules": {
"comment-empty-line-before": null,
"property-no-vendor-prefix": null,
"selector-no-qualifying-type": null,
"value-no-vendor-prefix": null
}
}
]
} | json | github | https://github.com/twbs/bootstrap | .stylelintrc.json |
##########################################################################
#
# Copyright (c) 2012-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferRenderMan
# Register node- and per-plug help text for RenderManRender so it shows
# up in the UI's documentation tooltips.
Gaffer.Metadata.registerNode(
	GafferRenderMan.RenderManRender,
	"description",
	"""
	Performs offline batch rendering using a
	RenderMan renderer. This is done in two
	phases - first a RIB file is generated and
	then the renderer is invoked to render it in
	a separate process. Note though that the RIB
	file is lightweight, and contains a single
	procedural which will invoke Gaffer to generate
	the scene on demand at runtime. The RIB therefore
	requires very little disk space.
	""",
	plugs = {
		"mode" : [
			"description",
			"""
			When in "Render" mode, a RIB file is generated
			and then renderered by running the renderer on
			it. In "Generate RIB only" mode, only the RIB
			is generated, and a subsequent node could be used
			to post-process or launch the render in another
			way - a SystemCommand node may be useful for this.
			""",
			"preset:Render", "render",
			"preset:Generate RIB only", "generate",
			"nodule:type", "",
		],
		"ribFileName" : [
			"description",
			"""
			The name of the RIB file to be generated.
			""",
			"nodule:type", "",
		],
		"command" : [
			"description",
			"""
			The system command used to invoke the renderer - this
			can be edited to add any custom flags that are necessary,
			or to use a different renderer. The rib filename is
			automatically appended to the command before it is invoked.
			""",
			"nodule:type", "",
		],
	},
)
# Show the mode plug as a presets menu rather than a raw value field.
GafferUI.PlugValueWidget.registerCreator(
	GafferRenderMan.RenderManRender,
	"mode",
	GafferUI.PresetsPlugValueWidget,
)
# Show the ribFileName plug as a file chooser, with bookmarks taken from
# the "rib" category.
GafferUI.PlugValueWidget.registerCreator(
	GafferRenderMan.RenderManRender,
	"ribFileName",
	lambda plug : GafferUI.PathPlugValueWidget( plug,
		path = Gaffer.FileSystemPath( "/", filter = Gaffer.FileSystemPath.createStandardFilter() ),
		pathChooserDialogueKeywords = {
			"bookmarks" : GafferUI.Bookmarks.acquire( plug, category = "rib" ),
			"leaf" : True,
		},
	),
)
#!/usr/bin/env python
"""Execute the tests for splazers.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
    """Main entry point of the script.

    Builds the TestConf list (mirroring generate_outputs.sh), runs every
    configuration, prints a summary and returns True iff any test failed.
    """
    print 'Executing test for splazers'
    print '========================='
    print
    # Helper that resolves input (golden) and output (temp) file paths.
    ph = app_tests.TestPathHelper(
        source_base, binary_base,
        'core/apps/splazers/tests')  # tests dir
    # ============================================================
    # Auto-detect the binary path.
    # ============================================================
    path_to_program = app_tests.autolocateBinary(
      binary_base, 'core/apps/splazers', 'splazers')
    # ============================================================
    # Built TestConf list.
    # ============================================================
    # Build list with TestConf objects, analoguely to how the output
    # was generated in generate_outputs.sh.
    conf_list = []
    # ============================================================
    # First Section.
    # ============================================================
    # We run the following for all read lengths we have reads for.
    for rl in [100]:
        # Run with default options.
        conf = app_tests.TestConf(
            program=path_to_program,
            redir_stdout=ph.outFile('se-adeno-reads%d_1_default.stdout' % rl),
            args=[ph.inFile('adeno-genome.fa'),
                  ph.inFile('adeno-reads%d_1.fa' % rl),
                  '-o', ph.outFile('se-adeno-reads%d_1_default.out' % rl)],
            to_diff=[(ph.inFile('se-adeno-reads%d_1_default.out' % rl),
                      ph.outFile('se-adeno-reads%d_1_default.out' % rl)),
                     (ph.inFile('se-adeno-reads%d_1_default.stdout' % rl),
                      ph.outFile('se-adeno-reads%d_1_default.stdout' % rl))])
        conf_list.append(conf)
        # test different min. match lengths
        for mml in range(16,26):
            # test different numbers of prefix errors
            for ep in range(0,2):
                # test different numbers of suffix errors
                for es in range(0,3):
                    conf = app_tests.TestConf(
                        program=path_to_program,
                        redir_stdout=ph.outFile('se-adeno-reads%d_1_mml%d_ep%d_es%d.stdout' % (rl, mml, ep, es)),
                        args=['-sm', str(mml), '-ep', str(ep), '-es', str(es),
                              ph.inFile('adeno-genome.fa'),
                              ph.inFile('adeno-reads%d_1.fa' % rl),
                              '-o', ph.outFile('se-adeno-reads%d_1_mml%d_ep%d_es%d.out' % (rl, mml, ep, es))],
                        to_diff=[(ph.inFile('se-adeno-reads%d_1_mml%d_ep%d_es%d.out' % (rl, mml, ep, es)),
                                  ph.outFile('se-adeno-reads%d_1_mml%d_ep%d_es%d.out' % (rl, mml, ep, es))),
                                 (ph.inFile('se-adeno-reads%d_1_mml%d_ep%d_es%d.stdout' % (rl, mml, ep, es)),
                                  ph.outFile('se-adeno-reads%d_1_mml%d_ep%d_es%d.stdout' % (rl, mml, ep, es)))])
                    conf_list.append(conf)
                    # # Allow indels.
                    # conf = app_tests.TestConf(
                    #     program=path_to_program,
                    #     redir_stdout=ph.outFile('se-adeno-reads%d_1_mml%d-id_ep%d_es%d.stdout' % (rl, mml, ep, es)),
                    #     args=['-id', '-sm', str(mml), '-ep', str(ep), '-es', str(es),
                    #           ph.inFile('adeno-genome.fa'),
                    #           ph.inFile('adeno-reads%d_1.fa' % rl),
                    #           '-o', ph.outFile('se-adeno-reads%d_1_mml%d-id_ep%d_es%d.out' % (rl, mml, ep, es))],
                    #     to_diff=[(ph.inFile('se-adeno-reads%d_1_mml%d-id_ep%d_es%d.out' % (rl, mml, ep, es)),
                    #               ph.outFile('se-adeno-reads%d_1_mml%d-id_ep%d_es%d.out' % (rl, mml, ep, es))),
                    #              (ph.inFile('se-adeno-reads%d_1_mml%d-id_ep%d_es%d.stdout' % (rl, mml, ep, es)),
                    #               ph.outFile('se-adeno-reads%d_1_mml%d-id_ep%d_es%d.stdout' % (rl, mml, ep, es)))])
                    # conf_list.append(conf)
        # Compute forward/reverse matches only.
        for o in ['-r', '-f']:
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('se-adeno-reads%d_1_mml20%s_ep1_es1.stdout' % (rl, o)),
                args=[ o, '-id', '-sm', str(20), '-ep', str(1), '-es', str(1),
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      '-o', ph.outFile('se-adeno-reads%d_1_mml20%s_ep1_es1.out' % (rl, o))],
                to_diff=[(ph.inFile('se-adeno-reads%d_1_mml20%s_ep1_es1.out' % (rl, o)),
                          ph.outFile('se-adeno-reads%d_1_mml20%s_ep1_es1.out' % (rl, o))),
                         (ph.inFile('se-adeno-reads%d_1_mml20%s_ep1_es1.stdout' % (rl, o)),
                          ph.outFile('se-adeno-reads%d_1_mml20%s_ep1_es1.stdout' % (rl, o)))])
            conf_list.append(conf)
        # Compute with different identity rates.
        for i in range(90, 100):
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('se-adeno-reads%d_1_mml20-i%d_ep1_es1.stdout' % (rl, i)),
                args=['-i', str(i), '-sm', str(20), '-ep', str(1), '-es', str(1),
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      '-o', ph.outFile('se-adeno-reads%d_1_mml20-i%d_ep1_es1.out' % (rl, i))],
                to_diff=[(ph.inFile('se-adeno-reads%d_1_mml20-i%d_ep1_es1.out' % (rl, i)),
                          ph.outFile('se-adeno-reads%d_1_mml20-i%d_ep1_es1.out' % (rl, i))),
                         (ph.inFile('se-adeno-reads%d_1_mml20-i%d_ep1_es1.stdout' % (rl, i)),
                          ph.outFile('se-adeno-reads%d_1_mml20-i%d_ep1_es1.stdout' % (rl, i)))])
            conf_list.append(conf)
        # Compute with different output formats.
        for of in [3, 4]:
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('se-adeno-reads%d_1_mml20-of%d_ep1_es1.stdout' % (rl, of)),
                args=['-of', str(of), '-sm', str(20), '-ep', str(1), '-es', str(1),
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      '-o', ph.outFile('se-adeno-reads%d_1_mml20-of%d_ep1_es1.out' % (rl, of))],
                to_diff=[(ph.inFile('se-adeno-reads%d_1_mml20-of%d_ep1_es1.out' % (rl, of)),
                          ph.outFile('se-adeno-reads%d_1_mml20-of%d_ep1_es1.out' % (rl, of))),
                         (ph.inFile('se-adeno-reads%d_1_mml20-of%d_ep1_es1.stdout' % (rl, of)),
                          ph.outFile('se-adeno-reads%d_1_mml20-of%d_ep1_es1.stdout' % (rl, of)))])
            conf_list.append(conf)
        # Compute with different sort orders.
        for so in [0, 1]:
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('se-adeno-reads%d_1_mml20-so%d_ep1_es1.stdout' % (rl, so)),
                args=[ '-so', str(so), '-sm', str(20), '-ep', str(1), '-es', str(1),
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      '-o', ph.outFile('se-adeno-reads%d_1_mml20-so%d_ep1_es1.out' % (rl, so))],
                to_diff=[(ph.inFile('se-adeno-reads%d_1_mml20-so%d_ep1_es1.out' % (rl, so)),
                          ph.outFile('se-adeno-reads%d_1_mml20-so%d_ep1_es1.out' % (rl, so))),
                         (ph.inFile('se-adeno-reads%d_1_mml20-so%d_ep1_es1.stdout' % (rl, so)),
                          ph.outFile('se-adeno-reads%d_1_mml20-so%d_ep1_es1.stdout' % (rl, so)))])
            conf_list.append(conf)
        # Run in default anchored mode
        conf = app_tests.TestConf(
            program=path_to_program,
            redir_stdout=ph.outFile('anchored_adeno_example%d.stdout' % rl),
            args=['-an',
                  ph.inFile('adeno-genome.fa'),
                  ph.inFile('adeno-reads-pe.sam'), # % rl),
                  '-o', ph.outFile('anchored_adeno_example%d.out' % rl),
                  '-ll', '300', '-le', '90'],
            to_diff=[(ph.inFile('anchored_adeno_example%d.out' % rl),
                      ph.outFile('anchored_adeno_example%d.out' % rl)),
                     (ph.inFile('anchored_adeno_example%d.stdout' % rl),
                      ph.outFile('anchored_adeno_example%d.stdout' % rl))])
        conf_list.append(conf)
    # ============================================================
    # Execute the tests.
    # ============================================================
    # Run every configuration, diffing outputs against the golden files.
    failures = 0
    for conf in conf_list:
        res = app_tests.runTest(conf)
        # Output to the user.
        print ' '.join(['splazers'] + conf.args),
        if res:
            print 'OK'
        else:
            failures += 1
            print 'FAILED'
    # Cleanup.
    ph.deleteTempDir()
    print '=============================='
    print '     total tests: %d' % len(conf_list)
    print '    failed tests: %d' % failures
    print 'successful tests: %d' % (len(conf_list) - failures)
    print '=============================='
    # Compute and return return code.
    return failures != 0
# Hand control to the shared SeqAn app-test harness, which sets up the
# source/binary paths and invokes main(); exit status reflects failures.
if __name__ == '__main__':
    sys.exit(app_tests.main(main))
class Foo {
    companion object {
        // Identity helper exposed as a JVM static method.
        @JvmStatic
        fun foo(x: String): String = x
    }
}
import unittest
from test.support import (cpython_only, is_wasi, requires_limited_api, Py_DEBUG,
set_recursion_limit, skip_on_s390x,
skip_emscripten_stack_overflow,
skip_wasi_stack_overflow, skip_if_sanitizer,
import_helper)
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import _testlimitedcapi
except ImportError:
_testlimitedcapi = None
try:
import _testinternalcapi
except ImportError:
_testinternalcapi = None
import struct
import collections
import itertools
import gc
import contextlib
import types
class BadStr(str):
    """str subclass that compares equal to every object while hashing
    differently from the plain string it wraps."""

    def __eq__(self, other):
        # Claims equality with absolutely anything.
        return True

    def __hash__(self):
        # Guaranteed different hash
        return super().__hash__() ^ 3
class FunctionCalls(unittest.TestCase):
    """Behavioral checks for ordinary Python-level calls."""

    def test_kwargs_order(self):
        # bpo-34320: **kwargs should preserve order of passed OrderedDict
        ordered = collections.OrderedDict([('a', 1), ('b', 2)])
        ordered.move_to_end('a')
        expected = list(ordered.items())

        def capture(**kw):
            return kw

        result = capture(**ordered)
        self.assertIsInstance(result, dict)
        self.assertEqual(list(result.items()), expected)

    def test_frames_are_popped_after_failed_calls(self):
        # GH-93252: stuff blows up if we don't pop the new frame after
        # recovering from failed calls:
        def no_args():
            pass

        class C:
            def m(self):
                pass

        for target in (no_args, C.m, [].__len__):
            for _ in range(1000):
                try:
                    target(None)
                except TypeError:
                    pass
        # BOOM!
@cpython_only
class CFunctionCallsErrorMessages(unittest.TestCase):
    """Check the exact TypeError messages raised when C-implemented
    callables receive the wrong number of arguments or unsupported
    keyword arguments.  Several tests accept two regex alternatives to
    cover different argument-parsing code paths."""
    def test_varargs0(self):
        msg = r"__contains__\(\) takes exactly one argument \(0 given\)"
        self.assertRaisesRegex(TypeError, msg, {}.__contains__)
    def test_varargs2(self):
        msg = r"__contains__\(\) takes exactly one argument \(2 given\)"
        self.assertRaisesRegex(TypeError, msg, {}.__contains__, 0, 1)
    def test_varargs3(self):
        msg = r"^from_bytes\(\) takes at most 2 positional arguments \(3 given\)"
        self.assertRaisesRegex(TypeError, msg, int.from_bytes, b'a', 'little', False)
    def test_varargs1min(self):
        msg = (r"get\(\) takes at least 1 argument \(0 given\)|"
               r"get expected at least 1 argument, got 0")
        self.assertRaisesRegex(TypeError, msg, {}.get)
        msg = r"expected 1 argument, got 0"
        self.assertRaisesRegex(TypeError, msg, {}.__delattr__)
    def test_varargs2min(self):
        msg = r"getattr expected at least 2 arguments, got 0"
        self.assertRaisesRegex(TypeError, msg, getattr)
    def test_varargs1max(self):
        msg = (r"input\(\) takes at most 1 argument \(2 given\)|"
               r"input expected at most 1 argument, got 2")
        self.assertRaisesRegex(TypeError, msg, input, 1, 2)
    def test_varargs2max(self):
        msg = (r"get\(\) takes at most 2 arguments \(3 given\)|"
               r"get expected at most 2 arguments, got 3")
        self.assertRaisesRegex(TypeError, msg, {}.get, 1, 2, 3)
    # Callables that accept no keyword arguments at all.
    def test_varargs1_kw(self):
        msg = r"__contains__\(\) takes no keyword arguments"
        self.assertRaisesRegex(TypeError, msg, {}.__contains__, x=2)
    def test_varargs2_kw(self):
        msg = r"__contains__\(\) takes no keyword arguments"
        self.assertRaisesRegex(TypeError, msg, {}.__contains__, x=2, y=2)
    def test_varargs3_kw(self):
        msg = r"bool\(\) takes no keyword arguments"
        self.assertRaisesRegex(TypeError, msg, bool, x=2)
    def test_varargs4_kw(self):
        msg = r"^(list[.])?index\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, [].index, x=2)
    def test_varargs5_kw(self):
        msg = r"^hasattr\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, hasattr, x=2)
    def test_varargs6_kw(self):
        msg = r"^getattr\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, getattr, x=2)
    def test_varargs7_kw(self):
        msg = r"^next\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, next, x=2)
    def test_varargs8_kw(self):
        msg = r"^_struct[.]pack\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, struct.pack, x=2)
    def test_varargs9_kw(self):
        msg = r"^_struct[.]pack_into\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, struct.pack_into, x=2)
    def test_varargs10_kw(self):
        msg = r"^deque[.]index\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, collections.deque().index, x=2)
    def test_varargs11_kw(self):
        msg = r"^Struct[.]pack\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, struct.Struct.pack, struct.Struct(""), x=2)
    def test_varargs12_kw(self):
        msg = r"^staticmethod\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, staticmethod, func=id)
    def test_varargs13_kw(self):
        msg = r"^classmethod\(\) takes no keyword arguments$"
        self.assertRaisesRegex(TypeError, msg, classmethod, func=id)
    # Callables that accept some, but not all, keyword arguments.
    def test_varargs14_kw(self):
        msg = r"^product\(\) takes at most 1 keyword argument \(2 given\)$"
        self.assertRaisesRegex(TypeError, msg,
                               itertools.product, 0, repeat=1, foo=2)
    def test_varargs15_kw(self):
        msg = r"^ImportError\(\) takes at most 3 keyword arguments \(4 given\)$"
        self.assertRaisesRegex(TypeError, msg,
                               ImportError, 0, name=1, path=2, name_from=3, foo=3)
    def test_varargs16_kw(self):
        msg = r"^min\(\) takes at most 2 keyword arguments \(3 given\)$"
        self.assertRaisesRegex(TypeError, msg,
                               min, 0, default=1, key=2, foo=3)
    def test_varargs17_kw(self):
        msg = r"print\(\) got an unexpected keyword argument 'foo'$"
        self.assertRaisesRegex(TypeError, msg,
                               print, 0, sep=1, end=2, file=3, flush=4, foo=5)
    def test_varargs18_kw(self):
        # _PyArg_UnpackKeywords() with varpos
        msg = r"invalid keyword argument for print\(\)$"
        with self.assertRaisesRegex(TypeError, msg):
            print(0, 1, **{BadStr('foo'): ','})
    def test_varargs19_kw(self):
        # _PyArg_UnpackKeywords()
        msg = r"invalid keyword argument for round\(\)$"
        with self.assertRaisesRegex(TypeError, msg):
            round(1.75, **{BadStr('foo'): 1})
    def test_oldargs0_1(self):
        msg = r"keys\(\) takes no arguments \(1 given\)"
        self.assertRaisesRegex(TypeError, msg, {}.keys, 0)
    def test_oldargs0_2(self):
        msg = r"keys\(\) takes no arguments \(2 given\)"
        self.assertRaisesRegex(TypeError, msg, {}.keys, 0, 1)
    def test_oldargs0_1_kw(self):
        msg = r"keys\(\) takes no keyword arguments"
        self.assertRaisesRegex(TypeError, msg, {}.keys, x=2)
    def test_oldargs0_2_kw(self):
        msg = r"keys\(\) takes no keyword arguments"
        self.assertRaisesRegex(TypeError, msg, {}.keys, x=2, y=2)
    def test_oldargs1_0(self):
        msg = r"count\(\) takes exactly one argument \(0 given\)"
        self.assertRaisesRegex(TypeError, msg, [].count)
    def test_oldargs1_2(self):
        msg = r"count\(\) takes exactly one argument \(2 given\)"
        self.assertRaisesRegex(TypeError, msg, [].count, 1, 2)
    def test_oldargs1_0_kw(self):
        msg = r"count\(\) takes no keyword arguments"
        self.assertRaisesRegex(TypeError, msg, [].count, x=2)
    def test_oldargs1_1_kw(self):
        msg = r"count\(\) takes no keyword arguments"
        self.assertRaisesRegex(TypeError, msg, [].count, {}, x=2)
    def test_oldargs1_2_kw(self):
        msg = r"count\(\) takes no keyword arguments"
        self.assertRaisesRegex(TypeError, msg, [].count, x=2, y=2)
    # "not callable" messages, including the did-you-mean suggestion for
    # modules exposing a same-named callable attribute.
    def test_object_not_callable(self):
        msg = r"^'object' object is not callable$"
        self.assertRaisesRegex(TypeError, msg, object())
    def test_module_not_callable_no_suggestion_0(self):
        msg = r"^'module' object is not callable$"
        self.assertRaisesRegex(TypeError, msg, types.ModuleType("mod"))
    def test_module_not_callable_no_suggestion_1(self):
        msg = r"^'module' object is not callable$"
        mod = types.ModuleType("mod")
        mod.mod = 42
        self.assertRaisesRegex(TypeError, msg, mod)
    def test_module_not_callable_no_suggestion_2(self):
        msg = r"^'module' object is not callable$"
        mod = types.ModuleType("mod")
        del mod.__name__
        self.assertRaisesRegex(TypeError, msg, mod)
    def test_module_not_callable_no_suggestion_3(self):
        msg = r"^'module' object is not callable$"
        mod = types.ModuleType("mod")
        mod.__name__ = 42
        self.assertRaisesRegex(TypeError, msg, mod)
    def test_module_not_callable_suggestion(self):
        msg = r"^'module' object is not callable\. Did you mean: 'mod\.mod\(\.\.\.\)'\?$"
        mod = types.ModuleType("mod")
        mod.mod = lambda: ...
        self.assertRaisesRegex(TypeError, msg, mod)
@unittest.skipIf(_testcapi is None, "requires _testcapi")
class TestCallingConventions(unittest.TestCase):
    """Test calling using various C calling conventions (METH_*) from Python

    Subclasses test several kinds of functions (module-level, methods,
    class methods static methods) using these attributes:
    obj: the object that contains tested functions (as attributes)
    expected_self: expected "self" argument to the C function

    The base class tests module-level functions.
    """
    def setUp(self):
        # For module-level functions, the module object is the "self"
        # that the C function receives.
        self.obj = self.expected_self = _testcapi
    def test_varargs(self):
        self.assertEqual(
            self.obj.meth_varargs(1, 2, 3),
            (self.expected_self, (1, 2, 3)),
        )
    def test_varargs_ext(self):
        # Same call, but through *-unpacking (CALL_FUNCTION_EX path).
        self.assertEqual(
            self.obj.meth_varargs(*(1, 2, 3)),
            (self.expected_self, (1, 2, 3)),
        )
    def test_varargs_error_kw(self):
        msg = r"meth_varargs\(\) takes no keyword arguments"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_varargs(k=1),
        )
    def test_varargs_keywords(self):
        self.assertEqual(
            self.obj.meth_varargs_keywords(1, 2, a=3, b=4),
            (self.expected_self, (1, 2), {'a': 3, 'b': 4})
        )
    def test_varargs_keywords_ext(self):
        self.assertEqual(
            self.obj.meth_varargs_keywords(*[1, 2], **{'a': 3, 'b': 4}),
            (self.expected_self, (1, 2), {'a': 3, 'b': 4})
        )
    def test_o(self):
        self.assertEqual(self.obj.meth_o(1), (self.expected_self, 1))
    def test_o_ext(self):
        self.assertEqual(self.obj.meth_o(*[1]), (self.expected_self, 1))
    def test_o_error_no_arg(self):
        msg = r"meth_o\(\) takes exactly one argument \(0 given\)"
        self.assertRaisesRegex(TypeError, msg, self.obj.meth_o)
    def test_o_error_two_args(self):
        msg = r"meth_o\(\) takes exactly one argument \(2 given\)"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_o(1, 2),
        )
    def test_o_error_ext(self):
        msg = r"meth_o\(\) takes exactly one argument \(3 given\)"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_o(*(1, 2, 3)),
        )
    def test_o_error_kw(self):
        msg = r"meth_o\(\) takes no keyword arguments"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_o(k=1),
        )
    def test_o_error_arg_kw(self):
        msg = r"meth_o\(\) takes no keyword arguments"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_o(k=1),
        )
    def test_noargs(self):
        self.assertEqual(self.obj.meth_noargs(), self.expected_self)
    def test_noargs_ext(self):
        self.assertEqual(self.obj.meth_noargs(*[]), self.expected_self)
    def test_noargs_error_arg(self):
        msg = r"meth_noargs\(\) takes no arguments \(1 given\)"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_noargs(1),
        )
    def test_noargs_error_arg2(self):
        msg = r"meth_noargs\(\) takes no arguments \(2 given\)"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_noargs(1, 2),
        )
    def test_noargs_error_ext(self):
        msg = r"meth_noargs\(\) takes no arguments \(3 given\)"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_noargs(*(1, 2, 3)),
        )
    def test_noargs_error_kw(self):
        msg = r"meth_noargs\(\) takes no keyword arguments"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_noargs(k=1),
        )
    def test_fastcall(self):
        self.assertEqual(
            self.obj.meth_fastcall(1, 2, 3),
            (self.expected_self, (1, 2, 3)),
        )
    def test_fastcall_ext(self):
        self.assertEqual(
            self.obj.meth_fastcall(*(1, 2, 3)),
            (self.expected_self, (1, 2, 3)),
        )
    def test_fastcall_error_kw(self):
        msg = r"meth_fastcall\(\) takes no keyword arguments"
        self.assertRaisesRegex(
            TypeError, msg, lambda: self.obj.meth_fastcall(k=1),
        )
    def test_fastcall_keywords(self):
        self.assertEqual(
            self.obj.meth_fastcall_keywords(1, 2, a=3, b=4),
            (self.expected_self, (1, 2), {'a': 3, 'b': 4})
        )
    def test_fastcall_keywords_ext(self):
        self.assertEqual(
            self.obj.meth_fastcall_keywords(*(1, 2), **{'a': 3, 'b': 4}),
            (self.expected_self, (1, 2), {'a': 3, 'b': 4})
        )
class TestCallingConventionsInstance(TestCallingConventions):
    """Test calling instance methods using various calling conventions"""
    def setUp(self):
        # Bound methods: the instance itself is the expected "self".
        self.obj = self.expected_self = _testcapi.MethInstance()
class TestCallingConventionsClass(TestCallingConventions):
    """Test calling class methods using various calling conventions"""
    def setUp(self):
        self.obj = self.expected_self = _testcapi.MethClass
class TestCallingConventionsClassInstance(TestCallingConventions):
    """Test calling class methods on instance"""
    def setUp(self):
        # Classmethods invoked via an instance still receive the class.
        self.obj = _testcapi.MethClass()
        self.expected_self = _testcapi.MethClass
class TestCallingConventionsStatic(TestCallingConventions):
    """Test calling static methods using various calling conventions"""
    def setUp(self):
        # Static methods receive no receiver at all.
        self.obj = _testcapi.MethStatic()
        self.expected_self = None
def pyfunc(arg1, arg2):
    """Two-argument Python function: returns both arguments as a list."""
    return [arg1, arg2]
def pyfunc_noarg():
    """Zero-argument Python function returning a fixed marker string."""
    return "noarg"
class PythonClass:
    """Plain Python class exposing one callable of each method kind,
    used as fixtures in the fast-call tables below."""

    def method(self, arg1, arg2):
        """Return the two arguments packed into a list."""
        return [arg1, arg2]

    def method_noarg(self):
        """Return a fixed marker string."""
        return "noarg"

    @classmethod
    def class_method(cls):
        """Return a fixed marker identifying the classmethod path."""
        return "classmethod"

    @staticmethod
    def static_method():
        """Return a fixed marker identifying the staticmethod path."""
        return "staticmethod"
# Shared fixtures for the call tables below: one reusable instance, and
# a sentinel meaning "kwargs may be either NULL (None) or an empty dict".
PYTHON_INSTANCE = PythonClass()
NULL_OR_EMPTY = object()
class FastCallTests(unittest.TestCase):
"""Test calling using various callables from C
"""
# Test calls with positional arguments
CALLS_POSARGS = [
# (func, args: tuple, result)
# Python function with 2 arguments
(pyfunc, (1, 2), [1, 2]),
# Python function without argument
(pyfunc_noarg, (), "noarg"),
# Python class methods
(PythonClass.class_method, (), "classmethod"),
(PythonClass.static_method, (), "staticmethod"),
# Python instance methods
(PYTHON_INSTANCE.method, (1, 2), [1, 2]),
(PYTHON_INSTANCE.method_noarg, (), "noarg"),
(PYTHON_INSTANCE.class_method, (), "classmethod"),
(PYTHON_INSTANCE.static_method, (), "staticmethod"),
# C callables are added later
]
# Test calls with positional and keyword arguments
CALLS_KWARGS = [
# (func, args: tuple, kwargs: dict, result)
# Python function with 2 arguments
(pyfunc, (1,), {'arg2': 2}, [1, 2]),
(pyfunc, (), {'arg1': 1, 'arg2': 2}, [1, 2]),
# Python instance methods
(PYTHON_INSTANCE.method, (1,), {'arg2': 2}, [1, 2]),
(PYTHON_INSTANCE.method, (), {'arg1': 1, 'arg2': 2}, [1, 2]),
# C callables are added later
]
# Add all the calling conventions and variants of C callables
if _testcapi:
_instance = _testcapi.MethInstance()
for obj, expected_self in (
(_testcapi, _testcapi), # module-level function
(_instance, _instance), # bound method
(_testcapi.MethClass, _testcapi.MethClass), # class method on class
(_testcapi.MethClass(), _testcapi.MethClass), # class method on inst.
(_testcapi.MethStatic, None), # static method
):
CALLS_POSARGS.extend([
(obj.meth_varargs, (1, 2), (expected_self, (1, 2))),
(obj.meth_varargs_keywords,
(1, 2), (expected_self, (1, 2), NULL_OR_EMPTY)),
(obj.meth_fastcall, (1, 2), (expected_self, (1, 2))),
(obj.meth_fastcall, (), (expected_self, ())),
(obj.meth_fastcall_keywords,
(1, 2), (expected_self, (1, 2), NULL_OR_EMPTY)),
(obj.meth_fastcall_keywords,
(), (expected_self, (), NULL_OR_EMPTY)),
(obj.meth_noargs, (), expected_self),
(obj.meth_o, (123, ), (expected_self, 123)),
])
CALLS_KWARGS.extend([
(obj.meth_varargs_keywords,
(1, 2), {'x': 'y'}, (expected_self, (1, 2), {'x': 'y'})),
(obj.meth_varargs_keywords,
(), {'x': 'y'}, (expected_self, (), {'x': 'y'})),
(obj.meth_varargs_keywords,
(1, 2), {}, (expected_self, (1, 2), NULL_OR_EMPTY)),
(obj.meth_fastcall_keywords,
(1, 2), {'x': 'y'}, (expected_self, (1, 2), {'x': 'y'})),
(obj.meth_fastcall_keywords,
(), {'x': 'y'}, (expected_self, (), {'x': 'y'})),
(obj.meth_fastcall_keywords,
(1, 2), {}, (expected_self, (1, 2), NULL_OR_EMPTY)),
])
def check_result(self, result, expected):
if isinstance(expected, tuple) and expected[-1] is NULL_OR_EMPTY:
if result[-1] in ({}, None):
expected = (*expected[:-1], result[-1])
self.assertEqual(result, expected)
@unittest.skipIf(_testcapi is None, "requires _testcapi")
def test_vectorcall_dict(self):
# Test PyObject_VectorcallDict()
for func, args, expected in self.CALLS_POSARGS:
with self.subTest(func=func, args=args):
# kwargs=NULL
result = _testcapi.pyobject_fastcalldict(func, args, None)
self.check_result(result, expected)
if not args:
# args=NULL, nargs=0, kwargs=NULL
result = _testcapi.pyobject_fastcalldict(func, None, None)
self.check_result(result, expected)
for func, args, kwargs, expected in self.CALLS_KWARGS:
with self.subTest(func=func, args=args, kwargs=kwargs):
result = _testcapi.pyobject_fastcalldict(func, args, kwargs)
self.check_result(result, expected)
    @unittest.skipIf(_testcapi is None, "requires _testcapi")
    def test_vectorcall(self):
        # Test PyObject_Vectorcall(): keyword names are passed as a tuple
        # (kwnames) alongside a flat array of positional + keyword values.
        for func, args, expected in self.CALLS_POSARGS:
            with self.subTest(func=func, args=args):
                # kwnames=NULL
                result = _testcapi.pyobject_vectorcall(func, args, None)
                self.check_result(result, expected)
                # kwnames=()
                result = _testcapi.pyobject_vectorcall(func, args, ())
                self.check_result(result, expected)
                if not args:
                    # kwnames=NULL
                    result = _testcapi.pyobject_vectorcall(func, None, None)
                    self.check_result(result, expected)
                    # kwnames=()
                    result = _testcapi.pyobject_vectorcall(func, None, ())
                    self.check_result(result, expected)
        for func, args, kwargs, expected in self.CALLS_KWARGS:
            with self.subTest(func=func, args=args, kwargs=kwargs):
                # Flatten kwargs into (args + values, kwnames) vectorcall form.
                kwnames = tuple(kwargs.keys())
                args = args + tuple(kwargs.values())
                result = _testcapi.pyobject_vectorcall(func, args, kwnames)
                self.check_result(result, expected)
    def test_fastcall_clearing_dict(self):
        # Test bpo-36907: the point of the test is just checking that this
        # does not crash.
        class IntWithDict:
            # __slots__ keeps the instance small; "kwargs" holds the dict
            # that gets mutated mid-call.
            __slots__ = ["kwargs"]
            def __init__(self, **kwargs):
                self.kwargs = kwargs
            def __index__(self):
                # Clearing the dict (and forcing a GC pass) while the
                # callee is still consuming it used to crash.
                self.kwargs.clear()
                gc.collect()
                return 0
        x = IntWithDict(optimize=IntWithDict())
        # We test the argument handling of "compile" here, the compilation
        # itself is not relevant. When we pass flags=x below, x.__index__() is
        # called, which changes the keywords dict.
        compile("pass", "", "exec", x, **x.kwargs)
# Type-flag bit positions mirrored from CPython's Include/object.h; used
# below to inspect type.__flags__ directly.
Py_TPFLAGS_HAVE_VECTORCALL = 1 << 11
Py_TPFLAGS_METHOD_DESCRIPTOR = 1 << 17
# Module-level helper used as a call target by the PEP 590 tests below;
# returns its single positional argument (named "self" because it is also
# exercised as an unbound method).
def testfunction(self):
    """some doc"""
    return self
# Like testfunction, but additionally requires a keyword-only argument,
# so callers must exercise the kwnames/kwargs path.
def testfunction_kw(self, *, kw):
    """some doc"""
    return self
@unittest.skipIf(_testcapi is None, "requires _testcapi")
class TestPEP590(unittest.TestCase):
    """Tests for the vectorcall calling convention (PEP 590)."""
    def test_method_descriptor_flag(self):
        # Py_TPFLAGS_METHOD_DESCRIPTOR should be set on function-like
        # types and clear on ordinary callables such as type(repr).
        import functools
        cached = functools.lru_cache(1)(testfunction)
        self.assertFalse(type(repr).__flags__ & Py_TPFLAGS_METHOD_DESCRIPTOR)
        self.assertTrue(type(list.append).__flags__ & Py_TPFLAGS_METHOD_DESCRIPTOR)
        self.assertTrue(type(list.__add__).__flags__ & Py_TPFLAGS_METHOD_DESCRIPTOR)
        self.assertTrue(type(testfunction).__flags__ & Py_TPFLAGS_METHOD_DESCRIPTOR)
        self.assertTrue(type(cached).__flags__ & Py_TPFLAGS_METHOD_DESCRIPTOR)
        self.assertTrue(_testcapi.MethodDescriptorBase.__flags__ & Py_TPFLAGS_METHOD_DESCRIPTOR)
        self.assertTrue(_testcapi.MethodDescriptorDerived.__flags__ & Py_TPFLAGS_METHOD_DESCRIPTOR)
        self.assertFalse(_testcapi.MethodDescriptorNopGet.__flags__ & Py_TPFLAGS_METHOD_DESCRIPTOR)
        # Mutable heap types should not inherit Py_TPFLAGS_METHOD_DESCRIPTOR
        class MethodDescriptorHeap(_testcapi.MethodDescriptorBase):
            pass
        self.assertFalse(MethodDescriptorHeap.__flags__ & Py_TPFLAGS_METHOD_DESCRIPTOR)
    def test_vectorcall_flag(self):
        # Py_TPFLAGS_HAVE_VECTORCALL advertises vectorcall support.
        self.assertTrue(_testcapi.MethodDescriptorBase.__flags__ & Py_TPFLAGS_HAVE_VECTORCALL)
        self.assertTrue(_testcapi.MethodDescriptorDerived.__flags__ & Py_TPFLAGS_HAVE_VECTORCALL)
        self.assertFalse(_testcapi.MethodDescriptorNopGet.__flags__ & Py_TPFLAGS_HAVE_VECTORCALL)
        self.assertTrue(_testcapi.MethodDescriptor2.__flags__ & Py_TPFLAGS_HAVE_VECTORCALL)
        # Mutable heap types should inherit Py_TPFLAGS_HAVE_VECTORCALL,
        # but should lose it when __call__ is overridden
        class MethodDescriptorHeap(_testcapi.MethodDescriptorBase):
            pass
        self.assertTrue(MethodDescriptorHeap.__flags__ & Py_TPFLAGS_HAVE_VECTORCALL)
        MethodDescriptorHeap.__call__ = print
        self.assertFalse(MethodDescriptorHeap.__flags__ & Py_TPFLAGS_HAVE_VECTORCALL)
        # Mutable heap types should not inherit Py_TPFLAGS_HAVE_VECTORCALL if
        # they define __call__ directly
        class MethodDescriptorHeap(_testcapi.MethodDescriptorBase):
            def __call__(self):
                pass
        self.assertFalse(MethodDescriptorHeap.__flags__ & Py_TPFLAGS_HAVE_VECTORCALL)
    def test_vectorcall_override(self):
        # Check that tp_call can correctly override vectorcall.
        # MethodDescriptorNopGet implements tp_call but it inherits from
        # MethodDescriptorBase, which implements vectorcall. Since
        # MethodDescriptorNopGet returns the args tuple when called, we check
        # additionally that no new tuple is created for this call.
        args = tuple(range(5))
        f = _testcapi.MethodDescriptorNopGet()
        self.assertIs(f(*args), args)
    def test_vectorcall_override_on_mutable_class(self):
        """Setting __call__ should disable vectorcall"""
        TestType = _testcapi.make_vectorcall_class()
        instance = TestType()
        self.assertEqual(instance(), "tp_call")
        instance.set_vectorcall(TestType)
        self.assertEqual(instance(), "vectorcall") # assume vectorcall is used
        TestType.__call__ = lambda self: "custom"
        self.assertEqual(instance(), "custom")
    def test_vectorcall_override_with_subclass(self):
        """Setting __call__ on a superclass should disable vectorcall"""
        SuperType = _testcapi.make_vectorcall_class()
        class DerivedType(SuperType):
            pass
        instance = DerivedType()
        # Derived types with its own vectorcall should be unaffected
        UnaffectedType1 = _testcapi.make_vectorcall_class(DerivedType)
        UnaffectedType2 = _testcapi.make_vectorcall_class(SuperType)
        # Aside: Quickly check that the C helper actually made derived types
        self.assertIsSubclass(UnaffectedType1, DerivedType)
        self.assertIsSubclass(UnaffectedType2, SuperType)
        # Initial state: tp_call
        self.assertEqual(instance(), "tp_call")
        self.assertEqual(_testcapi.has_vectorcall_flag(SuperType), True)
        self.assertEqual(_testcapi.has_vectorcall_flag(DerivedType), True)
        self.assertEqual(_testcapi.has_vectorcall_flag(UnaffectedType1), True)
        self.assertEqual(_testcapi.has_vectorcall_flag(UnaffectedType2), True)
        # Setting the vectorcall function
        instance.set_vectorcall(SuperType)
        self.assertEqual(instance(), "vectorcall")
        self.assertEqual(_testcapi.has_vectorcall_flag(SuperType), True)
        self.assertEqual(_testcapi.has_vectorcall_flag(DerivedType), True)
        self.assertEqual(_testcapi.has_vectorcall_flag(UnaffectedType1), True)
        self.assertEqual(_testcapi.has_vectorcall_flag(UnaffectedType2), True)
        # Setting __call__ should remove vectorcall from all subclasses
        SuperType.__call__ = lambda self: "custom"
        self.assertEqual(instance(), "custom")
        self.assertEqual(_testcapi.has_vectorcall_flag(SuperType), False)
        self.assertEqual(_testcapi.has_vectorcall_flag(DerivedType), False)
        self.assertEqual(_testcapi.has_vectorcall_flag(UnaffectedType1), True)
        self.assertEqual(_testcapi.has_vectorcall_flag(UnaffectedType2), True)
    def test_vectorcall(self):
        # Test a bunch of different ways to call objects:
        # 1. vectorcall using PyVectorcall_Call()
        #    (only for objects that support vectorcall directly)
        # 2. normal call
        # 3. vectorcall using PyObject_Vectorcall()
        # 4. call as bound method
        # 5. call using functools.partial
        # A list of (function, args, kwargs, result) calls to test
        calls = [(len, (range(42),), {}, 42),
                 (list.append, ([], 0), {}, None),
                 ([].append, (0,), {}, None),
                 (sum, ([36],), {"start":6}, 42),
                 (testfunction, (42,), {}, 42),
                 (testfunction_kw, (42,), {"kw":None}, 42),
                 (_testcapi.MethodDescriptorBase(), (0,), {}, True),
                 (_testcapi.MethodDescriptorDerived(), (0,), {}, True),
                 (_testcapi.MethodDescriptor2(), (0,), {}, False)]
        from _testcapi import pyobject_vectorcall, pyvectorcall_call
        from types import MethodType
        from functools import partial
        def vectorcall(func, args, kwargs):
            # Convert an (args, kwargs) call into vectorcall form.
            args = *args, *kwargs.values()
            kwnames = tuple(kwargs)
            return pyobject_vectorcall(func, args, kwnames)
        for (func, args, kwargs, expected) in calls:
            with self.subTest(str(func)):
                if not kwargs:
                    self.assertEqual(expected, pyvectorcall_call(func, args))
                self.assertEqual(expected, pyvectorcall_call(func, args, kwargs))
        # Add derived classes (which do not support vectorcall directly,
        # but do support all other ways of calling).
        class MethodDescriptorHeap(_testcapi.MethodDescriptorBase):
            pass
        class MethodDescriptorOverridden(_testcapi.MethodDescriptorBase):
            def __call__(self, n):
                return 'new'
        class SuperBase:
            def __call__(self, *args):
                return super().__call__(*args)
        class MethodDescriptorSuper(SuperBase, _testcapi.MethodDescriptorBase):
            def __call__(self, *args):
                return super().__call__(*args)
        calls += [
            (dict.update, ({},), {"key":True}, None),
            ({}.update, ({},), {"key":True}, None),
            (MethodDescriptorHeap(), (0,), {}, True),
            (MethodDescriptorOverridden(), (0,), {}, 'new'),
            (MethodDescriptorSuper(), (0,), {}, True),
        ]
        for (func, args, kwargs, expected) in calls:
            with self.subTest(str(func)):
                args1 = args[1:]
                meth = MethodType(func, args[0])
                wrapped = partial(func)
                if not kwargs:
                    self.assertEqual(expected, func(*args))
                    self.assertEqual(expected, pyobject_vectorcall(func, args, None))
                    self.assertEqual(expected, meth(*args1))
                    self.assertEqual(expected, wrapped(*args))
                self.assertEqual(expected, func(*args, **kwargs))
                self.assertEqual(expected, vectorcall(func, args, kwargs))
                self.assertEqual(expected, meth(*args1, **kwargs))
                self.assertEqual(expected, wrapped(*args, **kwargs))
    def test_setvectorcall(self):
        # function_setvectorcall replaces the function's vectorcall slot;
        # afterwards all calls should return "overridden".
        from _testcapi import function_setvectorcall
        _testinternalcapi = import_helper.import_module("_testinternalcapi")
        def f(num): return num + 1
        assert_equal = self.assertEqual
        num = 10
        assert_equal(11, f(num))
        function_setvectorcall(f)
        # Repeat past the specialization threshold so the specializing
        # interpreter also sees the overridden vectorcall.
        for _ in range(_testinternalcapi.SPECIALIZATION_THRESHOLD):
            assert_equal("overridden", f(num))
    def test_setvectorcall_load_attr_specialization_skip(self):
        from _testcapi import function_setvectorcall
        _testinternalcapi = import_helper.import_module("_testinternalcapi")
        class X:
            def __getattribute__(self, attr):
                return attr
        assert_equal = self.assertEqual
        x = X()
        assert_equal("a", x.a)
        function_setvectorcall(X.__getattribute__)
        # make sure specialization doesn't trigger
        # when vectorcall is overridden
        for _ in range(_testinternalcapi.SPECIALIZATION_THRESHOLD):
            assert_equal("overridden", x.a)
    def test_setvectorcall_load_attr_specialization_deopt(self):
        from _testcapi import function_setvectorcall
        _testinternalcapi = import_helper.import_module("_testinternalcapi")
        class X:
            def __getattribute__(self, attr):
                return attr
        def get_a(x):
            return x.a
        assert_equal = self.assertEqual
        x = X()
        # trigger LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN specialization
        for _ in range(_testinternalcapi.SPECIALIZATION_THRESHOLD):
            assert_equal("a", get_a(x))
        function_setvectorcall(X.__getattribute__)
        # make sure specialized LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN
        # gets deopted due to overridden vectorcall
        for _ in range(_testinternalcapi.SPECIALIZATION_THRESHOLD):
            assert_equal("overridden", get_a(x))
    @requires_limited_api
    def test_vectorcall_limited_incoming(self):
        # Limited-API extension types can receive vectorcall calls.
        from _testcapi import pyobject_vectorcall
        for cls in (_testlimitedcapi.LimitedVectorCallClass,
                    _testlimitedcapi.LimitedRelativeVectorCallClass):
            with self.subTest(cls=cls):
                obj = cls()
                self.assertEqual(
                    pyobject_vectorcall(obj, (), ()),
                    "vectorcall called")
    @requires_limited_api
    def test_vectorcall_limited_outgoing(self):
        # Limited-API code can make vectorcall calls into Python; the C
        # helper calls f("foo", baz="bar").
        from _testlimitedcapi import call_vectorcall
        args_captured = []
        kwargs_captured = []
        def f(*args, **kwargs):
            args_captured.append(args)
            kwargs_captured.append(kwargs)
            return "success"
        self.assertEqual(call_vectorcall(f), "success")
        self.assertEqual(args_captured, [("foo",)])
        self.assertEqual(kwargs_captured, [{"baz": "bar"}])
    @requires_limited_api
    def test_vectorcall_limited_outgoing_method(self):
        # Same as above, but via the method-call helper.
        from _testlimitedcapi import call_vectorcall_method
        args_captured = []
        kwargs_captured = []
        class TestInstance:
            def f(self, *args, **kwargs):
                args_captured.append(args)
                kwargs_captured.append(kwargs)
                return "success"
        self.assertEqual(call_vectorcall_method(TestInstance()), "success")
        self.assertEqual(args_captured, [("foo",)])
        self.assertEqual(kwargs_captured, [{"baz": "bar"}])
# Fixture class for the error-message tests below.  The class name, method
# names and parameter names appear verbatim in the expected TypeError
# messages, so they must not be changed.
class A:
    """Call-target fixture whose qualified names appear in error messages."""
    def method_two_args(self, x, y):
        pass
    @staticmethod
    def static_no_args():
        pass
    @staticmethod
    def positional_only(arg, /):
        pass
@cpython_only
class TestErrorMessagesUseQualifiedName(unittest.TestCase):
    """Check that call-related TypeErrors use the qualified name (A.meth)."""
    @contextlib.contextmanager
    def check_raises_type_error(self, message):
        # Assert that the with-block raises TypeError with *exactly* this text.
        with self.assertRaises(TypeError) as cm:
            yield
        self.assertEqual(str(cm.exception), message)
    def test_missing_arguments(self):
        msg = "A.method_two_args() missing 1 required positional argument: 'y'"
        with self.check_raises_type_error(msg):
            A().method_two_args("x")
    def test_too_many_positional(self):
        msg = "A.static_no_args() takes 0 positional arguments but 1 was given"
        with self.check_raises_type_error(msg):
            A.static_no_args("oops it's an arg")
    def test_positional_only_passed_as_keyword(self):
        msg = "A.positional_only() got some positional-only arguments passed as keyword arguments: 'arg'"
        with self.check_raises_type_error(msg):
            A.positional_only(arg="x")
    def test_unexpected_keyword(self):
        msg = "A.method_two_args() got an unexpected keyword argument 'bad'"
        with self.check_raises_type_error(msg):
            A().method_two_args(bad="x")
    def test_multiple_values(self):
        msg = "A.method_two_args() got multiple values for argument 'x'"
        with self.check_raises_type_error(msg):
            A().method_two_args("x", "y", x="oops")
@cpython_only
class TestErrorMessagesSuggestions(unittest.TestCase):
    """Check the "Did you mean ...?" suggestions for bad keyword arguments."""
    @contextlib.contextmanager
    def check_suggestion_includes(self, message):
        # The with-block must raise TypeError whose text suggests *message*.
        with self.assertRaises(TypeError) as cm:
            yield
        self.assertIn(f"Did you mean '{message}'?", str(cm.exception))
    @contextlib.contextmanager
    def check_suggestion_not_present(self):
        # The with-block must raise TypeError with no suggestion at all.
        with self.assertRaises(TypeError) as cm:
            yield
        self.assertNotIn("Did you mean", str(cm.exception))
    def test_unexpected_keyword_suggestion_valid_positions(self):
        # Suggestions must only point at parameters that could legally be
        # passed by keyword (not positional-only, not *args).
        def foo(blech=None, /, aaa=None, *args, late1=None):
            pass
        cases = [
            ("blach", None),
            ("aa", "aaa"),
            ("orgs", None),
            ("late11", "late1"),
        ]
        for keyword, suggestion in cases:
            with self.subTest(keyword):
                ctx = self.check_suggestion_includes(suggestion) if suggestion else self.check_suggestion_not_present()
                with ctx:
                    foo(**{keyword:None})
    def test_unexpected_keyword_suggestion_kinds(self):
        # One fixture per edit-distance kind (substitution/elimination/
        # addition) plus tie-breaking preferences between them.
        def substitution(noise=None, more_noise=None, a = None, blech = None):
            pass
        def elimination(noise = None, more_noise = None, a = None, blch = None):
            pass
        def addition(noise = None, more_noise = None, a = None, bluchin = None):
            pass
        def substitution_over_elimination(blach = None, bluc = None):
            pass
        def substitution_over_addition(blach = None, bluchi = None):
            pass
        def elimination_over_addition(bluc = None, blucha = None):
            pass
        def case_change_over_substitution(BLuch=None, Luch = None, fluch = None):
            pass
        for func, suggestion in [
            (addition, "bluchin"),
            (substitution, "blech"),
            (elimination, "blch"),
            (addition, "bluchin"),
            (substitution_over_elimination, "blach"),
            (substitution_over_addition, "blach"),
            (elimination_over_addition, "bluc"),
            (case_change_over_substitution, "BLuch"),
        ]:
            with self.subTest(suggestion):
                with self.check_suggestion_includes(suggestion):
                    func(bluch=None)
    def test_unexpected_keyword_suggestion_via_getargs(self):
        # Same feature, exercised through the C argument parsers.
        with self.check_suggestion_includes("maxsplit"):
            "foo".split(maxsplt=1)
        self.assertRaisesRegex(
            TypeError, r"split\(\) got an unexpected keyword argument 'blech'$",
            "foo".split, blech=1
        )
        with self.check_suggestion_not_present():
            "foo".split(blech=1)
        with self.check_suggestion_not_present():
            "foo".split(more_noise=1, maxsplt=1)
        # Also test the vgetargskeywords path
        with self.check_suggestion_includes("name"):
            ImportError(namez="oops")
        self.assertRaisesRegex(
            TypeError, r"ImportError\(\) got an unexpected keyword argument 'blech'$",
            ImportError, blech=1
        )
        with self.check_suggestion_not_present():
            ImportError(blech=1)
        with self.check_suggestion_not_present():
            ImportError(blech=1, namez="oops")
@cpython_only
class TestRecursion(unittest.TestCase):
    """Recursion-limit behaviour of the various call paths."""
    def test_margin_is_sufficient(self):
        # The C stack consumed by one vectorcall round-trip must fit well
        # inside the interpreter's configured stack margin.
        def get_sp():
            return _testinternalcapi.get_stack_pointer()
        this_sp = _testinternalcapi.get_stack_pointer()
        lower_sp = _testcapi.pyobject_vectorcall(get_sp, (), ())
        if _testcapi._Py_STACK_GROWS_DOWN:
            self.assertLess(lower_sp, this_sp)
            safe_margin = this_sp - lower_sp
        else:
            self.assertGreater(lower_sp, this_sp)
            safe_margin = lower_sp - this_sp
        # Add an (arbitrary) extra 25% for safety
        safe_margin = safe_margin * 5 / 4
        self.assertLess(safe_margin, _testinternalcapi.get_stack_margin())
    @skip_on_s390x
    @unittest.skipIf(is_wasi and Py_DEBUG, "requires deep stack")
    @skip_if_sanitizer("requires deep stack", thread=True)
    @unittest.skipIf(_testcapi is None, "requires _testcapi")
    @skip_emscripten_stack_overflow()
    @skip_wasi_stack_overflow()
    def test_super_deep(self):
        # Pure-Python, C-driven, and mixed Python/C recursion should all
        # raise RecursionError instead of crashing.
        def recurse(n):
            if n:
                recurse(n-1)
        def py_recurse(n, m):
            if n:
                py_recurse(n-1, m)
            else:
                c_py_recurse(m-1)
        def c_recurse(n):
            if n:
                _testcapi.pyobject_vectorcall(c_recurse, (n-1,), ())
        def c_py_recurse(m):
            if m:
                _testcapi.pyobject_vectorcall(py_recurse, (1000, m), ())
        with set_recursion_limit(100_000):
            recurse(90_000)
            with self.assertRaises(RecursionError):
                recurse(101_000)
            c_recurse(50)
            with self.assertRaises(RecursionError):
                c_recurse(90_000)
            c_py_recurse(50)
            with self.assertRaises(RecursionError):
                c_py_recurse(100_000)
    def test_recursion_with_kwargs(self):
        # GH-137883: The interpreter forgot to check the recursion limit when
        # calling with keywords.
        def recurse_kw(a=0):
            recurse_kw(a=0)
        with self.assertRaises(RecursionError):
            recurse_kw()
class TestFunctionWithManyArgs(unittest.TestCase):
    """Calls to functions with very wide parameter lists must work."""
    def test_function_with_many_args(self):
        for arg_count in (10, 500, 1000):
            with self.subTest(N=arg_count):
                # Build "def f(a0,a1,...) : return a<mid>" dynamically and
                # check that calling it returns the middle argument.
                names = [f"a{i}" for i in range(arg_count)]
                middle = arg_count // 2
                source = "def f(" + ",".join(names) + f") : return a{middle}"
                namespace = {}
                exec(source, {}, namespace)
                generated = namespace['f']
                self.assertEqual(generated(*range(arg_count)), middle)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
class TestCAPI(unittest.TestCase):
    """Tests for legacy call entry points kept in the stable ABI."""
    def test_cfunction_call(self):
        def func(*args, **kwargs):
            return (args, kwargs)
        # PyCFunction_Call() was removed in Python 3.13 API, but was kept in
        # the stable ABI.
        def PyCFunction_Call(func, *args, **kwargs):
            # The C helper has two arities: with and without a kwargs dict.
            if kwargs:
                return _testcapi.pycfunction_call(func, args, kwargs)
            else:
                return _testcapi.pycfunction_call(func, args)
        self.assertEqual(PyCFunction_Call(func), ((), {}))
        self.assertEqual(PyCFunction_Call(func, 1, 2, 3), ((1, 2, 3), {}))
        self.assertEqual(PyCFunction_Call(func, "arg", num=5), (("arg",), {'num': 5}))
if __name__ == "__main__":
unittest.main() | python | github | https://github.com/python/cpython | Lib/test/test_call.py |
#!/usr/bin/python
# vim:sw=4:softtabstop=4:expandtab:set fileencoding=ISO8859-2
#
# JumpBridge.py, part of the FleetPanel
#
# Copyright (c) 2008-2009 Paweł 'Reef' Polewicz
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution. The terms
# are also available at http://www.opensource.org/licenses/mit-license.php.
class JumpBridge:
    """A jump bridge linking two solar systems.

    The bridge is bidirectional, so which endpoint is stored as "from"
    and which as "to" carries no meaning.
    """
    def __init__(self, sys_from, planet_from, moon_from, sys_to, planet_to, moon_to, owner, password, comment=""):
        # Endpoint A (arbitrary "origin" side).
        self.sys_from = sys_from
        self.planet_from = planet_from
        self.moon_from = moon_from
        # Endpoint B (arbitrary "destination" side).
        self.sys_to = sys_to
        self.planet_to = planet_to
        self.moon_to = moon_to
        # Ownership / access details.
        self.owner = owner
        self.password = password
        self.comment = comment
    def exact_to(self):
        """Return the "planet-moon" location string of the destination end."""
        return "-".join((self.planet_to, self.moon_to))
    def exact_from(self):
        """Return the "planet-moon" location string of the origin end."""
        return "-".join((self.planet_from, self.moon_from))
    def __contains__(self, item):
        """True when *item* is one of the two bridged systems."""
        return item in (self.sys_from, self.sys_to)
    def other_side_than(self, sys):
        """Given one endpoint system, return the system on the opposite side."""
        return self.sys_to if self.sys_from == sys else self.sys_from
# | unknown | codeparrot/codeparrot-clean | ||
#! /usr/bin/env python
macros = [\
"slits.C",
"write_ntuple_to_file_advanced.C",
"write_ntuple_to_file.C",
"write_to_file.C",
"ExampleMacro.C",
"ExampleMacro_GUI.C",
"makeMySelector.C",
"RunMySelector.C",
"macro1.C",
"macro2.C",
"macro3.C",
"macro4.C",
"macro5.C",
"macro6.C",
"macro7.C",
"macro8.C",
"macro9.C",
"read_from_file.C",
"read_ntuple_from_file.C",
"read_ntuple_with_chain.C",
"TGraphFit.C",
"multigraph.C"]
pymacros = [\
"TGraphFit.py",
"macro3.py"]
import os
import sys
for mName in macros:
command = "root -b -l -q %s" %mName
if mName == "slits.C": command = 'echo "2 4" | %s' %command
print "\n ******* Running %s" %mName
if 0 !=os.system(command):
print "Macro %s" %mName
sys.exit(1)
print "\n"+"-"*80+"\nAll macros ran successfully"
for mName in pymacros:
command = "echo 1 | python %s" %mName
print "\n ******* Running %s" %mName
if 0 !=os.system(command):
print "Python macro %s" %mName
sys.exit(1)
print "\n"+"-"*80+"\nAll Python macros ran successfully"
sys.exit(0) | unknown | codeparrot/codeparrot-clean | ||
This crate defines an unsafe marker trait, StableDeref, for container types which deref to a fixed address which is valid even when the containing type is moved. For example, Box, Vec, Rc, Arc and String implement this trait. Additionally, it defines CloneStableDeref for types like Rc where clones deref to the same address.
It is intended to be used by crates such as [owning_ref](https://crates.io/crates/owning_ref) and [rental](https://crates.io/crates/rental), as well as library authors who wish to make their code interoperable with such crates. For example, if you write a custom Vec type, you can implement StableDeref, and then users will be able to use your custom Vec type together with owning_ref and rental.
no_std support can be enabled by disabling default features (specifically "std"). In this case, the trait will not be implemented for the std types mentioned above, but you can still use it for your own types.
Enable the "alloc" feature (with default-features disabled) to have this trait be implemented for the above types from the built-in `alloc` crate, specifically:
* `alloc::boxed::Box`
* `alloc::vec::Vec`
* `alloc::rc::Rc`
* `alloc::arc::Arc`
* `alloc::string::String`
For example, this crate can be built with alloc support via the following command:
`cargo build --no-default-features --features alloc`
Or added as a `Cargo.toml` dependency as follows:
```
[dependencies.stable_deref_trait]
version = "<version>"
default-features = false
features = [ "alloc" ]
``` | unknown | github | https://github.com/nodejs/node | deps/crates/vendor/stable_deref_trait/README.md |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class RadioFranceIE(InfoExtractor):
_VALID_URL = r'^https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)'
IE_NAME = 'radiofrance'
_TEST = {
'url': 'http://maison.radiofrance.fr/radiovisions/one-one',
'md5': 'bdbb28ace95ed0e04faab32ba3160daf',
'info_dict': {
'id': 'one-one',
'ext': 'ogg',
"title": "One to one",
"description": "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
"uploader": "Thomas Hercouët",
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>',
webpage, 'description', fatal=False)
uploader = self._html_search_regex(
r'<div class="credit"> © (.*?)</div>',
webpage, 'uploader', fatal=False)
formats_str = self._html_search_regex(
r'class="jp-jplayer[^"]*" data-source="([^"]+)">',
webpage, 'audio URLs')
formats = [
{
'format_id': fm[0],
'url': fm[1],
'vcodec': 'none',
'preference': i,
}
for i, fm in
enumerate(re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str))
]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'uploader': uploader,
} | unknown | codeparrot/codeparrot-clean | ||
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common import log as logging
from st2common import transport
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.db.trigger import triggertype_access, trigger_access, triggerinstance_access
from st2common.persistence.base import (Access, ContentPackResource)
from st2common.transport import utils as transport_utils
LOG = logging.getLogger(__name__)
class TriggerType(ContentPackResource):
    """Persistence access layer for TriggerType models."""
    # MongoDB access object injected from st2common.models.db.trigger.
    impl = triggertype_access
    @classmethod
    def _get_impl(cls):
        """Return the DB access implementation used by the base Access class."""
        return cls.impl
class Trigger(ContentPackResource):
    """Persistence access layer for Trigger models.

    On top of the generic ContentPackResource behaviour this class can
    publish create/update/delete (CUD) events on the message bus.
    """
    # MongoDB access object injected from st2common.models.db.trigger.
    impl = trigger_access
    # Lazily-created message bus publisher, shared class-wide.
    publisher = None
    @classmethod
    def _get_impl(cls):
        """Return the DB access implementation used by the base Access class."""
        return cls.impl
    @classmethod
    def _get_publisher(cls):
        """Return the shared TriggerCUDPublisher, creating it on first use."""
        if not cls.publisher:
            cls.publisher = transport.reactor.TriggerCUDPublisher(
                urls=transport_utils.get_messaging_urls())
        return cls.publisher
    @classmethod
    def delete_if_unreferenced(cls, model_object, publish=True, dispatch_trigger=True):
        """Delete the trigger only if its ref_count has dropped to zero.

        :param model_object: Trigger DB object to delete.
        :param publish: Whether to publish a delete event on the message bus.
        :param dispatch_trigger: Whether to dispatch an internal trigger.
        :return: The model_object (whether or not it was actually deleted).
        """
        # _object_key comes from the innards of mongoengine,
        # e.g. {'pk': ObjectId('5609e91832ed356d04a93cc0')}
        delete_query = model_object._object_key
        delete_query['ref_count__lte'] = 0
        cls._get_impl().delete_by_query(**delete_query)
        # Since delete_by_query cannot tell if the delete actually happened,
        # check with a get call whether the trigger was deleted.
        # Unfortunately, this opens up to races on delete.
        confirmed_delete = False
        try:
            cls.get_by_id(model_object.id)
        except (StackStormDBObjectNotFoundError, ValueError):
            confirmed_delete = True
        # Publish internal event on the message bus (best-effort; failures
        # are logged, not raised).
        if confirmed_delete and publish:
            try:
                cls.publish_delete(model_object)
            except Exception:
                LOG.exception('Publish failed.')
        # Dispatch trigger (likewise best-effort).
        if confirmed_delete and dispatch_trigger:
            try:
                cls.dispatch_delete_trigger(model_object)
            except Exception:
                LOG.exception('Trigger dispatch failed.')
        return model_object
class TriggerInstance(Access):
    """Persistence access layer for TriggerInstance models."""
    # MongoDB access object injected from st2common.models.db.trigger.
    impl = triggerinstance_access
    @classmethod
    def _get_impl(cls):
        """Return the DB access implementation used by the base Access class."""
        return cls.impl
@classmethod
def delete_by_query(cls, **query):
return cls._get_impl().delete_by_query(**query) | unknown | codeparrot/codeparrot-clean | ||
#! /usr/bin/env python
#
# $Id: shimmer_timesync.py,v 1.1 2010/03/31 19:35:39 ayer1 Exp $
#
# Copyright (c) 2007, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Jason Waterman
# July, 2007
# truncated to shimmer_timesync by
# Steve Ayer
# March, 2010
import serial
import struct
import time
import random
import sys
import shimmerUtil
# Find the data serial port
# this needs to find the real serial port
# (True is passed straight to shimmerUtil.find_data_port; its meaning is
# assumed from context -- confirm against shimmerUtil.)
port = shimmerUtil.find_data_port(True)
if port == '':
    # NOTE(review): "port port" looks like a typo in this message.
    print 'Could not find SHIMMER data port port. Exiting.'
    sys.exit()
# Baud rate of the SHIMMER serial link.
speed = 115200
print 'Found SHIMMER data port on %s' % (port)
ser = serial.Serial(port, speed, timeout = 1)
ser.flushInput()
print "Synchronizing clocks..."
# Capture the host's UNIX time once, then stream its 4 bytes to the
# device most-significant byte first.
t1 = int(time.time())
for i in range(0, 4):
    # Extract byte i (MSB first) of the 32-bit timestamp.
    t2 = (t1 >> 8 * (3-i)) & 0x000000ff
    # Random (<1s) pause between bytes; presumably paces the receiver --
    # confirm against the device firmware.
    time.sleep(random.random())
    t2_str = struct.pack('B', t2)
    ser.write(t2_str)
print "Wrote %d to shimmer. Done!" % t1
ser.close() | unknown | codeparrot/codeparrot-clean | ||
/* Copyright (c) 2006, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef SQL_PARSE_INCLUDED
#define SQL_PARSE_INCLUDED
#include <stddef.h>
#include <sys/types.h>
#include "lex_string.h"
#include "my_command.h"
#include "my_sqlcommand.h"
#include "mysql/strings/m_ctype.h"
#include "mysql_com.h" // enum_server_command
#include "sql/handler.h" // enum_schema_tables
#include "sql/system_variables.h" // System_variables
#include "storage/perfschema/terminology_use_previous_enum.h"
struct mysql_rwlock_t;
template <typename T>
class SQL_I_List;
/**
@addtogroup GROUP_PARSER
@{
*/
class Comp_creator;
class Item;
class Object_creation_ctx;
class Parser_state;
class THD;
class Table_ident;
struct LEX;
struct LEX_USER;
struct ORDER;
struct Parse_context;
class Table_ref;
union COM_DATA;
enum class enum_sp_type;
extern "C" int test_if_data_home_dir(const char *dir);
bool stmt_causes_implicit_commit(const THD *thd, uint mask);
#ifndef NDEBUG
extern void turn_parser_debug_on();
#endif
bool parse_sql(THD *thd, Parser_state *parser_state,
Object_creation_ctx *creation_ctx);
void free_items(Item *item);
void cleanup_items(Item *item);
void bind_fields(Item *first);
Comp_creator *comp_eq_creator(bool invert);
Comp_creator *comp_equal_creator(bool invert);
Comp_creator *comp_ge_creator(bool invert);
Comp_creator *comp_gt_creator(bool invert);
Comp_creator *comp_le_creator(bool invert);
Comp_creator *comp_lt_creator(bool invert);
Comp_creator *comp_ne_creator(bool invert);
int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
enum enum_schema_tables schema_table_idx);
void get_default_definer(THD *thd, LEX_USER *definer);
LEX_USER *create_default_definer(THD *thd);
LEX_USER *get_current_user(THD *thd, LEX_USER *user);
bool check_string_char_length(const LEX_CSTRING &str, const char *err_msg,
size_t max_char_length, const CHARSET_INFO *cs,
bool no_error);
bool merge_charset_and_collation(const CHARSET_INFO *charset,
const CHARSET_INFO *collation,
const CHARSET_INFO **to);
bool merge_sp_var_charset_and_collation(const CHARSET_INFO *charset,
const CHARSET_INFO *collation,
const CHARSET_INFO **to);
bool check_host_name(const LEX_CSTRING &str);
bool mysql_test_parse_for_slave(THD *thd);
bool is_update_query(enum enum_sql_command command);
bool is_explainable_query(enum enum_sql_command command);
bool is_log_table_write_query(enum enum_sql_command command);
bool alloc_query(THD *thd, const char *packet, size_t packet_length);
void dispatch_sql_command(THD *thd, Parser_state *parser_state,
bool is_retry = false);
void mysql_reset_thd_for_next_command(THD *thd);
void create_table_set_open_action_and_adjust_tables(LEX *lex);
void add_automatic_sp_privileges(THD *thd, enum_sp_type sp_type,
const char *db_name, const char *sp_name);
bool remove_automatic_sp_privileges(THD *thd, enum_sp_type sp_type,
bool sp_did_not_exist, const char *db_name,
const char *sp_name);
int mysql_execute_command(THD *thd, bool first_level = false);
bool do_command(THD *thd);
bool dispatch_command(THD *thd, const COM_DATA *com_data,
enum enum_server_command command);
bool prepare_index_and_data_dir_path(THD *thd, const char **data_file_name,
const char **index_file_name,
const char *table_name);
int append_file_to_dir(THD *thd, const char **filename_ptr,
const char *table_name);
void execute_init_command(THD *thd, LEX_STRING *init_command,
mysql_rwlock_t *var_lock);
void add_to_list(SQL_I_List<ORDER> &list, ORDER *order);
void add_join_on(Table_ref *b, Item *expr);
bool push_new_name_resolution_context(Parse_context *pc, Table_ref *left_op,
Table_ref *right_op);
void init_sql_command_flags(void);
const CHARSET_INFO *get_bin_collation(const CHARSET_INFO *cs);
void killall_non_super_threads(THD *thd);
bool shutdown(THD *thd, enum mysql_enum_shutdown_level level);
bool show_precheck(THD *thd, LEX *lex, bool lock);
void statement_id_to_session(THD *thd);
/* Variables */
extern uint sql_command_flags[];
/**
Map from enumeration values of type enum_server_command to
descriptions of type std::string.
In this context, a "command" is a type code for a remote procedure
call in the client-server protocol; for instance, a "connect" or a
"ping" or a "query".
The getter functions use @@terminology_use_previous to
decide which version of the name to use, for names that depend on
it.
*/
class Command_names {
 private:
  /**
    Array indexed by enum_server_command, where each element is a
    description string.
  */
  static const std::string m_names[];
  /**
    Command whose name depends on @@terminology_use_previous.
    Currently, there is only one such command, so we use a single
    member variable. In case we ever change any other command name
    and control the use of the old or new name using
    @@terminology_use_previous, we need to change the
    following three members into some collection type, e.g.,
    std::unordered_set.
  */
  static constexpr enum_server_command m_replace_com{COM_REGISTER_SLAVE};
  /**
    Name to use when compatibility is enabled.
  */
  static const std::string m_replace_str;
  /**
    The version when the name was changed.
  */
  static constexpr terminology_use_previous::enum_compatibility_version
      m_replace_version{terminology_use_previous::BEFORE_8_0_26};
  /**
    Given a system_variable object, returns the string to use for
    m_replace_com, according to the setting of
    terminology_use_previous stored in the object.

    @param sysvars The System_variables object holding the
    configuration that should be considered when doing the translation.

    @return The instrumentation name that was in use in the configured
    version, for m_replace_com.
  */
  static const std::string &translate(const System_variables &sysvars);
  /**
    Cast an integer to enum_server_command, and assert it is in range.

    @param cmd The integer value
    @return The enum_server_command
  */
  static enum_server_command int_to_cmd(int cmd) {
    // Range checks are debug-build assertions only; release builds
    // perform the cast unchecked.
    assert(cmd >= 0);
    assert(cmd <= COM_END);
    return static_cast<enum_server_command>(cmd);
  }

 public:
  /**
    Return a description string for a given enum_server_command.

    This bypasses @@terminology_use_previous and acts as if
    it was set to NONE.

    @param cmd The enum_server_command
    @retval The description string
  */
  static const std::string &str_notranslate(enum_server_command cmd) {
    return m_names[cmd];
  }
  /**
    Return a description string for an integer that is the numeric
    value of an enum_server_command.

    This bypasses @@terminology_use_previous and acts as if
    it was set to NONE.

    @param cmd The integer value
    @retval The description string
  */
  static const std::string &str_notranslate(int cmd) {
    return str_notranslate(int_to_cmd(cmd));
  }
  /**
    Return a description string for a given enum_server_command.

    This takes @@session.terminology_use_previous into
    account, and returns an old name if one has been defined and the
    option is enabled.

    @param cmd The enum_server_command
    @retval The description string
  */
  static const std::string &str_session(enum_server_command cmd);
  /**
    Return a description string for a given enum_server_command.

    This takes @@global.terminology_use_previous into
    account, and returns an old name if one has been defined and the
    option is enabled.

    @param cmd The enum_server_command
    @retval The description string
  */
  static const std::string &str_global(enum_server_command cmd);
  /**
    Return a description string for an integer that is the numeric
    value of an enum_server_command.

    This takes @@session.terminology_use_previous into
    account, and returns an old name if one has been defined and the
    option is enabled.

    @param cmd The integer value
    @retval The description string
  */
  static const std::string &str_session(int cmd) {
    return str_session(int_to_cmd(cmd));
  }
};
bool sqlcom_can_generate_row_events(enum enum_sql_command command);
/**
@brief This function checks if the sql_command is one that identifies the
boundaries (begin, end or savepoint) of a transaction.
@note this is used for replication purposes.
@param command The parsed SQL_COMM to check.
@return true if this is either a BEGIN, COMMIT, SAVEPOINT, ROLLBACK,
ROLLBACK_TO_SAVEPOINT.
@return false any other SQL command.
*/
bool is_normal_transaction_boundary_stmt(enum enum_sql_command command);
/**
@brief This function checks if the sql_command is one that identifies the
boundaries (begin, end or savepoint) of an XA transaction. It does not
consider PREPARE statements.
@note this is used for replication purposes.
@param command The parsed SQL_COMM to check.
@return true if this is either a XA_START, XA_END, XA_COMMIT, XA_ROLLBACK.
@return false any other SQL command.
*/
bool is_xa_transaction_boundary_stmt(enum enum_sql_command command);
bool all_tables_not_ok(THD *thd, Table_ref *tables);
bool some_non_temp_table_to_be_updated(THD *thd, Table_ref *tables);
// TODO: remove after refactoring of ALTER DATABASE:
bool set_default_charset(HA_CREATE_INFO *create_info,
const CHARSET_INFO *value);
// TODO: remove after refactoring of ALTER DATABASE:
bool set_default_collation(HA_CREATE_INFO *create_info,
const CHARSET_INFO *value);
bool sp_process_definer(THD *);
/* Bits in sql_command_flags */
#define CF_CHANGES_DATA (1U << 0)
/* The 2nd bit is unused -- it used to be CF_HAS_ROW_COUNT. */
#define CF_STATUS_COMMAND (1U << 2)
#define CF_SHOW_TABLE_COMMAND (1U << 3)
#define CF_WRITE_LOGS_COMMAND (1U << 4)
/**
Must be set for SQL statements that may contain
Item expressions and/or use joins and tables.
Indicates that the parse tree of such statement may
contain rule-based optimizations that depend on metadata
(i.e. number of columns in a table), and consequently
that the statement must be re-prepared whenever
referenced metadata changes. Must not be set for
statements that themselves change metadata, e.g. RENAME,
ALTER and other DDL, since otherwise will trigger constant
reprepare. Consequently, complex item expressions and
joins are currently prohibited in these statements.
*/
#define CF_REEXECUTION_FRAGILE (1U << 5)
/**
Implicitly commit before the SQL statement is executed.
Statements marked with this flag will cause any active
transaction to end (commit) before proceeding with the
command execution.
This flag should be set for statements that probably can't
be rolled back or that do not expect any previously metadata
locked tables.
*/
#define CF_IMPLICIT_COMMIT_BEGIN (1U << 6)
/**
Implicitly commit after the SQL statement.
Statements marked with this flag are automatically committed
at the end of the statement.
This flag should be set for statements that will implicitly
open and take metadata locks on system tables that should not
  be carried for the whole duration of an active transaction.
*/
#define CF_IMPLICIT_COMMIT_END (1U << 7)
/**
CF_IMPLICIT_COMMIT_BEGIN and CF_IMPLICIT_COMMIT_END are used
to ensure that the active transaction is implicitly committed
before and after every DDL statement and any statement that
modifies our currently non-transactional system tables.
*/
#define CF_AUTO_COMMIT_TRANS (CF_IMPLICIT_COMMIT_BEGIN | CF_IMPLICIT_COMMIT_END)
/**
Diagnostic statement.
Diagnostic statements:
- SHOW WARNING
- SHOW ERROR
- GET DIAGNOSTICS (WL#2111)
do not modify the Diagnostics Area during execution.
*/
#define CF_DIAGNOSTIC_STMT (1U << 8)
/**
Identifies statements that may generate row events
and that may end up in the binary log.
*/
#define CF_CAN_GENERATE_ROW_EVENTS (1U << 9)
/**
Identifies statements which may deal with temporary tables and for which
temporary tables should be pre-opened to simplify privilege checks.
*/
#define CF_PREOPEN_TMP_TABLES (1U << 10)
/**
Identifies statements for which open handlers should be closed in the
beginning of the statement.
*/
#define CF_HA_CLOSE (1U << 11)
/**
Identifies statements that can be explained with EXPLAIN.
*/
#define CF_CAN_BE_EXPLAINED (1U << 12)
/** Identifies statements which may generate an optimizer trace */
#define CF_OPTIMIZER_TRACE (1U << 14)
/**
Identifies statements that should always be disallowed in
read only transactions.
*/
#define CF_DISALLOW_IN_RO_TRANS (1U << 15)
/**
Identifies statements and commands that can be used with Protocol Plugin
*/
#define CF_ALLOW_PROTOCOL_PLUGIN (1U << 16)
/**
Identifies statements (typically DDL) which needs auto-commit mode
temporarily turned off.
@note This is necessary to prevent InnoDB from automatically committing
InnoDB transaction each time data-dictionary tables are closed
after being updated.
@note This is also necessary for ACL DDL, so the code which
saves GTID state or slave state in the system tables at the
commit time works correctly. This code does statement commit
on low-level (see System_table_access:: close_table()) and
thus can pre-maturely commit DDL if @@autocommit=1.
*/
#define CF_NEEDS_AUTOCOMMIT_OFF (1U << 17)
/**
Identifies statements which can return rows of data columns (SELECT, SHOW ...)
*/
#define CF_HAS_RESULT_SET (1U << 18)
/**
Identifies DDL statements which can be atomic.
Having the bit ON does not yet define an atomic.
The property is used both on the master and slave.
On the master atomicity infers the binlog and gtid_executed system table.
On the slave it more involves the slave info table.
  @note At the moment of declaration the covered DDL subset coincides
  with that of CF_NEEDS_AUTOCOMMIT_OFF.
*/
#define CF_POTENTIAL_ATOMIC_DDL (1U << 19)
/**
Statement is depending on the ACL cache, which can be disabled by the
--skip-grant-tables server option.
*/
#define CF_REQUIRE_ACL_CACHE (1U << 20)
/**
Identifies statements as SHOW commands using INFORMATION_SCHEMA system views.
*/
#define CF_SHOW_USES_SYSTEM_VIEW (1U << 21)
/* Bits in server_command_flags */
/**
Skip the increase of the global query id counter. Commonly set for
commands that are stateless (won't cause any change on the server
internal states). This is made obsolete as query id is incremented
for ping and statistics commands as well because of race condition
(Bug#58785).
*/
#define CF_SKIP_QUERY_ID (1U << 0)
/**
Skip the increase of the number of statements that clients have
sent to the server. Commonly used for commands that will cause
a statement to be executed but the statement might have not been
sent by the user (ie: stored procedure).
*/
#define CF_SKIP_QUESTIONS (1U << 1)
/**
1U << 16 is reserved for Protocol Plugin statements and commands
*/
/**
@} (end of group GROUP_PARSER)
*/
#endif /* SQL_PARSE_INCLUDED */ | c | github | https://github.com/mysql/mysql-server | sql/sql_parse.h |
#!/usr/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
MultistateSampler
=================
Base multi-thermodynamic state multistate class
COPYRIGHT
Current version by Andrea Rizzi <andrea.rizzi@choderalab.org>, Levi N. Naden <levi.naden@choderalab.org> and
John D. Chodera <john.chodera@choderalab.org> while at Memorial Sloan Kettering Cancer Center.
Original version by John D. Chodera <jchodera@gmail.com> while at the University of
California Berkeley.
LICENSE
This code is licensed under the latest available version of the MIT License.
"""
# ==============================================================================
# GLOBAL IMPORTS
# ==============================================================================
import os
import copy
import time
import typing
import inspect
import logging
import datetime
import numpy as np
from simtk import unit, openmm
from openmmtools import multistate, utils, states, mcmc, cache
import mpiplus
from openmmtools.multistate.utils import SimulationNaNError
from pymbar.utils import ParameterError
from openmmtools.integrators import FIREMinimizationIntegrator
logger = logging.getLogger(__name__)
# ==============================================================================
# MULTISTATE SAMPLER
# ==============================================================================
class MultiStateSampler(object):
"""
Base class for samplers that sample multiple thermodynamic states using
one or more replicas.
This base class provides a general simulation facility for multistate from multiple
thermodynamic states, allowing any set of thermodynamic states to be specified.
If instantiated on its own, the thermodynamic state indices associated with each
state are specified and replica mixing does not change any thermodynamic states,
meaning that each replica remains in its original thermodynamic state.
Stored configurations, energies, swaps, and restart information are all written
to a single output file using the platform portable, robust, and efficient
NetCDF4 library.
Parameters
----------
mcmc_moves : MCMCMove or list of MCMCMove, optional
The MCMCMove used to propagate the thermodynamic states. If a list of MCMCMoves,
they will be assigned to the correspondent thermodynamic state on
creation. If None is provided, Langevin dynamics with 2fm timestep, 5.0/ps collision rate,
and 500 steps per iteration will be used.
number_of_iterations : int or infinity, optional, default: 1
The number of iterations to perform. Both ``float('inf')`` and
``numpy.inf`` are accepted for infinity. If you set this to infinity,
be sure to set also ``online_analysis_interval``.
online_analysis_interval : None or Int >= 1, optional, default: 200
Choose the interval at which to perform online analysis of the free energy.
After every interval, the simulation will be stopped and the free energy estimated.
If the error in the free energy estimate is at or below ``online_analysis_target_error``, then the simulation
will be considered completed.
If set to ``None``, then no online analysis is performed
online_analysis_target_error : float >= 0, optional, default 0.0
The target error for the online analysis measured in kT per phase.
Once the free energy is at or below this value, the phase will be considered complete.
If ``online_analysis_interval`` is None, this option does nothing.
Default is set to 0.0 since online analysis runs by default, but a finite ``number_of_iterations`` should also
be set to ensure there is some stop condition. If target error is 0 and an infinite number of iterations is set,
then the sampler will run until the user stop it manually.
online_analysis_minimum_iterations : int >= 0, optional, default 200
Set the minimum number of iterations which must pass before online analysis is carried out.
Since the initial samples likely not to yield a good estimate of free energy, save time and just skip them
If ``online_analysis_interval`` is None, this does nothing
locality : int > 0, optional, default None
If None, the energies at all states will be computed for every replica each iteration.
If int > 0, energies will only be computed for states ``range(max(0, state-locality), min(n_states, state+locality))``.
Attributes
----------
n_replicas
n_states
iteration
mcmc_moves
sampler_states
metadata
is_completed
:param number_of_iterations: Maximum number of integer iterations that will be run
:param online_analysis_interval: How frequently to carry out online analysis in number of iterations
:param online_analysis_target_error: Target free energy difference error float at which simulation will be stopped during online analysis, in dimensionless energy
:param online_analysis_minimum_iterations: Minimum number of iterations needed before online analysis is run as int
"""
# -------------------------------------------------------------------------
# Constructors.
# -------------------------------------------------------------------------
def __init__(self, mcmc_moves=None, number_of_iterations=1,
online_analysis_interval=200, online_analysis_target_error=0.0,
online_analysis_minimum_iterations=200,
locality=None):
# Warn that API is experimental
logger.warn('Warning: The openmmtools.multistate API is experimental and may change in future releases')
# These will be set on initialization. See function
# create() for explanation of single variables.
self._thermodynamic_states = None
self._unsampled_states = None
self._sampler_states = None
self._replica_thermodynamic_states = None
self._iteration = None
self._energy_thermodynamic_states = None
self._neighborhoods = None
self._energy_unsampled_states = None
self._n_accepted_matrix = None
self._n_proposed_matrix = None
self._reporter = None
self._metadata = None
# Handling default propagator.
if mcmc_moves is None:
# This will be converted to a list in create().
self._mcmc_moves = mcmc.LangevinDynamicsMove(timestep=2.0 * unit.femtosecond,
collision_rate=5.0 / unit.picosecond,
n_steps=500, reassign_velocities=True,
n_restart_attempts=6)
else:
self._mcmc_moves = copy.deepcopy(mcmc_moves)
# Store constructor parameters. Everything is marked for internal
# usage because any change to these attribute implies a change
# in the storage file as well. Use properties for checks.
self.number_of_iterations = number_of_iterations
# Store locality
self.locality = locality
# Online analysis options.
self.online_analysis_interval = online_analysis_interval
self.online_analysis_target_error = online_analysis_target_error
self.online_analysis_minimum_iterations = online_analysis_minimum_iterations
self._online_error_trap_counter = 0 # Counter for errors in the online estimate
self._online_error_bank = []
self._last_mbar_f_k = None
self._last_err_free_energy = None
self._have_displayed_citations_before = False
# Check convergence.
if self.number_of_iterations == np.inf:
if self.online_analysis_target_error == 0.0:
logger.warning("WARNING! You have specified an unlimited number of iterations and a target error "
"for online analysis of 0.0! Your simulation may never reach 'completed' state!")
elif self.online_analysis_interval is None:
logger.warning("WARNING! This simulation will never be considered 'complete' since there is no "
"specified maximum number of iterations!")
    @classmethod
    def from_storage(cls, storage):
        """Constructor from an existing storage file.

        Parameters
        ----------
        storage : str or Reporter
            If str: The path to the storage file.
            If :class:`Reporter`: uses the :class:`Reporter` options
            In the future this will be able to take a Storage class as well.

        Returns
        -------
        sampler : MultiStateSampler
            A new instance of MultiStateSampler (or subclass) in the same state of the
            last stored iteration.
        """
        # Handle case in which storage is a string.
        reporter = cls._reporter_from_storage(storage, check_exist=True)
        try:
            # Open the reporter to read the data; the finally clause
            # guarantees it is closed again even if restoration fails.
            reporter.open(mode='r')
            sampler = cls._instantiate_sampler_from_reporter(reporter)
            sampler._restore_sampler_from_reporter(reporter)
        finally:
            # Close reporter in reading mode.
            reporter.close()
        # We open the reporter only in node 0 in append mode ready for use.
        sampler._reporter = reporter
        mpiplus.run_single_node(0, sampler._reporter.open, mode='a',
                                broadcast_result=False, sync_nodes=False)
        # Don't write the new last iteration, we have not technically
        # written anything yet, so there is no "junk".
        return sampler
# TODO use Python 3.6 namedtuple syntax when we drop Python 3.5 support.
Status = typing.NamedTuple('Status', [
('iteration', int),
('target_error', float),
('is_completed', bool)
])
@classmethod
def read_status(cls, storage):
"""Read the status of the calculation from the storage file.
This class method can be used to quickly check the status of the
simulation before loading the full ``ReplicaExchange`` object
from disk.
Parameters
----------
storage : str or Reporter
The path to the storage file or the reporter object.
Returns
-------
status : ReplicaExchange.Status
The status of the replica-exchange calculation. It has three
fields: ``iteration``, ``target_error``, and ``is_completed``.
"""
# Handle case in which storage is a string.
reporter = cls._reporter_from_storage(storage, check_exist=True)
# Read iteration and online analysis info.
try:
reporter.open(mode='r')
options = reporter.read_dict('options')
iteration = reporter.read_last_iteration(last_checkpoint=False)
# Search for last cached free energies only if online analysis is activated.
target_error = None
last_err_free_energy = None
# Check if online analysis is set AND that the target error is a stopping condition (> 0)
if (options['online_analysis_interval'] is not None and
options['online_analysis_target_error'] != 0.0):
target_error = options['online_analysis_target_error']
try:
last_err_free_energy = cls._read_last_free_energy(reporter, iteration)[1][1]
except TypeError:
# Trap for undefined free energy (has not been run yet)
last_err_free_energy = np.inf
finally:
reporter.close()
# Check if the calculation is done.
number_of_iterations = options['number_of_iterations']
online_analysis_target_error = options['online_analysis_target_error']
is_completed = cls._is_completed_static(number_of_iterations, iteration,
last_err_free_energy,
online_analysis_target_error)
return cls.Status(iteration=iteration, target_error=target_error,
is_completed=is_completed)
# -------------------------------------------------------------------------
# Public properties.
# -------------------------------------------------------------------------
@property
def n_states(self):
"""The integer number of thermodynamic states (read-only)."""
if self._thermodynamic_states is None:
return 0
else:
return len(self._thermodynamic_states)
@property
def n_replicas(self):
"""The integer number of replicas (read-only)."""
if self._sampler_states is None:
return 0
else:
return len(self._sampler_states)
    @property
    def iteration(self):
        """The integer current iteration of the simulation (read-only).

        If the simulation has not been created yet, this is None.
        """
        return self._iteration
@property
def mcmc_moves(self):
"""A copy of the MCMCMoves list used to propagate the simulation.
This can be set only before creation.
"""
return copy.deepcopy(self._mcmc_moves)
@mcmc_moves.setter
def mcmc_moves(self, new_value):
if self._thermodynamic_states is not None:
# We can't modify representation of the MCMCMoves because it's
# impossible to delete groups/variables from an NetCDF file. We
# could support this by JSONizing the dict serialization and
# store it as a string instead, if we needed this.
raise RuntimeError('Cannot modify MCMCMoves after creation.')
# If this is a single MCMCMove, it'll be transformed to a list in create().
self._mcmc_moves = copy.deepcopy(new_value)
@property
def sampler_states(self):
"""A copy of the sampler states list at the current iteration.
This can be set only before running.
"""
return copy.deepcopy(self._sampler_states)
@sampler_states.setter
def sampler_states(self, value):
if self._iteration != 0:
raise RuntimeError('Sampler states can be assigned only between '
'create() and run().')
if len(value) != self.n_replicas:
raise ValueError('Passed {} sampler states for {} replicas'.format(
len(value), self.n_replicas))
# Update sampler state in the object and on storage.
self._sampler_states = copy.deepcopy(value)
mpiplus.run_single_node(0, self._reporter.write_sampler_states,
self._sampler_states, self._iteration)
@property
def is_periodic(self):
"""Return True if system is periodic, False if not, and None if not initialized"""
if self._sampler_states is None:
return None
return self._thermodynamic_states[0].is_periodic
class _StoredProperty(object):
"""
Descriptor of a property stored as an option.
validate_function is a simple function for checking things like "X > 0", but exposes both the
ReplicaExchange instance and the new value for the variable, in that order.
More complex checks which relies on the ReplicaExchange instance, like "if Y == True, then check X" can be
accessed through the instance object of the function
"""
def __init__(self, option_name, validate_function=None):
self._option_name = option_name
self._validate_function = validate_function
def __get__(self, instance, owner_class=None):
return getattr(instance, '_' + self._option_name)
def __set__(self, instance, new_value):
if self._validate_function is not None:
new_value = self._validate_function(instance, new_value)
setattr(instance, '_' + self._option_name, new_value)
# Update storage if we ReplicaExchange is initialized.
if instance._reporter is not None and instance._reporter.is_open():
mpiplus.run_single_node(0, instance._store_options)
# ----------------------------------
# Value Validation of the properties
# Should be @staticmethod with arguments of (instance, value) in that order, even if instance is not used
# ----------------------------------
@staticmethod
def _number_of_iterations_validator(_, number_of_iterations):
# Support infinite number of iterations.
if not (0 <= number_of_iterations <= float('inf')):
raise ValueError('Accepted values for number_of_iterations are'
'non-negative integers and infinity.')
return number_of_iterations
@staticmethod
def _oa_interval_validator(_, online_analysis_interval):
"""Check the online_analysis_interval value for consistency"""
if online_analysis_interval is not None and (
type(online_analysis_interval) != int or online_analysis_interval < 1):
raise ValueError('online_analysis_interval must be an integer >=1 or None')
return online_analysis_interval
@staticmethod
def _oa_target_error_validator(instance, online_analysis_target_error):
if instance.online_analysis_interval is not None:
if online_analysis_target_error < 0:
raise ValueError("online_analysis_target_error must be a float >= 0")
elif online_analysis_target_error == 0 and instance.number_of_iterations is None:
logger.warning("online_analysis_target_error of 0 and number of iterations undefined "
"will never converge!")
return online_analysis_target_error
@staticmethod
def _oa_min_iter_validator(instance, online_analysis_minimum_iterations):
if (instance.online_analysis_interval is not None and
(type(
online_analysis_minimum_iterations) is not int or online_analysis_minimum_iterations < 0)):
raise ValueError("online_analysis_minimum_iterations must be an integer >= 0")
return online_analysis_minimum_iterations
@staticmethod
def _locality_validator(_, locality):
if locality is not None:
if (type(locality) != int) or (locality <= 0):
raise ValueError("locality must be an int > 0")
return locality
number_of_iterations = _StoredProperty('number_of_iterations',
validate_function=_StoredProperty._number_of_iterations_validator)
online_analysis_interval = _StoredProperty('online_analysis_interval',
validate_function=_StoredProperty._oa_interval_validator) #:interval to carry out online analysis
online_analysis_target_error = _StoredProperty('online_analysis_target_error',
validate_function=_StoredProperty._oa_target_error_validator)
online_analysis_minimum_iterations = _StoredProperty('online_analysis_minimum_iterations',
validate_function=_StoredProperty._oa_min_iter_validator)
locality = _StoredProperty('locality', validate_function=_StoredProperty._locality_validator)
    @property
    def metadata(self):
        """A copy of the metadata dictionary passed on creation (read-only)."""
        # Deep-copied so callers cannot mutate the stored metadata.
        return copy.deepcopy(self._metadata)

    @property
    def is_completed(self):
        """Check if we have reached any of the stop target criteria (read-only)"""
        return self._is_completed()
# -------------------------------------------------------------------------
# Main public interface.
# -------------------------------------------------------------------------
_TITLE_TEMPLATE = ('Multi-state sampler simulation created using MultiStateSampler class '
'of yank.multistate on {}')
def create(self, thermodynamic_states: list, sampler_states, storage,
initial_thermodynamic_states=None, unsampled_thermodynamic_states=None,
metadata=None):
"""Create new multistate sampler simulation.
Parameters
----------
thermodynamic_states : list of states.ThermodynamicState
Thermodynamic states to simulate, where one replica is allocated per state.
Each state must have a system with the same number of atoms.
sampler_states : states.SamplerState or list
One or more sets of initial sampler states.
The number of replicas is taken to be the number of sampler states provided.
If the sampler states do not have box_vectors attached and the system is periodic,
an exception will be thrown.
storage : str or instanced Reporter
If str: the path to the storage file. Default checkpoint options from Reporter class are used
If Reporter: Uses the reporter options and storage path
In the future this will be able to take a Storage class as well.
initial_thermodynamic_states : None or list or array-like of int of length len(sampler_states), optional,
default: None.
Initial thermodynamic_state index for each sampler_state.
If no initial distribution is chosen, ``sampler_states`` are distributed between the
``thermodynamic_states`` following these rules:
* If ``len(thermodynamic_states) == len(sampler_states)``: 1-to-1 distribution
* If ``len(thermodynamic_states) > len(sampler_states)``: First and last state distributed first
remaining ``sampler_states`` spaced evenly by index until ``sampler_states`` are depleted.
If there is only one ``sampler_state``, then the only first ``thermodynamic_state`` will be chosen
* If ``len(thermodynamic_states) < len(sampler_states)``, each ``thermodynamic_state`` receives an
equal number of ``sampler_states`` until there are insufficient number of ``sampler_states`` remaining
to give each ``thermodynamic_state`` an equal number. Then the rules from the previous point are
followed.
unsampled_thermodynamic_states : list of states.ThermodynamicState, optional, default=None
These are ThermodynamicStates that are not propagated, but their
reduced potential is computed at each iteration for each replica.
These energy can be used as data for reweighting schemes (default
is None).
metadata : dict, optional, default=None
Simulation metadata to be stored in the file.
"""
# Handle case in which storage is a string and not a Reporter object.
self._reporter = self._reporter_from_storage(storage, check_exist=False)
# Check if netcdf files exist. This is run only on MPI node 0 and
# broadcasted. This is to avoid the case where the other nodes
# arrive to this line after node 0 has already created the storage
# file, causing an error.
if mpiplus.run_single_node(0, self._reporter.storage_exists, broadcast_result=True):
raise RuntimeError('Storage file {} already exists; cowardly '
'refusing to overwrite.'.format(self._reporter.filepath))
# Make sure sampler_states is an iterable of SamplerStates.
if isinstance(sampler_states, states.SamplerState):
sampler_states = [sampler_states]
# Initialize internal attribute and dataset.
self._pre_write_create(thermodynamic_states, sampler_states, storage,
initial_thermodynamic_states=initial_thermodynamic_states,
unsampled_thermodynamic_states=unsampled_thermodynamic_states,
metadata=metadata)
# Display papers to be cited.
self._display_citations()
self._initialize_reporter()
    @utils.with_timer('Minimizing all replicas')
    def minimize(self, tolerance=1.0 * unit.kilojoules_per_mole / unit.nanometers,
                 max_iterations=0):
        """Minimize all replicas.

        Minimization is distributed over the available MPI nodes, and the
        minimized positions are written to the storage file at the end.

        Parameters
        ----------
        tolerance : simtk.unit.Quantity, optional
            Minimization tolerance (units of energy/mole/length, default is
            ``1.0 * unit.kilojoules_per_mole / unit.nanometers``).
        max_iterations : int, optional
            Maximum number of iterations for minimization. If 0, minimization
            continues until converged.

        Raises
        ------
        RuntimeError
            If the simulation has not been created yet (no replicas exist).
        """
        # Check that simulation has been created.
        if self.n_replicas == 0:
            raise RuntimeError('Cannot minimize replicas. The simulation must be created first.')
        logger.debug("Minimizing all replicas...")
        # Distribute minimization across nodes. Only node 0 will get all positions.
        # The other nodes, only need the positions that they use for propagation and
        # computation of the energy matrix entries.
        minimized_positions, sampler_state_ids = mpiplus.distribute(self._minimize_replica, range(self.n_replicas),
                                                                    tolerance, max_iterations,
                                                                    send_results_to=0)
        # Update all sampler states. For non-0 nodes, this will update only the
        # sampler states associated to the replicas propagated by this node.
        for sampler_state_id, minimized_pos in zip(sampler_state_ids, minimized_positions):
            self._sampler_states[sampler_state_id].positions = minimized_pos
        # Save the minimized positions in the storage (executed on node 0 only).
        mpiplus.run_single_node(0, self._reporter.write_sampler_states, self._sampler_states, self._iteration)
def equilibrate(self, n_iterations, mcmc_moves=None):
"""Equilibrate all replicas.
This does not increase the iteration counter. The equilibrated
positions are stored at the end.
Parameters
----------
n_iterations : int
Number of equilibration iterations.
mcmc_moves : MCMCMove or list of MCMCMove, optional
Optionally, the MCMCMoves to use for equilibration can be
different from the ones used in production.
"""
# Check that simulation has been created.
if self.n_replicas == 0:
raise RuntimeError('Cannot equilibrate replicas. The simulation must be created first.')
# If no MCMCMove is specified, use the ones for production.
if mcmc_moves is None:
mcmc_moves = self._mcmc_moves
# Make sure there is one MCMCMove per thermodynamic state.
if isinstance(mcmc_moves, mcmc.MCMCMove):
mcmc_moves = [copy.deepcopy(mcmc_moves) for _ in range(self.n_states)]
elif len(mcmc_moves) != self.n_states:
raise RuntimeError('The number of MCMCMoves ({}) and ThermodynamicStates ({}) for equilibration'
' must be the same.'.format(len(self._mcmc_moves), self.n_states))
# Temporarily set the equilibration MCMCMoves.
production_mcmc_moves = self._mcmc_moves
self._mcmc_moves = mcmc_moves
for iteration in range(n_iterations):
logger.debug("Equilibration iteration {}/{}".format(iteration, n_iterations))
self._propagate_replicas()
# Restore production MCMCMoves.
self._mcmc_moves = production_mcmc_moves
# Update stored positions.
mpiplus.run_single_node(0, self._reporter.write_sampler_states, self._sampler_states, self._iteration)
    def run(self, n_iterations=None):
        """Run the replica-exchange simulation.

        This runs at most ``number_of_iterations`` iterations. Use :func:`extend`
        to pass the limit.

        Parameters
        ----------
        n_iterations : int, optional
            If specified, only at most the specified number of iterations
            will be run (default is None).

        Raises
        ------
        SimulationNaNError
            If the initial coordinates or any computed energies are NaN.
        """
        # If this is the first iteration, compute and store the
        # starting energies of the minimized/equilibrated structures.
        if self._iteration == 0:
            try:
                self._compute_energies()
            # We're intercepting a possible initial NaN position here thrown by OpenMM, which is a simple exception
            # So we have to under-specify this trap.
            except Exception as e:
                if 'coordinate is nan' in str(e).lower():
                    err_message = "Initial coordinates were NaN! Check your inputs!"
                    logger.critical(err_message)
                    raise SimulationNaNError(err_message)
                else:
                    # If not the special case, raise the error normally
                    raise e
            mpiplus.run_single_node(0, self._reporter.write_energies, self._energy_thermodynamic_states,
                                    self._neighborhoods, self._energy_unsampled_states, self._iteration)
            self._check_nan_energy()
        timer = utils.Timer()
        timer.start('Run ReplicaExchange')
        run_initial_iteration = self._iteration
        # Handle default argument and determine number of iterations to run.
        if n_iterations is None:
            iteration_limit = self.number_of_iterations
        else:
            iteration_limit = min(self._iteration + n_iterations, self.number_of_iterations)
        # Main loop.
        while not self._is_completed(iteration_limit):
            # Increment iteration counter.
            self._iteration += 1
            logger.debug('*' * 80)
            logger.debug('Iteration {}/{}'.format(self._iteration, iteration_limit))
            logger.debug('*' * 80)
            timer.start('Iteration')
            # Update thermodynamic states
            self._mix_replicas()
            # Propagate replicas.
            self._propagate_replicas()
            # Compute energies of all replicas at all states
            self._compute_energies()
            # Write iteration to storage file
            self._report_iteration()
            # Update analysis
            self._update_analysis()
            # Computing timing information
            iteration_time = timer.stop('Iteration')
            partial_total_time = timer.partial('Run ReplicaExchange')
            # Average wall-clock time per iteration since this run() call began.
            # The denominator is >= 1 because _iteration was incremented above.
            time_per_iteration = partial_total_time / (self._iteration - run_initial_iteration)
            estimated_time_remaining = time_per_iteration * (iteration_limit - self._iteration)
            estimated_total_time = time_per_iteration * iteration_limit
            estimated_finish_time = time.time() + estimated_time_remaining
            # TODO: Transmit timing information
            # Show timing statistics if debug level is activated.
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("Iteration took {:.3f}s.".format(iteration_time))
                if estimated_time_remaining != float('inf'):
                    logger.debug("Estimated completion in {}, at {} (consuming total wall clock time {}).".format(
                        str(datetime.timedelta(seconds=estimated_time_remaining)),
                        time.ctime(estimated_finish_time),
                        str(datetime.timedelta(seconds=estimated_total_time))))
            # Perform sanity checks to see if we should terminate here.
            self._check_nan_energy()
def extend(self, n_iterations):
"""Extend the simulation by the given number of iterations.
Contrarily to :func:`run`, this will extend the number of iterations past
``number_of_iteration`` if requested.
Parameters
----------
n_iterations : int
The number of iterations to run.
"""
if self._iteration + n_iterations > self.number_of_iterations:
# This MUST be assigned to a property or the storage won't be updated.
self.number_of_iterations = self._iteration + n_iterations
self.run(n_iterations)
def __repr__(self):
"""Return a 'formal' representation that can be used to reconstruct the class, if possible."""
return "<instance of {}>".format(self.__class__.__name__)
    def __del__(self):
        # Close the storage file on garbage collection.
        # The reporter could be None if MultiStateSampler was not created.
        # Only node 0 closes the reporter; it is the only node that writes to it.
        if hasattr(self, '_reporter') and (self._reporter is not None):
            mpiplus.run_single_node(0, self._reporter.close)
# -------------------------------------------------------------------------
# Internal-usage.
# -------------------------------------------------------------------------
    def _pre_write_create(self,
                          thermodynamic_states,
                          sampler_states,
                          storage,
                          initial_thermodynamic_states=None,
                          unsampled_thermodynamic_states=None,
                          metadata=None):
        """
        Internal function which allocates and sets up ALL variables prior to actually using them.

        This is helpful to ensure subclasses have all variables created prior to writing them out with
        :func:`_report_iteration`.
        All calls to this function should be *identical* to :func:`create` itself.

        Parameters
        ----------
        thermodynamic_states : list of states.ThermodynamicState
            The states to sample; all must be either periodic or non-periodic.
        sampler_states : list of states.SamplerState
            One sampler state per replica.
        storage : str or Reporter
            Storage location or reporter (not used directly here; kept for
            signature parity with :func:`create`).
        initial_thermodynamic_states : list of int, optional
            Initial thermodynamic state index for each replica; computed by
            :func:`_default_initial_thermodynamic_states` when None.
        unsampled_thermodynamic_states : list of states.ThermodynamicState, optional
            States whose reduced potentials are computed but never propagated.
        metadata : dict, optional
            Simulation metadata; a default title is inserted if missing.

        Raises
        ------
        Exception
            If the states mix periodic/non-periodic systems, or a periodic
            sampler state lacks box vectors.
        ValueError
            If states do not all share the same number of particles.
        RuntimeError
            If the number of MCMCMoves does not match the number of states.
        """
        # Check all systems are either periodic or not.
        is_periodic = thermodynamic_states[0].is_periodic
        for thermodynamic_state in thermodynamic_states:
            if thermodynamic_state.is_periodic != is_periodic:
                raise Exception('Thermodynamic states contain a mixture of '
                                'systems with and without periodic boundary conditions.')
        # Check that sampler states specify box vectors if the system is periodic
        if is_periodic:
            for sampler_state in sampler_states:
                if sampler_state.box_vectors is None:
                    raise Exception('All sampler states must have box_vectors defined if the system is periodic.')
        # Make sure all states have same number of particles. We don't
        # currently support writing storage with different n_particles
        n_particles = thermodynamic_states[0].n_particles
        for the_states in [thermodynamic_states, sampler_states]:
            for state in the_states:
                if state.n_particles != n_particles:
                    raise ValueError('All ThermodynamicStates and SamplerStates must '
                                     'have the same number of particles')
        # Handle default argument for metadata and add default simulation title.
        default_title = (self._TITLE_TEMPLATE.format(time.asctime(time.localtime())))
        if metadata is None:
            metadata = dict(title=default_title)
        elif 'title' not in metadata:
            metadata['title'] = default_title
        self._metadata = metadata
        # Save thermodynamic states. This sets n_replicas.
        self._thermodynamic_states = copy.deepcopy(thermodynamic_states)
        # Handle default unsampled thermodynamic states.
        if unsampled_thermodynamic_states is None:
            self._unsampled_states = []
        else:
            self._unsampled_states = copy.deepcopy(unsampled_thermodynamic_states)
        # Deep copy sampler states.
        self._sampler_states = [copy.deepcopy(sampler_state) for sampler_state in sampler_states]
        # Set initial thermodynamic state indices if not specified
        if initial_thermodynamic_states is None:
            initial_thermodynamic_states = self._default_initial_thermodynamic_states(thermodynamic_states,
                                                                                      sampler_states)
        self._replica_thermodynamic_states = np.array(initial_thermodynamic_states, np.int64)
        # Assign default system box vectors if None has been specified.
        for replica_id, thermodynamic_state_id in enumerate(self._replica_thermodynamic_states):
            sampler_state = self._sampler_states[replica_id]
            if sampler_state.box_vectors is not None:
                continue
            thermodynamic_state = self._thermodynamic_states[thermodynamic_state_id]
            sampler_state.box_vectors = thermodynamic_state.system.getDefaultPeriodicBoxVectors()
        # Ensure there is an MCMCMove for each thermodynamic state.
        if isinstance(self._mcmc_moves, mcmc.MCMCMove):
            self._mcmc_moves = [copy.deepcopy(self._mcmc_moves) for _ in range(self.n_states)]
        elif len(self._mcmc_moves) != self.n_states:
            raise RuntimeError('The number of MCMCMoves ({}) and ThermodynamicStates ({}) must '
                               'be the same.'.format(len(self._mcmc_moves), self.n_states))
        # Reset iteration counter.
        self._iteration = 0
        # Reset statistics.
        # _n_accepted_matrix[i][j] is the number of swaps accepted between thermodynamic states i and j.
        # _n_proposed_matrix[i][j] is the number of swaps proposed between thermodynamic states i and j.
        self._n_accepted_matrix = np.zeros([self.n_states, self.n_states], np.int64)
        self._n_proposed_matrix = np.zeros([self.n_states, self.n_states], np.int64)
        # Allocate memory for energy matrix. energy_thermodynamic/unsampled_states[k][l]
        # is the reduced potential computed at the positions of SamplerState sampler_states[k]
        # and ThermodynamicState thermodynamic/unsampled_states[l].
        self._energy_thermodynamic_states = np.zeros([self.n_replicas, self.n_states], np.float64)
        self._neighborhoods = np.zeros([self.n_replicas, self.n_states], 'i1')
        self._energy_unsampled_states = np.zeros([self.n_replicas, len(self._unsampled_states)], np.float64)
@classmethod
def _instantiate_sampler_from_reporter(cls, reporter):
"""
Creates a new instance of the reporter on disk and sampler which can then be manipulated.
Does not set any variables, use :func:`_restore_sampler_from_reporter` after calling this to set them.
Helper function to break up the :func:`from_storage` method in a way that subclasses can specialize
Parameters
----------
reporter : Reporter
A reporter open for reading.
Returns
-------
sampler : MultiStateSampler
A new instance of MultiStateSampler (or subclass) with options
restored from disk.
"""
# Retrieve options and create new simulation.
options = reporter.read_dict('options')
options['mcmc_moves'] = reporter.read_mcmc_moves()
sampler = cls(**options)
# Display papers to be cited.
sampler._display_citations()
return sampler
def _restore_sampler_from_reporter(self, reporter):
"""
(Re-)initialize the instanced sampler from the reporter. Intended to be called as the second half of a
:func:`from_storage` method after the :class:`MultiStateSampler` has been instanced from disk.
The ``self.reporter`` instance of this sampler will be in an open state for append mode after this has been set,
and the ``reporter`` used as argument will be closed. In the event they are the same, reporter will be
returned as open in append mode.
Note: Needs an already initialized reporter to work correctly.
Warning: can overwrite the current state of this :class:`MultiStateSampler` instance.
Helper function to break up the from_storage method in a way that subclasses can specialize
Parameters
----------
reporter : multistate.MultiStateReporter
Reporter open for reading.
"""
# Read the last iteration reported to ensure we don't include junk
# data written just before a crash.
logger.debug("Reading storage file {}...".format(reporter.filepath))
metadata = reporter.read_dict('metadata')
thermodynamic_states, unsampled_states = reporter.read_thermodynamic_states()
def _read_options(check_iteration):
internal_sampler_states = reporter.read_sampler_states(iteration=check_iteration)
internal_state_indices = reporter.read_replica_thermodynamic_states(iteration=check_iteration)
internal_energy_thermodynamic_states, internal_neighborhoods, internal_energy_unsampled_states = \
reporter.read_energies(iteration=check_iteration)
internal_n_accepted_matrix, internal_n_proposed_matrix = \
reporter.read_mixing_statistics(iteration=check_iteration)
# Search for last cached free energies only if online analysis is activated.
internal_last_mbar_f_k, internal_last_err_free_energy = None, None
if self.online_analysis_interval is not None:
online_analysis_info = self._read_last_free_energy(reporter, check_iteration)
try:
internal_last_mbar_f_k, (_, internal_last_err_free_energy) = online_analysis_info
except TypeError:
# Trap case where online analysis is set but not run yet and (_, ...) = None is not iterable
pass
return (internal_sampler_states, internal_state_indices, internal_energy_thermodynamic_states,
internal_neighborhoods, internal_energy_unsampled_states, internal_n_accepted_matrix,
internal_n_proposed_matrix, internal_last_mbar_f_k, internal_last_err_free_energy)
# Keep trying to resume further and further back from the most recent checkpoint back
checkpoints = reporter.read_checkpoint_iterations()
checkpoint_reverse_iter = iter(checkpoints[::-1])
while True:
try:
checkpoint = next(checkpoint_reverse_iter)
output_data = _read_options(checkpoint)
# Found data, can escape loop
break
except StopIteration:
raise self._throw_restoration_error("Attempting to restore from any checkpoint failed. "
"Either your data is fully corrupted or something has gone very "
"wrong to see this message. "
"Please open an issue on the GitHub issue tracker if you see this!")
except:
# Trap all other errors caught by the load process
continue
if checkpoint < checkpoints[-1]:
logger.warning("Could not use most recent checkpoint at {}, instead pulled from {}".format(checkpoints[-1],
checkpoint))
(sampler_states, state_indices, energy_thermodynamic_states, neighborhoods, energy_unsampled_states,
n_accepted_matrix, n_proposed_matrix, last_mbar_f_k, last_err_free_energy) = output_data
# Assign attributes.
self._iteration = int(checkpoint) # The int() can probably be removed when pinned to NetCDF4 >=1.4.0
self._thermodynamic_states = thermodynamic_states
self._unsampled_states = unsampled_states
self._sampler_states = sampler_states
self._replica_thermodynamic_states = state_indices
self._energy_thermodynamic_states = energy_thermodynamic_states
self._neighborhoods = neighborhoods
self._energy_unsampled_states = energy_unsampled_states
self._n_accepted_matrix = n_accepted_matrix
self._n_proposed_matrix = n_proposed_matrix
self._metadata = metadata
self._last_mbar_f_k = last_mbar_f_k
self._last_err_free_energy = last_err_free_energy
    def _check_nan_energy(self):
        """Checks that energies are finite and abort otherwise.

        Checks both sampled and unsampled thermodynamic states. Only energies
        inside each replica's neighborhood are inspected for the sampled
        states, since energies outside the neighborhood are not computed.

        Raises
        ------
        SimulationNaNError
            If any inspected energy is NaN.
        """
        # Find faulty replicas to create error message.
        nan_replicas = []
        # Check sampled thermodynamic states first.
        state_type = 'thermodynamic state'
        for replica_id, state_id in enumerate(self._replica_thermodynamic_states):
            neighborhood = self._neighborhood(state_id)
            energies_neighborhood = self._energy_thermodynamic_states[replica_id, neighborhood]
            if np.any(np.isnan(energies_neighborhood)):
                nan_replicas.append((replica_id, energies_neighborhood))
        # If there are no NaNs in energies, look for NaNs in the unsampled states energies.
        if (len(nan_replicas) == 0) and (self._energy_unsampled_states.shape[1] > 0):
            state_type = 'unsampled thermodynamic state'
            for replica_id in range(self.n_replicas):
                if np.any(np.isnan(self._energy_unsampled_states[replica_id])):
                    nan_replicas.append((replica_id, self._energy_unsampled_states[replica_id]))
        # Raise exception if we have found some NaN energies.
        if len(nan_replicas) > 0:
            # Log failed replica, its thermo state, and the energy matrix row.
            err_msg = "NaN encountered in {} energies for the following replicas and states".format(state_type)
            for replica_id, energy_row in nan_replicas:
                err_msg += '\n\tEnergies for positions at replica {} (current state {}): {} kT'.format(
                    replica_id, self._replica_thermodynamic_states[replica_id], energy_row)
            logger.critical(err_msg)
            raise SimulationNaNError(err_msg)
    @mpiplus.on_single_node(rank=0, broadcast_result=False, sync_nodes=False)
    def _display_citations(self, overwrite_global=False, citation_stack=None):
        """
        Display papers to be cited.

        Runs on MPI node 0 only. The overwrite_global command will force the
        citation to display even if the "have_citations_been_shown" variable
        is True.
        """
        # TODO Add original citations for various replica-exchange schemes.
        # TODO Show subset of OpenMM citations based on what features are being used.
        openmm_citations = """\
        Friedrichs MS, Eastman P, Vaidyanathan V, Houston M, LeGrand S, Beberg AL, Ensign DL, Bruns CM, and Pande VS. Accelerating molecular dynamic simulations on graphics processing unit. J. Comput. Chem. 30:864, 2009. DOI: 10.1002/jcc.21209
        Eastman P and Pande VS. OpenMM: A hardware-independent framework for molecular simulations. Comput. Sci. Eng. 12:34, 2010. DOI: 10.1109/MCSE.2010.27
        Eastman P and Pande VS. Efficient nonbonded interactions for molecular dynamics on a graphics processing unit. J. Comput. Chem. 31:1268, 2010. DOI: 10.1002/jcc.21413
        Eastman P and Pande VS. Constant constraint matrix approximation: A robust, parallelizable constraint method for molecular simulations. J. Chem. Theor. Comput. 6:434, 2010. DOI: 10.1021/ct900463w"""
        mbar_citations = """\
        Shirts MR and Chodera JD. Statistically optimal analysis of samples from multiple equilibrium states. J. Chem. Phys. 129:124105, 2008. DOI: 10.1063/1.2978177"""
        # NOTE(review): mbar_citations is currently never printed — presumably it
        # was meant to be appended to citation_stack; confirm intent before removing.
        if citation_stack is not None:
            citation_stack = [openmm_citations] + citation_stack
        else:
            citation_stack = [openmm_citations]
        if overwrite_global or (not self._have_displayed_citations_before and not self._global_citation_silence):
            print("Please cite the following:")
            print("")
            for citation in citation_stack:
                print(citation)
            self._have_displayed_citations_before = True
# -------------------------------------------------------------------------
# Internal-usage: Initialization and storage utilities.
# -------------------------------------------------------------------------
@classmethod
def _default_initial_thermodynamic_states(cls, thermodynamic_states, sampler_states):
"""
Create the initial_thermodynamic_states obeying the following rules:
* If ``len(thermodynamic_states) == len(sampler_states)``: 1-to-1 distribution
* If ``len(thermodynamic_states) > len(sampler_states)``: First and last state distributed first
remaining ``sampler_states`` spaced evenly by index until ``sampler_states`` are depleted.
If there is only one ``sampler_state``, then the only first ``thermodynamic_state`` will be chosen
* If ``len(thermodynamic_states) < len(sampler_states)``, each ``thermodynamic_state`` receives an
equal number of ``sampler_states`` until there are insufficient number of ``sampler_states`` remaining
to give each ``thermodynamic_state`` an equal number. Then the rules from the previous point are
followed.
"""
n_thermo = len(thermodynamic_states)
n_sampler = len(sampler_states)
thermo_indices = np.arange(n_thermo, dtype=int)
initial_thermo_states = np.zeros(n_sampler, dtype=int)
# Determine how many loops we can do
loops = n_sampler // n_thermo # Floor division (//)
n_looped = n_thermo * loops
initial_thermo_states[:n_looped] = np.tile(thermo_indices, loops)
# Distribute remaining values, -1 from n_thermo to handle indices correctly
initial_thermo_states[n_looped:] = np.linspace(0, n_thermo - 1, n_sampler - n_looped, dtype=int)
return initial_thermo_states
@staticmethod
def _does_file_exist(file_path):
"""Check if there is a file at the given path."""
return os.path.exists(file_path) and os.path.getsize(file_path) > 0
@staticmethod
def _reporter_from_storage(storage, check_exist=True):
"""Return the Reporter object associated to this storage.
If check_exist is True, FileNotFoundError is raised if the files
are not found. The return reporter is closed.
"""
if isinstance(storage, str):
# Open a reporter to read the data.
reporter = multistate.MultiStateReporter(storage)
else:
reporter = storage
# Check if netcdf file exists.
if check_exist and not reporter.storage_exists():
raise FileNotFoundError('Storage file {} or its subfiles do not exist; '
'cannot read status.'.format(reporter.filepath))
return reporter
    @mpiplus.on_single_node(rank=0, broadcast_result=False, sync_nodes=True)
    def _initialize_reporter(self):
        """Initialize the reporter and store initial information.

        This is executed only on MPI node 0 and it is blocking. This is to
        avoid the case where the other nodes skip ahead and try to read
        from a file that hasn't been created yet.
        """
        # Open in write mode; create() has already verified the file does not exist.
        self._reporter.open(mode='w')
        self._reporter.write_thermodynamic_states(self._thermodynamic_states,
                                                  self._unsampled_states)
        # Store run metadata and ReplicaExchange options.
        self._store_options()
        self._reporter.write_dict('metadata', self._metadata)
        # Store initial conditions. This forces the storage to be synchronized.
        self._report_iteration()
    @mpiplus.on_single_node(rank=0, broadcast_result=False, sync_nodes=False)
    @mpiplus.delayed_termination
    @utils.with_timer('Writing iteration information to storage')
    def _report_iteration(self):
        """Store positions, states, and energies of current iteration.

        This is executed only on MPI node 0 and it's not blocking. The
        termination is delayed so that the file is not written only with
        partial data if the program gets interrupted.
        Subclasses should not attempt to modify this function as it can
        force either duplicated or missed ``sync()`` calls. In the event
        that they MUST overwrite this function, the last call in the whole
        stack should be :func:`multistate.MultiStateReporter.write_last_iteration`.
        """
        # Call report_iteration_items for a subclass-friendly function
        self._report_iteration_items()
        # Writing the timestamp and last iteration after the items marks the
        # iteration as complete on disk.
        self._reporter.write_timestamp(self._iteration)
        self._reporter.write_last_iteration(self._iteration)
    @mpiplus.on_single_node(rank=0, broadcast_result=False, sync_nodes=False)
    @mpiplus.delayed_termination
    def _report_iteration_items(self):
        """
        Sub-function of :func:`_report_iteration` which handles all the actual individual item reporting in a
        sub-class friendly way. The final actions of writing timestamp, last-good-iteration, and syncing
        should be left to the :func:`_report_iteration` and subclasses should extend this function instead.
        """
        self._reporter.write_sampler_states(self._sampler_states, self._iteration)
        self._reporter.write_replica_thermodynamic_states(self._replica_thermodynamic_states, self._iteration)
        self._reporter.write_mcmc_moves(self._mcmc_moves)  # MCMCMoves can store internal statistics.
        self._reporter.write_energies(self._energy_thermodynamic_states, self._neighborhoods, self._energy_unsampled_states,
                                      self._iteration)
        self._reporter.write_mixing_statistics(self._n_accepted_matrix, self._n_proposed_matrix, self._iteration)
@classmethod
def default_options(cls):
"""
dict of all default class options (keyword arguments for __init__ for class and superclasses)
"""
options_to_report = dict()
for c in inspect.getmro(cls):
parameter_names, _, _, defaults, _, _, _ = inspect.getfullargspec(c.__init__)
if defaults:
class_options = {parameter_name: defaults[index] for (index, parameter_name) in
enumerate(parameter_names[-len(defaults):])}
options_to_report.update(class_options)
options_to_report.pop('mcmc_moves')
return options_to_report
@property
def options(self):
"""
dict of all class options (keyword arguments for __init__ for class and superclasses)
"""
options_to_report = dict()
for cls in inspect.getmro(type(self)):
parameter_names, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.__init__)
if defaults:
class_options = {parameter_name: getattr(self, '_' + parameter_name) for
parameter_name in parameter_names[-len(defaults):]}
options_to_report.update(class_options)
options_to_report.pop('mcmc_moves')
return options_to_report
    def _store_options(self):
        """Store __init__ parameters (beside MCMCMoves) in storage file."""
        logger.debug("Storing general ReplicaExchange options...")
        # self.options excludes 'mcmc_moves', which the reporter serializes separately.
        self._reporter.write_dict('options', self.options)
# -------------------------------------------------------------------------
# Locality
# -------------------------------------------------------------------------
def _neighborhood(self, state_index):
"""Compute the states in the local neighborhood determined by self.locality
Parameters
----------
state_index : int
The current state
Returns
-------
neighborhood : list of int
The states in the local neighborhood
"""
if self.locality is None:
# Global neighborhood
return list(range(0, self.n_states))
else:
# Local neighborhood specified by 'locality'
return list(range(max(0, state_index - self.locality), min(self.n_states, state_index + self.locality + 1)))
# -------------------------------------------------------------------------
# Internal-usage: Distributed tasks.
# -------------------------------------------------------------------------
    @utils.with_timer('Propagating all replicas')
    def _propagate_replicas(self):
        """Propagate all replicas with their assigned MCMCMoves, distributed over MPI nodes."""
        # TODO Report on efficiency of dynamics (fraction of time wasted to overhead).
        logger.debug("Propagating all replicas...")
        # Distribute propagation across nodes. Only node 0 will get all positions
        # and box vectors. The other nodes, only need the positions that they use
        # for propagation and computation of the energy matrix entries.
        propagated_states, replica_ids = mpiplus.distribute(self._propagate_replica, range(self.n_replicas),
                                                            send_results_to=0)
        # Update all sampler states. For non-0 nodes, this will update only the
        # sampler states associated to the replicas propagated by this node.
        for replica_id, propagated_state in zip(replica_ids, propagated_states):
            self._sampler_states[replica_id].__setstate__(propagated_state, ignore_velocities=True)
        # Gather all MCMCMoves statistics. All nodes must have these up-to-date
        # since they are tied to the ThermodynamicState, not the replica.
        all_statistics = mpiplus.distribute(self._get_replica_move_statistics, range(self.n_replicas),
                                            send_results_to='all')
        for replica_id in range(self.n_replicas):
            if len(all_statistics[replica_id]) > 0:
                thermodynamic_state_id = self._replica_thermodynamic_states[replica_id]
                self._mcmc_moves[thermodynamic_state_id].statistics = all_statistics[replica_id]
    def _propagate_replica(self, replica_id):
        """Propagate thermodynamic state associated to the given replica.

        Returns the serialized sampler state (without velocities) so it can be
        sent back to the root node.

        Raises
        ------
        SimulationNaNError
            If the MCMC move produced NaN positions; the failing context and
            move are serialized to a 'nan-error-logs' directory first.
        """
        # Retrieve thermodynamic, sampler states, and MCMC move of this replica.
        thermodynamic_state_id = self._replica_thermodynamic_states[replica_id]
        thermodynamic_state = self._thermodynamic_states[thermodynamic_state_id]
        mcmc_move = self._mcmc_moves[thermodynamic_state_id]
        sampler_state = self._sampler_states[replica_id]
        # Apply MCMC move.
        try:
            mcmc_move.apply(thermodynamic_state, sampler_state)
        except mcmc.IntegratorMoveError as e:
            # Save the NaN-producing context and MCMCMove before aborting.
            output_dir = os.path.join(os.path.dirname(self._reporter.filepath), 'nan-error-logs')
            file_name = 'iteration{}-replica{}-state{}'.format(self._iteration, replica_id,
                                                               thermodynamic_state_id)
            e.serialize_error(os.path.join(output_dir, file_name))
            message = ('Propagating replica {} at state {} resulted in a NaN!\n'
                       'The state of the system and integrator before the error were saved'
                       ' in {}').format(replica_id, thermodynamic_state_id, output_dir)
            logger.critical(message)
            raise SimulationNaNError(message)
        # Send the new state to the root node. We can ignore velocities as we're not saving them.
        return sampler_state.__getstate__(ignore_velocities=True)
def _get_replica_move_statistics(self, replica_id):
"""Return the statistics of the MCMCMove currently associated to this replica."""
thermodynamic_state_id = self._replica_thermodynamic_states[replica_id]
mcmc_move = self._mcmc_moves[thermodynamic_state_id]
try:
move_statistics = mcmc_move.statistics
except AttributeError:
move_statistics = {}
return move_statistics
    def _minimize_replica(self, replica_id, tolerance, max_iterations):
        """Minimize the specified replica.

        Parameters
        ----------
        replica_id : int
            Index of the replica whose positions are minimized.
        tolerance : simtk.unit.Quantity
            Minimization tolerance (energy/mole/length).
        max_iterations : int
            Maximum iterations; 0 means iterate until the FIRE integrator converges.

        Returns
        -------
        The minimized positions of the replica's sampler state.
        """
        # Retrieve thermodynamic and sampler states.
        thermodynamic_state_id = self._replica_thermodynamic_states[replica_id]
        thermodynamic_state = self._thermodynamic_states[thermodynamic_state_id]
        sampler_state = self._sampler_states[replica_id]
        # Use the FIRE minimizer
        integrator = FIREMinimizationIntegrator(tolerance=tolerance)
        # Create context
        context = thermodynamic_state.create_context(integrator)
        # Set initial positions and box vectors.
        sampler_state.apply_to_context(context)
        # Compute the initial energy of the system for logging.
        initial_energy = thermodynamic_state.reduced_potential(context)
        logger.debug('Replica {}/{}: initial energy {:8.3f}kT'.format(
            replica_id + 1, self.n_replicas, initial_energy))
        # Minimize energy.
        try:
            if max_iterations == 0:
                logger.debug('Using FIRE: tolerance {} minimizing to convergence'.format(tolerance))
                # Step in batches of 50 until the integrator reports convergence.
                while integrator.getGlobalVariableByName('converged') < 1:
                    integrator.step(50)
            else:
                logger.debug('Using FIRE: tolerance {} max_iterations {}'.format(tolerance, max_iterations))
                integrator.step(max_iterations)
        except Exception as e:
            if str(e) == 'Particle coordinate is nan':
                logger.debug('NaN encountered in FIRE minimizer; falling back to L-BFGS after resetting positions')
                sampler_state.apply_to_context(context)
                openmm.LocalEnergyMinimizer.minimize(context, tolerance, max_iterations)
            else:
                raise e
        # Get the minimized positions.
        sampler_state.update_from_context(context)
        # Compute the final energy of the system for logging.
        final_energy = thermodynamic_state.reduced_potential(sampler_state)
        logger.debug('Replica {}/{}: final energy {:8.3f}kT'.format(
            replica_id + 1, self.n_replicas, final_energy))
        # TODO if energy > 0, use slower openmm minimizer
        # Clean up the context (which also releases its integrator).
        del context
        # Return minimized positions.
        return sampler_state.positions
    @utils.with_timer('Computing energy matrix')
    def _compute_energies(self):
        """Compute energies of all replicas at all states in their neighborhoods."""
        # Determine neighborhoods (all nodes)
        self._neighborhoods[:,:] = False
        for (replica_index, state_index) in enumerate(self._replica_thermodynamic_states):
            neighborhood = self._neighborhood(state_index)
            self._neighborhoods[replica_index, neighborhood] = True
        # Distribute energy computation across nodes. Only node 0 receives
        # all the energies since it needs to store them and mix states.
        new_energies, replica_ids = mpiplus.distribute(self._compute_replica_energies, range(self.n_replicas),
                                                       send_results_to=0)
        # Update energy matrices. Non-0 nodes update only the energies computed by this replica.
        for replica_id, energies in zip(replica_ids, new_energies):
            energy_thermodynamic_states, energy_unsampled_states = energies  # Unpack.
            # Energies outside the neighborhood are left untouched (not recomputed).
            neighborhood = self._neighborhood(self._replica_thermodynamic_states[replica_id])
            self._energy_thermodynamic_states[replica_id, neighborhood] = energy_thermodynamic_states
            self._energy_unsampled_states[replica_id] = energy_unsampled_states
def _compute_replica_energies(self, replica_id):
    """Compute the energy for the replica in every ThermodynamicState.

    Returns a pair of 1-D arrays: reduced potentials at the sampled states
    within the replica's neighborhood, and at all unsampled states.
    """
    # Determine neighborhood
    state_index = self._replica_thermodynamic_states[replica_id]
    neighborhood = self._neighborhood(state_index)
    # Only compute energies of the sampled states over neighborhoods.
    energy_neighborhood_states = np.zeros(len(neighborhood))
    energy_unsampled_states = np.zeros(len(self._unsampled_states))
    neighborhood_thermodynamic_states = [self._thermodynamic_states[n] for n in neighborhood]
    # Retrieve sampler state associated to this replica.
    sampler_state = self._sampler_states[replica_id]
    # Compute energy for all thermodynamic states.
    for energies, the_states in [(energy_neighborhood_states, neighborhood_thermodynamic_states),
                                 (energy_unsampled_states, self._unsampled_states)]:
        # Group thermodynamic states by compatibility so each group can
        # reuse a single cached Context.
        compatible_groups, original_indices = states.group_by_compatibility(the_states)
        # Compute the reduced potentials of all the compatible states.
        for compatible_group, state_indices in zip(compatible_groups, original_indices):
            # Get the context, any Integrator works.
            context, integrator = cache.global_context_cache.get_context(compatible_group[0])
            # Update positions and box vectors. We don't need
            # to set Context velocities for the potential.
            sampler_state.apply_to_context(context, ignore_velocities=True)
            # Compute and update the reduced potentials.
            compatible_energies = states.ThermodynamicState.reduced_potential_at_states(
                context, compatible_group)
            for energy_idx, state_idx in enumerate(state_indices):
                energies[state_idx] = compatible_energies[energy_idx]
    # Return the new energies.
    return energy_neighborhood_states, energy_unsampled_states
# -------------------------------------------------------------------------
# Internal-usage: Replicas mixing.
# -------------------------------------------------------------------------
@mpiplus.on_single_node(0, broadcast_result=True)
def _mix_replicas(self):
    """Do nothing to replicas.

    Base-class placeholder: this sampler performs no state swaps. Runs
    only on MPI node 0 and broadcasts its result to the other nodes.
    """
    logger.debug("Mixing replicas (does nothing for MultiStateSampler)...")
    # Reset storage to keep track of swap attempts this iteration.
    self._n_accepted_matrix[:, :] = 0
    self._n_proposed_matrix[:, :] = 0
    # Determine fraction of swaps accepted this iteration.
    # NOTE(review): the counters were just zeroed above, so both sums are
    # always 0 here and the logged fraction stays 0.0; subclasses that
    # actually swap presumably fill the matrices before this point — confirm.
    n_swaps_proposed = self._n_proposed_matrix.sum()
    n_swaps_accepted = self._n_accepted_matrix.sum()
    swap_fraction_accepted = 0.0
    if n_swaps_proposed > 0:
        swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed  # Python 3 uses true division for /
    logger.debug("Accepted {}/{} attempted swaps ({:.1f}%)".format(n_swaps_accepted, n_swaps_proposed,
                                                                   swap_fraction_accepted * 100.0))
# -------------------------------------------------------------------------
# Internal-usage: Offline and online analysis
# -------------------------------------------------------------------------
@mpiplus.on_single_node(rank=0, broadcast_result=True)
@mpiplus.delayed_termination
@utils.with_timer('Computing offline free energy estimate')
def _offline_analysis(self):
    """Compute offline estimate of free energies

    This scheme only works with global localities.

    Returns the last error estimate of the free energy (or None when MBAR
    failed this iteration, in which case nothing is written to storage).
    """
    # TODO: Currently, this just uses MBAR, which only works for global neighborhoods.
    # TODO: Add Local WHAM support.
    if self.locality is not None:
        raise Exception('Cannot use MBAR with non-global locality.')
    # This relative import is down here because having it at the top causes an ImportError.
    # __init__ pulls in multistate, which pulls in analyze, which pulls in MultiState. Because the first
    # MultiStateSampler never finished importing, its not in the name space which causes relative analyze import of
    # MultiStateSampler to crash as neither of them are the __main__ package.
    # https://stackoverflow.com/questions/6351805/cyclic-module-dependencies-and-relative-imports-in-python
    from openmmtools.multistate.multistateanalyzer import MultiStateSamplerAnalyzer
    # Start the analysis
    bump_error_counter = False
    # Set up analyzer
    # Unbias restraints is False because this will quickly accumulate large time to re-read the trajectories
    # and unbias the restraints every online-analysis. Current model is to use the last_mbar_f_k as a
    # hot-start to the analysis. Once we store unbias info as part of a CustomCVForce, we can revisit this choice.
    analysis = MultiStateSamplerAnalyzer(self._reporter, analysis_kwargs={'initial_f_k': self._last_mbar_f_k},
                                         unbias_restraint=False)
    # Indices for online analysis, "i'th index, j'th index"
    idx, jdx = 0, -1
    timer = utils.Timer()
    timer.start("MBAR")
    logger.debug("Computing free energy with MBAR...")
    try:  # Trap errors for MBAR being under sampled and the W_nk matrix not being normalized correctly
        mbar = analysis.mbar
        free_energy, err_free_energy = analysis.get_free_energy()
    except ParameterError as e:
        # We don't update self._last_err_free_energy here since if it
        # wasn't below the target threshold before, it won't stop MultiStateSampler now.
        bump_error_counter = True
        self._online_error_bank.append(e)
        if len(self._online_error_bank) > 6:
            # Cache only the last set
            self._online_error_bank.pop(0)
        free_energy = None
    else:
        # Success: hot-start next analysis from this solution and report the
        # end-to-end (first state, last state) free-energy difference.
        self._last_mbar_f_k = mbar.f_k
        free_energy = free_energy[idx, jdx]
        self._last_err_free_energy = err_free_energy[idx, jdx]
        logger.debug("Current Free Energy Estimate is {} +- {} kT".format(free_energy,
                                                                          self._last_err_free_energy))
        # Trap a case when errors don't converge (usually due to under sampling)
        if np.isnan(self._last_err_free_energy):
            self._last_err_free_energy = np.inf
    timer.stop("MBAR")
    # Raise an exception after 6 times MBAR gave an error.
    if bump_error_counter:
        self._online_error_trap_counter += 1
        # Will never be true, but code left in place in case we change logic to allow again
        if self._online_error_trap_counter >= np.inf:
            logger.debug("Thrown MBAR Errors:")
            for err in self._online_error_bank:
                logger.debug(str(err))
            raise RuntimeError("Online Analysis has failed too many times! Please "
                               "check the latest logs to see the latest thrown errors!")
        # Don't write out the free energy in case of error.
        return
    # Write out the numbers
    self._reporter.write_online_data_dynamic_and_static(self._iteration,
                                                        f_k=self._last_mbar_f_k,
                                                        free_energy=(free_energy, self._last_err_free_energy))
    return self._last_err_free_energy
@mpiplus.on_single_node(rank=0, broadcast_result=True)
@mpiplus.delayed_termination
@utils.with_timer('Computing online free energy estimate')
def _online_analysis(self, gamma0=1.0):
    """Perform online analysis of free energies with a stochastic update.

    This scheme works with all localities: global and local.

    Parameters
    ----------
    gamma0 : float, optional
        Initial gain of the update; the effective gain decays as
        ``gamma0 / (iteration + 1)``.

    Returns
    -------
    float
        The last error estimate of the free energy. This estimator does
        not provide an error, so it is always ``inf``.
    """
    timer = utils.Timer()
    timer.start("Online analysis")
    from scipy.special import logsumexp
    # TODO: This is experimental
    gamma = gamma0 / float(self._iteration+1)
    # Hot-start from the previous estimate when one exists.
    if self._last_mbar_f_k is None:
        self._last_mbar_f_k = np.zeros([self.n_states], np.float64)
    logZ = - self._last_mbar_f_k
    for (replica_index, state_index) in enumerate(self._replica_thermodynamic_states):
        neighborhood = self._neighborhood(state_index)
        u_k = self._energy_thermodynamic_states[replica_index,:]
        log_P_k = np.zeros([self.n_states], np.float64)
        # NOTE(review): log_pi_k and log_weights stay all-zero below, i.e.
        # uniform target weights/priors in this experimental update.
        log_pi_k = np.zeros([self.n_states], np.float64)
        log_weights = np.zeros([self.n_states], np.float64)
        # Normalized log-probabilities of the neighborhood states.
        log_P_k[neighborhood] = log_weights[neighborhood] - u_k[neighborhood]
        log_P_k[neighborhood] -= logsumexp(log_P_k[neighborhood])
        logZ[neighborhood] += gamma * np.exp(log_P_k[neighborhood] - log_pi_k[neighborhood])
    # Subtract off logZ[0] to prevent logZ from growing without bound
    logZ[:] -= logZ[0]
    self._last_mbar_f_k = -logZ
    free_energy = self._last_mbar_f_k[-1] - self._last_mbar_f_k[0]
    # Fix: use np.inf — the capitalized alias np.Inf was removed in NumPy 2.0.
    self._last_err_free_energy = np.inf
    # Store free energy estimate
    self._reporter.write_online_data_dynamic_and_static(self._iteration,
                                                        f_k=self._last_mbar_f_k,
                                                        free_energy=(free_energy, self._last_err_free_energy))
    timer.stop("Online analysis")
    # Report online analysis to debug log
    logger.debug('*** ONLINE analysis free energies:')
    msg = ' '
    for x in self._last_mbar_f_k:
        msg += '%8.1f' % x
    logger.debug(msg)
    return self._last_err_free_energy
def _update_analysis(self):
    """Update online analysis of free energies"""
    # TODO: Currently, this just calls the offline analysis at certain intervals, if requested.
    # TODO: Refactor this to always compute fast online analysis, updating with offline analysis infrequently.
    # TODO: Simplify this
    analysis_to_perform = None
    if self.online_analysis_interval is None:
        logger.debug('No online analysis requested')
    elif self._iteration < self.online_analysis_minimum_iterations:
        logger.debug('Not enough iterations for online analysis (self.online_analysis_minimum_iterations = %d)' % self.online_analysis_minimum_iterations)
        analysis_to_perform = 'online'
    elif self._iteration % self.online_analysis_interval != 0:
        logger.debug('Not an online analysis iteration')
        analysis_to_perform = 'online'
    elif self.locality is not None:
        logger.debug('Not a global locality')
        analysis_to_perform = 'online'
    else:
        # All conditions are met for offline analysis
        logger.debug('Will perform offline analysis')
        analysis_to_perform = 'offline'
    # Dispatch to the selected estimator (the decorated methods themselves
    # ensure execution happens on node 0 only).
    estimators = {'online': self._online_analysis,
                  'offline': self._offline_analysis}
    if analysis_to_perform is not None:
        self._last_err_free_energy = estimators[analysis_to_perform]()
    return
@staticmethod
def _read_last_free_energy(reporter, iteration):
"""Get the last free energy computed from online analysis"""
last_f_k = None
last_free_energy = None
# Search for a valid free energy from the given iteration
# to the start of the calculation.
try:
free_energy_data = reporter.read_online_analysis_data(None, 'f_k', 'free_energy')
last_f_k = free_energy_data['f_k']
last_free_energy = free_energy_data['free_energy']
except ValueError:
for index in range(iteration, 0, -1):
try:
free_energy_data = reporter.read_online_analysis_data(index, 'f_k', 'free_energy')
last_f_k = free_energy_data['f_k']
last_free_energy = free_energy_data['free_energy']
except (IndexError, KeyError, ValueError):
# No such f_k written yet (or variable created).
break
# Find an f_k that is not all zeros (or masked and empty)
if not (np.ma.is_masked(last_f_k) or np.all(last_f_k == 0)):
break # Don't need to continue the loop if we already found one
return last_f_k, last_free_energy
def _is_completed(self, iteration_limit=None):
    """Check if we have reached the required number of iterations or statistical error."""
    # Default to the configured total number of iterations.
    limit = self.number_of_iterations if iteration_limit is None else iteration_limit
    return self._is_completed_static(limit, self._iteration,
                                     self._last_err_free_energy,
                                     self.online_analysis_target_error)
@staticmethod
def _is_completed_static(iteration_limit, iteration, last_err_free_energy,
online_analysis_target_error):
"""Check if we have reached the required number of iterations or statistical error."""
# Return if we have reached the number of iterations
# or the statistical error target required.
if (iteration >= iteration_limit or (
last_err_free_energy is not None and last_err_free_energy <= online_analysis_target_error)):
return True
return False
@staticmethod
def _throw_restoration_error(message):
    """Masking function to hide the RestorationError class without exposing it or making it a 'hidden' (_X) error"""
    class RestorationError(Exception):
        """Represent errors occurring during attempts to restore simulations."""
        def __init__(self, error_message):
            super().__init__(error_message)
            # Critical messages which have halted a simulation, badly
            logger.critical(error_message)
    # Always raises; this function never returns.
    raise RestorationError(message)
# -------------------------------------------------------------------------
# Internal-usage: Test globals
# -------------------------------------------------------------------------
# Test-only toggle; when True, presumably suppresses citation printing in the
# reporting code elsewhere in this class — TODO confirm against callers.
_global_citation_silence = False
# ==============================================================================
# MAIN AND TESTS
# ==============================================================================
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod() | unknown | codeparrot/codeparrot-clean | |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/Normalization.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cpu/Loops.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/Dispatch.h>
namespace at::native {
namespace {
// Fill the iterator's output with the per-row rescaling factor used by renorm:
// entries whose norm exceeds `maxnorm` get maxnorm / (norm + eps) — the small
// eps guards against division by zero — and all other entries get 1.
void renorm_scale_factor_impl(TensorIteratorBase& iter, double maxnorm) {
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "renorm_scale_factor_cpu", [&] {
    using vec_t = at::vec::Vectorized<scalar_t>;
    const auto maxnorm_s = static_cast<scalar_t>(maxnorm);
    const auto maxnorm_v = vec_t(maxnorm_s);
    const auto eps_v = vec_t(static_cast<scalar_t>(1e-7));
    const auto one_v = vec_t(1.0);
    cpu_kernel_vec(
      iter,
      // Scalar fallback path.
      [maxnorm_s](scalar_t norm) -> scalar_t {
        const auto eps = static_cast<scalar_t>(1e-7);
        return (norm > maxnorm_s) ?
            maxnorm_s / (norm + eps) : static_cast<scalar_t>(1.0);
      },
      // Vectorized path: compute the factor unconditionally, then blend it
      // in only for lanes where norm > maxnorm (other lanes keep 1).
      [maxnorm_v, eps_v, one_v](vec_t norm) -> vec_t {
        auto fct = maxnorm_v / (norm + eps_v);
        return vec_t::blendv(one_v, fct, norm > maxnorm_v);
      });
  });
}
} // namespace (anonymous)
REGISTER_DISPATCH(renorm_scale_factor_stub, &renorm_scale_factor_impl)
} // namespace at::native | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/cpu/RenormKernel.cpp |
"""
Test the scalar constructors, which also do type-coercion
"""
import fractions
import inspect
import platform
import sys
import types
from typing import Any, Literal
import pytest
import numpy as np
from numpy._core import sctypes
from numpy.testing import assert_equal, assert_raises
class TestAsIntegerRatio:
    """Tests for float-scalar ``as_integer_ratio`` across all float widths."""
    # derived in part from the cpython test "test_floatasratio"

    @pytest.mark.parametrize("ftype", [
        np.half, np.single, np.double, np.longdouble])
    @pytest.mark.parametrize("f, ratio", [
        (0.875, (7, 8)),
        (-0.875, (-7, 8)),
        (0.0, (0, 1)),
        (11.5, (23, 2)),
        ])
    def test_small(self, ftype, f, ratio):
        # Values exactly representable in every float type.
        assert_equal(ftype(f).as_integer_ratio(), ratio)

    @pytest.mark.parametrize("ftype", [
        np.half, np.single, np.double, np.longdouble])
    def test_simple_fractions(self, ftype):
        R = fractions.Fraction
        assert_equal(R(0, 1),
                     R(*ftype(0.0).as_integer_ratio()))
        assert_equal(R(5, 2),
                     R(*ftype(2.5).as_integer_ratio()))
        assert_equal(R(1, 2),
                     R(*ftype(0.5).as_integer_ratio()))
        assert_equal(R(-2100, 1),
                     R(*ftype(-2100.0).as_integer_ratio()))

    @pytest.mark.parametrize("ftype", [
        np.half, np.single, np.double, np.longdouble])
    def test_errors(self, ftype):
        # Non-finite values cannot be expressed as a ratio of two integers.
        assert_raises(OverflowError, ftype('inf').as_integer_ratio)
        assert_raises(OverflowError, ftype('-inf').as_integer_ratio)
        assert_raises(ValueError, ftype('nan').as_integer_ratio)

    def test_against_known_values(self):
        R = fractions.Fraction
        assert_equal(R(1075, 512),
                     R(*np.half(2.1).as_integer_ratio()))
        assert_equal(R(-1075, 512),
                     R(*np.half(-2.1).as_integer_ratio()))
        assert_equal(R(4404019, 2097152),
                     R(*np.single(2.1).as_integer_ratio()))
        assert_equal(R(-4404019, 2097152),
                     R(*np.single(-2.1).as_integer_ratio()))
        assert_equal(R(4728779608739021, 2251799813685248),
                     R(*np.double(2.1).as_integer_ratio()))
        assert_equal(R(-4728779608739021, 2251799813685248),
                     R(*np.double(-2.1).as_integer_ratio()))
        # longdouble is platform dependent

    @pytest.mark.parametrize("ftype, frac_vals, exp_vals", [
        # dtype test cases generated using hypothesis
        # first five generated cases per dtype
        (np.half, [0.0, 0.01154830649280303, 0.31082276347447274,
                   0.527350517124794, 0.8308562335072596],
         [0, 1, 0, -8, 12]),
        (np.single, [0.0, 0.09248576989263226, 0.8160498218131407,
                     0.17389442853722373, 0.7956044195067877],
         [0, 12, 10, 17, -26]),
        (np.double, [0.0, 0.031066908499895136, 0.5214135908877832,
                     0.45780736035689296, 0.5906586745934036],
         [0, -801, 51, 194, -653]),
        pytest.param(
            np.longdouble,
            [0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495,
             0.9620175814461964],
            [0, -7400, 14266, -7822, -8721],
            marks=[
                pytest.mark.skipif(
                    np.finfo(np.double) == np.finfo(np.longdouble),
                    reason="long double is same as double"),
                pytest.mark.skipif(
                    platform.machine().startswith("ppc"),
                    reason="IBM double double"),
            ]
        )
    ])
    def test_roundtrip(self, ftype, frac_vals, exp_vals):
        # Rebuild each value from its ratio and compare against the original.
        for frac, exp in zip(frac_vals, exp_vals):
            f = np.ldexp(ftype(frac), exp)
            assert f.dtype == ftype
            n, d = f.as_integer_ratio()
            try:
                nf = np.longdouble(n)
                df = np.longdouble(d)
                if not np.isfinite(df):
                    raise OverflowError
            except (OverflowError, RuntimeWarning):
                # the values may not fit in any float type
                pytest.skip("longdouble too small on this platform")
            assert_equal(nf / df, f, f"{n}/{d}")
class TestIsInteger:
    """Tests for the scalar ``is_integer`` method on float and integer types."""

    @pytest.mark.parametrize("str_value", ["inf", "nan"])
    @pytest.mark.parametrize("code", np.typecodes["Float"])
    def test_special(self, code: str, str_value: str) -> None:
        # Non-finite values are never integral.
        scalar_type = np.dtype(code).type
        assert not scalar_type(str_value).is_integer()

    @pytest.mark.parametrize(
        "code", np.typecodes["Float"] + np.typecodes["AllInteger"]
    )
    def test_true(self, code: str) -> None:
        # Whole numbers report True for every float and integer dtype.
        whole = np.arange(-5, 5).astype(code)
        assert all(element.is_integer() for element in whole)

    @pytest.mark.parametrize("code", np.typecodes["Float"])
    def test_false(self, code: str) -> None:
        # Scaling by 1.1 makes every non-zero element fractional.
        fractional = np.arange(-5, 5).astype(code)
        fractional *= 1.1
        assert not any(
            element.is_integer() for element in fractional if element != 0
        )
class TestClassGetItem:
    """Tests for runtime subscription (``cls[...]``) of NumPy scalar types."""

    @pytest.mark.parametrize("cls", [
        np.number,
        np.integer,
        np.inexact,
        np.unsignedinteger,
        np.signedinteger,
        np.floating,
    ])
    def test_abc(self, cls: type[np.number]) -> None:
        # Abstract numeric types accept a single type parameter.
        alias = cls[Any]
        assert isinstance(alias, types.GenericAlias)
        assert alias.__origin__ is cls

    def test_abc_complexfloating(self) -> None:
        alias = np.complexfloating[Any, Any]
        assert isinstance(alias, types.GenericAlias)
        assert alias.__origin__ is np.complexfloating

    @pytest.mark.parametrize("arg_len", range(4))
    def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None:
        # complexfloating accepts exactly one or two type parameters.
        arg_tup = (Any,) * arg_len
        if arg_len in (1, 2):
            assert np.complexfloating[arg_tup]
        else:
            match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
            with pytest.raises(TypeError, match=match):
                np.complexfloating[arg_tup]

    @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character])
    def test_abc_non_numeric(self, cls: type[np.generic]) -> None:
        # Non-numeric abstract types are not subscriptable.
        with pytest.raises(TypeError):
            cls[Any]

    @pytest.mark.parametrize("code", np.typecodes["All"])
    def test_concrete(self, code: str) -> None:
        cls = np.dtype(code).type
        if cls in {np.bool, np.datetime64}:
            # these are intentionally subscriptable
            assert cls[Any]
        else:
            with pytest.raises(TypeError):
                cls[Any]

    @pytest.mark.parametrize("arg_len", range(4))
    def test_subscript_tuple(self, arg_len: int) -> None:
        # Tuple subscription on np.number needs exactly one element.
        arg_tup = (Any,) * arg_len
        if arg_len == 1:
            assert np.number[arg_tup]
        else:
            with pytest.raises(TypeError):
                np.number[arg_tup]

    def test_subscript_scalar(self) -> None:
        assert np.number[Any]

    @pytest.mark.parametrize("subscript", [Literal[True], Literal[False]])
    def test_subscript_bool(self, subscript: Literal[True, False]) -> None:
        assert isinstance(np.bool[subscript], types.GenericAlias)
class TestBitCount:
    """Tests for scalar ``bit_count`` (population count)."""
    # derived in part from the cpython test "test_bit_count"

    @pytest.mark.parametrize("itype", sctypes['int'] + sctypes['uint'])
    def test_small(self, itype):
        # Compare against the builtin int.bit_count for small values.
        start = max(np.iinfo(itype).min, 0)
        for value in range(start, 128):
            expected = value.bit_count()
            msg = f"Smoke test for {itype}({value}).bit_count()"
            assert itype(value).bit_count() == expected, msg

    def test_bit_count(self):
        for exp in (10, 17, 63):
            power = 1 << exp
            assert np.uint64(power).bit_count() == 1
            assert np.uint64(power - 1).bit_count() == exp
            assert np.uint64(power ^ 63).bit_count() == 7
            assert np.uint64((power - 1) ^ 510).bit_count() == exp - 8
class TestDevice:
    """
    Test scalar.device attribute and scalar.to_device() method.
    """
    # Representative scalar instances: bool, signed, unsigned, float, complex.
    scalars = [np.bool(True), np.int64(1), np.uint64(1), np.float64(1.0),
               np.complex128(1 + 1j)]

    @pytest.mark.parametrize("scalar", scalars)
    def test_device(self, scalar):
        # NumPy scalars always report the CPU device.
        assert scalar.device == "cpu"

    @pytest.mark.parametrize("scalar", scalars)
    def test_to_device(self, scalar):
        # Moving to the (only) cpu device is a no-op returning the same object.
        assert scalar.to_device("cpu") is scalar

    @pytest.mark.parametrize("scalar", scalars)
    def test___array_namespace__(self, scalar):
        # The array-API namespace of a NumPy scalar is numpy itself.
        assert scalar.__array_namespace__() is np
@pytest.mark.parametrize("scalar", [np.bool(True), np.int8(1), np.float64(1)])
def test_array_wrap(scalar):
    # Exercise scalar __array_wrap__ for as long as it exists; NumPy itself
    # should probably not rely on it, so it may not need to stay around.
    zero_dim = np.array(3, dtype=np.int8)
    # When the third argument is omitted, None, or True, the 0-d array
    # "decays" to a scalar. (Unlikely NumPy would pass None, but support it.)
    for extra_args in ((), (None, None), (None, True)):
        assert type(scalar.__array_wrap__(zero_dim, *extra_args)) is np.int8
    # With return_scalar=False the input array comes back unchanged.
    assert scalar.__array_wrap__(zero_dim, None, False) is zero_dim
    # Regression check for an old bug: a non-0-d array can never be
    # converted to a scalar.
    one_dim = np.array([3], dtype=np.int8)
    assert scalar.__array_wrap__(one_dim) is one_dim
    assert scalar.__array_wrap__(one_dim, None, True) is one_dim
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
class TestSignature:
    # test that scalar types have a valid __text_signature__ or __signature__ set
    @pytest.mark.parametrize(
        "sctype",
        [
            *sctypes["int"],
            *sctypes["uint"],
            *sctypes["float"],
            *sctypes["complex"],
            *sctypes["others"],
            np.datetime64,
            np.timedelta64,
        ],
    )
    def test_constructor_signatures(self, sctype: type[np.generic]):
        # inspect.signature raises ValueError when a type exposes no signature.
        try:
            sig = inspect.signature(sctype)
        except ValueError:
            pytest.fail(f"missing signature: {sctype}")
        assert sig.parameters

    @pytest.mark.parametrize(
        "sctype",
        [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]],
    )
    def test_method_signatures_is_integer(self, sctype: type[np.integer | np.floating]):
        try:
            sig = inspect.signature(sctype.is_integer)
        except ValueError:
            pytest.fail(f"missing signature: {sctype.__name__}.is_integer")
        # is_integer takes no arguments beyond the positional-only self.
        assert len(sig.parameters) == 1
        assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY

    @pytest.mark.parametrize("sctype", sctypes["float"])
    def test_method_signatures_as_integer_ratio(self, sctype: type[np.floating]):
        try:
            sig = inspect.signature(sctype.as_integer_ratio)
        except ValueError:
            pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio")
        assert len(sig.parameters) == 1
        assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY

    @pytest.mark.parametrize(
        "method_name",
        [
            "__array_namespace__", "__copy__", "__deepcopy__", "all", "any", "argmax",
            "argmin", "argsort", "astype", "byteswap", "choose", "clip", "compress",
            "conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dump",
            "dumps", "fill", "flatten", "getfield", "item", "max", "mean", "min",
            "nonzero", "prod", "put", "ravel", "repeat", "reshape", "resize", "round",
            "searchsorted", "setfield", "setflags", "sort", "squeeze", "std", "sum",
            "swapaxes", "take", "to_device", "tobytes", "tofile", "tolist", "trace",
            "transpose", "var", "view",
        ],
    )
    def test_array_scalar_method_signatures(self, method_name: str):
        # methods shared by np.generic and np.ndarray should have the same signature
        fn_generic = getattr(np.generic, method_name)
        sig_generic = inspect.signature(fn_generic)
        assert "self" in sig_generic.parameters
        assert sig_generic.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY
        fn_ndarray = getattr(np.ndarray, method_name)
        sig_ndarray = inspect.signature(fn_ndarray)
        assert sig_generic == sig_ndarray | python | github | https://github.com/numpy/numpy | numpy/_core/tests/test_scalar_methods.py
/*
* Copyright 2010-2025 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.impl.base.test.cases.annotations
import org.jetbrains.kotlin.analysis.api.annotations.KaAnnotation
import org.jetbrains.kotlin.analysis.api.symbols.KaDebugRenderer
import org.jetbrains.kotlin.analysis.api.symbols.markers.KaAnnotatedSymbol
import org.jetbrains.kotlin.analysis.test.framework.base.AbstractAnalysisApiBasedTest
import org.jetbrains.kotlin.analysis.test.framework.projectStructure.KtTestModule
import org.jetbrains.kotlin.analysis.test.framework.services.expressionMarkerProvider
import org.jetbrains.kotlin.name.ClassId
import org.jetbrains.kotlin.psi.KtDeclaration
import org.jetbrains.kotlin.psi.KtFile
import org.jetbrains.kotlin.test.directives.model.DirectivesContainer
import org.jetbrains.kotlin.test.directives.model.SimpleDirectivesContainer
import org.jetbrains.kotlin.test.directives.model.singleValue
import org.jetbrains.kotlin.test.services.TestServices
import org.jetbrains.kotlin.test.services.assertions
/**
 * Base test that looks up a specific annotation (identified by the `CLASS_ID`
 * test directive) on the declaration at the caret, renders it via two access
 * paths (indexed lookup and linear scan), and requires both renderings to
 * match before comparing against the expected test output file.
 */
abstract class AbstractAnalysisApiSpecificAnnotationOnDeclarationTest : AbstractAnalysisApiBasedTest() {
    override val additionalDirectives: List<DirectivesContainer>
        get() = super.additionalDirectives + listOf(Directives)

    override fun doTestByMainFile(mainFile: KtFile, mainModule: KtTestModule, testServices: TestServices) {
        val ktDeclaration = testServices.expressionMarkerProvider.getBottommostElementOfTypeAtCaret<KtDeclaration>(mainFile)
        val classIdString = mainModule.testModule.directives.singleValue(Directives.CLASS_ID)
        val actual = copyAwareAnalyzeForTest(ktDeclaration) { contextDeclaration ->
            val declarationSymbol = contextDeclaration.symbol as KaAnnotatedSymbol
            val annotationList = declarationSymbol.annotations
            val classId = ClassId.fromString(classIdString)
            val renderer = KaDebugRenderer()
            // Renders a single annotation application, prefixed with the
            // declaration kind and name for readable test output.
            fun renderAnnotation(application: KaAnnotation): String = buildString {
                appendLine("${KtDeclaration::class.simpleName}: ${contextDeclaration::class.simpleName} ${contextDeclaration.name}")
                append(renderer.renderAnnotationApplication(useSiteSession, application))
            }
            testServices.assertions.assertTrue(classId in annotationList) {
                "ClassId $classId is not found in the annotation list"
            }
            // Render via the indexed lookup and via a linear scan...
            val rawList = renderAnnotation(annotationList[classId].single())
            val resolvedList = renderAnnotation(annotationList.single { it.classId == classId })
            // ...and require both access paths to produce identical output.
            testServices.assertions.assertEquals(resolvedList, rawList) {
                "Result before and after resolve are different"
            }
            resolvedList
        }
        testServices.assertions.assertEqualsToTestOutputFile(actual)
    }

    private object Directives : SimpleDirectivesContainer() {
        // ClassId of the annotation expected on the declaration at the caret.
        val CLASS_ID by stringDirective("ClassId of expected annotation")
    }
} | kotlin | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-impl-base/testFixtures/org/jetbrains/kotlin/analysis/api/impl/base/test/cases/annotations/AbstractAnalysisApiSpecificAnnotationOnDeclarationTest.kt
# (C) British Crown Copyright 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.pp.as_fields` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.coords import DimCoord
from iris.fileformats._ff_cross_references import STASH_TRANS
import iris.fileformats.pp as pp
from iris.tests import mock
import iris.tests.stock as stock
class TestAsFields(tests.IrisTest):
    """Tests for `iris.fileformats.pp.as_fields` using a realistic 3d cube."""

    def setUp(self):
        self.cube = stock.realistic_3d()

    def test_cube_only(self):
        # Every generated field should carry the regular lat/lon lbcode.
        for field in pp.as_fields(self.cube):
            self.assertEqual(field.lbcode, 101)

    def test_field_coords(self):
        # Explicit field coordinates should not change the lbcode.
        coords = ['grid_longitude', 'grid_latitude']
        for field in pp.as_fields(self.cube, field_coords=coords):
            self.assertEqual(field.lbcode, 101)
if __name__ == "__main__":
    # Hand off to Iris's unittest-based test runner when run as a script.
    tests.main() | unknown | codeparrot/codeparrot-clean | |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
import json
from copy import deepcopy
from collections import defaultdict
__version__ = '0.0.8.dev'
class Color(object):
    """
    A color type. Internally stored as an RGB tuple, and does not support
    transparency.
    """
    def __init__(self, rgb):
        # Accept any iterable of channel values; normalize to a tuple.
        if not isinstance(rgb, tuple):
            rgb = tuple(rgb)
        self.rgb = rgb

    def __eq__(self, other):
        # Fix: compare only against other Color instances and defer
        # otherwise, instead of raising AttributeError on foreign types.
        if not isinstance(other, Color):
            return NotImplemented
        return self.rgb == other.rgb

    def __ne__(self, other):
        if not isinstance(other, Color):
            return NotImplemented
        return self.rgb != other.rgb

    def __hash__(self):
        # Fix: defining __eq__ alone made Color unhashable in Python 3;
        # hash on the rgb tuple so equal colors hash equal.
        return hash(self.rgb)

    def __repr__(self):
        return 'Color(%r)' % (self.rgb,)

    @property
    def css(self):
        """CSS ``rgb()`` string for this color, e.g. ``'rgb(255, 0, 0)'``."""
        return 'rgb(%d, %d, %d)' % self.rgb

    def __str__(self):
        return str(self.rgb)
class WarpThread(object):
    """
    Represents a single warp thread.
    """
    def __init__(self, color=None, shaft=None):
        # Coerce raw RGB values (tuples/lists) into Color instances.
        needs_wrap = bool(color) and not isinstance(color, Color)
        self.color = Color(color) if needs_wrap else color
        self.shaft = shaft

    def __repr__(self):
        return '<WarpThread color:%s shaft:%s>' % (self.color.rgb, self.shaft)
class WeftThread(object):
"""
Represents a single weft thread.
"""
def __init__(self, color=None, shafts=None, treadles=None):
if color and not isinstance(color, Color):
color = Color(color)
self.color = color
assert not (shafts and treadles), \
"can't have both shafts (liftplan) and treadles specified"
self.treadles = treadles or set()
self.shafts = shafts or set()
@property
def connected_shafts(self):
if self.shafts:
return self.shafts
else:
assert self.treadles
ret = set()
for treadle in self.treadles:
ret.update(treadle.shafts)
return ret
def __repr__(self):
if self.treadles:
return '<WeftThread color:%s treadles:%s>' % (self.color.rgb,
self.treadles)
else:
return '<WeftThread color:%s shafts:%s>' % (self.color.rgb,
self.shafts)
class Shaft(object):
    """
    Represents a single shaft of the loom.

    Carries no state of its own; instances act as unique identity tokens
    that warp threads and treadles reference.
    """
    pass
class Treadle(object):
    """
    Represents a single treadle of the loom.
    """
    def __init__(self, shafts=None):
        # An unspecified (or empty) shaft collection becomes a fresh set.
        self.shafts = shafts if shafts else set()
class DraftError(Exception):
    """Raised for invalid or inconsistent weaving-draft operations."""
    pass
class Draft(object):
"""
The core representation of a weaving draft.
"""
def __init__(self, num_shafts, num_treadles=0, liftplan=False,
             rising_shed=True, start_at_lowest_thread=True,
             date=None, title='', author='', address='',
             email='', telephone='', fax='', notes=''):
    """Create an empty draft.

    Parameters
    ----------
    num_shafts : int
        Number of shafts on the loom.
    num_treadles : int
        Number of treadles; 0 implies a liftplan draft.
    liftplan : bool
        Use a liftplan instead of treadles (forced True when
        ``num_treadles == 0``).
    rising_shed : bool
        Whether engaged shafts rise (True) or sink (False).
    start_at_lowest_thread : bool
        Weaving start convention; stored but not interpreted here.
    date, title, author, address, email, telephone, fax, notes
        Optional metadata strings; ``date`` defaults to today's date.
    """
    # A draft with no treadles is necessarily liftplan-driven.
    self.liftplan = liftplan or (num_treadles == 0)
    self.rising_shed = rising_shed
    self.start_at_lowest_thread = start_at_lowest_thread
    self.shafts = []
    for __ in range(num_shafts):
        self.shafts.append(Shaft())
    self.treadles = []
    for __ in range(num_treadles):
        self.treadles.append(Treadle())
    # Threads are added later via add_warp_thread / add_weft_thread.
    self.warp = []
    self.weft = []
    self.date = date or datetime.date.today().strftime('%b %d, %Y')
    self.title = title
    self.author = author
    self.address = address
    self.email = email
    self.telephone = telephone
    self.fax = fax
    self.notes = notes
@classmethod
def from_json(cls, s):
    """
    Construct a new Draft instance from its JSON representation.
    Counterpart to ``.to_json()``.

    Threads and the tie-up are stored as integer indices into the shaft
    and treadle lists and are resolved back to objects here.
    """
    obj = json.loads(s)
    warp = obj.pop('warp')
    weft = obj.pop('weft')
    tieup = obj.pop('tieup')
    # The remaining keys map directly onto the constructor arguments.
    draft = cls(**obj)
    for thread_obj in warp:
        draft.add_warp_thread(
            color=thread_obj['color'],
            shaft=draft.shafts[thread_obj['shaft']],
        )
    # NOTE(review): both 'shafts' and 'treadles' are passed here; WeftThread
    # asserts they are never both non-empty, so serialized drafts presumably
    # store one of the two empty — confirm against to_json output.
    for thread_obj in weft:
        draft.add_weft_thread(
            color=thread_obj['color'],
            shafts=set(draft.shafts[n] for n in thread_obj['shafts']),
            treadles=set(draft.treadles[n] for n in
                         thread_obj['treadles']),
        )
    for ii, shaft_nos in enumerate(tieup):
        draft.treadles[ii].shafts = set(draft.shafts[n] for n in shaft_nos)
    return draft
def to_json(self):
    """
    Serialize a Draft to its JSON representation. Counterpart to
    ``.from_json()``.

    Shafts and treadles are serialized as integer indices into this
    draft's ``shafts``/``treadles`` lists; colors as RGB tuples.
    """
    return json.dumps({
        'liftplan': self.liftplan,
        'rising_shed': self.rising_shed,
        'num_shafts': len(self.shafts),
        'num_treadles': len(self.treadles),
        'warp': [{
            'color': thread.color.rgb,
            'shaft': self.shafts.index(thread.shaft),
        } for thread in self.warp],
        'weft': [{
            'color': thread.color.rgb,
            'treadles': [self.treadles.index(tr)
                         for tr in thread.treadles],
            # connected_shafts resolves treadle-driven picks to shafts.
            'shafts': [self.shafts.index(sh)
                       for sh in thread.connected_shafts],
        } for thread in self.weft],
        'tieup': [
            [self.shafts.index(sh) for sh in treadle.shafts]
            for treadle in self.treadles
        ],
        'date': self.date,
        'title': self.title,
        'author': self.author,
        'address': self.address,
        'email': self.email,
        'telephone': self.telephone,
        'fax': self.fax,
        'notes': self.notes,
    })
def copy(self):
    """
    Make a complete copy of this draft.

    The returned draft is fully independent: every thread, shaft and
    treadle is duplicated, so mutating the copy never affects ``self``.
    """
    duplicate = deepcopy(self)
    return duplicate
def add_warp_thread(self, color=None, index=None, shaft=0):
    """
    Add a warp thread to this draft.

    ``shaft`` may be either a Shaft instance or a zero-based index into
    ``self.shafts``.  ``index`` chooses an insertion position; ``None``
    appends to the end of the warp.
    """
    if not isinstance(shaft, Shaft):
        shaft = self.shafts[shaft]
    thread = WarpThread(
        color=color,
        shaft=shaft,
    )
    if index is None:
        self.warp.append(thread)
    else:
        self.warp.insert(index, thread)
def add_weft_thread(self, color=None, index=None,
                    shafts=None, treadles=None):
    """
    Add a weft thread to this draft.

    ``shafts`` and ``treadles`` may contain either instances or zero-based
    indexes; indexes are resolved against ``self.shafts`` /
    ``self.treadles``.  ``index`` chooses an insertion position; ``None``
    appends to the end of the weft.
    """
    shafts = shafts or set()
    shaft_objs = set()
    for shaft in shafts:
        if not isinstance(shaft, Shaft):
            shaft = self.shafts[shaft]
        shaft_objs.add(shaft)
    treadles = treadles or set()
    treadle_objs = set()
    for treadle in treadles:
        if not isinstance(treadle, Treadle):
            treadle = self.treadles[treadle]
        treadle_objs.add(treadle)
    thread = WeftThread(
        color=color,
        shafts=shaft_objs,
        treadles=treadle_objs,
    )
    if index is None:
        self.weft.append(thread)
    else:
        self.weft.insert(index, thread)
def compute_drawdown_at(self, position):
    """
    Return the thread that is on top (visible) at the specified
    zero-indexed (x, y) position.
    """
    col, row = position
    warp_thread = self.warp[col]
    weft_thread = self.weft[row]
    lifted = warp_thread.shaft in weft_thread.connected_shafts
    # On a rising shed the warp shows when its shaft is lifted; on a
    # sinking shed it shows when its shaft stays at rest.
    return warp_thread if lifted == self.rising_shed else weft_thread
def compute_drawdown(self):
    """
    Compute a 2D array, indexed as ``[warp][weft]``, containing the
    thread visible at each position.
    """
    rows = range(len(self.weft))
    drawdown = []
    for x in range(len(self.warp)):
        drawdown.append([self.compute_drawdown_at((x, y)) for y in rows])
    return drawdown
def compute_floats(self):
    """
    Return an iterator over every float, yielding a tuple for each one::

        (start, end, visible, length, thread)

    ``start``/``end`` are (x, y) positions, ``visible`` is whether the
    run shows on the front face, and ``length`` is ``end - start`` along
    the float's axis (so 0 means a single crossing).

    FIXME: This ignores the back side of the fabric. Should it?
    """
    num_warp_threads = len(self.warp)
    num_weft_threads = len(self.weft)
    drawdown = self.compute_drawdown()
    # Iterate over each warp thread, then each weft thread
    # For each thread, find the position of each change in state
    for x, thread in enumerate(self.warp):
        this_vis_state = (thread == drawdown[x][0])
        last = this_start = (x, 0)
        for y in range(1, num_weft_threads):
            check_vis_state = (thread == drawdown[x][y])
            if check_vis_state != this_vis_state:
                length = last[1] - this_start[1]
                yield this_start, last, this_vis_state, length, thread
                this_vis_state = check_vis_state
                this_start = x, y
            last = x, y
        # Emit the final run that reaches the fabric edge.
        length = last[1] - this_start[1]
        yield this_start, last, this_vis_state, length, thread
    for y, thread in enumerate(self.weft):
        this_vis_state = (thread == drawdown[0][y])
        last = this_start = (0, y)
        for x in range(1, num_warp_threads):
            check_vis_state = (thread == drawdown[x][y])
            if check_vis_state != this_vis_state:
                length = last[0] - this_start[0]
                yield this_start, last, this_vis_state, length, thread
                this_vis_state = check_vis_state
                this_start = x, y
            last = x, y
        # Emit the final run that reaches the fabric edge.
        length = last[0] - this_start[0]
        yield this_start, last, this_vis_state, length, thread
def compute_longest_floats(self):
    """
    Return a tuple indicating the longest floats for warp, weft.

    FIXME This might be producing incorrect results.
    """
    floats = list(self.compute_floats())
    # NOTE(review): max() over an empty sequence raises ValueError, so a
    # draft with no warp or no weft threads crashes here -- confirm
    # callers never hit the empty case.
    return (
        max(length
            for start, end, visible, length, thread in floats
            if isinstance(thread, WarpThread)),
        max(length
            for start, end, visible, length, thread in floats
            if isinstance(thread, WeftThread)),
    )
def reduce_shafts(self):
    """
    Optimize to use the fewest number of shafts, to attempt to make a
    complex draft possible to weave on a loom with fewer shafts. Note that
    this may make the threading more complex or less periodic.
    """
    # TODO: not yet implemented.
    raise NotImplementedError
def reduce_treadles(self):
    """
    Optimize to use the fewest number of total treadles, to attempt to make
    a complex draft possible to weave on a loom with a smaller number of
    treadles. Note that this may require that more treadles are active on
    any given pick.

    Cannot be called on a liftplan draft.
    """
    # TODO: not yet implemented.
    raise NotImplementedError
def reduce_active_treadles(self):
    """
    Optimize to use the fewest number of active treadles on any given pick,
    because not every weaver is an octopus. Note that this may mean using
    more total treadles.

    Cannot be called on a liftplan draft.

    Raises
    ------
    ValueError
        If this draft is a liftplan draft.
    """
    if self.liftplan:
        raise ValueError("can't reduce treadles on a liftplan draft")
    # Group weft threads by the exact set of shafts they lift, then give
    # each distinct shaft combination its own dedicated treadle so every
    # pick needs exactly one active treadle.
    # (The original guarded this with `if True or max(...) > 1`, a
    # debugging leftover that always ran; the rebuild is unconditional.)
    used_shaft_combos = defaultdict(list)
    for thread in self.weft:
        used_shaft_combos[frozenset(thread.connected_shafts)].append(thread)
    self.treadles = []
    for shafts, threads in used_shaft_combos.items():
        treadle = Treadle(shafts=set(shafts))
        self.treadles.append(treadle)
        for thread in threads:
            thread.treadles = set([treadle])
def sort_threading(self):
    """
    Reorder the shaft assignment in threading so that it follows as
    sequential of an order as possible.

    For a liftplan draft, will change the threading and liftplan.

    For a treadled draft, will change the threading and tieup, won't change
    the treadling.
    """
    # TODO: not yet implemented.
    raise NotImplementedError
def sort_treadles(self):
    """
    Reorder the treadle assignment in tieup so that it follows as
    sequential of an order as possible in treadling.

    Will change the tieup and treadling, won't change the threading. If
    sorting both threading and treadles, call ``.sort_threading()`` before
    calling ``.sort_treadles()``.

    Cannot be called on a liftplan draft.
    """
    # TODO: not yet implemented.
    raise NotImplementedError
def invert_shed(self):
    """
    Convert from rising shed to sinking shed, or vice versa. Note that this
    will actually update the threading/tie-up to preserve the same
    drawdown: if this is not desired, simply change the .rising_shed
    attribute.
    """
    self.rising_shed = not self.rising_shed
    # Bug fix: ``self.shafts`` is a list, and ``list - set`` raises
    # TypeError.  Build a set once so the complement can be computed.
    all_shafts = set(self.shafts)
    for thread in self.weft:
        thread.shafts = all_shafts - thread.shafts
    for treadle in self.treadles:
        treadle.shafts = all_shafts - treadle.shafts
def rotate(self):
    """
    Rotate the draft: the weft becomes the warp, and vice versa.
    """
    # TODO: not yet implemented.
    raise NotImplementedError
def flip_weftwise(self):
    """
    Flip/mirror along the weft axis: e.g. looking at the front of the loom,
    the left side of the fabric becomes the right, and the right becomes
    the left.
    """
    # In-place slice assignment keeps external references to the warp
    # list valid, just like list.reverse() would.
    self.warp[:] = self.warp[::-1]
def flip_warpwise(self):
    """
    Flip/mirror along the warp axis: e.g. looking at the front of the loom,
    the near side of the fabric becomes the far, and the far becomes
    the near.
    """
    # In-place slice assignment keeps external references to the weft
    # list valid, just like list.reverse() would.
    self.weft[:] = self.weft[::-1]
def selvedges_continuous(self):
    """
    Check whether or not both selvedge threads are "continuous" (will be
    picked up on every pick).
    """
    return all(self.selvedge_continuous(low) for low in (False, True))
def selvedge_continuous(self, low):
    """
    Check whether the selvedge corresponding to the lowest-number thread is
    continuous.

    ``low`` selects which edge is checked: the first warp thread when
    true, the last warp thread otherwise.
    """
    # For the low selvedge:
    # If this draft starts at the lowest thread, there needs to be a
    # transition between threads 1 and 2 (0-indexed), threads 3 and 4, etc.
    # Otherwise, there needs to be a transition between 0 and 1, 2 and 3,
    # etc.
    # For the high selvedge:
    # If this draft starts at the highest thread, there needs to be a
    # transition between threads 0 and 1, threads 2 and 3, etc.
    offset = 0 if low ^ self.start_at_lowest_thread else 1
    if low:
        thread = self.warp[0]
    else:
        thread = self.warp[-1]
    for ii in range(offset, len(self.weft) - 1, 2):
        a_state = thread.shaft in self.weft[ii].connected_shafts
        b_state = thread.shaft in self.weft[ii + 1].connected_shafts
        # Continuous means each checked pair of picks alternates the
        # pick-up state of the edge thread.
        if not a_state ^ b_state:
            return False
    return True
def make_selvedges_continuous(self, add_new_shafts=False):
    """
    Make the selvedge threads "continuous": that is, threaded and treadled
    such that they are picked up on every pick. This method will try to use
    the liftplan/tieup and switch selvedge threads to alternate shafts. If
    that is impossible and ``add_new_shafts`` new shafts will be added to
    handle the selvedge threads.

    FIXME This method works, but it does not necessarily produce the
    subjectively "best" solution in terms of aesthetics and structure. For
    example, it may result in longer floats than necessary.
    """
    for low_thread in (False, True):
        success = False
        if low_thread:
            warp_thread = self.warp[0]
        else:
            warp_thread = self.warp[-1]
        # Already continuous: nothing to change for this edge.
        if self.selvedge_continuous(low_thread):
            success = True
            continue
        # Try re-threading the edge thread onto each existing shaft.
        # NOTE(review): on failure the thread is left on the last shaft
        # tried -- confirm that is acceptable before the raise below.
        for shaft in self.shafts:
            warp_thread.shaft = shaft
            if self.selvedge_continuous(low_thread):
                success = True
                break
        if not success:
            if add_new_shafts:
                # Adding extra shafts for the selvedges is not written yet.
                raise NotImplementedError
            else:
                raise DraftError("cannot make continuous selvedges")
def compute_weft_crossings(self):
    """
    Iterate over each weft row and compute the total number of thread
    crossings in that row. Useful for determining sett.
    """
    # TODO: not yet implemented.
    raise NotImplementedError
def compute_warp_crossings(self):
    """
    Iterate over each warp row and compute the total number of thread
    crossings in that row.
    """
    # TODO: not yet implemented.
    raise NotImplementedError
def repeat(self, n):
    """
    Given a base draft, make it repeat with N units in each direction.

    Note: appends ``n`` additional copies of the original threads, so the
    result contains n + 1 total repeats of the starting draft.
    """
    # Snapshot first: the add_* calls below grow self.warp / self.weft.
    initial_warp = list(self.warp)
    initial_weft = list(self.weft)
    for ii in range(n):
        for thread in initial_warp:
            self.add_warp_thread(
                color=thread.color,
                shaft=thread.shaft,
            )
        for thread in initial_weft:
            self.add_weft_thread(
                color=thread.color,
                treadles=thread.treadles,
                shafts=thread.shafts,
            )
def advance(self):
    """
    Given a base draft, make it 'advance'. Essentially:
        1. Repeat the draft N times, where N is the number of shafts, in
        both the warp and weft directions.
        2. On each successive repeat, offset the threading by 1 additional
        shaft and the treadling by one additional treadle.

    (Leftover debug ``print()`` calls that traced every thread to stdout
    were removed; the computation is unchanged.)
    """
    # Snapshot first: the add_* calls below grow self.warp / self.weft.
    initial_warp = list(self.warp)
    initial_weft = list(self.weft)
    num_shafts = len(self.shafts)
    num_treadles = len(self.treadles)
    for ii in range(1, num_shafts):
        for thread in initial_warp:
            # Shift each thread's shaft by ii, wrapping around.
            initial_shaft = self.shafts.index(thread.shaft)
            new_shaft = (initial_shaft + ii) % num_shafts
            self.add_warp_thread(
                color=thread.color,
                shaft=new_shaft,
            )
        for thread in initial_weft:
            # Shift treadles and shafts by ii as well, wrapping around.
            initial_treadles = [self.treadles.index(treadle)
                                for treadle in thread.treadles]
            new_treadles = [(treadle + ii) % num_treadles
                            for treadle in initial_treadles]
            initial_shafts = [self.shafts.index(shaft)
                              for shaft in thread.shafts]
            new_shafts = [(shaft + ii) % num_shafts
                          for shaft in initial_shafts]
            self.add_weft_thread(
                color=thread.color,
                treadles=new_treadles,
                shafts=new_shafts,
            )
def all_threads_attached(self):
    """
    Check whether all threads (weft and warp) will be "attached" to the
    fabric, instead of just falling off.
    """
    # TODO: not yet implemented.
    raise NotImplementedError
#ifndef HEX_LL_H
#define HEX_LL_H
extern const signed char hexval_table[256];
static inline unsigned int hexval(unsigned char c)
{
	/*
	 * Table lookup: 0-15 for a valid hex digit.  The table stores -1
	 * for anything else, which wraps to a huge unsigned value that
	 * callers detect via the bits above the low nibble.
	 */
	return hexval_table[c];
}
/*
* Convert two consecutive hexadecimal digits into a char. Return a
* negative value on error. Don't run over the end of short strings.
*/
static inline int hex2chr(const char *s)
{
	/*
	 * hexval() yields (unsigned)-1 for a non-hex character, so any
	 * bits above the low nibble signal an error; returning `val`
	 * unchanged then propagates a negative value through the int
	 * return type.  The short-circuit also avoids reading s[1] past
	 * the end of a short string when s[0] is already invalid.
	 */
	unsigned int val = hexval(s[0]);
	return (val & ~0xf) ? val : (val << 4) | hexval(s[1]);
}
/*
* Read `len` pairs of hexadecimal digits from `hex` and write the
* values to `binary` as `len` bytes. Return 0 on success, or -1 if
 * the input does not consist of hex digits.
*/
int hex_to_bytes(unsigned char *binary, const char *hex, size_t len);
#endif | c | github | https://github.com/git/git | hex-ll.h |
#ifndef TREE_WALK_H
#define TREE_WALK_H
#include "hash.h"
struct index_state;
struct repository;
/**
* The tree walking API is used to traverse and inspect trees.
*/
/**
* An entry in a tree. Each entry has a sha1 identifier, pathname, and mode.
*/
struct name_entry {
	struct object_id oid;	/* object referenced by this entry */
	const char *path;	/* entry pathname */
	int pathlen;		/* strlen(path), cached at decode time */
	unsigned int mode;	/* mode bits as stored in the tree */
};
/**
* A semi-opaque data structure used to maintain the current state of the walk.
*/
struct tree_desc {
	/* hash algorithm used to interpret object IDs in this tree */
	const struct git_hash_algo *algo;

	/*
	 * pointer into the memory representation of the tree. It always
	 * points at the current entry being visited.
	 */
	const void *buffer;

	/* points to the current entry being visited. */
	struct name_entry entry;

	/* counts the number of bytes left in the `buffer`. */
	unsigned int size;

	/* option flags passed via init_tree_desc_gently() */
	enum tree_desc_flags {
		TREE_DESC_RAW_MODES = (1 << 0),
	} flags;
};
/**
* Decode the entry currently being visited (the one pointed to by
* `tree_desc's` `entry` member) and return the sha1 of the entry. The
* `pathp` and `modep` arguments are set to the entry's pathname and mode
* respectively.
*/
static inline const struct object_id *tree_entry_extract(struct tree_desc *desc, const char **pathp, unsigned short *modep)
{
	/* Copy out the already-decoded current entry; no parsing happens here. */
	*pathp = desc->entry.path;
	*modep = desc->entry.mode;
	return &desc->entry.oid;
}
/**
* Calculate the length of a tree entry's pathname. This utilizes the
* memory structure of a tree entry to avoid the overhead of using a
* generic strlen().
*/
static inline int tree_entry_len(const struct name_entry *ne)
{
	/* pathlen was cached when the entry was decoded; avoids strlen(). */
	return ne->pathlen;
}
/*
* The _gently versions of these functions warn and return false on a
* corrupt tree entry rather than dying,
*/
/**
* Walk to the next entry in a tree. This is commonly used in conjunction
* with `tree_entry_extract` to inspect the current entry.
*/
void update_tree_entry(struct tree_desc *);
int update_tree_entry_gently(struct tree_desc *);
/**
* Initialize a `tree_desc` and decode its first entry. The buffer and
* size parameters are assumed to be the same as the buffer and size
* members of `struct tree`.
*/
void init_tree_desc(struct tree_desc *desc, const struct object_id *tree_oid,
const void *buf, unsigned long size);
int init_tree_desc_gently(struct tree_desc *desc, const struct object_id *oid,
const void *buf, unsigned long size,
enum tree_desc_flags flags);
/*
* Visit the next entry in a tree. Returns 1 when there are more entries
* left to visit and 0 when all entries have been visited. This is
* commonly used in the test of a while loop.
*/
int tree_entry(struct tree_desc *, struct name_entry *);
int tree_entry_gently(struct tree_desc *, struct name_entry *);
/**
* Initialize a `tree_desc` and decode its first entry given the
* object ID of a tree. Returns the `buffer` member if the latter
* is a valid tree identifier and NULL otherwise.
*/
void *fill_tree_descriptor(struct repository *r,
struct tree_desc *desc,
const struct object_id *oid);
struct traverse_info;
typedef int (*traverse_callback_t)(int n, unsigned long mask, unsigned long dirmask, struct name_entry *entry, struct traverse_info *);
/**
* Traverse `n` number of trees in parallel. The `fn` callback member of
* `traverse_info` is called once for each tree entry.
*/
int traverse_trees(struct index_state *istate, int n, struct tree_desc *t, struct traverse_info *info);
enum get_oid_result get_tree_entry_follow_symlinks(struct repository *r, struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned short *mode);
/**
* A structure used to maintain the state of a traversal.
*/
struct traverse_info {
	/* path of the current tree; see make_traverse_path() */
	const char *traverse_path;

	/*
	 * points to the traverse_info which was used to descend into the
	 * current tree. If this is the top-level tree `prev` will point to
	 * a dummy traverse_info.
	 */
	struct traverse_info *prev;

	/* is the entry for the current tree (if the tree is a subtree). */
	const char *name;

	size_t namelen;		/* length of `name` */
	unsigned mode;		/* mode of the current tree's entry */

	/* is the length of the full path for the current tree. */
	size_t pathlen;

	struct pathspec *pathspec;	/* limits which entries are visited */

	/* can be used by callbacks to maintain directory-file conflicts. */
	unsigned long df_conflicts;

	/* a callback called for each entry in the tree.
	 *
	 * The arguments passed to the traverse callback are as follows:
	 *
	 * - `n` counts the number of trees being traversed.
	 *
	 * - `mask` has its nth bit set if something exists in the nth entry.
	 *
	 * - `dirmask` has its nth bit set if the nth tree's entry is a directory.
	 *
	 * - `entry` is an array of size `n` where the nth entry is from the nth tree.
	 *
	 * - `info` maintains the state of the traversal.
	 *
	 * Returning a negative value will terminate the traversal. Otherwise the
	 * return value is treated as an update mask. If the nth bit is set the nth tree
	 * will be updated and if the bit is not set the nth tree entry will be the
	 * same in the next callback invocation.
	 */
	traverse_callback_t fn;

	/* can be anything the `fn` callback would want to use. */
	void *data;

	/* tells whether to stop at the first error or not. */
	int show_all_errors;
};
/**
* Walk trees starting with "tree_oid" to find the entry for "name", and
 * return the object name and the mode of the found entry via the
* "oid" and "mode" parameters. Return 0 if the entry is found, and -1
* otherwise.
*/
int get_tree_entry(struct repository *repo, const struct object_id *tree_oid,
const char *name, struct object_id *oid,
unsigned short *mode);
/**
* Generate the full pathname of a tree entry based from the root of the
* traversal. For example, if the traversal has recursed into another
* tree named "bar" the pathname of an entry "baz" in the "bar"
* tree would be "bar/baz".
*/
char *make_traverse_path(char *path, size_t pathlen, const struct traverse_info *info,
const char *name, size_t namelen);
/**
* Convenience wrapper to `make_traverse_path` into a strbuf.
*/
void strbuf_make_traverse_path(struct strbuf *out,
const struct traverse_info *info,
const char *name, size_t namelen);
/**
* Initialize a `traverse_info` given the pathname of the tree to start
* traversing from.
*/
void setup_traverse_info(struct traverse_info *info, const char *base);
/**
* Calculate the length of a pathname returned by `make_traverse_path`.
* This utilizes the memory structure of a tree entry to avoid the
* overhead of using a generic strlen().
*/
static inline size_t traverse_path_len(const struct traverse_info *info,
				       size_t namelen)
{
	/* st_add() guards the size_t addition against overflow. */
	return st_add(info->pathlen, namelen);
}
/* in general, positive means "kind of interesting" */
enum interesting {
	all_entries_not_interesting = -1, /* no, and no subsequent entries will be either */
	entry_not_interesting = 0,	/* this entry: no */
	entry_interesting = 1,		/* this entry: yes */
	all_entries_interesting = 2 /* yes, and all subsequent entries will be */
};
enum interesting tree_entry_interesting(struct index_state *istate,
const struct name_entry *,
struct strbuf *,
const struct pathspec *ps);
#endif | c | github | https://github.com/git/git | tree-walk.h |
"""
Base-backends for django-rcsfield.
Used to hold common functionality of all backends.
Every backend module implements a very simple API.
The following functions are exported:
* fetch(key, revision): knows how to fetch a specific revision of the entity
referenced by ``key``
* commit(key, data): knows how to commit changed ``data`` to the entity
referenced by ``key``
* initial(): does optional setup needed for the backend to work. called on
``post_syncdb`` signal.
* get_revisions(key): returns a list of revisions in which the entity
  identified by ``key`` was changed.
* move(key_from, key_to): knows how to move an entity from ``key_from``
to ``key_to`` while keeping the history. this method is optional.
* diff(key1, rev1, key2, rev2): returns a unified diff of the contents
of ``key1``@``rev1`` against ``key2``@``rev2``.
"""
import difflib
class NoSuchRevision(Exception):
    """Raised when a requested revision of an entity does not exist."""
    pass
class BaseBackend(object):
    """
    Base-class for all rcsfield backends.

    Concrete backends override the storage hooks below; ``diff()`` is the
    only method with a default implementation.
    """

    def initial(self):
        """
        Called on ``post_syncdb``; can do some initial setup needed for
        the backend to work correctly.
        """
        pass

    def commit(self, key, data):
        """
        Versionize a change of ``key`` with new ``data``.
        """
        raise NotImplementedError

    def fetch(self, key, rev):
        """
        Fetch the data of ``key`` for revision ``rev``.
        """
        raise NotImplementedError

    def get_revisions(self, key):
        """
        Return a list of all revisions in which ``key`` changed.
        """
        raise NotImplementedError

    def move(self, key_from, key_to):
        """
        Moves an entity from ``key_from`` to ``key_to`` while keeping
        the history. This is useful to migrate a repository after the
        ``rcskey_format`` of a ``RcsTextField`` was changed.
        """
        raise NotImplementedError

    def diff(self, key1, rev1, key2, rev2):
        """
        Return a textual unified diff of two entities at specified revisions.
        Takes two parameters for keyname to support diffing renamed files.

        Note: returns the lazy generator produced by
        ``difflib.unified_diff``, not a joined string.
        """
        c1 = self.fetch(key1, rev1)
        c2 = self.fetch(key2, rev2)
        # splitlines(1) keeps the line terminators, as unified_diff expects.
        diff = difflib.unified_diff(c1.splitlines(1),
                                    c2.splitlines(1),
                                    'Revision: %s' % rev1,
                                    'Revision: %s' % rev2
                                    )
        return diff
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Model and functions related to blackbody radiation.
.. _blackbody-planck-law:
Blackbody Radiation
-------------------
Blackbody flux is calculated with Planck law
(:ref:`Rybicki & Lightman 1979 <ref-rybicki1979>`):
.. math::
B_{\\lambda}(T) = \\frac{2 h c^{2} / \\lambda^{5}}{exp(h c / \\lambda k T) - 1}
B_{\\nu}(T) = \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
where the unit of :math:`B_{\\lambda}(T)` is
:math:`erg \\; s^{-1} cm^{-2} \\mathring{A}^{-1} sr^{-1}`, and
:math:`B_{\\nu}(T)` is :math:`erg \\; s^{-1} cm^{-2} Hz^{-1} sr^{-1}`.
:func:`~astropy.modeling.blackbody.blackbody_lambda` and
:func:`~astropy.modeling.blackbody.blackbody_nu` calculate the
blackbody flux for :math:`B_{\\lambda}(T)` and :math:`B_{\\nu}(T)`,
respectively.
For blackbody representation as a model, see :class:`BlackBody1D`.
.. _blackbody-examples:
Examples
^^^^^^^^
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.modeling.blackbody import blackbody_lambda, blackbody_nu
Calculate blackbody flux for 5000 K at 100 and 10000 Angstrom while suppressing
any Numpy warnings:
>>> wavelengths = [100, 10000] * u.AA
>>> temperature = 5000 * u.K
>>> with np.errstate(all='ignore'):
... flux_lam = blackbody_lambda(wavelengths, temperature)
... flux_nu = blackbody_nu(wavelengths, temperature)
>>> flux_lam # doctest: +FLOAT_CMP
<Quantity [ 1.27452545e-108, 7.10190526e+005] erg / (Angstrom cm2 s sr)>
>>> flux_nu # doctest: +FLOAT_CMP
<Quantity [ 4.25135927e-123, 2.36894060e-005] erg / (cm2 Hz s sr)>
Alternatively, the same results for ``flux_nu`` can be computed using
:class:`BlackBody1D` with blackbody representation as a model. The difference between
this and the former approach is in one additional step outlined as follows:
>>> from astropy import constants as const
>>> from astropy.modeling import models
>>> temperature = 5000 * u.K
>>> bolometric_flux = const.sigma_sb * temperature ** 4 / np.pi
>>> bolometric_flux.to(u.erg / (u.cm * u.cm * u.s)) # doctest: +FLOAT_CMP
<Quantity 1.12808367e+10 erg / (cm2 s)>
>>> wavelengths = [100, 10000] * u.AA
>>> bb_astro = models.BlackBody1D(temperature, bolometric_flux=bolometric_flux)
>>> bb_astro(wavelengths).to(u.erg / (u.cm * u.cm * u.Hz * u.s)) / u.sr # doctest: +FLOAT_CMP
<Quantity [4.25102471e-123, 2.36893879e-005] erg / (cm2 Hz s sr)>
where ``bb_astro(wavelengths)`` computes the equivalent result as ``flux_nu`` above.
Plot a blackbody spectrum for 5000 K:
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.modeling.blackbody import blackbody_lambda
temperature = 5000 * u.K
wavemax = (const.b_wien / temperature).to(u.AA) # Wien's displacement law
waveset = np.logspace(
0, np.log10(wavemax.value + 10 * wavemax.value), num=1000) * u.AA
with np.errstate(all='ignore'):
flux = blackbody_lambda(waveset, temperature)
fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(waveset.value, flux.value)
ax.axvline(wavemax.value, ls='--')
ax.get_yaxis().get_major_formatter().set_powerlimits((0, 1))
ax.set_xlabel(r'$\\lambda$ ({0})'.format(waveset.unit))
ax.set_ylabel(r'$B_{\\lambda}(T)$')
ax.set_title('Blackbody, T = {0}'.format(temperature))
Note that an array of temperatures can also be given instead of a single
temperature. In this case, the Numpy broadcasting rules apply: for instance, if
the frequency and temperature have the same shape, the output will have this
shape too, while if the frequency is a 2-d array with shape ``(n, m)`` and the
temperature is an array with shape ``(m,)``, the output will have a shape
``(n, m)``.
See Also
^^^^^^^^
.. _ref-rybicki1979:
Rybicki, G. B., & Lightman, A. P. 1979, Radiative Processes in Astrophysics (New York, NY: Wiley)
"""
import warnings
from collections import OrderedDict
import numpy as np
from .core import Fittable1DModel
from .parameters import Parameter
from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['BlackBody1D', 'blackbody_nu', 'blackbody_lambda']
# Units
FNU = u.erg / (u.cm**2 * u.s * u.Hz)    # flux density per unit frequency
FLAM = u.erg / (u.cm**2 * u.s * u.AA)   # flux density per unit wavelength

# Some platform implementations of expm1() are buggy and Numpy uses
# them anyways--the bug is that on certain large inputs it returns
# NaN instead of INF like it should (it should only return NaN on a
# NaN input
# See https://github.com/astropy/astropy/issues/4171
with warnings.catch_warnings():
    warnings.simplefilter('ignore', RuntimeWarning)
    # Probe at import time so blackbody_nu() can patch up NaNs later.
    _has_buggy_expm1 = np.isnan(np.expm1(1000)) or np.isnan(np.expm1(1e10))
class BlackBody1D(Fittable1DModel):
    """
    One dimensional blackbody model.

    Parameters
    ----------
    temperature : :class:`~astropy.units.Quantity`
        Blackbody temperature.

    bolometric_flux : :class:`~astropy.units.Quantity`
        The bolometric flux of the blackbody (i.e., the integral over the
        spectral axis).

    Notes
    -----
    Model formula:

        .. math:: f(x) = \\pi B_{\\nu} f_{\\text{bolometric}} / (\\sigma T^{4})

    Examples
    --------
    >>> from astropy.modeling import models
    >>> from astropy import units as u
    >>> bb = models.BlackBody1D()
    >>> bb(6000 * u.AA)  # doctest: +FLOAT_CMP
    <Quantity 1.3585381201978953e-15 erg / (cm2 Hz s)>

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import BlackBody1D
        from astropy.modeling.blackbody import FLAM
        from astropy import units as u
        from astropy.visualization import quantity_support

        bb = BlackBody1D(temperature=5778*u.K)
        wav = np.arange(1000, 110000) * u.AA
        flux = bb(wav).to(FLAM, u.spectral_density(wav))

        with quantity_support():
            plt.figure()
            plt.semilogx(wav, flux)
            plt.axvline(bb.lambda_max.to(u.AA).value, ls='--')
            plt.show()
    """

    # We parametrize this model with a temperature and a bolometric flux. The
    # bolometric flux is the integral of the model over the spectral axis. This
    # is more useful than simply having an amplitude parameter.
    temperature = Parameter(default=5000, min=0, unit=u.K)
    bolometric_flux = Parameter(default=1, min=0, unit=u.erg / u.cm ** 2 / u.s)

    # We allow values without units to be passed when evaluating the model, and
    # in this case the input x values are assumed to be frequencies in Hz.
    _input_units_allow_dimensionless = True

    # We enable the spectral equivalency by default for the spectral axis
    input_units_equivalencies = {'x': u.spectral()}

    def evaluate(self, x, temperature, bolometric_flux):
        """Evaluate the model.

        Parameters
        ----------
        x : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Frequency at which to compute the blackbody. If no units are given,
            this defaults to Hz.

        temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Temperature of the blackbody. If no units are given, this defaults
            to Kelvin.

        bolometric_flux : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Desired integral for the blackbody.

        Returns
        -------
        y : number or ndarray
            Blackbody spectrum. The units are determined from the units of
            ``bolometric_flux``.
        """
        # We need to make sure that we attach units to the temperature if it
        # doesn't have any units. We do this because even though blackbody_nu
        # can take temperature values without units, the / temperature ** 4
        # factor needs units to be defined.
        if isinstance(temperature, u.Quantity):
            temperature = temperature.to(u.K, equivalencies=u.temperature())
        else:
            temperature = u.Quantity(temperature, u.K)

        # We normalize the returned blackbody so that the integral would be
        # unity, and we then multiply by the bolometric flux. A normalized
        # blackbody has f_nu = pi * B_nu / (sigma * T^4), which is what we
        # calculate here. We convert to 1/Hz to make sure the units are
        # simplified as much as possible, then we multiply by the bolometric
        # flux to get the normalization right.
        fnu = ((np.pi * u.sr * blackbody_nu(x, temperature) /
                const.sigma_sb / temperature ** 4).to(1 / u.Hz) *
               bolometric_flux)

        # If the bolometric_flux parameter has no unit, we should drop the /Hz
        # and return a unitless value. This occurs for instance during fitting,
        # since we drop the units temporarily.
        if hasattr(bolometric_flux, 'unit'):
            return fnu
        else:
            return fnu.value

    @property
    def input_units(self):
        # The input units are those of the 'x' value, which should always be
        # Hz. Because we do this, and because input_units_allow_dimensionless
        # is set to True, dimensionless values are assumed to be in Hz.
        return {'x': u.Hz}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        # Used by the fitting machinery to restore units after a unitless fit.
        return OrderedDict([('temperature', u.K),
                            ('bolometric_flux', outputs_unit['y'] * u.Hz)])

    @property
    def lambda_max(self):
        """Peak wavelength when the curve is expressed as power density."""
        # Wien's displacement law.
        return const.b_wien / self.temperature
def blackbody_nu(in_x, temperature):
    """Calculate blackbody flux per steradian, :math:`B_{\\nu}(T)`.

    .. note::

        Use `numpy.errstate` to suppress Numpy warnings, if desired.

    .. warning::

        Output values might contain ``nan`` and ``inf``.

    Parameters
    ----------
    in_x : number, array-like, or `~astropy.units.Quantity`
        Frequency, wavelength, or wave number.
        If not a Quantity, it is assumed to be in Hz.

    temperature : number, array-like, or `~astropy.units.Quantity`
        Blackbody temperature.
        If not a Quantity, it is assumed to be in Kelvin.

    Returns
    -------
    flux : `~astropy.units.Quantity`
        Blackbody monochromatic flux in
        :math:`erg \\; cm^{-2} s^{-1} Hz^{-1} sr^{-1}`.

    Raises
    ------
    ValueError
        Invalid temperature.

    ZeroDivisionError
        Wavelength is zero (when converting to frequency).
    """
    # Convert to units for calculations, also force double precision
    with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
        freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
        temp = u.Quantity(temperature, u.K, dtype=np.float64)

    # Check if input values are physically possible
    if np.any(temp < 0):
        raise ValueError(f'Temperature should be positive: {temp}')
    if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
        warnings.warn('Input contains invalid wavelength/frequency value(s)',
                      AstropyUserWarning)

    # Exponent of the Planck law; expm1 keeps precision for small values.
    log_boltz = const.h * freq / (const.k_B * temp)
    boltzm1 = np.expm1(log_boltz)

    if _has_buggy_expm1:
        # Replace incorrect nan results with infs--any result of 'nan' is
        # incorrect unless the input (in log_boltz) happened to be nan to begin
        # with. (As noted in #4393 ideally this would be replaced by a version
        # of expm1 that doesn't have this bug, rather than fixing incorrect
        # results after the fact...)
        boltzm1_nans = np.isnan(boltzm1)
        if np.any(boltzm1_nans):
            if boltzm1.isscalar and not np.isnan(log_boltz):
                boltzm1 = np.inf
            else:
                boltzm1[np.where(~np.isnan(log_boltz) & boltzm1_nans)] = np.inf

    # Calculate blackbody flux
    bb_nu = (2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1))
    flux = bb_nu.to(FNU, u.spectral_density(freq))

    return flux / u.sr  # Add per steradian to output flux unit
def blackbody_lambda(in_x, temperature):
    """Like :func:`blackbody_nu` but for :math:`B_{\\lambda}(T)`.

    Parameters
    ----------
    in_x : number, array-like, or `~astropy.units.Quantity`
        Frequency, wavelength, or wave number.
        If not a Quantity, it is assumed to be in Angstrom.

    temperature : number, array-like, or `~astropy.units.Quantity`
        Blackbody temperature.
        If not a Quantity, it is assumed to be in Kelvin.

    Returns
    -------
    flux : `~astropy.units.Quantity`
        Blackbody monochromatic flux in
        :math:`erg \\; cm^{-2} s^{-1} \\mathring{A}^{-1} sr^{-1}`.
    """
    if getattr(in_x, 'unit', None) is None:
        in_x = u.Quantity(in_x, u.AA)

    bb_nu = blackbody_nu(in_x, temperature) * u.sr  # Remove sr for conversion
    flux = bb_nu.to(FLAM, u.spectral_density(in_x))

    return flux / u.sr  # Add per steradian to output flux unit
"""Flask SQLAlchemy ORM models for Social Auth"""
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from social.utils import setting_name, module_member
from social.storage.sqlalchemy_orm import SQLAlchemyUserMixin, \
SQLAlchemyAssociationMixin, \
SQLAlchemyNonceMixin, \
SQLAlchemyCodeMixin, \
BaseSQLAlchemyStorage
# Declarative base shared by every Python Social Auth model in this module.
PSABase = declarative_base()
class _AppSession(PSABase):
    """Abstract base letting all PSA models share the Flask app's DB session."""
    __abstract__ = True
    @classmethod
    def _set_session(cls, app_session):
        # Stored on the class so every concrete model sees the same session.
        cls.app_session = app_session
    @classmethod
    def _session(cls):
        # Hook used by the SQLAlchemy storage mixins to obtain a session.
        return cls.app_session
class UserSocialAuth(_AppSession, SQLAlchemyUserMixin):
    """Social Auth association model"""
    # Temporary override of constraints to avoid an error on the still-to-be
    # missing column uid.
    __table_args__ = ()
    @classmethod
    def user_model(cls):
        """Return the application's configured user model class."""
        return cls.user.property.argument
    @classmethod
    def username_max_length(cls):
        """Return the length limit of the user model's ``username`` column."""
        username_column = cls.user_model().__table__.columns.get('username')
        return username_column.type.length
# OpenID nonce storage; all behavior comes from SQLAlchemyNonceMixin.
class Nonce(_AppSession, SQLAlchemyNonceMixin):
    """One use numbers"""
    pass
# OpenID provider association storage; behavior comes from the mixin.
class Association(_AppSession, SQLAlchemyAssociationMixin):
    """OpenId account association"""
    pass
# E-mail validation / one-time-code storage; behavior comes from the mixin.
class Code(_AppSession, SQLAlchemyCodeMixin):
    pass
# Storage facade wiring the concrete models into python-social-auth.
class FlaskStorage(BaseSQLAlchemyStorage):
    user = UserSocialAuth
    nonce = Nonce
    association = Association
    code = Code
def init_social(app, session):
    """Bind the PSA models to the Flask app's config and SQLAlchemy session.
    Must run before the models are used: it installs the real uniqueness
    constraint and the columns that depend on the configured user model.
    """
    uid_length = app.config.get(setting_name('UID_LENGTH'), 255)
    user_model = module_member(app.config[setting_name('USER_MODEL')])
    _AppSession._set_session(session)
    # Restore the (provider, uid) uniqueness constraint that the class body
    # temporarily dropped, now that the uid column can be created.
    UserSocialAuth.__table_args__ = (UniqueConstraint('provider', 'uid'),)
    UserSocialAuth.uid = Column(String(uid_length))
    UserSocialAuth.user_id = Column(
        Integer, ForeignKey(user_model.id), nullable=False, index=True)
    UserSocialAuth.user = relationship(
        user_model, backref=backref('social_auth', lazy='dynamic'))
__all__ = ['Distribution']
import re
import os
import sys
import warnings
import distutils.log
import distutils.core
import distutils.cmd
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.depends import Require
from setuptools.compat import numeric_types, basestring
import pkg_resources
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
# Grab the pristine distutils Distribution in case setuptools was reloaded
# or another extension already patched it.
_Distribution = _get_unpatched(_Distribution)
# Types accepted wherever a "sequence" setting is allowed below.
sequence = tuple, list
def check_importable(dist, attr, value):
    """Verify that *value* parses as an importable 'module:attrs' string."""
    try:
        # Parsing as a dummy entry point validates the syntax; extras are
        # not allowed here.
        entry_point = pkg_resources.EntryPoint.parse('x=' + value)
        assert not entry_point.extras
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr, value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    try:
        # Joining a real list of strings never reproduces the list itself;
        # a bare string, however, joins back to an equal string and is
        # rejected. Non-iterables / non-string items raise inside join.
        assert ''.join(value) != value
    except (TypeError, ValueError, AttributeError, AssertionError):
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    assert_string_list(dist, attr, value)
    for nsp in value:
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        # A nested namespace package requires its parent to be declared too.
        if '.' not in nsp:
            continue
        parent = nsp.rsplit('.', 1)[0]
        if parent not in value:
            distutils.log.warn(
                "WARNING: %r is declared as a package namespace, but %r"
                " is not: please correct this in setup.py", nsp, parent
            )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for name, reqs in value.items():
            # 'extra:marker' keys carry an environment marker after the colon.
            if ':' in name:
                name, marker = name.split(':', 1)
                if pkg_resources.invalid_marker(marker):
                    raise DistutilsSetupError(
                        "Invalid environment marker: " + marker)
            list(pkg_resources.parse_requirements(reqs))
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    # bool(x) == x only holds for True/False/0/1.
    if bool(value) == value:
        return
    raise DistutilsSetupError(
        "%r must be a boolean value (got %r)" % (attr, value)
    )
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        # parse_requirements is lazy; force full evaluation.
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError):
        template = ("%r must be a string or list of strings "
                    "containing valid project/version requirement specifiers")
        raise DistutilsSetupError(template % (attr,))
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as e:
        # 'except ... as e' is valid on every interpreter this file supports
        # (Python 2.6+ and 3.x) and avoids the fragile sys.exc_info() dance,
        # which reads whatever exception is *currently* being handled rather
        # than the one bound to this clause.
        raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
    """Verify that test_suite is a (dotted-name) string."""
    # basestring is the py2/py3 compat alias imported at the top of the file.
    if isinstance(value, basestring):
        return
    raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    if isinstance(value, dict):
        well_formed = True
        for package, patterns in value.items():
            # Keys must be package-name strings ...
            if not isinstance(package, str):
                well_formed = False
                break
            # ... and values must be iterables of glob patterns.
            try:
                iter(patterns)
            except TypeError:
                well_formed = False
                break
        if well_formed:
            return
    raise DistutilsSetupError(
        attr + " must be a dictionary mapping package names to lists of "
        "wildcard patterns"
    )
def check_packages(dist, attr, value):
    """Warn (without failing) about package names that are not valid
    dot-separated identifiers.
    """
    for pkgname in value:
        # \Z anchors the pattern at end-of-string so that names with trailing
        # garbage (e.g. 'foo-bar', 'foo bar') are reported; the previous
        # unanchored match silently accepted any string with a valid prefix.
        if not re.match(r'\w+(\.\w+)*\Z', pkgname):
            distutils.log.warn(
                "WARNING: %r not a valid package name; please use only"
                ".-separated package names in setup.py", pkgname
            )
class Distribution(_Distribution):
    """Distribution with support for features, tests, and package data
    This is an enhanced version of 'distutils.dist.Distribution' that
    effectively adds the following new optional keyword arguments to 'setup()':
    'install_requires' -- a string or sequence of strings specifying project
        versions that the distribution requires when installed, in the format
        used by 'pkg_resources.require()'. They will be installed
        automatically when the package is installed. If you wish to use
        packages that are not available in PyPI, or want to give your users an
        alternate download location, you can add a 'find_links' option to the
        '[easy_install]' section of your project's 'setup.cfg' file, and then
        setuptools will scan the listed web pages for links that satisfy the
        requirements.
    'extras_require' -- a dictionary mapping names of optional "extras" to the
        additional requirement(s) that using those extras incurs. For example,
        this::
            extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
        indicates that the distribution can optionally provide an extra
        capability called "reST", but it can only be used if docutils and
        reSTedit are installed. If the user installs your package using
        EasyInstall and requests one of your extras, the corresponding
        additional requirements will be installed if needed.
    'features' **deprecated** -- a dictionary mapping option names to
        'setuptools.Feature'
        objects. Features are a portion of the distribution that can be
        included or excluded based on user options, inter-feature dependencies,
        and availability on the current system. Excluded features are omitted
        from all setup commands, including source and binary distributions, so
        you can create multiple distributions from the same source tree.
        Feature names should be valid Python identifiers, except that they may
        contain the '-' (minus) sign. Features can be included or excluded
        via the command line options '--with-X' and '--without-X', where 'X' is
        the name of the feature. Whether a feature is included by default, and
        whether you are allowed to control this from the command line, is
        determined by the Feature object. See the 'Feature' class for more
        information.
    'test_suite' -- the name of a test suite to run for the 'test' command.
        If the user runs 'python setup.py test', the package will be installed,
        and the named test suite will be run. The format is the same as
        would be used on a 'unittest.py' command line. That is, it is the
        dotted name of an object to import and call to generate a test suite.
    'package_data' -- a dictionary mapping package names to lists of filenames
        or globs to use to find data files contained in the named packages.
        If the dictionary has filenames or globs listed under '""' (the empty
        string), those names will be searched for in every package, in addition
        to any names for the specific package. Data files found using these
        names/globs will be installed along with the package, in the same
        location as the package. Note that globs are allowed to reference
        the contents of non-package subdirectories, as long as you use '/' as
        a path separator. (Globs are automatically converted to
        platform-specific paths at runtime.)
    In addition to these new keywords, this class also has several new methods
    for manipulating the distribution's contents. For example, the 'include()'
    and 'exclude()' methods can be thought of as in-place add and subtract
    commands that add or remove packages, modules, extensions, and so on from
    the distribution. They are used by the feature subsystem to configure the
    distribution for the included and excluded features.
    """
    # Installed distribution whose version metadata was faked up by
    # patch_missing_pkg_info() (None when no patching was needed).
    _patched_dist = None
    def patch_missing_pkg_info(self, attrs):
        """Patch the installed dist's version from *attrs* when PKG-INFO
        hasn't been built yet (fresh checkout)."""
        # Fake up a replacement for the data that would normally come from
        # PKG-INFO, but which might not yet be built if this is a fresh
        # checkout.
        #
        if not attrs or 'name' not in attrs or 'version' not in attrs:
            return
        key = pkg_resources.safe_name(str(attrs['name'])).lower()
        dist = pkg_resources.working_set.by_key.get(key)
        if dist is not None and not dist.has_metadata('PKG-INFO'):
            dist._version = pkg_resources.safe_version(str(attrs['version']))
            self._patched_dist = dist
    def __init__(self, attrs=None):
        """Consume setuptools-specific keywords from *attrs*, then defer
        to the distutils Distribution for the rest."""
        have_package_data = hasattr(self, "package_data")
        if not have_package_data:
            self.package_data = {}
        _attrs_dict = attrs or {}
        if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
            Feature.warn_deprecated()
        self.require_features = []
        self.features = {}
        self.dist_files = []
        self.src_root = attrs and attrs.pop("src_root", None)
        self.patch_missing_pkg_info(attrs)
        # Make sure we have any eggs needed to interpret 'attrs'
        if attrs is not None:
            self.dependency_links = attrs.pop('dependency_links', [])
            assert_string_list(self,'dependency_links',self.dependency_links)
        if attrs and 'setup_requires' in attrs:
            self.fetch_build_eggs(attrs.pop('setup_requires'))
        # Pre-seed an attribute for every registered setup() keyword so the
        # base class treats them as known options rather than warning.
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            if not hasattr(self,ep.name):
                setattr(self,ep.name,None)
        _Distribution.__init__(self,attrs)
        if isinstance(self.metadata.version, numeric_types):
            # Some people apparently take "version number" too literally :)
            self.metadata.version = str(self.metadata.version)
    def parse_command_line(self):
        """Process features after parsing command line options"""
        result = _Distribution.parse_command_line(self)
        if self.features:
            self._finalize_features()
        return result
    def _feature_attrname(self,name):
        """Convert feature name to corresponding option attribute name"""
        return 'with_'+name.replace('-','_')
    def fetch_build_eggs(self, requires):
        """Resolve pre-setup requirements"""
        from pkg_resources import working_set, parse_requirements
        for dist in working_set.resolve(
            parse_requirements(requires), installer=self.fetch_build_egg
        ):
            working_set.add(dist)
    def finalize_options(self):
        """Run distutils finalization, then every registered setup-keyword
        validator (which may need to fetch the egg providing it)."""
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self,ep.name,None)
            if value is not None:
                ep.require(installer=self.fetch_build_egg)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
        else:
            self.convert_2to3_doctests = []
    def fetch_build_egg(self, req):
        """Fetch an egg needed for building"""
        try:
            # Reuse a previously-configured fetcher if one exists.
            cmd = self._egg_fetcher
            cmd.package_index.to_scan = []
        except AttributeError:
            from setuptools.command.easy_install import easy_install
            dist = self.__class__({'script_args':['easy_install']})
            dist.parse_config_files()
            opts = dist.get_option_dict('easy_install')
            keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts'
            )
            for key in list(opts):
                if key not in keep:
                    del opts[key]   # don't use any other settings
            if self.dependency_links:
                links = self.dependency_links[:]
                if 'find_links' in opts:
                    links = opts['find_links'][1].split() + links
                opts['find_links'] = ('setup', links)
            cmd = easy_install(
                dist, args=["x"], install_dir=os.curdir, exclude_scripts=True,
                always_copy=False, build_directory=None, editable=False,
                upgrade=False, multi_version=True, no_report=True, user=False
            )
            cmd.ensure_finalized()
            self._egg_fetcher = cmd
        return cmd.easy_install(req)
    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""
        go = []
        no = self.negative_opt.copy()
        for name,feature in self.features.items():
            self._set_feature(name,None)
            feature.validate(self)
            if feature.optional:
                descr = feature.description
                incdef = ' (default)'
                excdef=''
                if not feature.include_by_default():
                    excdef, incdef = incdef, excdef
                go.append(('with-'+name, None, 'include '+descr+incdef))
                go.append(('without-'+name, None, 'exclude '+descr+excdef))
                no['without-'+name] = 'with-'+name
        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no
    def _finalize_features(self):
        """Add/remove features and resolve dependencies between them"""
        # First, flag all the enabled items (and thus their dependencies)
        for name,feature in self.features.items():
            enabled = self.feature_is_included(name)
            if enabled or (enabled is None and feature.include_by_default()):
                feature.include_in(self)
                self._set_feature(name,1)
        # Then disable the rest, so that off-by-default features don't
        # get flagged as errors when they're required by an enabled feature
        for name,feature in self.features.items():
            if not self.feature_is_included(name):
                feature.exclude_from(self)
                self._set_feature(name,0)
    def get_command_class(self, command):
        """Pluggable version of get_command_class()"""
        if command in self.cmdclass:
            return self.cmdclass[command]
        # Look the command up as a 'distutils.commands' entry point, falling
        # back to distutils when no plugin provides it (for/else).
        for ep in pkg_resources.iter_entry_points('distutils.commands',command):
            ep.require(installer=self.fetch_build_egg)
            self.cmdclass[command] = cmdclass = ep.load()
            return cmdclass
        else:
            return _Distribution.get_command_class(self, command)
    def print_commands(self):
        """Register all plugin-provided commands before printing the list."""
        for ep in pkg_resources.iter_entry_points('distutils.commands'):
            if ep.name not in self.cmdclass:
                cmdclass = ep.load(False) # don't require extras, we're not running
                self.cmdclass[ep.name] = cmdclass
        return _Distribution.print_commands(self)
    def _set_feature(self,name,status):
        """Set feature's inclusion status"""
        setattr(self,self._feature_attrname(name),status)
    def feature_is_included(self,name):
        """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
        return getattr(self,self._feature_attrname(name))
    def include_feature(self,name):
        """Request inclusion of feature named 'name'"""
        if self.feature_is_included(name)==0:
            descr = self.features[name].description
            raise DistutilsOptionError(
                descr + " is required, but was excluded or is not available"
            )
        self.features[name].include_in(self)
        self._set_feature(name,1)
    def include(self,**attrs):
        """Add items to distribution that are named in keyword arguments
        For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
        the distribution's 'py_modules' attribute, if it was not already
        there.
        Currently, this method only supports inclusion for attributes that are
        lists or tuples. If you need to add support for adding to other
        attributes in this or a subclass, you can add an '_include_X' method,
        where 'X' is the name of the attribute. The method will be called with
        the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
        will try to call 'dist._include_foo({"bar":"baz"})', which can then
        handle whatever special inclusion logic is needed.
        """
        for k,v in attrs.items():
            include = getattr(self, '_include_'+k, None)
            if include:
                include(v)
            else:
                self._include_misc(k,v)
    def exclude_package(self,package):
        """Remove packages, modules, and extensions in named package"""
        pfx = package+'.'
        if self.packages:
            self.packages = [
                p for p in self.packages
                if p != package and not p.startswith(pfx)
            ]
        if self.py_modules:
            self.py_modules = [
                p for p in self.py_modules
                if p != package and not p.startswith(pfx)
            ]
        if self.ext_modules:
            self.ext_modules = [
                p for p in self.ext_modules
                if p.name != package and not p.name.startswith(pfx)
            ]
    def has_contents_for(self,package):
        """Return true if 'exclude_package(package)' would do something"""
        pfx = package+'.'
        for p in self.iter_distribution_names():
            if p==package or p.startswith(pfx):
                return True
    def _exclude_misc(self,name,value):
        """Handle 'exclude()' for list/tuple attrs without a special handler"""
        if not isinstance(value,sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list or tuple (%r)" % (name, value)
            )
        try:
            old = getattr(self,name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is not None and not isinstance(old,sequence):
            raise DistutilsSetupError(
                name+": this setting cannot be changed via include/exclude"
            )
        elif old:
            setattr(self,name,[item for item in old if item not in value])
    def _include_misc(self,name,value):
        """Handle 'include()' for list/tuple attrs without a special handler"""
        if not isinstance(value,sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list (%r)" % (name, value)
            )
        try:
            old = getattr(self,name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is None:
            setattr(self,name,value)
        elif not isinstance(old,sequence):
            raise DistutilsSetupError(
                name+": this setting cannot be changed via include/exclude"
            )
        else:
            setattr(self,name,old+[item for item in value if item not in old])
    def exclude(self,**attrs):
        """Remove items from distribution that are named in keyword arguments
        For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
        the distribution's 'py_modules' attribute. Excluding packages uses
        the 'exclude_package()' method, so all of the package's contained
        packages, modules, and extensions are also excluded.
        Currently, this method only supports exclusion from attributes that are
        lists or tuples. If you need to add support for excluding from other
        attributes in this or a subclass, you can add an '_exclude_X' method,
        where 'X' is the name of the attribute. The method will be called with
        the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
        will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
        handle whatever special exclusion logic is needed.
        """
        for k,v in attrs.items():
            exclude = getattr(self, '_exclude_'+k, None)
            if exclude:
                exclude(v)
            else:
                self._exclude_misc(k,v)
    def _exclude_packages(self,packages):
        """Special-case handler so exclude(packages=...) removes contents too."""
        if not isinstance(packages,sequence):
            raise DistutilsSetupError(
                "packages: setting must be a list or tuple (%r)" % (packages,)
            )
        list(map(self.exclude_package, packages))
    def _parse_command_opts(self, parser, args):
        """Expand command aliases and strip feature options before parsing."""
        # Remove --with-X/--without-X options when processing command args
        self.global_options = self.__class__.global_options
        self.negative_opt = self.__class__.negative_opt
        # First, expand any aliases
        command = args[0]
        aliases = self.get_option_dict('aliases')
        while command in aliases:
            src,alias = aliases[command]
            del aliases[command]    # ensure each alias can expand only once!
            import shlex
            args[:1] = shlex.split(alias,True)
            command = args[0]
        nargs = _Distribution._parse_command_opts(self, parser, args)
        # Handle commands that want to consume all remaining arguments
        cmd_class = self.get_command_class(command)
        if getattr(cmd_class,'command_consumes_arguments',None):
            self.get_option_dict(command)['args'] = ("command line", nargs)
            if nargs is not None:
                return []
        return nargs
    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options
        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores. If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.
        Note that options provided by config files are intentionally excluded.
        """
        d = {}
        for cmd,opts in self.command_options.items():
            for opt,(src,val) in opts.items():
                if src != "command line":
                    continue
                opt = opt.replace('_','-')
                if val==0:
                    # A false value means a negative option was used; map it
                    # back to the '--no-X'-style spelling (for/else guards
                    # against an impossible unmapped case).
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj,'negative_opt',{}))
                    for neg,pos in neg_opt.items():
                        if pos==opt:
                            opt=neg
                            val=None
                            break
                    else:
                        raise AssertionError("Shouldn't be able to get here")
                elif val==1:
                    val = None
                d.setdefault(cmd,{})[opt] = val
        return d
    def iter_distribution_names(self):
        """Yield all packages, modules, and extension names in distribution"""
        for pkg in self.packages or ():
            yield pkg
        for module in self.py_modules or ():
            yield module
        for ext in self.ext_modules or ():
            if isinstance(ext,tuple):
                name, buildinfo = ext
            else:
                name = ext.name
            if name.endswith('module'):
                name = name[:-6]
            yield name
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.
        """
        import sys
        if sys.version_info < (3,) or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)
        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)
        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)
        # Print metadata in UTF-8 no matter the platform
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Restore the original encoding wrapper whatever happens.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
# (so later `from distutils... import Distribution` picks up the
# setuptools-enhanced class in every distutils entry module).
for module in distutils.dist, distutils.core, distutils.cmd:
    module.Distribution = Distribution
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://bitbucket.org/pypa/setuptools/issue/58>`_ and will be removed in
    a future version.
    A subset of the distribution that can be excluded if unneeded/wanted
    Features are created using these keyword arguments:
    'description' -- a short, human readable description of the feature, to
        be used in error messages, and option help messages.
    'standard' -- if true, the feature is included by default if it is
        available on the current system. Otherwise, the feature is only
        included if requested via a command line '--with-X' option, or if
        another included feature requires it. The default setting is 'False'.
    'available' -- if true, the feature is available for installation on the
        current system. The default setting is 'True'.
    'optional' -- if true, the feature's inclusion can be controlled from the
        command line, using the '--with-X' or '--without-X' options. If
        false, the feature's inclusion status is determined automatically,
        based on 'available', 'standard', and whether any other feature
        requires it. The default setting is 'True'.
    'require_features' -- a string or sequence of strings naming features
        that should also be included if this feature is included. Defaults to
        empty list. May also contain 'Require' objects that should be
        added/removed from the distribution.
    'remove' -- a string or list of strings naming packages to be removed
        from the distribution if this feature is *not* included. If the
        feature *is* included, this argument is ignored. This argument exists
        to support removing features that "crosscut" a distribution, such as
        defining a 'tests' feature that removes all the 'tests' subpackages
        provided by other features. The default for this argument is an empty
        list. (Note: the named package(s) or modules must exist in the base
        distribution when the 'setup()' function is initially called.)
    other keywords -- any other keyword arguments are saved, and passed to
        the distribution's 'include()' and 'exclude()' methods when the
        feature is included or excluded, respectively. So, for example, you
        could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
        added or removed from the distribution as appropriate.
    A feature must include at least one 'requires', 'remove', or other
    keyword argument. Otherwise, it can't affect the distribution in any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded. See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """
    @staticmethod
    def warn_deprecated():
        warnings.warn(
            "Features are deprecated and will be removed in a future "
            "version. See http://bitbucket.org/pypa/setuptools/65.",
            DeprecationWarning,
            stacklevel=3,
        )
    def __init__(self, description, standard=False, available=True,
            optional=True, require_features=(), remove=(), **extras):
        """Record the feature's settings, normalizing scalar arguments to
        tuples; raises DistutilsSetupError for a feature with no effect."""
        self.warn_deprecated()
        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # A single name or Require object is accepted as shorthand.
        if isinstance(require_features,(str,Require)):
            require_features = require_features,
        self.require_features = [
            r for r in require_features if isinstance(r,str)
        ]
        # Non-string requirements (Require objects) are routed through the
        # distribution's include()/exclude() machinery via 'extras'.
        er = [r for r in require_features if not isinstance(r,str)]
        if er: extras['require_features'] = er
        if isinstance(remove,str):
            remove = remove,
        self.remove = remove
        self.extras = extras
        if not remove and not require_features and not extras:
            # BUG FIX: the message previously left the '%s' placeholder
            # unfilled; interpolate the feature's description.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or at least one"
                " of 'packages', 'py_modules', etc." % self.description
            )
    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard
    def include_in(self,dist):
        """Ensure feature and its requirements are included in distribution
        You may override this in a subclass to perform additional operations on
        the distribution. Note that this method may be called more than once
        per feature, and so should be idempotent.
        """
        if not self.available:
            raise DistutilsPlatformError(
                self.description+" is required,"
                "but is not available on this platform"
            )
        dist.include(**self.extras)
        for f in self.require_features:
            dist.include_feature(f)
    def exclude_from(self,dist):
        """Ensure feature is excluded from distribution
        You may override this in a subclass to perform additional operations on
        the distribution. This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)
        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)
    def validate(self,dist):
        """Verify that feature makes sense in context of distribution
        This method is called by the distribution just before it parses its
        command line. It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called. You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
# Based on the buildah connection plugin
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Connection plugin to interact with existing podman containers.
# https://github.com/containers/libpod
#
# Written by: Tomas Tomecek (https://github.com/TomasTomecek)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import shlex
import shutil
import subprocess
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase, ensure_connect
from ansible.utils.display import Display
display = Display()
DOCUMENTATION = """
author: Tomas Tomecek (ttomecek@redhat.com)
connection: podman
short_description: Interact with an existing podman container
description:
- Run commands or put/fetch files to an existing container using podman tool.
version_added: 2.8
options:
remote_addr:
description:
- The ID of the container you want to access.
default: inventory_hostname
vars:
- name: ansible_host
remote_user:
description:
- User specified via name or UID which is used to execute commands inside the container. If you
specify the user via UID, you must set C(ANSIBLE_REMOTE_TMP) to a path that exits
inside the container and is writable by Ansible.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
podman_extra_args:
description:
- Extra arguments to pass to the podman command line.
default: ''
ini:
- section: defaults
key: podman_extra_args
vars:
- name: ansible_podman_extra_args
env:
- name: ANSIBLE_PODMAN_EXTRA_ARGS
podman_executable:
description:
- Executable for podman command.
default: podman
vars:
- name: ansible_podman_executable
env:
- name: ANSIBLE_PODMAN_EXECUTABLE
"""
# this _has to be_ named Connection
class Connection(ConnectionBase):
    """
    This is a connection plugin for podman. It uses podman binary to interact with the containers
    """
    # String used to identify this Connection class from other classes
    transport = 'podman'
    # Module payloads can be fed via stdin in a single exec call, so
    # pipelining (fewer round-trips per task) is supported.
    has_pipelining = True
    def __init__(self, play_context, new_stdin, *args, **kwargs):
        """Cache per-host settings; no podman process is started here."""
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
        # The container name/ID comes from the inventory host (remote_addr).
        self._container_id = self._play_context.remote_addr
        self._connected = False
        # container filesystem will be mounted here on host
        self._mount_point = None
        self.user = self._play_context.remote_user
    def _podman(self, cmd, cmd_args=None, in_data=None, use_container_id=True):
        """
        run podman executable
        :param cmd: podman's command to execute (str)
        :param cmd_args: list of arguments to pass to the command (list of str/bytes)
        :param in_data: data passed to podman's stdin
        :param use_container_id: if True (default), append the container ID
            directly after the command; subcommands such as ``cp`` embed the
            ID inside their own arguments instead and pass False
        :return: return code, stdout, stderr
        """
        podman_exec = self.get_option('podman_executable')
        podman_cmd = distutils.spawn.find_executable(podman_exec)
        if not podman_cmd:
            raise AnsibleError("%s command not found in PATH" % podman_exec)
        local_cmd = [podman_cmd]
        # User-supplied global flags (e.g. --remote) go before the command.
        if self.get_option('podman_extra_args'):
            local_cmd += shlex.split(
                to_native(
                    self.get_option('podman_extra_args'),
                    errors='surrogate_or_strict'))
        local_cmd.append(cmd)
        if use_container_id:
            local_cmd.append(self._container_id)
        if cmd_args:
            local_cmd += cmd_args
        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
        display.vvv("RUN %s" % (local_cmd,), host=self._container_id)
        p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate(input=in_data)
        display.vvvvv("STDOUT %s" % stdout)
        display.vvvvv("STDERR %s" % stderr)
        display.vvvvv("RC CODE %s" % p.returncode)
        # Popen without universal_newlines already yields bytes; to_bytes
        # here is a defensive no-op normalization.
        stdout = to_bytes(stdout, errors='surrogate_or_strict')
        stderr = to_bytes(stderr, errors='surrogate_or_strict')
        return p.returncode, stdout, stderr
def _connect(self):
"""
no persistent connection is being maintained, mount container's filesystem
so we can easily access it
"""
super(Connection, self)._connect()
rc, self._mount_point, stderr = self._podman("mount")
if rc != 0:
display.v("Failed to mount container %s: %s" % (self._container_id, stderr.strip()))
else:
self._mount_point = self._mount_point.strip()
display.vvvvv("MOUNTPOINT %s RC %s STDERR %r" % (self._mount_point, rc, stderr))
self._connected = True
@ensure_connect
def exec_command(self, cmd, in_data=None, sudoable=False):
""" run specified command in a running OCI container using podman """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
# shlex.split has a bug with text strings on Python-2.6 and can only handle text strings on Python-3
cmd_args_list = shlex.split(to_native(cmd, errors='surrogate_or_strict'))
if self.user:
cmd_args_list += ["--user", self.user]
rc, stdout, stderr = self._podman("exec", cmd_args_list, in_data)
display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr))
return rc, stdout, stderr
def put_file(self, in_path, out_path):
""" Place a local file located in 'in_path' inside container at 'out_path' """
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._container_id)
if not self._mount_point:
rc, stdout, stderr = self._podman(
"cp", [in_path, self._container_id + ":" + out_path], use_container_id=False
)
if rc != 0:
if 'cannot copy into running rootless container with pause set' in to_native(stderr):
rc, stdout, stderr = self._podman(
"cp", ["--pause=false", in_path, self._container_id + ":" + out_path], use_container_id=False
)
if rc != 0:
raise AnsibleError(
"Failed to copy file from %s to %s in container %s\n%s" % (
in_path, out_path, self._container_id, stderr)
)
else:
real_out_path = self._mount_point + to_bytes(out_path, errors='surrogate_or_strict')
shutil.copyfile(
to_bytes(in_path, errors='surrogate_or_strict'),
to_bytes(real_out_path, errors='surrogate_or_strict')
)
def fetch_file(self, in_path, out_path):
""" obtain file specified via 'in_path' from the container and place it at 'out_path' """
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._container_id)
if not self._mount_point:
rc, stdout, stderr = self._podman(
"cp", [self._container_id + ":" + in_path, out_path], use_container_id=False)
if rc != 0:
raise AnsibleError("Failed to fetch file from %s to %s from container %s\n%s" % (
in_path, out_path, self._container_id, stderr))
else:
real_in_path = self._mount_point + to_bytes(in_path, errors='surrogate_or_strict')
shutil.copyfile(
to_bytes(real_in_path, errors='surrogate_or_strict'),
to_bytes(out_path, errors='surrogate_or_strict')
)
def close(self):
""" unmount container's filesystem """
super(Connection, self).close()
# we actually don't need to unmount since the container is mounted anyway
# rc, stdout, stderr = self._podman("umount")
# display.vvvvv("RC %s STDOUT %r STDERR %r" % (rc, stdout, stderr))
self._connected = False | unknown | codeparrot/codeparrot-clean | ||
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RFitModels(RPackage):
    """Compare Fitted Models"""

    # CRAN metadata for the fit.models R package
    homepage = "https://cran.r-project.org/package=fit.models"
    url = "https://cran.r-project.org/src/contrib/fit.models_0.5-14.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/fit.models"

    version('0.5-14', '159b5c57953db4c917bc186ddacdff51')
    version('0.5-13', 'c9ff87e98189bcc3be597e3833408497')

    # lattice is needed both to build and to run the package
    depends_on('r-lattice', type=('build', 'run'))
# -*- coding: utf-8 -*-
# Copyright: (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from itertools import product
import pytest
# the module we are actually testing (sort of)
from ansible.module_utils.facts.system.distribution import DistributionFactCollector
# to generate the testcase data, you can use the script gen_distribution_version_testcase.py in hacking/tests
TESTSETS = [
{
"platform.dist": [
"centos",
"7.2.1511",
"Core"
],
"input": {
"/etc/redhat-release": "CentOS Linux release 7.2.1511 (Core) \n",
"/etc/os-release": (
"NAME=\"CentOS Linux\"\nVERSION=\"7 (Core)\"\nID=\"centos\"\nID_LIKE=\"rhel fedora\"\nVERSION_ID=\"7\"\n"
"PRETTY_NAME=\"CentOS Linux 7 (Core)\"\nANSI_COLOR=\"0;31\"\nCPE_NAME=\"cpe:/o:centos:centos:7\"\n"
"HOME_URL=\"https://www.centos.org/\"\nBUG_REPORT_URL=\"https://bugs.centos.org/\"\n\nCENTOS_MANTISBT_PROJECT=\"CentOS-7\"\n"
"CENTOS_MANTISBT_PROJECT_VERSION=\"7\"\nREDHAT_SUPPORT_PRODUCT=\"centos\"\nREDHAT_SUPPORT_PRODUCT_VERSION=\"7\"\n\n"
),
"/etc/system-release": "CentOS Linux release 7.2.1511 (Core) \n"
},
"name": "CentOS 7.2.1511",
"result": {
"distribution_release": "Core",
"distribution": "CentOS",
"distribution_major_version": "7",
"os_family": "RedHat",
"distribution_version": "7.2.1511",
}
},
{
"name": "CentOS 6.7",
"platform.dist": [
"centos",
"6.7",
"Final"
],
"input": {
"/etc/redhat-release": "CentOS release 6.7 (Final)\n",
"/etc/lsb-release": (
"LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:"
"printing-4.0-amd64:printing-4.0-noarch\n"
),
"/etc/system-release": "CentOS release 6.7 (Final)\n"
},
"result": {
"distribution_release": "Final",
"distribution": "CentOS",
"distribution_major_version": "6",
"os_family": "RedHat",
"distribution_version": "6.7"
}
},
{
"name": "RedHat 7.2",
"platform.dist": [
"redhat",
"7.2",
"Maipo"
],
"input": {
"/etc/redhat-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n",
"/etc/os-release": (
"NAME=\"Red Hat Enterprise Linux Server\"\nVERSION=\"7.2 (Maipo)\"\nID=\"rhel\"\nID_LIKE=\"fedora\"\nVERSION_ID=\"7.2\"\n"
"PRETTY_NAME=\"Red Hat Enterprise Linux Server 7.2 (Maipo)\"\nANSI_COLOR=\"0;31\"\n"
"CPE_NAME=\"cpe:/o:redhat:enterprise_linux:7.2:GA:server\"\nHOME_URL=\"https://www.redhat.com/\"\n"
"BUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n\nREDHAT_BUGZILLA_PRODUCT=\"Red Hat Enterprise Linux 7\"\n"
"REDHAT_BUGZILLA_PRODUCT_VERSION=7.2\nREDHAT_SUPPORT_PRODUCT=\"Red Hat Enterprise Linux\"\n"
"REDHAT_SUPPORT_PRODUCT_VERSION=\"7.2\"\n"
),
"/etc/system-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n"
},
"result": {
"distribution_release": "Maipo",
"distribution": "RedHat",
"distribution_major_version": "7",
"os_family": "RedHat",
"distribution_version": "7.2"
}
},
{
"name": "RedHat 6.7",
"platform.dist": [
"redhat",
"6.7",
"Santiago"
],
"input": {
"/etc/redhat-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n",
"/etc/lsb-release": (
"LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:"
"printing-4.0-amd64:printing-4.0-noarch\n"
),
"/etc/system-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n"
},
"result": {
"distribution_release": "Santiago",
"distribution": "RedHat",
"distribution_major_version": "6",
"os_family": "RedHat",
"distribution_version": "6.7"
}
},
{
"name": "Virtuozzo 7.3",
"platform.dist": [
"redhat",
"7.3",
""
],
"input": {
"/etc/redhat-release": "Virtuozzo Linux release 7.3\n",
"/etc/os-release": (
"NAME=\"Virtuozzo\"\n"
"VERSION=\"7.0.3\"\n"
"ID=\"virtuozzo\"\n"
"ID_LIKE=\"rhel fedora\"\n"
"VERSION_ID=\"7\"\n"
"PRETTY_NAME=\"Virtuozzo release 7.0.3\"\n"
"ANSI_COLOR=\"0;31\"\n"
"CPE_NAME=\"cpe:/o:virtuozzoproject:vz:7\"\n"
"HOME_URL=\"http://www.virtuozzo.com\"\n"
"BUG_REPORT_URL=\"https://bugs.openvz.org/\"\n"
),
"/etc/system-release": "Virtuozzo release 7.0.3 (640)\n"
},
"result": {
"distribution_release": "NA",
"distribution": "Virtuozzo",
"distribution_major_version": "7",
"os_family": "RedHat",
"distribution_version": "7.3"
}
},
{
"name": "openSUSE Leap 42.1",
"input": {
"/etc/os-release": """
NAME="openSUSE Leap"
VERSION="42.1"
VERSION_ID="42.1"
PRETTY_NAME="openSUSE Leap 42.1 (x86_64)"
ID=opensuse
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:opensuse:opensuse:42.1"
BUG_REPORT_URL="https://bugs.opensuse.org"
HOME_URL="https://opensuse.org/"
ID_LIKE="suse"
""",
"/etc/SuSE-release": """
openSUSE 42.1 (x86_64)
VERSION = 42.1
CODENAME = Malachite
# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead
"""
},
"platform.dist": ['SuSE', '42.1', 'x86_64'],
"result": {
"distribution": "openSUSE Leap",
"distribution_major_version": "42",
"distribution_release": "1",
"os_family": "Suse",
"distribution_version": "42.1",
}
},
{
'name': 'openSUSE 13.2',
'input': {
'/etc/SuSE-release': """openSUSE 13.2 (x86_64)
VERSION = 13.2
CODENAME = Harlequin
# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead
""",
'/etc/os-release': """NAME=openSUSE
VERSION="13.2 (Harlequin)"
VERSION_ID="13.2"
PRETTY_NAME="openSUSE 13.2 (Harlequin) (x86_64)"
ID=opensuse
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:opensuse:opensuse:13.2"
BUG_REPORT_URL="https://bugs.opensuse.org"
HOME_URL="https://opensuse.org/"
ID_LIKE="suse"
"""
},
'platform.dist': ('SuSE', '13.2', 'x86_64'),
'result': {
'distribution': u'openSUSE',
'distribution_major_version': u'13',
'distribution_release': u'2',
'os_family': u'Suse',
'distribution_version': u'13.2'
}
},
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/os-release": (
"NAME=\"openSUSE Tumbleweed\"\n# VERSION=\"20160917\"\nID=opensuse\nID_LIKE=\"suse\"\nVERSION_ID=\"20160917\"\n"
"PRETTY_NAME=\"openSUSE Tumbleweed\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:tumbleweed:20160917\"\n"
"BUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n"
)
},
"name": "openSUSE Tumbleweed 20160917",
"result": {
"distribution_release": "",
"distribution": "openSUSE Tumbleweed",
"distribution_major_version": "NA",
"os_family": "Suse",
"distribution_version": "20160917"
}
},
{ # see https://github.com/ansible/ansible/issues/14837
"name": "SLES 11.3",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 11 (x86_64)
VERSION = 11
PATCHLEVEL = 3
"""
},
"platform.dist": ['SuSE', '11', 'x86_64'],
"result": {
"distribution": "SLES",
"distribution_major_version": "11",
"distribution_release": "3",
"os_family": "Suse",
"distribution_version": "11.3",
}
},
{ # see https://github.com/ansible/ansible/issues/14837
"name": "SLES 11.4",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 11 (x86_64)
VERSION = 11
PATCHLEVEL = 4
""",
"/etc/os-release": """
NAME="SLES"
VERSION="11.4"
VERSION_ID="11.4"
PRETTY_NAME="SUSE Linux Enterprise Server 11 SP4"
ID="sles"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles:11:4"
""",
},
"platform.dist": ['SuSE', '11', 'x86_64'],
"result":{
"distribution": "SLES",
"distribution_major_version": "11",
"distribution_release": "4",
"os_family": "Suse",
"distribution_version": "11.4",
}
},
{ # see https://github.com/ansible/ansible/issues/14837
"name": "SLES 12 SP0",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 12 (x86_64)
VERSION = 12
PATCHLEVEL = 0
# This file is deprecated and will be removed in a future service pack or release.
# Please check /etc/os-release for details about this release.
""",
"/etc/os-release": """
NAME="SLES"
VERSION="12"
VERSION_ID="12"
PRETTY_NAME="SUSE Linux Enterprise Server 12"
ID="sles"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles:12"
""",
},
"platform.dist": ['SuSE', '12', 'x86_64'],
"result": {
"distribution": "SLES",
"distribution_major_version": "12",
"distribution_release": "0",
"os_family": "Suse",
"distribution_version": "12",
}
},
{ # see https://github.com/ansible/ansible/issues/14837
"name": "SLES 12 SP1",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 12 (x86_64)
VERSION = 12
PATCHLEVEL = 0
# This file is deprecated and will be removed in a future service pack or release.
# Please check /etc/os-release for details about this release.
""",
"/etc/os-release": """
NAME="SLES"
VERSION="12-SP1"
VERSION_ID="12.1"
PRETTY_NAME="SUSE Linux Enterprise Server 12 SP1"
ID="sles"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles:12:sp1"
""",
},
"platform.dist": ['SuSE', '12', 'x86_64'],
"result":{
"distribution": "SLES",
"distribution_major_version": "12",
"distribution_release": "1",
"os_family": "Suse",
"distribution_version": "12.1",
}
},
{
"name": "Debian stretch/sid",
"input": {
"/etc/os-release": """
PRETTY_NAME="Debian GNU/Linux stretch/sid"
NAME="Debian GNU/Linux"
ID=debian
HOME_URL="https://www.debian.org/"
SUPPORT_URL="https://www.debian.org/support"
BUG_REPORT_URL="https://bugs.debian.org/"
""",
"/etc/debian_version": """
stretch/sid
""",
},
"platform.dist": ('debian', 'stretch/sid', ''),
"result": {
"distribution": "Debian",
"distribution_major_version": "stretch/sid",
"distribution_release": "NA",
"os_family": "Debian",
"distribution_version": "stretch/sid",
}
},
{
'name': "Debian 7.9",
'input': {
'/etc/os-release': """PRETTY_NAME="Debian GNU/Linux 7 (wheezy)"
NAME="Debian GNU/Linux"
VERSION_ID="7"
VERSION="7 (wheezy)"
ID=debian
ANSI_COLOR="1;31"
HOME_URL="http://www.debian.org/"
SUPPORT_URL="http://www.debian.org/support/"
BUG_REPORT_URL="http://bugs.debian.org/"
"""
},
'platform.dist': ('debian', '7.9', ''),
'result': {
'distribution': u'Debian',
'distribution_major_version': u'7',
'distribution_release': u'wheezy',
"os_family": "Debian",
'distribution_version': u'7.9'
}
},
{
"platform.dist": [
"Ubuntu",
"16.04",
"xenial"
],
"input": {
"/etc/os-release": (
"NAME=\"Ubuntu\"\nVERSION=\"16.04 LTS (Xenial Xerus)\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 16.04 LTS\"\n"
"VERSION_ID=\"16.04\"\nHOME_URL=\"http://www.ubuntu.com/\"\nSUPPORT_URL=\"http://help.ubuntu.com/\"\n"
"BUG_REPORT_URL=\"http://bugs.launchpad.net/ubuntu/\"\nUBUNTU_CODENAME=xenial\n"
),
"/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=16.04\nDISTRIB_CODENAME=xenial\nDISTRIB_DESCRIPTION=\"Ubuntu 16.04 LTS\"\n"
},
"name": "Ubuntu 16.04",
"result": {
"distribution_release": "xenial",
"distribution": "Ubuntu",
"distribution_major_version": "16",
"os_family": "Debian",
"distribution_version": "16.04"
}
},
{
'name': "Ubuntu 10.04 guess",
'input':
{
'/etc/lsb-release': """DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=10.04
DISTRIB_CODENAME=lucid
DISTRIB_DESCRIPTION="Ubuntu 10.04.4 LTS
"""
},
'platform.dist': ('Ubuntu', '10.04', 'lucid'),
'result':
{
'distribution': u'Ubuntu',
'distribution_major_version': u'10',
'distribution_release': u'lucid',
"os_family": "Debian",
'distribution_version': u'10.04'
}
},
{
'name': "Ubuntu 14.04",
'input': {
'/etc/lsb-release': """DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=14.04
DISTRIB_CODENAME=trusty
DISTRIB_DESCRIPTION="Ubuntu 14.04.4 LTS"
""",
'/etc/os-release': """NAME="Ubuntu"
VERSION="14.04.4 LTS, Trusty Tahr"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 14.04.4 LTS"
VERSION_ID="14.04"
HOME_URL="http://www.ubuntu.com/"
SUPPORT_URL="http://help.ubuntu.com/"
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
"""
},
'platform.dist': ('Ubuntu', '14.04', 'trusty'),
'result': {
'distribution': u'Ubuntu',
'distribution_major_version': u'14',
'distribution_release': u'trusty',
"os_family": "Debian",
'distribution_version': u'14.04'
}
},
{
'name': "Ubuntu 12.04",
'input': {'/etc/lsb-release': """DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=12.04
DISTRIB_CODENAME=precise
DISTRIB_DESCRIPTION="Ubuntu 12.04.5 LTS"
""",
'/etc/os-release': """NAME="Ubuntu"
VERSION="12.04.5 LTS, Precise Pangolin"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu precise (12.04.5 LTS)"
VERSION_ID="12.04"
"""},
'platform.dist': ('Ubuntu', '12.04', 'precise'),
'result': {'distribution': u'Ubuntu',
'distribution_major_version': u'12',
'distribution_release': u'precise',
"os_family": "Debian",
'distribution_version': u'12.04'}
},
{
"platform.dist": [
"neon",
"16.04",
"xenial"
],
"input": {
"/etc/os-release": ("NAME=\"KDE neon\"\nVERSION=\"5.8\"\nID=neon\nID_LIKE=\"ubuntu debian\"\nPRETTY_NAME=\"KDE neon User Edition 5.8\"\n"
"VERSION_ID=\"16.04\"\nHOME_URL=\"http://neon.kde.org/\"\nSUPPORT_URL=\"http://neon.kde.org/\"\n"
"BUG_REPORT_URL=\"http://bugs.kde.org/\"\nVERSION_CODENAME=xenial\nUBUNTU_CODENAME=xenial\n"),
"/etc/lsb-release": "DISTRIB_ID=neon\nDISTRIB_RELEASE=16.04\nDISTRIB_CODENAME=xenial\nDISTRIB_DESCRIPTION=\"KDE neon User Edition 5.8\"\n"
},
"name": "KDE neon 16.04",
"result": {
"distribution_release": "xenial",
"distribution": "KDE neon",
"distribution_major_version": "16",
"os_family": "Debian",
"distribution_version": "16.04"
}
},
{
'name': 'Core OS',
'input': {
'/etc/os-release': """
NAME=CoreOS
ID=coreos
VERSION=976.0.0
VERSION_ID=976.0.0
BUILD_ID=2016-03-03-2324
PRETTY_NAME="CoreOS 976.0.0 (Coeur Rouge)"
ANSI_COLOR="1;32"
HOME_URL="https://coreos.com/"
BUG_REPORT_URL="https://github.com/coreos/bugs/issues"
""",
'/etc/lsb-release': """DISTRIB_ID=CoreOS
DISTRIB_RELEASE=976.0.0
DISTRIB_CODENAME="Coeur Rouge"
DISTRIB_DESCRIPTION="CoreOS 976.0.0 (Coeur Rouge)"
""",
},
'platform.dist': ('', '', ''),
'platform.release': '',
'result': {
"distribution": "CoreOS",
"distribution_major_version": "NA",
"distribution_release": "NA",
"distribution_version": "976.0.0",
}
},
# Solaris and derivatives: https://gist.github.com/natefoo/7af6f3d47bb008669467
{
"name": "SmartOS Global Zone",
"uname_v": "joyent_20160330T234717Z",
"result": {
"distribution_release": "SmartOS 20160330T234717Z x86_64",
"distribution": "SmartOS",
"os_family": "Solaris",
"distribution_version": "joyent_20160330T234717Z"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" SmartOS 20160330T234717Z x86_64\n"
" Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.\n"
" Copyright 2010-2012 Joyent, Inc. All Rights Reserved.\n"
" Use is subject to license terms.\n\n"
" Built with the following components:\n\n[\n"
" { \"repo\": \"smartos-live\", \"branch\": \"release-20160331\", \"rev\": \"a77c410f2afe6dc9853a915733caec3609cc50f1\", "
"\"commit_date\": \"1459340323\", \"url\": \"git@github.com:joyent/smartos-live.git\" }\n , "
"{ \"repo\": \"illumos-joyent\", \"branch\": \"release-20160331\", \"rev\": \"ab664c06caf06e9ce7586bff956e7709df1e702e\", "
"\"commit_date\": \"1459362533\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-joyent\" }\n"
" , { \"repo\": \"illumos-extra\", \"branch\": \"release-20160331\", "
"\"rev\": \"cc723855bceace3df7860b607c9e3827d47e0ff4\", \"commit_date\": \"1458153188\", "
"\"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-extra\" }\n , "
"{ \"repo\": \"kvm\", \"branch\": \"release-20160331\", \"rev\": \"a8befd521c7e673749c64f118585814009fe4b73\", "
"\"commit_date\": \"1450081968\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-kvm\" }\n , "
"{ \"repo\": \"kvm-cmd\", \"branch\": \"release-20160331\", \"rev\": \"c1a197c8e4582c68739ab08f7e3198b2392c9820\", "
"\"commit_date\": \"1454723558\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-kvm-cmd\" }\n , "
"{ \"repo\": \"mdata-client\", \"branch\": \"release-20160331\", \"rev\": \"58158c44603a3316928975deccc5d10864832770\", "
"\"commit_date\": \"1429917227\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/mdata-client\" }\n]\n")
},
"platform.system": "SunOS"
},
{
"name": "SmartOS Zone",
"uname_v": "joyent_20160330T234717Z",
"result": {
"distribution_release": "SmartOS x86_64",
"distribution": "SmartOS",
"os_family": "Solaris",
"distribution_version": "14.3.0"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" SmartOS x86_64\n Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.\n"
" Copyright 2010-2013 Joyent, Inc. All Rights Reserved.\n Use is subject to license terms.\n"
" See joyent_20141002T182809Z for assembly date and time.\n"),
"/etc/product": "Name: Joyent Instance\nImage: base64 14.3.0\nDocumentation: http://wiki.joyent.com/jpc2/Base+Instance\n"
},
"platform.system": "SunOS"
},
{
"name": "OpenIndiana",
"uname_v": "oi_151a9",
"result": {
"distribution_release": "OpenIndiana Development oi_151.1.9 X86 (powered by illumos)",
"distribution": "OpenIndiana",
"os_family": "Solaris",
"distribution_version": "oi_151a9"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" OpenIndiana Development oi_151.1.9 X86 (powered by illumos)\n Copyright 2011 Oracle and/or its affiliates. "
"All rights reserved.\n Use is subject to license terms.\n "
"Assembled 17 January 2014\n")
},
"platform.system": "SunOS"
},
{
"name": "OmniOS",
"uname_v": "omnios-10b9c79",
"result": {
"distribution_release": "OmniOS v11 r151012",
"distribution": "OmniOS",
"os_family": "Solaris",
"distribution_version": "r151012"
},
"platform.dist": [
"",
"",
""
],
# "platform.release": 'OmniOS',
"input": {
"/etc/release": (
" OmniOS v11 r151012\n Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.\n Use is subject to license terms.\n\n"
)
},
"platform.system": "SunOS"
},
{
"name": "Nexenta 3",
"uname_v": "NexentaOS_134f",
"result": {
"distribution_release": "Open Storage Appliance v3.1.6",
"distribution": "Nexenta",
"os_family": "Solaris",
"distribution_version": "3.1.6"
},
"platform.dist": [
"",
"",
""
],
"platform.release:": "",
"input": {
"/etc/release": (" Open Storage Appliance v3.1.6\n Copyright (c) 2014 Nexenta Systems, Inc. "
"All Rights Reserved.\n Copyright (c) 2011 Oracle. All Rights Reserved.\n "
"Use is subject to license terms.\n")
},
"platform.system": "SunOS"
},
{
"name": "Nexenta 4",
"uname_v": "NexentaOS_4:cd604cd066",
"result": {
"distribution_release": "Open Storage Appliance 4.0.3-FP2",
"distribution": "Nexenta",
"os_family": "Solaris",
"distribution_version": "4.0.3-FP2"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" Open Storage Appliance 4.0.3-FP2\n Copyright (c) 2014 Nexenta Systems, Inc. "
"All Rights Reserved.\n Copyright (c) 2010 Oracle. All Rights Reserved.\n "
"Use is subject to license terms.\n")
},
"platform.system": "SunOS"
},
{
"name": "Solaris 10",
"uname_v": "Generic_141445-09",
"result": {
"distribution_release": "Solaris 10 10/09 s10x_u8wos_08a X86",
"distribution": "Solaris",
"os_family": "Solaris",
"distribution_version": "10"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" Solaris 10 10/09 s10x_u8wos_08a X86\n Copyright 2009 Sun Microsystems, Inc. "
"All Rights Reserved.\n Use is subject to license terms.\n "
"Assembled 16 September 2009\n")
},
"platform.system": "SunOS"
},
{
"name": "Solaris 11",
"uname_v": "11.0",
"result": {
"distribution_release": "Oracle Solaris 11 11/11 X86",
"distribution": "Solaris",
"os_family": "Solaris",
"distribution_version": "11"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" Oracle Solaris 11 11/11 X86\n Copyright (c) 1983, 2011, Oracle and/or its affiliates. "
"All rights reserved.\n Assembled 18 October 2011\n")
},
"platform.system": "SunOS"
},
{
"name": "Solaris 11.3",
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (
" Oracle Solaris 11.3 X86\n Copyright (c) 1983, 2015, Oracle and/or its affiliates. "
"All rights reserved.\n Assembled 06 October 2015\n"
)
},
"platform.system": "SunOS",
"result": {
"distribution_release": "Oracle Solaris 11.3 X86",
"distribution": "Solaris",
"os_family": "Solaris",
"distribution_version": "11.3"
}
},
{
"name": "Solaris 10",
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" Oracle Solaris 10 1/13 s10x_u11wos_24a X86\n Copyright (c) 1983, 2013, Oracle and/or its affiliates. "
"All rights reserved.\n Assembled 17 January 2013\n")
},
"platform.system": "SunOS",
"result": {
"distribution_release": "Oracle Solaris 10 1/13 s10x_u11wos_24a X86",
"distribution": "Solaris",
"os_family": "Solaris",
"distribution_version": "10"
}
},
{
"name": "Fedora 22",
"platform.dist": [
"fedora",
"22",
"Twenty Two"
],
"input": {
"/etc/redhat-release": "Fedora release 22 (Twenty Two)\n",
"/etc/os-release": (
"NAME=Fedora\nVERSION=\"22 (Twenty Two)\"\nID=fedora\nVERSION_ID=22\nPRETTY_NAME=\"Fedora 22 (Twenty Two)\"\n"
"ANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:22\"\nHOME_URL=\"https://fedoraproject.org/\"\n"
"BUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=22\n"
"REDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=22\n"
"PRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy\n"
),
"/etc/system-release": "Fedora release 22 (Twenty Two)\n"
},
"result": {
"distribution_release": "Twenty Two",
"distribution": "Fedora",
"distribution_major_version": "22",
"os_family": "RedHat",
"distribution_version": "22"
}
},
{
"platform.dist": [
"fedora",
"25",
"Rawhide"
],
"input": {
"/etc/redhat-release": "Fedora release 25 (Rawhide)\n",
"/etc/os-release": (
"NAME=Fedora\nVERSION=\"25 (Workstation Edition)\"\nID=fedora\nVERSION_ID=25\n"
"PRETTY_NAME=\"Fedora 25 (Workstation Edition)\"\nANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:25\"\n"
"HOME_URL=\"https://fedoraproject.org/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n"
"REDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=rawhide\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\n"
"REDHAT_SUPPORT_PRODUCT_VERSION=rawhide\nPRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy\n"
"VARIANT=\"Workstation Edition\"\nVARIANT_ID=workstation\n"
),
"/etc/system-release": "Fedora release 25 (Rawhide)\n"
},
"name": "Fedora 25",
"result": {
"distribution_release": "Rawhide",
"distribution": "Fedora",
"distribution_major_version": "25",
"os_family": "RedHat",
"distribution_version": "25"
}
},
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/sourcemage-release": ("Source Mage GNU/Linux x86_64-pc-linux-gnu\nInstalled from tarball using chroot image (Grimoire 0.61-rc) "
"on Thu May 17 17:31:37 UTC 2012\n")
},
"name": "SMGL NA",
"result": {
"distribution_release": "NA",
"distribution": "SMGL",
"distribution_major_version": "NA",
"os_family": "SMGL",
"distribution_version": "NA"
}
},
# ArchLinux with an empty /etc/arch-release and a /etc/os-release with "NAME=Arch Linux"
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nID_LIKE=archlinux\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\n\n", # noqa
"/etc/arch-release": "",
},
"name": "Arch Linux NA",
"result": {
"distribution_release": "NA",
"distribution": "Archlinux",
"distribution_major_version": "NA",
"os_family": "Archlinux",
"distribution_version": "NA"
}
},
# ClearLinux https://github.com/ansible/ansible/issues/31501#issuecomment-340861535
{
"platform.dist": [
"Clear Linux OS for Intel Architecture",
"18450",
"clear-linux-os"
],
"input": {
"/usr/lib/os-release": '''
NAME="Clear Linux OS for Intel Architecture"
VERSION=1
ID=clear-linux-os
VERSION_ID=18450
PRETTY_NAME="Clear Linux OS for Intel Architecture"
ANSI_COLOR="1;35"
HOME_URL="https://clearlinux.org"
SUPPORT_URL="https://clearlinux.org"
BUG_REPORT_URL="mailto:dev@lists.clearlinux.org"
PRIVACY_POLICY_URL="http://www.intel.com/privacy"
'''
},
"name": "Clear Linux OS for Intel Architecture 1",
"result": {
"distribution_release": "clear-linux-os",
"distribution": "ClearLinux",
"distribution_major_version": "18450",
"os_family": "ClearLinux",
"distribution_version": "18450"
}
},
# ArchLinux with no /etc/arch-release but with a /etc/os-release with NAME=Arch Linux
# The fact needs to map 'Arch Linux' to 'Archlinux' for compat with 2.3 and earlier facts
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nID_LIKE=archlinux\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\n\n", # noqa
},
"name": "Arch Linux no arch-release NA",
"result": {
"distribution_release": "NA",
"distribution": "Archlinux",
"distribution_major_version": "NA",
"os_family": "Archlinux",
"distribution_version": "NA"
}
}
]
@pytest.mark.parametrize("stdin, testcase", product([{}], TESTSETS), ids=lambda x: x['name'], indirect=['stdin'])
def test_distribution_version(am, mocker, testcase):
    """tests the distribution parsing code of the Facts class

    testsets have
    * a name (for output/debugging only)
    * input files that are faked
      * those should be complete and also include "irrelevant" files that might be mistaken as coming from other distributions
      * all files that are not listed here are assumed to not exist at all
    * the output of pythons platform.dist()
    * results for the ansible variables distribution* and os_family
    """

    # prepare some mock functions to get the testdata in
    def mock_get_file_content(fname, default=None, strip=True):
        """give fake content if it exists, otherwise pretend the file is empty"""
        data = default
        if fname in testcase['input']:
            # for debugging
            print('faked %s for %s' % (fname, testcase['name']))
            data = testcase['input'][fname]
        # BUGFIX: the original always stripped the faked content, even with
        # strip=False; honor the flag the same way the real get_file_content does
        if strip and data is not None:
            data = data.strip()
        return data

    def mock_get_uname_version(am):
        # uname -v output, if the testcase defines one
        return testcase.get('uname_v', None)

    def mock_file_exists(fname, allow_empty=False):
        # only files listed in the testcase "exist"; empty ones count only
        # when allow_empty is set
        if fname not in testcase['input']:
            return False
        if allow_empty:
            return True
        return bool(len(testcase['input'][fname]))

    def mock_platform_system():
        return testcase.get('platform.system', 'Linux')

    def mock_platform_release():
        return testcase.get('platform.release', '')

    def mock_platform_version():
        return testcase.get('platform.version', '')

    mocker.patch('ansible.module_utils.facts.system.distribution.get_file_content', mock_get_file_content)
    mocker.patch('ansible.module_utils.facts.system.distribution.get_uname_version', mock_get_uname_version)
    mocker.patch('ansible.module_utils.facts.system.distribution._file_exists', mock_file_exists)
    mocker.patch('platform.dist', lambda: testcase['platform.dist'])
    mocker.patch('platform.system', mock_platform_system)
    mocker.patch('platform.release', mock_platform_release)
    mocker.patch('platform.version', mock_platform_version)

    # run Facts()
    distro_collector = DistributionFactCollector()
    generated_facts = distro_collector.collect(am)

    # compare with the expected output
    # testcase['result'] has a list of variables and values it expects Facts() to set
    for key, val in testcase['result'].items():
        assert key in generated_facts
        msg = 'Comparing value of %s on %s, should: %s, is: %s' %\
            (key, testcase['name'], val, generated_facts[key])
        assert generated_facts[key] == val, msg
# coding=utf-8
from __future__ import absolute_import
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2015 The OctoPrint Project - Released under terms of the AGPLv3 License"
import os
from jinja2.loaders import FileSystemLoader, TemplateNotFound, split_template_path
class FilteredFileSystemLoader(FileSystemLoader):
    """
    Jinja2 ``FileSystemLoader`` subclass that allows filtering templates.

    Only those templates will be accessible for whose on-disk paths the
    provided ``path_filter`` callable returns ``True``.  ``path_filter``
    receives the actual path on disk and should behave just like callables
    provided to Python's built-in ``filter`` function: ``True`` keeps the
    template, ``False`` hides it.

    If ``path_filter`` is not set or not callable, the loader behaves just
    like the regular Jinja2 ``FileSystemLoader``.
    """

    def __init__(self, searchpath, path_filter=None, **kwargs):
        FileSystemLoader.__init__(self, searchpath, **kwargs)
        # Callable applied to full on-disk paths; anything non-callable
        # disables filtering entirely.
        self.path_filter = path_filter

    def get_source(self, environment, template):
        if callable(self.path_filter):
            # Rebuild the (validated) relative path from the template name
            # and refuse to serve the template if the filter vetoes it for
            # any of the configured search paths.
            relative_path = os.path.join(*split_template_path(template))
            if not self._combined_filter(relative_path):
                raise TemplateNotFound(template)
        return FileSystemLoader.get_source(self, environment, template)

    def list_templates(self):
        templates = FileSystemLoader.list_templates(self)
        if callable(self.path_filter):
            # Keep only cleared templates; re-sort to present a stable,
            # alphabetical listing after filtering.
            templates = sorted(name for name in templates
                               if self._combined_filter(name))
        return templates

    def _combined_filter(self, path):
        # A relative template path is cleared only if, for every search path,
        # the file either does not exist there or passes the user filter.
        def _cleared(base):
            full_path = os.path.join(base, path)
            return not os.path.exists(full_path) or self.path_filter(full_path)

        return all(_cleared(base) for base in self.searchpath)
"""Tests for jslex."""
# originally from https://bitbucket.org/ned/jslex
from django.test import SimpleTestCase
from django.utils.jslex import JsLexer, prepare_js_for_gettext
class JsTokensTest(SimpleTestCase):
    """
    Table-driven tests for ``JsLexer``.

    ``LEX_CASES`` holds ``(javascript_source, expected_tokens)`` pairs.
    Expected tokens are spelled as ``"<token-name> <token-text>"`` strings,
    with whitespace tokens omitted.  One test method per pair is attached to
    this class at module import time.
    """
    LEX_CASES = [
        # ids
        ("a ABC $ _ a123", ["id a", "id ABC", "id $", "id _", "id a123"]),
        ("\\u1234 abc\\u0020 \\u0065_\\u0067", ["id \\u1234", "id abc\\u0020", "id \\u0065_\\u0067"]),
        # numbers
        ("123 1.234 0.123e-3 0 1E+40 1e1 .123", [
            "dnum 123", "dnum 1.234", "dnum 0.123e-3", "dnum 0", "dnum 1E+40",
            "dnum 1e1", "dnum .123",
        ]),
        ("0x1 0xabCD 0XABcd", ["hnum 0x1", "hnum 0xabCD", "hnum 0XABcd"]),
        ("010 0377 090", ["onum 010", "onum 0377", "dnum 0", "dnum 90"]),
        ("0xa123ghi", ["hnum 0xa123", "id ghi"]),
        # keywords
        ("function Function FUNCTION", ["keyword function", "id Function", "id FUNCTION"]),
        ("const constructor in inherits", ["keyword const", "id constructor", "keyword in", "id inherits"]),
        ("true true_enough", ["reserved true", "id true_enough"]),
        # strings
        (''' 'hello' "hello" ''', ["string 'hello'", 'string "hello"']),
        (r""" 'don\'t' "don\"t" '"' "'" '\'' "\"" """, [
            r"""string 'don\'t'""", r'''string "don\"t"''', r"""string '"'""",
            r'''string "'"''', r"""string '\''""", r'''string "\""'''
        ]),
        (r'"ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""', [r'string "ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""']),
        # comments
        ("a//b", ["id a", "linecomment //b"]),
        ("/****/a/=2//hello", ["comment /****/", "id a", "punct /=", "dnum 2", "linecomment //hello"]),
        ("/*\n * Header\n */\na=1;", ["comment /*\n * Header\n */", "id a", "punct =", "dnum 1", "punct ;"]),
        # punctuation
        ("a+++b", ["id a", "punct ++", "punct +", "id b"]),
        # regex
        (r"a=/a*/,1", ["id a", "punct =", "regex /a*/", "punct ,", "dnum 1"]),
        (r"a=/a*[^/]+/,1", ["id a", "punct =", "regex /a*[^/]+/", "punct ,", "dnum 1"]),
        (r"a=/a*\[^/,1", ["id a", "punct =", r"regex /a*\[^/", "punct ,", "dnum 1"]),
        (r"a=/\//,1", ["id a", "punct =", r"regex /\//", "punct ,", "dnum 1"]),
        # next two are from http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
        ("""for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct :", "regex /x:3;x<5;y</g", "punct /", "id i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
        ("""for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct /", "id x", "punct :", "dnum 3", "punct ;", "id x", "punct <", "dnum 5",
          "punct ;", "id y", "punct <", "regex /g/i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
        # Various "illegal" regexes that are valid according to the std.
        (r"""/????/, /++++/, /[----]/ """, ["regex /????/", "punct ,", "regex /++++/", "punct ,", "regex /[----]/"]),
        # Stress cases from http://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409  # NOQA
        (r"""/\[/""", [r"""regex /\[/"""]),
        (r"""/[i]/""", [r"""regex /[i]/"""]),
        (r"""/[\]]/""", [r"""regex /[\]]/"""]),
        (r"""/a[\]]/""", [r"""regex /a[\]]/"""]),
        (r"""/a[\]]b/""", [r"""regex /a[\]]b/"""]),
        (r"""/[\]/]/gi""", [r"""regex /[\]/]/gi"""]),
        (r"""/\[[^\]]+\]/gi""", [r"""regex /\[[^\]]+\]/gi"""]),
        (r"""
            rexl.re = {
            NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
            UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
            QUOTED_LITERAL: /^'(?:[^']|'')*'/,
            NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
            SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
            };
            """,  # NOQA
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
          "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",  # NOQA
          "punct }", "punct ;"
          ]),
        (r"""
            rexl.re = {
            NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
            UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
            QUOTED_LITERAL: /^'(?:[^']|'')*'/,
            NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
            SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
            };
            str = '"';
            """,  # NOQA
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
          "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",  # NOQA
          "punct }", "punct ;",
          "id str", "punct =", """string '"'""", "punct ;",
          ]),
        (r""" this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\")"; """,
         ["keyword this", "punct .", "id _js", "punct =", r'''string "e.str(\""''', "punct +", "keyword this",
          "punct .", "id value", "punct .", "id replace", "punct (", r"regex /\\/g", "punct ,", r'string "\\\\"',
          "punct )",
          "punct .", "id replace", "punct (", r'regex /"/g', "punct ,", r'string "\\\""', "punct )", "punct +",
          r'string "\")"', "punct ;"]),
    ]
def make_function(input, toks):
    """Create a test method verifying that lexing *input* yields *toks*."""
    def test_func(self):
        # Collect non-whitespace tokens as "<name> <text>" strings.
        seen = []
        for name, tok in JsLexer().lex(input):
            if name != 'ws':
                seen.append("%s %s" % (name, tok))
        self.assertEqual(seen, toks)
    return test_func
# Dynamically attach one generated test method per lexing case.
for i, (input, toks) in enumerate(JsTokensTest.LEX_CASES):
    setattr(JsTokensTest, "test_case_%d" % i, make_function(input, toks))
GETTEXT_CASES = (
(
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
"""
), (
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
"""
), (
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
"""
), (
r"""
s = "Hello \"th/foo/ere\"";
s = 'He\x23llo \'th/foo/ere\'';
s = 'slash quote \", just quote "';
""",
r"""
s = "Hello \"th/foo/ere\"";
s = "He\x23llo \'th/foo/ere\'";
s = "slash quote \", just quote \"";
"""
), (
r"""
s = "Line continuation\
continued /hello/ still the string";/hello/;
""",
r"""
s = "Line continuation\
continued /hello/ still the string";"REGEX";
"""
), (
r"""
var regex = /pattern/;
var regex2 = /matter/gm;
var regex3 = /[*/]+/gm.foo("hey");
""",
r"""
var regex = "REGEX";
var regex2 = "REGEX";
var regex3 = "REGEX".foo("hey");
"""
), (
r"""
for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}
""",
r"""
for (var x = a in foo && "</x>" || mot ? z:"REGEX"/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y<"REGEX") {xyz(x++);}
"""
), (
"""
\\u1234xyz = gettext('Hello there');
""", r"""
Uu1234xyz = gettext("Hello there");
"""
)
)
class JsToCForGettextTest(SimpleTestCase):
    # Intentionally empty: one test method per GETTEXT_CASES pair is attached
    # to this class dynamically at module import time.
    pass
def make_function(js, c):
    """Create a test method asserting that *js* prepares to exactly *c*."""
    def test_func(self):
        prepared = prepare_js_for_gettext(js)
        self.assertEqual(prepared, c)
    return test_func
# Dynamically attach one generated test method per gettext-preparation case.
for i, pair in enumerate(GETTEXT_CASES):
    setattr(JsToCForGettextTest, "test_case_%d" % i, make_function(*pair))
/*
* Copyright (c) 2020 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitoinline;
import org.junit.Test;
import org.mockito.Mockito;
import static junit.framework.TestCase.assertEquals;
public class InitializationTest {
@Test
public void assure_initialization_prior_to_instrumentation() {
@SuppressWarnings("unused")
SampleEnum mock = Mockito.mock(SampleEnum.class);
SampleEnum[] values = SampleEnum.values();
assertEquals("VALUE", values[0].name());
}
public enum SampleEnum {
VALUE
}
} | java | github | https://github.com/mockito/mockito | mockito-integration-tests/inline-mocks-tests/src/test/java/org/mockitoinline/InitializationTest.java |
use std::{borrow::Cow, fmt::Write as _};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
/// Benchmarks comparing `actix_router::Quoter::requote` against the
/// `percent_encoding` crate's `percent_decode` on two inputs: an all-ASCII
/// path with nothing percent-encoded, and the same path with every byte
/// percent-encoded.
fn compare_quoters(c: &mut Criterion) {
    let mut group = c.benchmark_group("Compare Quoters");

    // Quoter constructed with empty protected/unprotected byte sets.
    let quoter = actix_router::Quoter::new(b"", b"");

    // "%00%01...%7F": every ASCII byte, percent-encoded.
    let path_quoted = (0..=0x7f).fold(String::new(), |mut buf, c| {
        write!(&mut buf, "%{:02X}", c).unwrap();
        buf
    });
    // Every ASCII character as-is, nothing encoded.
    let path_unquoted = ('\u{00}'..='\u{7f}').collect::<String>();

    group.bench_function("quoter_unquoted", |b| {
        b.iter(|| {
            for _ in 0..10 {
                black_box(quoter.requote(path_unquoted.as_bytes()));
            }
        });
    });

    group.bench_function("percent_encode_unquoted", |b| {
        b.iter(|| {
            for _ in 0..10 {
                let decode = percent_encoding::percent_decode(path_unquoted.as_bytes());
                // Force materialization of the decoded Cow so the work isn't
                // optimized away.
                black_box(Into::<Cow<'_, [u8]>>::into(decode));
            }
        });
    });

    group.bench_function("quoter_quoted", |b| {
        b.iter(|| {
            for _ in 0..10 {
                black_box(quoter.requote(path_quoted.as_bytes()));
            }
        });
    });

    group.bench_function("percent_encode_quoted", |b| {
        b.iter(|| {
            for _ in 0..10 {
                let decode = percent_encoding::percent_decode(path_quoted.as_bytes());
                black_box(Into::<Cow<'_, [u8]>>::into(decode));
            }
        });
    });

    group.finish();
}
criterion_group!(benches, compare_quoters);
criterion_main!(benches); | rust | github | https://github.com/actix/actix-web | actix-router/benches/quoter.rs |
//// [tests/cases/compiler/arrowFunctionInConstructorArgument1.ts] ////
//// [arrowFunctionInConstructorArgument1.ts]
class C {
constructor(x: () => void) { }
}
var c = new C(() => { return asdf; } ) // should error
//// [arrowFunctionInConstructorArgument1.js]
"use strict";
class C {
constructor(x) { }
}
var c = new C(() => { return asdf; }); // should error | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/arrowFunctionInConstructorArgument1.js |
/* File generated by Tools/ssl/make_ssl_data.py */
/* Generated on 2025-03-26T13:48:34.811613+00:00 */
/* Generated from Git commit OpenSSL_1_1_1w-0-ge04bd3433f */
/* generated from args.lib2errnum */
/* Mapping from OpenSSL error "library" names to their ERR_LIB_* numbers.
 * Each entry is compiled in only when the corresponding ERR_LIB_* macro is
 * defined by the OpenSSL headers this build links against, so the table
 * automatically tracks the available OpenSSL version.  The list is
 * terminated by a {NULL, 0} sentinel. */
static struct py_ssl_library_code library_codes[] = {
#ifdef ERR_LIB_ASN1
    {"ASN1", ERR_LIB_ASN1},
#endif
#ifdef ERR_LIB_ASYNC
    {"ASYNC", ERR_LIB_ASYNC},
#endif
#ifdef ERR_LIB_BIO
    {"BIO", ERR_LIB_BIO},
#endif
#ifdef ERR_LIB_BN
    {"BN", ERR_LIB_BN},
#endif
#ifdef ERR_LIB_BUF
    {"BUF", ERR_LIB_BUF},
#endif
#ifdef ERR_LIB_CMS
    {"CMS", ERR_LIB_CMS},
#endif
#ifdef ERR_LIB_COMP
    {"COMP", ERR_LIB_COMP},
#endif
#ifdef ERR_LIB_CONF
    {"CONF", ERR_LIB_CONF},
#endif
#ifdef ERR_LIB_CRYPTO
    {"CRYPTO", ERR_LIB_CRYPTO},
#endif
#ifdef ERR_LIB_CT
    {"CT", ERR_LIB_CT},
#endif
#ifdef ERR_LIB_DH
    {"DH", ERR_LIB_DH},
#endif
#ifdef ERR_LIB_DSA
    {"DSA", ERR_LIB_DSA},
#endif
#ifdef ERR_LIB_DSO
    {"DSO", ERR_LIB_DSO},
#endif
#ifdef ERR_LIB_EC
    {"EC", ERR_LIB_EC},
#endif
#ifdef ERR_LIB_ECDH
    {"ECDH", ERR_LIB_ECDH},
#endif
#ifdef ERR_LIB_ECDSA
    {"ECDSA", ERR_LIB_ECDSA},
#endif
#ifdef ERR_LIB_ENGINE
    {"ENGINE", ERR_LIB_ENGINE},
#endif
#ifdef ERR_LIB_EVP
    {"EVP", ERR_LIB_EVP},
#endif
#ifdef ERR_LIB_FIPS
    {"FIPS", ERR_LIB_FIPS},
#endif
#ifdef ERR_LIB_HMAC
    {"HMAC", ERR_LIB_HMAC},
#endif
#ifdef ERR_LIB_JPAKE
    {"JPAKE", ERR_LIB_JPAKE},
#endif
#ifdef ERR_LIB_KDF
    {"KDF", ERR_LIB_KDF},
#endif
#ifdef ERR_LIB_METH
    {"METH", ERR_LIB_METH},
#endif
#ifdef ERR_LIB_NONE
    {"NONE", ERR_LIB_NONE},
#endif
#ifdef ERR_LIB_OBJ
    {"OBJ", ERR_LIB_OBJ},
#endif
#ifdef ERR_LIB_OCSP
    {"OCSP", ERR_LIB_OCSP},
#endif
#ifdef ERR_LIB_OSSL_STORE
    {"OSSL_STORE", ERR_LIB_OSSL_STORE},
#endif
#ifdef ERR_LIB_PEM
    {"PEM", ERR_LIB_PEM},
#endif
#ifdef ERR_LIB_PKCS12
    {"PKCS12", ERR_LIB_PKCS12},
#endif
#ifdef ERR_LIB_PKCS7
    {"PKCS7", ERR_LIB_PKCS7},
#endif
#ifdef ERR_LIB_PROXY
    {"PROXY", ERR_LIB_PROXY},
#endif
#ifdef ERR_LIB_RAND
    {"RAND", ERR_LIB_RAND},
#endif
#ifdef ERR_LIB_RSA
    {"RSA", ERR_LIB_RSA},
#endif
#ifdef ERR_LIB_RSAREF
    {"RSAREF", ERR_LIB_RSAREF},
#endif
#ifdef ERR_LIB_SM2
    {"SM2", ERR_LIB_SM2},
#endif
#ifdef ERR_LIB_SSL
    {"SSL", ERR_LIB_SSL},
#endif
#ifdef ERR_LIB_SSL2
    {"SSL2", ERR_LIB_SSL2},
#endif
#ifdef ERR_LIB_SSL23
    {"SSL23", ERR_LIB_SSL23},
#endif
#ifdef ERR_LIB_SSL3
    {"SSL3", ERR_LIB_SSL3},
#endif
#ifdef ERR_LIB_SYS
    {"SYS", ERR_LIB_SYS},
#endif
#ifdef ERR_LIB_TS
    {"TS", ERR_LIB_TS},
#endif
#ifdef ERR_LIB_UI
    {"UI", ERR_LIB_UI},
#endif
#ifdef ERR_LIB_USER
    {"USER", ERR_LIB_USER},
#endif
#ifdef ERR_LIB_X509
    {"X509", ERR_LIB_X509},
#endif
#ifdef ERR_LIB_X509V3
    {"X509V3", ERR_LIB_X509V3},
#endif
    {NULL, 0}  /* sentinel */
};
/* generated from args.reasons */
static struct py_ssl_error_code error_codes[] = {
#ifdef ASN1_R_ADDING_OBJECT
{"ADDING_OBJECT", ERR_LIB_ASN1, ASN1_R_ADDING_OBJECT},
#else
{"ADDING_OBJECT", 13, 171},
#endif
#ifdef ASN1_R_ASN1_PARSE_ERROR
{"ASN1_PARSE_ERROR", ERR_LIB_ASN1, ASN1_R_ASN1_PARSE_ERROR},
#else
{"ASN1_PARSE_ERROR", 13, 203},
#endif
#ifdef ASN1_R_ASN1_SIG_PARSE_ERROR
{"ASN1_SIG_PARSE_ERROR", ERR_LIB_ASN1, ASN1_R_ASN1_SIG_PARSE_ERROR},
#else
{"ASN1_SIG_PARSE_ERROR", 13, 204},
#endif
#ifdef ASN1_R_AUX_ERROR
{"AUX_ERROR", ERR_LIB_ASN1, ASN1_R_AUX_ERROR},
#else
{"AUX_ERROR", 13, 100},
#endif
#ifdef ASN1_R_BAD_OBJECT_HEADER
{"BAD_OBJECT_HEADER", ERR_LIB_ASN1, ASN1_R_BAD_OBJECT_HEADER},
#else
{"BAD_OBJECT_HEADER", 13, 102},
#endif
#ifdef ASN1_R_BAD_TEMPLATE
{"BAD_TEMPLATE", ERR_LIB_ASN1, ASN1_R_BAD_TEMPLATE},
#else
{"BAD_TEMPLATE", 13, 230},
#endif
#ifdef ASN1_R_BMPSTRING_IS_WRONG_LENGTH
{"BMPSTRING_IS_WRONG_LENGTH", ERR_LIB_ASN1, ASN1_R_BMPSTRING_IS_WRONG_LENGTH},
#else
{"BMPSTRING_IS_WRONG_LENGTH", 13, 214},
#endif
#ifdef ASN1_R_BN_LIB
{"BN_LIB", ERR_LIB_ASN1, ASN1_R_BN_LIB},
#else
{"BN_LIB", 13, 105},
#endif
#ifdef ASN1_R_BOOLEAN_IS_WRONG_LENGTH
{"BOOLEAN_IS_WRONG_LENGTH", ERR_LIB_ASN1, ASN1_R_BOOLEAN_IS_WRONG_LENGTH},
#else
{"BOOLEAN_IS_WRONG_LENGTH", 13, 106},
#endif
#ifdef ASN1_R_BUFFER_TOO_SMALL
{"BUFFER_TOO_SMALL", ERR_LIB_ASN1, ASN1_R_BUFFER_TOO_SMALL},
#else
{"BUFFER_TOO_SMALL", 13, 107},
#endif
#ifdef ASN1_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER
{"CIPHER_HAS_NO_OBJECT_IDENTIFIER", ERR_LIB_ASN1, ASN1_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER},
#else
{"CIPHER_HAS_NO_OBJECT_IDENTIFIER", 13, 108},
#endif
#ifdef ASN1_R_CONTEXT_NOT_INITIALISED
{"CONTEXT_NOT_INITIALISED", ERR_LIB_ASN1, ASN1_R_CONTEXT_NOT_INITIALISED},
#else
{"CONTEXT_NOT_INITIALISED", 13, 217},
#endif
#ifdef ASN1_R_DATA_IS_WRONG
{"DATA_IS_WRONG", ERR_LIB_ASN1, ASN1_R_DATA_IS_WRONG},
#else
{"DATA_IS_WRONG", 13, 109},
#endif
#ifdef ASN1_R_DECODE_ERROR
{"DECODE_ERROR", ERR_LIB_ASN1, ASN1_R_DECODE_ERROR},
#else
{"DECODE_ERROR", 13, 110},
#endif
#ifdef ASN1_R_DEPTH_EXCEEDED
{"DEPTH_EXCEEDED", ERR_LIB_ASN1, ASN1_R_DEPTH_EXCEEDED},
#else
{"DEPTH_EXCEEDED", 13, 174},
#endif
#ifdef ASN1_R_DIGEST_AND_KEY_TYPE_NOT_SUPPORTED
{"DIGEST_AND_KEY_TYPE_NOT_SUPPORTED", ERR_LIB_ASN1, ASN1_R_DIGEST_AND_KEY_TYPE_NOT_SUPPORTED},
#else
{"DIGEST_AND_KEY_TYPE_NOT_SUPPORTED", 13, 198},
#endif
#ifdef ASN1_R_ENCODE_ERROR
{"ENCODE_ERROR", ERR_LIB_ASN1, ASN1_R_ENCODE_ERROR},
#else
{"ENCODE_ERROR", 13, 112},
#endif
#ifdef ASN1_R_ERROR_GETTING_TIME
{"ERROR_GETTING_TIME", ERR_LIB_ASN1, ASN1_R_ERROR_GETTING_TIME},
#else
{"ERROR_GETTING_TIME", 13, 173},
#endif
#ifdef ASN1_R_ERROR_LOADING_SECTION
{"ERROR_LOADING_SECTION", ERR_LIB_ASN1, ASN1_R_ERROR_LOADING_SECTION},
#else
{"ERROR_LOADING_SECTION", 13, 172},
#endif
#ifdef ASN1_R_ERROR_SETTING_CIPHER_PARAMS
{"ERROR_SETTING_CIPHER_PARAMS", ERR_LIB_ASN1, ASN1_R_ERROR_SETTING_CIPHER_PARAMS},
#else
{"ERROR_SETTING_CIPHER_PARAMS", 13, 114},
#endif
#ifdef ASN1_R_EXPECTING_AN_INTEGER
{"EXPECTING_AN_INTEGER", ERR_LIB_ASN1, ASN1_R_EXPECTING_AN_INTEGER},
#else
{"EXPECTING_AN_INTEGER", 13, 115},
#endif
#ifdef ASN1_R_EXPECTING_AN_OBJECT
{"EXPECTING_AN_OBJECT", ERR_LIB_ASN1, ASN1_R_EXPECTING_AN_OBJECT},
#else
{"EXPECTING_AN_OBJECT", 13, 116},
#endif
#ifdef ASN1_R_EXPLICIT_LENGTH_MISMATCH
{"EXPLICIT_LENGTH_MISMATCH", ERR_LIB_ASN1, ASN1_R_EXPLICIT_LENGTH_MISMATCH},
#else
{"EXPLICIT_LENGTH_MISMATCH", 13, 119},
#endif
#ifdef ASN1_R_EXPLICIT_TAG_NOT_CONSTRUCTED
{"EXPLICIT_TAG_NOT_CONSTRUCTED", ERR_LIB_ASN1, ASN1_R_EXPLICIT_TAG_NOT_CONSTRUCTED},
#else
{"EXPLICIT_TAG_NOT_CONSTRUCTED", 13, 120},
#endif
#ifdef ASN1_R_FIELD_MISSING
{"FIELD_MISSING", ERR_LIB_ASN1, ASN1_R_FIELD_MISSING},
#else
{"FIELD_MISSING", 13, 121},
#endif
#ifdef ASN1_R_FIRST_NUM_TOO_LARGE
{"FIRST_NUM_TOO_LARGE", ERR_LIB_ASN1, ASN1_R_FIRST_NUM_TOO_LARGE},
#else
{"FIRST_NUM_TOO_LARGE", 13, 122},
#endif
#ifdef ASN1_R_HEADER_TOO_LONG
{"HEADER_TOO_LONG", ERR_LIB_ASN1, ASN1_R_HEADER_TOO_LONG},
#else
{"HEADER_TOO_LONG", 13, 123},
#endif
#ifdef ASN1_R_ILLEGAL_BITSTRING_FORMAT
{"ILLEGAL_BITSTRING_FORMAT", ERR_LIB_ASN1, ASN1_R_ILLEGAL_BITSTRING_FORMAT},
#else
{"ILLEGAL_BITSTRING_FORMAT", 13, 175},
#endif
#ifdef ASN1_R_ILLEGAL_BOOLEAN
{"ILLEGAL_BOOLEAN", ERR_LIB_ASN1, ASN1_R_ILLEGAL_BOOLEAN},
#else
{"ILLEGAL_BOOLEAN", 13, 176},
#endif
#ifdef ASN1_R_ILLEGAL_CHARACTERS
{"ILLEGAL_CHARACTERS", ERR_LIB_ASN1, ASN1_R_ILLEGAL_CHARACTERS},
#else
{"ILLEGAL_CHARACTERS", 13, 124},
#endif
#ifdef ASN1_R_ILLEGAL_FORMAT
{"ILLEGAL_FORMAT", ERR_LIB_ASN1, ASN1_R_ILLEGAL_FORMAT},
#else
{"ILLEGAL_FORMAT", 13, 177},
#endif
#ifdef ASN1_R_ILLEGAL_HEX
{"ILLEGAL_HEX", ERR_LIB_ASN1, ASN1_R_ILLEGAL_HEX},
#else
{"ILLEGAL_HEX", 13, 178},
#endif
#ifdef ASN1_R_ILLEGAL_IMPLICIT_TAG
{"ILLEGAL_IMPLICIT_TAG", ERR_LIB_ASN1, ASN1_R_ILLEGAL_IMPLICIT_TAG},
#else
{"ILLEGAL_IMPLICIT_TAG", 13, 179},
#endif
#ifdef ASN1_R_ILLEGAL_INTEGER
{"ILLEGAL_INTEGER", ERR_LIB_ASN1, ASN1_R_ILLEGAL_INTEGER},
#else
{"ILLEGAL_INTEGER", 13, 180},
#endif
#ifdef ASN1_R_ILLEGAL_NEGATIVE_VALUE
{"ILLEGAL_NEGATIVE_VALUE", ERR_LIB_ASN1, ASN1_R_ILLEGAL_NEGATIVE_VALUE},
#else
{"ILLEGAL_NEGATIVE_VALUE", 13, 226},
#endif
#ifdef ASN1_R_ILLEGAL_NESTED_TAGGING
{"ILLEGAL_NESTED_TAGGING", ERR_LIB_ASN1, ASN1_R_ILLEGAL_NESTED_TAGGING},
#else
{"ILLEGAL_NESTED_TAGGING", 13, 181},
#endif
#ifdef ASN1_R_ILLEGAL_NULL
{"ILLEGAL_NULL", ERR_LIB_ASN1, ASN1_R_ILLEGAL_NULL},
#else
{"ILLEGAL_NULL", 13, 125},
#endif
#ifdef ASN1_R_ILLEGAL_NULL_VALUE
{"ILLEGAL_NULL_VALUE", ERR_LIB_ASN1, ASN1_R_ILLEGAL_NULL_VALUE},
#else
{"ILLEGAL_NULL_VALUE", 13, 182},
#endif
#ifdef ASN1_R_ILLEGAL_OBJECT
{"ILLEGAL_OBJECT", ERR_LIB_ASN1, ASN1_R_ILLEGAL_OBJECT},
#else
{"ILLEGAL_OBJECT", 13, 183},
#endif
#ifdef ASN1_R_ILLEGAL_OPTIONAL_ANY
{"ILLEGAL_OPTIONAL_ANY", ERR_LIB_ASN1, ASN1_R_ILLEGAL_OPTIONAL_ANY},
#else
{"ILLEGAL_OPTIONAL_ANY", 13, 126},
#endif
#ifdef ASN1_R_ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE
{"ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE", ERR_LIB_ASN1, ASN1_R_ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE},
#else
{"ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE", 13, 170},
#endif
#ifdef ASN1_R_ILLEGAL_PADDING
{"ILLEGAL_PADDING", ERR_LIB_ASN1, ASN1_R_ILLEGAL_PADDING},
#else
{"ILLEGAL_PADDING", 13, 221},
#endif
#ifdef ASN1_R_ILLEGAL_TAGGED_ANY
{"ILLEGAL_TAGGED_ANY", ERR_LIB_ASN1, ASN1_R_ILLEGAL_TAGGED_ANY},
#else
{"ILLEGAL_TAGGED_ANY", 13, 127},
#endif
#ifdef ASN1_R_ILLEGAL_TIME_VALUE
{"ILLEGAL_TIME_VALUE", ERR_LIB_ASN1, ASN1_R_ILLEGAL_TIME_VALUE},
#else
{"ILLEGAL_TIME_VALUE", 13, 184},
#endif
#ifdef ASN1_R_ILLEGAL_ZERO_CONTENT
{"ILLEGAL_ZERO_CONTENT", ERR_LIB_ASN1, ASN1_R_ILLEGAL_ZERO_CONTENT},
#else
{"ILLEGAL_ZERO_CONTENT", 13, 222},
#endif
#ifdef ASN1_R_INTEGER_NOT_ASCII_FORMAT
{"INTEGER_NOT_ASCII_FORMAT", ERR_LIB_ASN1, ASN1_R_INTEGER_NOT_ASCII_FORMAT},
#else
{"INTEGER_NOT_ASCII_FORMAT", 13, 185},
#endif
#ifdef ASN1_R_INTEGER_TOO_LARGE_FOR_LONG
{"INTEGER_TOO_LARGE_FOR_LONG", ERR_LIB_ASN1, ASN1_R_INTEGER_TOO_LARGE_FOR_LONG},
#else
{"INTEGER_TOO_LARGE_FOR_LONG", 13, 128},
#endif
#ifdef ASN1_R_INVALID_BIT_STRING_BITS_LEFT
{"INVALID_BIT_STRING_BITS_LEFT", ERR_LIB_ASN1, ASN1_R_INVALID_BIT_STRING_BITS_LEFT},
#else
{"INVALID_BIT_STRING_BITS_LEFT", 13, 220},
#endif
#ifdef ASN1_R_INVALID_BMPSTRING_LENGTH
{"INVALID_BMPSTRING_LENGTH", ERR_LIB_ASN1, ASN1_R_INVALID_BMPSTRING_LENGTH},
#else
{"INVALID_BMPSTRING_LENGTH", 13, 129},
#endif
#ifdef ASN1_R_INVALID_DIGIT
{"INVALID_DIGIT", ERR_LIB_ASN1, ASN1_R_INVALID_DIGIT},
#else
{"INVALID_DIGIT", 13, 130},
#endif
#ifdef ASN1_R_INVALID_MIME_TYPE
{"INVALID_MIME_TYPE", ERR_LIB_ASN1, ASN1_R_INVALID_MIME_TYPE},
#else
{"INVALID_MIME_TYPE", 13, 205},
#endif
#ifdef ASN1_R_INVALID_MODIFIER
{"INVALID_MODIFIER", ERR_LIB_ASN1, ASN1_R_INVALID_MODIFIER},
#else
{"INVALID_MODIFIER", 13, 186},
#endif
#ifdef ASN1_R_INVALID_NUMBER
{"INVALID_NUMBER", ERR_LIB_ASN1, ASN1_R_INVALID_NUMBER},
#else
{"INVALID_NUMBER", 13, 187},
#endif
#ifdef ASN1_R_INVALID_OBJECT_ENCODING
{"INVALID_OBJECT_ENCODING", ERR_LIB_ASN1, ASN1_R_INVALID_OBJECT_ENCODING},
#else
{"INVALID_OBJECT_ENCODING", 13, 216},
#endif
#ifdef ASN1_R_INVALID_SCRYPT_PARAMETERS
{"INVALID_SCRYPT_PARAMETERS", ERR_LIB_ASN1, ASN1_R_INVALID_SCRYPT_PARAMETERS},
#else
{"INVALID_SCRYPT_PARAMETERS", 13, 227},
#endif
#ifdef ASN1_R_INVALID_SEPARATOR
{"INVALID_SEPARATOR", ERR_LIB_ASN1, ASN1_R_INVALID_SEPARATOR},
#else
{"INVALID_SEPARATOR", 13, 131},
#endif
#ifdef ASN1_R_INVALID_STRING_TABLE_VALUE
{"INVALID_STRING_TABLE_VALUE", ERR_LIB_ASN1, ASN1_R_INVALID_STRING_TABLE_VALUE},
#else
{"INVALID_STRING_TABLE_VALUE", 13, 218},
#endif
#ifdef ASN1_R_INVALID_UNIVERSALSTRING_LENGTH
{"INVALID_UNIVERSALSTRING_LENGTH", ERR_LIB_ASN1, ASN1_R_INVALID_UNIVERSALSTRING_LENGTH},
#else
{"INVALID_UNIVERSALSTRING_LENGTH", 13, 133},
#endif
#ifdef ASN1_R_INVALID_UTF8STRING
{"INVALID_UTF8STRING", ERR_LIB_ASN1, ASN1_R_INVALID_UTF8STRING},
#else
{"INVALID_UTF8STRING", 13, 134},
#endif
#ifdef ASN1_R_INVALID_VALUE
{"INVALID_VALUE", ERR_LIB_ASN1, ASN1_R_INVALID_VALUE},
#else
{"INVALID_VALUE", 13, 219},
#endif
#ifdef ASN1_R_LIST_ERROR
{"LIST_ERROR", ERR_LIB_ASN1, ASN1_R_LIST_ERROR},
#else
{"LIST_ERROR", 13, 188},
#endif
#ifdef ASN1_R_MIME_NO_CONTENT_TYPE
{"MIME_NO_CONTENT_TYPE", ERR_LIB_ASN1, ASN1_R_MIME_NO_CONTENT_TYPE},
#else
{"MIME_NO_CONTENT_TYPE", 13, 206},
#endif
#ifdef ASN1_R_MIME_PARSE_ERROR
{"MIME_PARSE_ERROR", ERR_LIB_ASN1, ASN1_R_MIME_PARSE_ERROR},
#else
{"MIME_PARSE_ERROR", 13, 207},
#endif
#ifdef ASN1_R_MIME_SIG_PARSE_ERROR
{"MIME_SIG_PARSE_ERROR", ERR_LIB_ASN1, ASN1_R_MIME_SIG_PARSE_ERROR},
#else
{"MIME_SIG_PARSE_ERROR", 13, 208},
#endif
#ifdef ASN1_R_MISSING_EOC
{"MISSING_EOC", ERR_LIB_ASN1, ASN1_R_MISSING_EOC},
#else
{"MISSING_EOC", 13, 137},
#endif
#ifdef ASN1_R_MISSING_SECOND_NUMBER
{"MISSING_SECOND_NUMBER", ERR_LIB_ASN1, ASN1_R_MISSING_SECOND_NUMBER},
#else
{"MISSING_SECOND_NUMBER", 13, 138},
#endif
#ifdef ASN1_R_MISSING_VALUE
{"MISSING_VALUE", ERR_LIB_ASN1, ASN1_R_MISSING_VALUE},
#else
{"MISSING_VALUE", 13, 189},
#endif
#ifdef ASN1_R_MSTRING_NOT_UNIVERSAL
{"MSTRING_NOT_UNIVERSAL", ERR_LIB_ASN1, ASN1_R_MSTRING_NOT_UNIVERSAL},
#else
{"MSTRING_NOT_UNIVERSAL", 13, 139},
#endif
#ifdef ASN1_R_MSTRING_WRONG_TAG
{"MSTRING_WRONG_TAG", ERR_LIB_ASN1, ASN1_R_MSTRING_WRONG_TAG},
#else
{"MSTRING_WRONG_TAG", 13, 140},
#endif
#ifdef ASN1_R_NESTED_ASN1_STRING
{"NESTED_ASN1_STRING", ERR_LIB_ASN1, ASN1_R_NESTED_ASN1_STRING},
#else
{"NESTED_ASN1_STRING", 13, 197},
#endif
#ifdef ASN1_R_NESTED_TOO_DEEP
{"NESTED_TOO_DEEP", ERR_LIB_ASN1, ASN1_R_NESTED_TOO_DEEP},
#else
{"NESTED_TOO_DEEP", 13, 201},
#endif
#ifdef ASN1_R_NON_HEX_CHARACTERS
{"NON_HEX_CHARACTERS", ERR_LIB_ASN1, ASN1_R_NON_HEX_CHARACTERS},
#else
{"NON_HEX_CHARACTERS", 13, 141},
#endif
#ifdef ASN1_R_NOT_ASCII_FORMAT
{"NOT_ASCII_FORMAT", ERR_LIB_ASN1, ASN1_R_NOT_ASCII_FORMAT},
#else
{"NOT_ASCII_FORMAT", 13, 190},
#endif
#ifdef ASN1_R_NOT_ENOUGH_DATA
{"NOT_ENOUGH_DATA", ERR_LIB_ASN1, ASN1_R_NOT_ENOUGH_DATA},
#else
{"NOT_ENOUGH_DATA", 13, 142},
#endif
#ifdef ASN1_R_NO_CONTENT_TYPE
{"NO_CONTENT_TYPE", ERR_LIB_ASN1, ASN1_R_NO_CONTENT_TYPE},
#else
{"NO_CONTENT_TYPE", 13, 209},
#endif
#ifdef ASN1_R_NO_MATCHING_CHOICE_TYPE
{"NO_MATCHING_CHOICE_TYPE", ERR_LIB_ASN1, ASN1_R_NO_MATCHING_CHOICE_TYPE},
#else
{"NO_MATCHING_CHOICE_TYPE", 13, 143},
#endif
#ifdef ASN1_R_NO_MULTIPART_BODY_FAILURE
{"NO_MULTIPART_BODY_FAILURE", ERR_LIB_ASN1, ASN1_R_NO_MULTIPART_BODY_FAILURE},
#else
{"NO_MULTIPART_BODY_FAILURE", 13, 210},
#endif
#ifdef ASN1_R_NO_MULTIPART_BOUNDARY
{"NO_MULTIPART_BOUNDARY", ERR_LIB_ASN1, ASN1_R_NO_MULTIPART_BOUNDARY},
#else
{"NO_MULTIPART_BOUNDARY", 13, 211},
#endif
#ifdef ASN1_R_NO_SIG_CONTENT_TYPE
{"NO_SIG_CONTENT_TYPE", ERR_LIB_ASN1, ASN1_R_NO_SIG_CONTENT_TYPE},
#else
{"NO_SIG_CONTENT_TYPE", 13, 212},
#endif
#ifdef ASN1_R_NULL_IS_WRONG_LENGTH
{"NULL_IS_WRONG_LENGTH", ERR_LIB_ASN1, ASN1_R_NULL_IS_WRONG_LENGTH},
#else
{"NULL_IS_WRONG_LENGTH", 13, 144},
#endif
#ifdef ASN1_R_OBJECT_NOT_ASCII_FORMAT
{"OBJECT_NOT_ASCII_FORMAT", ERR_LIB_ASN1, ASN1_R_OBJECT_NOT_ASCII_FORMAT},
#else
{"OBJECT_NOT_ASCII_FORMAT", 13, 191},
#endif
#ifdef ASN1_R_ODD_NUMBER_OF_CHARS
{"ODD_NUMBER_OF_CHARS", ERR_LIB_ASN1, ASN1_R_ODD_NUMBER_OF_CHARS},
#else
{"ODD_NUMBER_OF_CHARS", 13, 145},
#endif
#ifdef ASN1_R_SECOND_NUMBER_TOO_LARGE
{"SECOND_NUMBER_TOO_LARGE", ERR_LIB_ASN1, ASN1_R_SECOND_NUMBER_TOO_LARGE},
#else
{"SECOND_NUMBER_TOO_LARGE", 13, 147},
#endif
#ifdef ASN1_R_SEQUENCE_LENGTH_MISMATCH
{"SEQUENCE_LENGTH_MISMATCH", ERR_LIB_ASN1, ASN1_R_SEQUENCE_LENGTH_MISMATCH},
#else
{"SEQUENCE_LENGTH_MISMATCH", 13, 148},
#endif
#ifdef ASN1_R_SEQUENCE_NOT_CONSTRUCTED
{"SEQUENCE_NOT_CONSTRUCTED", ERR_LIB_ASN1, ASN1_R_SEQUENCE_NOT_CONSTRUCTED},
#else
{"SEQUENCE_NOT_CONSTRUCTED", 13, 149},
#endif
#ifdef ASN1_R_SEQUENCE_OR_SET_NEEDS_CONFIG
{"SEQUENCE_OR_SET_NEEDS_CONFIG", ERR_LIB_ASN1, ASN1_R_SEQUENCE_OR_SET_NEEDS_CONFIG},
#else
{"SEQUENCE_OR_SET_NEEDS_CONFIG", 13, 192},
#endif
#ifdef ASN1_R_SHORT_LINE
{"SHORT_LINE", ERR_LIB_ASN1, ASN1_R_SHORT_LINE},
#else
{"SHORT_LINE", 13, 150},
#endif
#ifdef ASN1_R_SIG_INVALID_MIME_TYPE
{"SIG_INVALID_MIME_TYPE", ERR_LIB_ASN1, ASN1_R_SIG_INVALID_MIME_TYPE},
#else
{"SIG_INVALID_MIME_TYPE", 13, 213},
#endif
#ifdef ASN1_R_STREAMING_NOT_SUPPORTED
{"STREAMING_NOT_SUPPORTED", ERR_LIB_ASN1, ASN1_R_STREAMING_NOT_SUPPORTED},
#else
{"STREAMING_NOT_SUPPORTED", 13, 202},
#endif
#ifdef ASN1_R_STRING_TOO_LONG
{"STRING_TOO_LONG", ERR_LIB_ASN1, ASN1_R_STRING_TOO_LONG},
#else
{"STRING_TOO_LONG", 13, 151},
#endif
#ifdef ASN1_R_STRING_TOO_SHORT
{"STRING_TOO_SHORT", ERR_LIB_ASN1, ASN1_R_STRING_TOO_SHORT},
#else
{"STRING_TOO_SHORT", 13, 152},
#endif
#ifdef ASN1_R_THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD
{"THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD", ERR_LIB_ASN1, ASN1_R_THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD},
#else
{"THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD", 13, 154},
#endif
#ifdef ASN1_R_TIME_NOT_ASCII_FORMAT
{"TIME_NOT_ASCII_FORMAT", ERR_LIB_ASN1, ASN1_R_TIME_NOT_ASCII_FORMAT},
#else
{"TIME_NOT_ASCII_FORMAT", 13, 193},
#endif
#ifdef ASN1_R_TOO_LARGE
{"TOO_LARGE", ERR_LIB_ASN1, ASN1_R_TOO_LARGE},
#else
{"TOO_LARGE", 13, 223},
#endif
#ifdef ASN1_R_TOO_LONG
{"TOO_LONG", ERR_LIB_ASN1, ASN1_R_TOO_LONG},
#else
{"TOO_LONG", 13, 155},
#endif
#ifdef ASN1_R_TOO_SMALL
{"TOO_SMALL", ERR_LIB_ASN1, ASN1_R_TOO_SMALL},
#else
{"TOO_SMALL", 13, 224},
#endif
#ifdef ASN1_R_TYPE_NOT_CONSTRUCTED
{"TYPE_NOT_CONSTRUCTED", ERR_LIB_ASN1, ASN1_R_TYPE_NOT_CONSTRUCTED},
#else
{"TYPE_NOT_CONSTRUCTED", 13, 156},
#endif
#ifdef ASN1_R_TYPE_NOT_PRIMITIVE
{"TYPE_NOT_PRIMITIVE", ERR_LIB_ASN1, ASN1_R_TYPE_NOT_PRIMITIVE},
#else
{"TYPE_NOT_PRIMITIVE", 13, 195},
#endif
#ifdef ASN1_R_UNEXPECTED_EOC
{"UNEXPECTED_EOC", ERR_LIB_ASN1, ASN1_R_UNEXPECTED_EOC},
#else
{"UNEXPECTED_EOC", 13, 159},
#endif
#ifdef ASN1_R_UNIVERSALSTRING_IS_WRONG_LENGTH
{"UNIVERSALSTRING_IS_WRONG_LENGTH", ERR_LIB_ASN1, ASN1_R_UNIVERSALSTRING_IS_WRONG_LENGTH},
#else
{"UNIVERSALSTRING_IS_WRONG_LENGTH", 13, 215},
#endif
#ifdef ASN1_R_UNKNOWN_FORMAT
{"UNKNOWN_FORMAT", ERR_LIB_ASN1, ASN1_R_UNKNOWN_FORMAT},
#else
{"UNKNOWN_FORMAT", 13, 160},
#endif
#ifdef ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM
{"UNKNOWN_MESSAGE_DIGEST_ALGORITHM", ERR_LIB_ASN1, ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM},
#else
{"UNKNOWN_MESSAGE_DIGEST_ALGORITHM", 13, 161},
#endif
#ifdef ASN1_R_UNKNOWN_OBJECT_TYPE
{"UNKNOWN_OBJECT_TYPE", ERR_LIB_ASN1, ASN1_R_UNKNOWN_OBJECT_TYPE},
#else
{"UNKNOWN_OBJECT_TYPE", 13, 162},
#endif
#ifdef ASN1_R_UNKNOWN_PUBLIC_KEY_TYPE
{"UNKNOWN_PUBLIC_KEY_TYPE", ERR_LIB_ASN1, ASN1_R_UNKNOWN_PUBLIC_KEY_TYPE},
#else
{"UNKNOWN_PUBLIC_KEY_TYPE", 13, 163},
#endif
#ifdef ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM
{"UNKNOWN_SIGNATURE_ALGORITHM", ERR_LIB_ASN1, ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM},
#else
{"UNKNOWN_SIGNATURE_ALGORITHM", 13, 199},
#endif
#ifdef ASN1_R_UNKNOWN_TAG
{"UNKNOWN_TAG", ERR_LIB_ASN1, ASN1_R_UNKNOWN_TAG},
#else
{"UNKNOWN_TAG", 13, 194},
#endif
#ifdef ASN1_R_UNSUPPORTED_ANY_DEFINED_BY_TYPE
{"UNSUPPORTED_ANY_DEFINED_BY_TYPE", ERR_LIB_ASN1, ASN1_R_UNSUPPORTED_ANY_DEFINED_BY_TYPE},
#else
{"UNSUPPORTED_ANY_DEFINED_BY_TYPE", 13, 164},
#endif
#ifdef ASN1_R_UNSUPPORTED_CIPHER
{"UNSUPPORTED_CIPHER", ERR_LIB_ASN1, ASN1_R_UNSUPPORTED_CIPHER},
#else
{"UNSUPPORTED_CIPHER", 13, 228},
#endif
#ifdef ASN1_R_UNSUPPORTED_PUBLIC_KEY_TYPE
{"UNSUPPORTED_PUBLIC_KEY_TYPE", ERR_LIB_ASN1, ASN1_R_UNSUPPORTED_PUBLIC_KEY_TYPE},
#else
{"UNSUPPORTED_PUBLIC_KEY_TYPE", 13, 167},
#endif
#ifdef ASN1_R_UNSUPPORTED_TYPE
{"UNSUPPORTED_TYPE", ERR_LIB_ASN1, ASN1_R_UNSUPPORTED_TYPE},
#else
{"UNSUPPORTED_TYPE", 13, 196},
#endif
#ifdef ASN1_R_WRONG_INTEGER_TYPE
{"WRONG_INTEGER_TYPE", ERR_LIB_ASN1, ASN1_R_WRONG_INTEGER_TYPE},
#else
{"WRONG_INTEGER_TYPE", 13, 225},
#endif
#ifdef ASN1_R_WRONG_PUBLIC_KEY_TYPE
{"WRONG_PUBLIC_KEY_TYPE", ERR_LIB_ASN1, ASN1_R_WRONG_PUBLIC_KEY_TYPE},
#else
{"WRONG_PUBLIC_KEY_TYPE", 13, 200},
#endif
#ifdef ASN1_R_WRONG_TAG
{"WRONG_TAG", ERR_LIB_ASN1, ASN1_R_WRONG_TAG},
#else
{"WRONG_TAG", 13, 168},
#endif
#ifdef ASYNC_R_FAILED_TO_SET_POOL
{"FAILED_TO_SET_POOL", ERR_LIB_ASYNC, ASYNC_R_FAILED_TO_SET_POOL},
#else
{"FAILED_TO_SET_POOL", 51, 101},
#endif
#ifdef ASYNC_R_FAILED_TO_SWAP_CONTEXT
{"FAILED_TO_SWAP_CONTEXT", ERR_LIB_ASYNC, ASYNC_R_FAILED_TO_SWAP_CONTEXT},
#else
{"FAILED_TO_SWAP_CONTEXT", 51, 102},
#endif
#ifdef ASYNC_R_INIT_FAILED
{"INIT_FAILED", ERR_LIB_ASYNC, ASYNC_R_INIT_FAILED},
#else
{"INIT_FAILED", 51, 105},
#endif
#ifdef ASYNC_R_INVALID_POOL_SIZE
{"INVALID_POOL_SIZE", ERR_LIB_ASYNC, ASYNC_R_INVALID_POOL_SIZE},
#else
{"INVALID_POOL_SIZE", 51, 103},
#endif
#ifdef BIO_R_ACCEPT_ERROR
{"ACCEPT_ERROR", ERR_LIB_BIO, BIO_R_ACCEPT_ERROR},
#else
{"ACCEPT_ERROR", 32, 100},
#endif
#ifdef BIO_R_ADDRINFO_ADDR_IS_NOT_AF_INET
{"ADDRINFO_ADDR_IS_NOT_AF_INET", ERR_LIB_BIO, BIO_R_ADDRINFO_ADDR_IS_NOT_AF_INET},
#else
{"ADDRINFO_ADDR_IS_NOT_AF_INET", 32, 141},
#endif
#ifdef BIO_R_AMBIGUOUS_HOST_OR_SERVICE
{"AMBIGUOUS_HOST_OR_SERVICE", ERR_LIB_BIO, BIO_R_AMBIGUOUS_HOST_OR_SERVICE},
#else
{"AMBIGUOUS_HOST_OR_SERVICE", 32, 129},
#endif
#ifdef BIO_R_BAD_FOPEN_MODE
{"BAD_FOPEN_MODE", ERR_LIB_BIO, BIO_R_BAD_FOPEN_MODE},
#else
{"BAD_FOPEN_MODE", 32, 101},
#endif
#ifdef BIO_R_BROKEN_PIPE
{"BROKEN_PIPE", ERR_LIB_BIO, BIO_R_BROKEN_PIPE},
#else
{"BROKEN_PIPE", 32, 124},
#endif
#ifdef BIO_R_CONNECT_ERROR
{"CONNECT_ERROR", ERR_LIB_BIO, BIO_R_CONNECT_ERROR},
#else
{"CONNECT_ERROR", 32, 103},
#endif
#ifdef BIO_R_GETHOSTBYNAME_ADDR_IS_NOT_AF_INET
{"GETHOSTBYNAME_ADDR_IS_NOT_AF_INET", ERR_LIB_BIO, BIO_R_GETHOSTBYNAME_ADDR_IS_NOT_AF_INET},
#else
{"GETHOSTBYNAME_ADDR_IS_NOT_AF_INET", 32, 107},
#endif
#ifdef BIO_R_GETSOCKNAME_ERROR
{"GETSOCKNAME_ERROR", ERR_LIB_BIO, BIO_R_GETSOCKNAME_ERROR},
#else
{"GETSOCKNAME_ERROR", 32, 132},
#endif
#ifdef BIO_R_GETSOCKNAME_TRUNCATED_ADDRESS
{"GETSOCKNAME_TRUNCATED_ADDRESS", ERR_LIB_BIO, BIO_R_GETSOCKNAME_TRUNCATED_ADDRESS},
#else
{"GETSOCKNAME_TRUNCATED_ADDRESS", 32, 133},
#endif
#ifdef BIO_R_GETTING_SOCKTYPE
{"GETTING_SOCKTYPE", ERR_LIB_BIO, BIO_R_GETTING_SOCKTYPE},
#else
{"GETTING_SOCKTYPE", 32, 134},
#endif
#ifdef BIO_R_INVALID_ARGUMENT
{"INVALID_ARGUMENT", ERR_LIB_BIO, BIO_R_INVALID_ARGUMENT},
#else
{"INVALID_ARGUMENT", 32, 125},
#endif
#ifdef BIO_R_INVALID_SOCKET
{"INVALID_SOCKET", ERR_LIB_BIO, BIO_R_INVALID_SOCKET},
#else
{"INVALID_SOCKET", 32, 135},
#endif
#ifdef BIO_R_IN_USE
{"IN_USE", ERR_LIB_BIO, BIO_R_IN_USE},
#else
{"IN_USE", 32, 123},
#endif
#ifdef BIO_R_LENGTH_TOO_LONG
{"LENGTH_TOO_LONG", ERR_LIB_BIO, BIO_R_LENGTH_TOO_LONG},
#else
{"LENGTH_TOO_LONG", 32, 102},
#endif
#ifdef BIO_R_LISTEN_V6_ONLY
{"LISTEN_V6_ONLY", ERR_LIB_BIO, BIO_R_LISTEN_V6_ONLY},
#else
{"LISTEN_V6_ONLY", 32, 136},
#endif
#ifdef BIO_R_LOOKUP_RETURNED_NOTHING
{"LOOKUP_RETURNED_NOTHING", ERR_LIB_BIO, BIO_R_LOOKUP_RETURNED_NOTHING},
#else
{"LOOKUP_RETURNED_NOTHING", 32, 142},
#endif
#ifdef BIO_R_MALFORMED_HOST_OR_SERVICE
{"MALFORMED_HOST_OR_SERVICE", ERR_LIB_BIO, BIO_R_MALFORMED_HOST_OR_SERVICE},
#else
{"MALFORMED_HOST_OR_SERVICE", 32, 130},
#endif
#ifdef BIO_R_NBIO_CONNECT_ERROR
{"NBIO_CONNECT_ERROR", ERR_LIB_BIO, BIO_R_NBIO_CONNECT_ERROR},
#else
{"NBIO_CONNECT_ERROR", 32, 110},
#endif
#ifdef BIO_R_NO_ACCEPT_ADDR_OR_SERVICE_SPECIFIED
{"NO_ACCEPT_ADDR_OR_SERVICE_SPECIFIED", ERR_LIB_BIO, BIO_R_NO_ACCEPT_ADDR_OR_SERVICE_SPECIFIED},
#else
{"NO_ACCEPT_ADDR_OR_SERVICE_SPECIFIED", 32, 143},
#endif
#ifdef BIO_R_NO_HOSTNAME_OR_SERVICE_SPECIFIED
{"NO_HOSTNAME_OR_SERVICE_SPECIFIED", ERR_LIB_BIO, BIO_R_NO_HOSTNAME_OR_SERVICE_SPECIFIED},
#else
{"NO_HOSTNAME_OR_SERVICE_SPECIFIED", 32, 144},
#endif
#ifdef BIO_R_NO_PORT_DEFINED
{"NO_PORT_DEFINED", ERR_LIB_BIO, BIO_R_NO_PORT_DEFINED},
#else
{"NO_PORT_DEFINED", 32, 113},
#endif
#ifdef BIO_R_NO_SUCH_FILE
{"NO_SUCH_FILE", ERR_LIB_BIO, BIO_R_NO_SUCH_FILE},
#else
{"NO_SUCH_FILE", 32, 128},
#endif
#ifdef BIO_R_NULL_PARAMETER
{"NULL_PARAMETER", ERR_LIB_BIO, BIO_R_NULL_PARAMETER},
#else
{"NULL_PARAMETER", 32, 115},
#endif
#ifdef BIO_R_UNABLE_TO_BIND_SOCKET
{"UNABLE_TO_BIND_SOCKET", ERR_LIB_BIO, BIO_R_UNABLE_TO_BIND_SOCKET},
#else
{"UNABLE_TO_BIND_SOCKET", 32, 117},
#endif
#ifdef BIO_R_UNABLE_TO_CREATE_SOCKET
{"UNABLE_TO_CREATE_SOCKET", ERR_LIB_BIO, BIO_R_UNABLE_TO_CREATE_SOCKET},
#else
{"UNABLE_TO_CREATE_SOCKET", 32, 118},
#endif
#ifdef BIO_R_UNABLE_TO_KEEPALIVE
{"UNABLE_TO_KEEPALIVE", ERR_LIB_BIO, BIO_R_UNABLE_TO_KEEPALIVE},
#else
{"UNABLE_TO_KEEPALIVE", 32, 137},
#endif
#ifdef BIO_R_UNABLE_TO_LISTEN_SOCKET
{"UNABLE_TO_LISTEN_SOCKET", ERR_LIB_BIO, BIO_R_UNABLE_TO_LISTEN_SOCKET},
#else
{"UNABLE_TO_LISTEN_SOCKET", 32, 119},
#endif
#ifdef BIO_R_UNABLE_TO_NODELAY
{"UNABLE_TO_NODELAY", ERR_LIB_BIO, BIO_R_UNABLE_TO_NODELAY},
#else
{"UNABLE_TO_NODELAY", 32, 138},
#endif
#ifdef BIO_R_UNABLE_TO_REUSEADDR
{"UNABLE_TO_REUSEADDR", ERR_LIB_BIO, BIO_R_UNABLE_TO_REUSEADDR},
#else
{"UNABLE_TO_REUSEADDR", 32, 139},
#endif
#ifdef BIO_R_UNAVAILABLE_IP_FAMILY
{"UNAVAILABLE_IP_FAMILY", ERR_LIB_BIO, BIO_R_UNAVAILABLE_IP_FAMILY},
#else
{"UNAVAILABLE_IP_FAMILY", 32, 145},
#endif
#ifdef BIO_R_UNINITIALIZED
{"UNINITIALIZED", ERR_LIB_BIO, BIO_R_UNINITIALIZED},
#else
{"UNINITIALIZED", 32, 120},
#endif
#ifdef BIO_R_UNKNOWN_INFO_TYPE
{"UNKNOWN_INFO_TYPE", ERR_LIB_BIO, BIO_R_UNKNOWN_INFO_TYPE},
#else
{"UNKNOWN_INFO_TYPE", 32, 140},
#endif
#ifdef BIO_R_UNSUPPORTED_IP_FAMILY
{"UNSUPPORTED_IP_FAMILY", ERR_LIB_BIO, BIO_R_UNSUPPORTED_IP_FAMILY},
#else
{"UNSUPPORTED_IP_FAMILY", 32, 146},
#endif
#ifdef BIO_R_UNSUPPORTED_METHOD
{"UNSUPPORTED_METHOD", ERR_LIB_BIO, BIO_R_UNSUPPORTED_METHOD},
#else
{"UNSUPPORTED_METHOD", 32, 121},
#endif
#ifdef BIO_R_UNSUPPORTED_PROTOCOL_FAMILY
{"UNSUPPORTED_PROTOCOL_FAMILY", ERR_LIB_BIO, BIO_R_UNSUPPORTED_PROTOCOL_FAMILY},
#else
{"UNSUPPORTED_PROTOCOL_FAMILY", 32, 131},
#endif
#ifdef BIO_R_WRITE_TO_READ_ONLY_BIO
{"WRITE_TO_READ_ONLY_BIO", ERR_LIB_BIO, BIO_R_WRITE_TO_READ_ONLY_BIO},
#else
{"WRITE_TO_READ_ONLY_BIO", 32, 126},
#endif
#ifdef BIO_R_WSASTARTUP
{"WSASTARTUP", ERR_LIB_BIO, BIO_R_WSASTARTUP},
#else
{"WSASTARTUP", 32, 122},
#endif
#ifdef BN_R_ARG2_LT_ARG3
{"ARG2_LT_ARG3", ERR_LIB_BN, BN_R_ARG2_LT_ARG3},
#else
{"ARG2_LT_ARG3", 3, 100},
#endif
#ifdef BN_R_BAD_RECIPROCAL
{"BAD_RECIPROCAL", ERR_LIB_BN, BN_R_BAD_RECIPROCAL},
#else
{"BAD_RECIPROCAL", 3, 101},
#endif
#ifdef BN_R_BIGNUM_TOO_LONG
{"BIGNUM_TOO_LONG", ERR_LIB_BN, BN_R_BIGNUM_TOO_LONG},
#else
{"BIGNUM_TOO_LONG", 3, 114},
#endif
#ifdef BN_R_BITS_TOO_SMALL
{"BITS_TOO_SMALL", ERR_LIB_BN, BN_R_BITS_TOO_SMALL},
#else
{"BITS_TOO_SMALL", 3, 118},
#endif
#ifdef BN_R_CALLED_WITH_EVEN_MODULUS
{"CALLED_WITH_EVEN_MODULUS", ERR_LIB_BN, BN_R_CALLED_WITH_EVEN_MODULUS},
#else
{"CALLED_WITH_EVEN_MODULUS", 3, 102},
#endif
#ifdef BN_R_DIV_BY_ZERO
{"DIV_BY_ZERO", ERR_LIB_BN, BN_R_DIV_BY_ZERO},
#else
{"DIV_BY_ZERO", 3, 103},
#endif
#ifdef BN_R_ENCODING_ERROR
{"ENCODING_ERROR", ERR_LIB_BN, BN_R_ENCODING_ERROR},
#else
{"ENCODING_ERROR", 3, 104},
#endif
#ifdef BN_R_EXPAND_ON_STATIC_BIGNUM_DATA
{"EXPAND_ON_STATIC_BIGNUM_DATA", ERR_LIB_BN, BN_R_EXPAND_ON_STATIC_BIGNUM_DATA},
#else
{"EXPAND_ON_STATIC_BIGNUM_DATA", 3, 105},
#endif
#ifdef BN_R_INPUT_NOT_REDUCED
{"INPUT_NOT_REDUCED", ERR_LIB_BN, BN_R_INPUT_NOT_REDUCED},
#else
{"INPUT_NOT_REDUCED", 3, 110},
#endif
#ifdef BN_R_INVALID_LENGTH
{"INVALID_LENGTH", ERR_LIB_BN, BN_R_INVALID_LENGTH},
#else
{"INVALID_LENGTH", 3, 106},
#endif
#ifdef BN_R_INVALID_RANGE
{"INVALID_RANGE", ERR_LIB_BN, BN_R_INVALID_RANGE},
#else
{"INVALID_RANGE", 3, 115},
#endif
#ifdef BN_R_INVALID_SHIFT
{"INVALID_SHIFT", ERR_LIB_BN, BN_R_INVALID_SHIFT},
#else
{"INVALID_SHIFT", 3, 119},
#endif
#ifdef BN_R_NOT_A_SQUARE
{"NOT_A_SQUARE", ERR_LIB_BN, BN_R_NOT_A_SQUARE},
#else
{"NOT_A_SQUARE", 3, 111},
#endif
#ifdef BN_R_NOT_INITIALIZED
{"NOT_INITIALIZED", ERR_LIB_BN, BN_R_NOT_INITIALIZED},
#else
{"NOT_INITIALIZED", 3, 107},
#endif
#ifdef BN_R_NO_INVERSE
{"NO_INVERSE", ERR_LIB_BN, BN_R_NO_INVERSE},
#else
{"NO_INVERSE", 3, 108},
#endif
#ifdef BN_R_NO_SOLUTION
{"NO_SOLUTION", ERR_LIB_BN, BN_R_NO_SOLUTION},
#else
{"NO_SOLUTION", 3, 116},
#endif
#ifdef BN_R_PRIVATE_KEY_TOO_LARGE
{"PRIVATE_KEY_TOO_LARGE", ERR_LIB_BN, BN_R_PRIVATE_KEY_TOO_LARGE},
#else
{"PRIVATE_KEY_TOO_LARGE", 3, 117},
#endif
#ifdef BN_R_P_IS_NOT_PRIME
{"P_IS_NOT_PRIME", ERR_LIB_BN, BN_R_P_IS_NOT_PRIME},
#else
{"P_IS_NOT_PRIME", 3, 112},
#endif
#ifdef BN_R_TOO_MANY_ITERATIONS
{"TOO_MANY_ITERATIONS", ERR_LIB_BN, BN_R_TOO_MANY_ITERATIONS},
#else
{"TOO_MANY_ITERATIONS", 3, 113},
#endif
#ifdef BN_R_TOO_MANY_TEMPORARY_VARIABLES
{"TOO_MANY_TEMPORARY_VARIABLES", ERR_LIB_BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES},
#else
{"TOO_MANY_TEMPORARY_VARIABLES", 3, 109},
#endif
#ifdef CMS_R_ADD_SIGNER_ERROR
{"ADD_SIGNER_ERROR", ERR_LIB_CMS, CMS_R_ADD_SIGNER_ERROR},
#else
{"ADD_SIGNER_ERROR", 46, 99},
#endif
#ifdef CMS_R_ATTRIBUTE_ERROR
{"ATTRIBUTE_ERROR", ERR_LIB_CMS, CMS_R_ATTRIBUTE_ERROR},
#else
{"ATTRIBUTE_ERROR", 46, 161},
#endif
#ifdef CMS_R_CERTIFICATE_ALREADY_PRESENT
{"CERTIFICATE_ALREADY_PRESENT", ERR_LIB_CMS, CMS_R_CERTIFICATE_ALREADY_PRESENT},
#else
{"CERTIFICATE_ALREADY_PRESENT", 46, 175},
#endif
#ifdef CMS_R_CERTIFICATE_HAS_NO_KEYID
{"CERTIFICATE_HAS_NO_KEYID", ERR_LIB_CMS, CMS_R_CERTIFICATE_HAS_NO_KEYID},
#else
{"CERTIFICATE_HAS_NO_KEYID", 46, 160},
#endif
#ifdef CMS_R_CERTIFICATE_VERIFY_ERROR
{"CERTIFICATE_VERIFY_ERROR", ERR_LIB_CMS, CMS_R_CERTIFICATE_VERIFY_ERROR},
#else
{"CERTIFICATE_VERIFY_ERROR", 46, 100},
#endif
#ifdef CMS_R_CIPHER_INITIALISATION_ERROR
{"CIPHER_INITIALISATION_ERROR", ERR_LIB_CMS, CMS_R_CIPHER_INITIALISATION_ERROR},
#else
{"CIPHER_INITIALISATION_ERROR", 46, 101},
#endif
#ifdef CMS_R_CIPHER_PARAMETER_INITIALISATION_ERROR
{"CIPHER_PARAMETER_INITIALISATION_ERROR", ERR_LIB_CMS, CMS_R_CIPHER_PARAMETER_INITIALISATION_ERROR},
#else
{"CIPHER_PARAMETER_INITIALISATION_ERROR", 46, 102},
#endif
#ifdef CMS_R_CMS_DATAFINAL_ERROR
{"CMS_DATAFINAL_ERROR", ERR_LIB_CMS, CMS_R_CMS_DATAFINAL_ERROR},
#else
{"CMS_DATAFINAL_ERROR", 46, 103},
#endif
#ifdef CMS_R_CMS_LIB
{"CMS_LIB", ERR_LIB_CMS, CMS_R_CMS_LIB},
#else
{"CMS_LIB", 46, 104},
#endif
#ifdef CMS_R_CONTENTIDENTIFIER_MISMATCH
{"CONTENTIDENTIFIER_MISMATCH", ERR_LIB_CMS, CMS_R_CONTENTIDENTIFIER_MISMATCH},
#else
{"CONTENTIDENTIFIER_MISMATCH", 46, 170},
#endif
#ifdef CMS_R_CONTENT_NOT_FOUND
{"CONTENT_NOT_FOUND", ERR_LIB_CMS, CMS_R_CONTENT_NOT_FOUND},
#else
{"CONTENT_NOT_FOUND", 46, 105},
#endif
#ifdef CMS_R_CONTENT_TYPE_MISMATCH
{"CONTENT_TYPE_MISMATCH", ERR_LIB_CMS, CMS_R_CONTENT_TYPE_MISMATCH},
#else
{"CONTENT_TYPE_MISMATCH", 46, 171},
#endif
#ifdef CMS_R_CONTENT_TYPE_NOT_COMPRESSED_DATA
{"CONTENT_TYPE_NOT_COMPRESSED_DATA", ERR_LIB_CMS, CMS_R_CONTENT_TYPE_NOT_COMPRESSED_DATA},
#else
{"CONTENT_TYPE_NOT_COMPRESSED_DATA", 46, 106},
#endif
#ifdef CMS_R_CONTENT_TYPE_NOT_ENVELOPED_DATA
{"CONTENT_TYPE_NOT_ENVELOPED_DATA", ERR_LIB_CMS, CMS_R_CONTENT_TYPE_NOT_ENVELOPED_DATA},
#else
{"CONTENT_TYPE_NOT_ENVELOPED_DATA", 46, 107},
#endif
#ifdef CMS_R_CONTENT_TYPE_NOT_SIGNED_DATA
{"CONTENT_TYPE_NOT_SIGNED_DATA", ERR_LIB_CMS, CMS_R_CONTENT_TYPE_NOT_SIGNED_DATA},
#else
{"CONTENT_TYPE_NOT_SIGNED_DATA", 46, 108},
#endif
#ifdef CMS_R_CONTENT_VERIFY_ERROR
{"CONTENT_VERIFY_ERROR", ERR_LIB_CMS, CMS_R_CONTENT_VERIFY_ERROR},
#else
{"CONTENT_VERIFY_ERROR", 46, 109},
#endif
#ifdef CMS_R_CTRL_ERROR
{"CTRL_ERROR", ERR_LIB_CMS, CMS_R_CTRL_ERROR},
#else
{"CTRL_ERROR", 46, 110},
#endif
#ifdef CMS_R_CTRL_FAILURE
{"CTRL_FAILURE", ERR_LIB_CMS, CMS_R_CTRL_FAILURE},
#else
{"CTRL_FAILURE", 46, 111},
#endif
#ifdef CMS_R_DECRYPT_ERROR
{"DECRYPT_ERROR", ERR_LIB_CMS, CMS_R_DECRYPT_ERROR},
#else
{"DECRYPT_ERROR", 46, 112},
#endif
#ifdef CMS_R_ERROR_GETTING_PUBLIC_KEY
{"ERROR_GETTING_PUBLIC_KEY", ERR_LIB_CMS, CMS_R_ERROR_GETTING_PUBLIC_KEY},
#else
{"ERROR_GETTING_PUBLIC_KEY", 46, 113},
#endif
#ifdef CMS_R_ERROR_READING_MESSAGEDIGEST_ATTRIBUTE
{"ERROR_READING_MESSAGEDIGEST_ATTRIBUTE", ERR_LIB_CMS, CMS_R_ERROR_READING_MESSAGEDIGEST_ATTRIBUTE},
#else
{"ERROR_READING_MESSAGEDIGEST_ATTRIBUTE", 46, 114},
#endif
#ifdef CMS_R_ERROR_SETTING_KEY
{"ERROR_SETTING_KEY", ERR_LIB_CMS, CMS_R_ERROR_SETTING_KEY},
#else
{"ERROR_SETTING_KEY", 46, 115},
#endif
#ifdef CMS_R_ERROR_SETTING_RECIPIENTINFO
{"ERROR_SETTING_RECIPIENTINFO", ERR_LIB_CMS, CMS_R_ERROR_SETTING_RECIPIENTINFO},
#else
{"ERROR_SETTING_RECIPIENTINFO", 46, 116},
#endif
#ifdef CMS_R_INVALID_ENCRYPTED_KEY_LENGTH
{"INVALID_ENCRYPTED_KEY_LENGTH", ERR_LIB_CMS, CMS_R_INVALID_ENCRYPTED_KEY_LENGTH},
#else
{"INVALID_ENCRYPTED_KEY_LENGTH", 46, 117},
#endif
#ifdef CMS_R_INVALID_KEY_ENCRYPTION_PARAMETER
{"INVALID_KEY_ENCRYPTION_PARAMETER", ERR_LIB_CMS, CMS_R_INVALID_KEY_ENCRYPTION_PARAMETER},
#else
{"INVALID_KEY_ENCRYPTION_PARAMETER", 46, 176},
#endif
#ifdef CMS_R_INVALID_KEY_LENGTH
{"INVALID_KEY_LENGTH", ERR_LIB_CMS, CMS_R_INVALID_KEY_LENGTH},
#else
{"INVALID_KEY_LENGTH", 46, 118},
#endif
#ifdef CMS_R_MD_BIO_INIT_ERROR
{"MD_BIO_INIT_ERROR", ERR_LIB_CMS, CMS_R_MD_BIO_INIT_ERROR},
#else
{"MD_BIO_INIT_ERROR", 46, 119},
#endif
#ifdef CMS_R_MESSAGEDIGEST_ATTRIBUTE_WRONG_LENGTH
{"MESSAGEDIGEST_ATTRIBUTE_WRONG_LENGTH", ERR_LIB_CMS, CMS_R_MESSAGEDIGEST_ATTRIBUTE_WRONG_LENGTH},
#else
{"MESSAGEDIGEST_ATTRIBUTE_WRONG_LENGTH", 46, 120},
#endif
#ifdef CMS_R_MESSAGEDIGEST_WRONG_LENGTH
{"MESSAGEDIGEST_WRONG_LENGTH", ERR_LIB_CMS, CMS_R_MESSAGEDIGEST_WRONG_LENGTH},
#else
{"MESSAGEDIGEST_WRONG_LENGTH", 46, 121},
#endif
#ifdef CMS_R_MSGSIGDIGEST_ERROR
{"MSGSIGDIGEST_ERROR", ERR_LIB_CMS, CMS_R_MSGSIGDIGEST_ERROR},
#else
{"MSGSIGDIGEST_ERROR", 46, 172},
#endif
#ifdef CMS_R_MSGSIGDIGEST_VERIFICATION_FAILURE
{"MSGSIGDIGEST_VERIFICATION_FAILURE", ERR_LIB_CMS, CMS_R_MSGSIGDIGEST_VERIFICATION_FAILURE},
#else
{"MSGSIGDIGEST_VERIFICATION_FAILURE", 46, 162},
#endif
#ifdef CMS_R_MSGSIGDIGEST_WRONG_LENGTH
{"MSGSIGDIGEST_WRONG_LENGTH", ERR_LIB_CMS, CMS_R_MSGSIGDIGEST_WRONG_LENGTH},
#else
{"MSGSIGDIGEST_WRONG_LENGTH", 46, 163},
#endif
#ifdef CMS_R_NEED_ONE_SIGNER
{"NEED_ONE_SIGNER", ERR_LIB_CMS, CMS_R_NEED_ONE_SIGNER},
#else
{"NEED_ONE_SIGNER", 46, 164},
#endif
#ifdef CMS_R_NOT_A_SIGNED_RECEIPT
{"NOT_A_SIGNED_RECEIPT", ERR_LIB_CMS, CMS_R_NOT_A_SIGNED_RECEIPT},
#else
{"NOT_A_SIGNED_RECEIPT", 46, 165},
#endif
#ifdef CMS_R_NOT_ENCRYPTED_DATA
{"NOT_ENCRYPTED_DATA", ERR_LIB_CMS, CMS_R_NOT_ENCRYPTED_DATA},
#else
{"NOT_ENCRYPTED_DATA", 46, 122},
#endif
#ifdef CMS_R_NOT_KEK
{"NOT_KEK", ERR_LIB_CMS, CMS_R_NOT_KEK},
#else
{"NOT_KEK", 46, 123},
#endif
#ifdef CMS_R_NOT_KEY_AGREEMENT
{"NOT_KEY_AGREEMENT", ERR_LIB_CMS, CMS_R_NOT_KEY_AGREEMENT},
#else
{"NOT_KEY_AGREEMENT", 46, 181},
#endif
#ifdef CMS_R_NOT_KEY_TRANSPORT
{"NOT_KEY_TRANSPORT", ERR_LIB_CMS, CMS_R_NOT_KEY_TRANSPORT},
#else
{"NOT_KEY_TRANSPORT", 46, 124},
#endif
#ifdef CMS_R_NOT_PWRI
{"NOT_PWRI", ERR_LIB_CMS, CMS_R_NOT_PWRI},
#else
{"NOT_PWRI", 46, 177},
#endif
#ifdef CMS_R_NOT_SUPPORTED_FOR_THIS_KEY_TYPE
{"NOT_SUPPORTED_FOR_THIS_KEY_TYPE", ERR_LIB_CMS, CMS_R_NOT_SUPPORTED_FOR_THIS_KEY_TYPE},
#else
{"NOT_SUPPORTED_FOR_THIS_KEY_TYPE", 46, 125},
#endif
#ifdef CMS_R_NO_CIPHER
{"NO_CIPHER", ERR_LIB_CMS, CMS_R_NO_CIPHER},
#else
{"NO_CIPHER", 46, 126},
#endif
#ifdef CMS_R_NO_CONTENT
{"NO_CONTENT", ERR_LIB_CMS, CMS_R_NO_CONTENT},
#else
{"NO_CONTENT", 46, 127},
#endif
#ifdef CMS_R_NO_CONTENT_TYPE
{"NO_CONTENT_TYPE", ERR_LIB_CMS, CMS_R_NO_CONTENT_TYPE},
#else
{"NO_CONTENT_TYPE", 46, 173},
#endif
#ifdef CMS_R_NO_DEFAULT_DIGEST
{"NO_DEFAULT_DIGEST", ERR_LIB_CMS, CMS_R_NO_DEFAULT_DIGEST},
#else
{"NO_DEFAULT_DIGEST", 46, 128},
#endif
#ifdef CMS_R_NO_DIGEST_SET
{"NO_DIGEST_SET", ERR_LIB_CMS, CMS_R_NO_DIGEST_SET},
#else
{"NO_DIGEST_SET", 46, 129},
#endif
#ifdef CMS_R_NO_KEY
{"NO_KEY", ERR_LIB_CMS, CMS_R_NO_KEY},
#else
{"NO_KEY", 46, 130},
#endif
#ifdef CMS_R_NO_KEY_OR_CERT
{"NO_KEY_OR_CERT", ERR_LIB_CMS, CMS_R_NO_KEY_OR_CERT},
#else
{"NO_KEY_OR_CERT", 46, 174},
#endif
#ifdef CMS_R_NO_MATCHING_DIGEST
{"NO_MATCHING_DIGEST", ERR_LIB_CMS, CMS_R_NO_MATCHING_DIGEST},
#else
{"NO_MATCHING_DIGEST", 46, 131},
#endif
#ifdef CMS_R_NO_MATCHING_RECIPIENT
{"NO_MATCHING_RECIPIENT", ERR_LIB_CMS, CMS_R_NO_MATCHING_RECIPIENT},
#else
{"NO_MATCHING_RECIPIENT", 46, 132},
#endif
#ifdef CMS_R_NO_MATCHING_SIGNATURE
{"NO_MATCHING_SIGNATURE", ERR_LIB_CMS, CMS_R_NO_MATCHING_SIGNATURE},
#else
{"NO_MATCHING_SIGNATURE", 46, 166},
#endif
#ifdef CMS_R_NO_MSGSIGDIGEST
{"NO_MSGSIGDIGEST", ERR_LIB_CMS, CMS_R_NO_MSGSIGDIGEST},
#else
{"NO_MSGSIGDIGEST", 46, 167},
#endif
#ifdef CMS_R_NO_PASSWORD
{"NO_PASSWORD", ERR_LIB_CMS, CMS_R_NO_PASSWORD},
#else
{"NO_PASSWORD", 46, 178},
#endif
#ifdef CMS_R_NO_PRIVATE_KEY
{"NO_PRIVATE_KEY", ERR_LIB_CMS, CMS_R_NO_PRIVATE_KEY},
#else
{"NO_PRIVATE_KEY", 46, 133},
#endif
#ifdef CMS_R_NO_PUBLIC_KEY
{"NO_PUBLIC_KEY", ERR_LIB_CMS, CMS_R_NO_PUBLIC_KEY},
#else
{"NO_PUBLIC_KEY", 46, 134},
#endif
#ifdef CMS_R_NO_RECEIPT_REQUEST
{"NO_RECEIPT_REQUEST", ERR_LIB_CMS, CMS_R_NO_RECEIPT_REQUEST},
#else
{"NO_RECEIPT_REQUEST", 46, 168},
#endif
#ifdef CMS_R_NO_SIGNERS
{"NO_SIGNERS", ERR_LIB_CMS, CMS_R_NO_SIGNERS},
#else
{"NO_SIGNERS", 46, 135},
#endif
#ifdef CMS_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE
{"PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE", ERR_LIB_CMS, CMS_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE},
#else
{"PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE", 46, 136},
#endif
#ifdef CMS_R_RECEIPT_DECODE_ERROR
{"RECEIPT_DECODE_ERROR", ERR_LIB_CMS, CMS_R_RECEIPT_DECODE_ERROR},
#else
{"RECEIPT_DECODE_ERROR", 46, 169},
#endif
#ifdef CMS_R_RECIPIENT_ERROR
{"RECIPIENT_ERROR", ERR_LIB_CMS, CMS_R_RECIPIENT_ERROR},
#else
{"RECIPIENT_ERROR", 46, 137},
#endif
#ifdef CMS_R_SIGNER_CERTIFICATE_NOT_FOUND
{"SIGNER_CERTIFICATE_NOT_FOUND", ERR_LIB_CMS, CMS_R_SIGNER_CERTIFICATE_NOT_FOUND},
#else
{"SIGNER_CERTIFICATE_NOT_FOUND", 46, 138},
#endif
#ifdef CMS_R_SIGNFINAL_ERROR
{"SIGNFINAL_ERROR", ERR_LIB_CMS, CMS_R_SIGNFINAL_ERROR},
#else
{"SIGNFINAL_ERROR", 46, 139},
#endif
#ifdef CMS_R_SMIME_TEXT_ERROR
{"SMIME_TEXT_ERROR", ERR_LIB_CMS, CMS_R_SMIME_TEXT_ERROR},
#else
{"SMIME_TEXT_ERROR", 46, 140},
#endif
#ifdef CMS_R_STORE_INIT_ERROR
{"STORE_INIT_ERROR", ERR_LIB_CMS, CMS_R_STORE_INIT_ERROR},
#else
{"STORE_INIT_ERROR", 46, 141},
#endif
#ifdef CMS_R_TYPE_NOT_COMPRESSED_DATA
{"TYPE_NOT_COMPRESSED_DATA", ERR_LIB_CMS, CMS_R_TYPE_NOT_COMPRESSED_DATA},
#else
{"TYPE_NOT_COMPRESSED_DATA", 46, 142},
#endif
#ifdef CMS_R_TYPE_NOT_DATA
{"TYPE_NOT_DATA", ERR_LIB_CMS, CMS_R_TYPE_NOT_DATA},
#else
{"TYPE_NOT_DATA", 46, 143},
#endif
#ifdef CMS_R_TYPE_NOT_DIGESTED_DATA
{"TYPE_NOT_DIGESTED_DATA", ERR_LIB_CMS, CMS_R_TYPE_NOT_DIGESTED_DATA},
#else
{"TYPE_NOT_DIGESTED_DATA", 46, 144},
#endif
#ifdef CMS_R_TYPE_NOT_ENCRYPTED_DATA
{"TYPE_NOT_ENCRYPTED_DATA", ERR_LIB_CMS, CMS_R_TYPE_NOT_ENCRYPTED_DATA},
#else
{"TYPE_NOT_ENCRYPTED_DATA", 46, 145},
#endif
#ifdef CMS_R_TYPE_NOT_ENVELOPED_DATA
{"TYPE_NOT_ENVELOPED_DATA", ERR_LIB_CMS, CMS_R_TYPE_NOT_ENVELOPED_DATA},
#else
{"TYPE_NOT_ENVELOPED_DATA", 46, 146},
#endif
#ifdef CMS_R_UNABLE_TO_FINALIZE_CONTEXT
{"UNABLE_TO_FINALIZE_CONTEXT", ERR_LIB_CMS, CMS_R_UNABLE_TO_FINALIZE_CONTEXT},
#else
{"UNABLE_TO_FINALIZE_CONTEXT", 46, 147},
#endif
#ifdef CMS_R_UNKNOWN_CIPHER
{"UNKNOWN_CIPHER", ERR_LIB_CMS, CMS_R_UNKNOWN_CIPHER},
#else
{"UNKNOWN_CIPHER", 46, 148},
#endif
#ifdef CMS_R_UNKNOWN_DIGEST_ALGORITHM
{"UNKNOWN_DIGEST_ALGORITHM", ERR_LIB_CMS, CMS_R_UNKNOWN_DIGEST_ALGORITHM},
#else
{"UNKNOWN_DIGEST_ALGORITHM", 46, 149},
#endif
#ifdef CMS_R_UNKNOWN_ID
{"UNKNOWN_ID", ERR_LIB_CMS, CMS_R_UNKNOWN_ID},
#else
{"UNKNOWN_ID", 46, 150},
#endif
#ifdef CMS_R_UNSUPPORTED_COMPRESSION_ALGORITHM
{"UNSUPPORTED_COMPRESSION_ALGORITHM", ERR_LIB_CMS, CMS_R_UNSUPPORTED_COMPRESSION_ALGORITHM},
#else
{"UNSUPPORTED_COMPRESSION_ALGORITHM", 46, 151},
#endif
#ifdef CMS_R_UNSUPPORTED_CONTENT_ENCRYPTION_ALGORITHM
{"UNSUPPORTED_CONTENT_ENCRYPTION_ALGORITHM", ERR_LIB_CMS, CMS_R_UNSUPPORTED_CONTENT_ENCRYPTION_ALGORITHM},
#else
{"UNSUPPORTED_CONTENT_ENCRYPTION_ALGORITHM", 46, 194},
#endif
#ifdef CMS_R_UNSUPPORTED_CONTENT_TYPE
{"UNSUPPORTED_CONTENT_TYPE", ERR_LIB_CMS, CMS_R_UNSUPPORTED_CONTENT_TYPE},
#else
{"UNSUPPORTED_CONTENT_TYPE", 46, 152},
#endif
#ifdef CMS_R_UNSUPPORTED_KEK_ALGORITHM
{"UNSUPPORTED_KEK_ALGORITHM", ERR_LIB_CMS, CMS_R_UNSUPPORTED_KEK_ALGORITHM},
#else
{"UNSUPPORTED_KEK_ALGORITHM", 46, 153},
#endif
#ifdef CMS_R_UNSUPPORTED_KEY_ENCRYPTION_ALGORITHM
{"UNSUPPORTED_KEY_ENCRYPTION_ALGORITHM", ERR_LIB_CMS, CMS_R_UNSUPPORTED_KEY_ENCRYPTION_ALGORITHM},
#else
{"UNSUPPORTED_KEY_ENCRYPTION_ALGORITHM", 46, 179},
#endif
#ifdef CMS_R_UNSUPPORTED_RECIPIENTINFO_TYPE
{"UNSUPPORTED_RECIPIENTINFO_TYPE", ERR_LIB_CMS, CMS_R_UNSUPPORTED_RECIPIENTINFO_TYPE},
#else
{"UNSUPPORTED_RECIPIENTINFO_TYPE", 46, 155},
#endif
#ifdef CMS_R_UNSUPPORTED_RECIPIENT_TYPE
{"UNSUPPORTED_RECIPIENT_TYPE", ERR_LIB_CMS, CMS_R_UNSUPPORTED_RECIPIENT_TYPE},
#else
{"UNSUPPORTED_RECIPIENT_TYPE", 46, 154},
#endif
#ifdef CMS_R_UNSUPPORTED_TYPE
{"UNSUPPORTED_TYPE", ERR_LIB_CMS, CMS_R_UNSUPPORTED_TYPE},
#else
{"UNSUPPORTED_TYPE", 46, 156},
#endif
#ifdef CMS_R_UNWRAP_ERROR
{"UNWRAP_ERROR", ERR_LIB_CMS, CMS_R_UNWRAP_ERROR},
#else
{"UNWRAP_ERROR", 46, 157},
#endif
#ifdef CMS_R_UNWRAP_FAILURE
{"UNWRAP_FAILURE", ERR_LIB_CMS, CMS_R_UNWRAP_FAILURE},
#else
{"UNWRAP_FAILURE", 46, 180},
#endif
#ifdef CMS_R_VERIFICATION_FAILURE
{"VERIFICATION_FAILURE", ERR_LIB_CMS, CMS_R_VERIFICATION_FAILURE},
#else
{"VERIFICATION_FAILURE", 46, 158},
#endif
#ifdef CMS_R_WRAP_ERROR
{"WRAP_ERROR", ERR_LIB_CMS, CMS_R_WRAP_ERROR},
#else
{"WRAP_ERROR", 46, 159},
#endif
#ifdef COMP_R_ZLIB_DEFLATE_ERROR
{"ZLIB_DEFLATE_ERROR", ERR_LIB_COMP, COMP_R_ZLIB_DEFLATE_ERROR},
#else
{"ZLIB_DEFLATE_ERROR", 41, 99},
#endif
#ifdef COMP_R_ZLIB_INFLATE_ERROR
{"ZLIB_INFLATE_ERROR", ERR_LIB_COMP, COMP_R_ZLIB_INFLATE_ERROR},
#else
{"ZLIB_INFLATE_ERROR", 41, 100},
#endif
#ifdef COMP_R_ZLIB_NOT_SUPPORTED
{"ZLIB_NOT_SUPPORTED", ERR_LIB_COMP, COMP_R_ZLIB_NOT_SUPPORTED},
#else
{"ZLIB_NOT_SUPPORTED", 41, 101},
#endif
#ifdef CONF_R_ERROR_LOADING_DSO
{"ERROR_LOADING_DSO", ERR_LIB_CONF, CONF_R_ERROR_LOADING_DSO},
#else
{"ERROR_LOADING_DSO", 14, 110},
#endif
#ifdef CONF_R_LIST_CANNOT_BE_NULL
{"LIST_CANNOT_BE_NULL", ERR_LIB_CONF, CONF_R_LIST_CANNOT_BE_NULL},
#else
{"LIST_CANNOT_BE_NULL", 14, 115},
#endif
#ifdef CONF_R_MISSING_CLOSE_SQUARE_BRACKET
{"MISSING_CLOSE_SQUARE_BRACKET", ERR_LIB_CONF, CONF_R_MISSING_CLOSE_SQUARE_BRACKET},
#else
{"MISSING_CLOSE_SQUARE_BRACKET", 14, 100},
#endif
#ifdef CONF_R_MISSING_EQUAL_SIGN
{"MISSING_EQUAL_SIGN", ERR_LIB_CONF, CONF_R_MISSING_EQUAL_SIGN},
#else
{"MISSING_EQUAL_SIGN", 14, 101},
#endif
#ifdef CONF_R_MISSING_INIT_FUNCTION
{"MISSING_INIT_FUNCTION", ERR_LIB_CONF, CONF_R_MISSING_INIT_FUNCTION},
#else
{"MISSING_INIT_FUNCTION", 14, 112},
#endif
#ifdef CONF_R_MODULE_INITIALIZATION_ERROR
{"MODULE_INITIALIZATION_ERROR", ERR_LIB_CONF, CONF_R_MODULE_INITIALIZATION_ERROR},
#else
{"MODULE_INITIALIZATION_ERROR", 14, 109},
#endif
#ifdef CONF_R_NO_CLOSE_BRACE
{"NO_CLOSE_BRACE", ERR_LIB_CONF, CONF_R_NO_CLOSE_BRACE},
#else
{"NO_CLOSE_BRACE", 14, 102},
#endif
#ifdef CONF_R_NO_CONF
{"NO_CONF", ERR_LIB_CONF, CONF_R_NO_CONF},
#else
{"NO_CONF", 14, 105},
#endif
#ifdef CONF_R_NO_CONF_OR_ENVIRONMENT_VARIABLE
{"NO_CONF_OR_ENVIRONMENT_VARIABLE", ERR_LIB_CONF, CONF_R_NO_CONF_OR_ENVIRONMENT_VARIABLE},
#else
{"NO_CONF_OR_ENVIRONMENT_VARIABLE", 14, 106},
#endif
#ifdef CONF_R_NO_SECTION
{"NO_SECTION", ERR_LIB_CONF, CONF_R_NO_SECTION},
#else
{"NO_SECTION", 14, 107},
#endif
#ifdef CONF_R_NO_SUCH_FILE
{"NO_SUCH_FILE", ERR_LIB_CONF, CONF_R_NO_SUCH_FILE},
#else
{"NO_SUCH_FILE", 14, 114},
#endif
#ifdef CONF_R_NO_VALUE
{"NO_VALUE", ERR_LIB_CONF, CONF_R_NO_VALUE},
#else
{"NO_VALUE", 14, 108},
#endif
#ifdef CONF_R_NUMBER_TOO_LARGE
{"NUMBER_TOO_LARGE", ERR_LIB_CONF, CONF_R_NUMBER_TOO_LARGE},
#else
{"NUMBER_TOO_LARGE", 14, 121},
#endif
#ifdef CONF_R_RECURSIVE_DIRECTORY_INCLUDE
{"RECURSIVE_DIRECTORY_INCLUDE", ERR_LIB_CONF, CONF_R_RECURSIVE_DIRECTORY_INCLUDE},
#else
{"RECURSIVE_DIRECTORY_INCLUDE", 14, 111},
#endif
#ifdef CONF_R_SSL_COMMAND_SECTION_EMPTY
{"SSL_COMMAND_SECTION_EMPTY", ERR_LIB_CONF, CONF_R_SSL_COMMAND_SECTION_EMPTY},
#else
{"SSL_COMMAND_SECTION_EMPTY", 14, 117},
#endif
#ifdef CONF_R_SSL_COMMAND_SECTION_NOT_FOUND
{"SSL_COMMAND_SECTION_NOT_FOUND", ERR_LIB_CONF, CONF_R_SSL_COMMAND_SECTION_NOT_FOUND},
#else
{"SSL_COMMAND_SECTION_NOT_FOUND", 14, 118},
#endif
#ifdef CONF_R_SSL_SECTION_EMPTY
{"SSL_SECTION_EMPTY", ERR_LIB_CONF, CONF_R_SSL_SECTION_EMPTY},
#else
{"SSL_SECTION_EMPTY", 14, 119},
#endif
#ifdef CONF_R_SSL_SECTION_NOT_FOUND
{"SSL_SECTION_NOT_FOUND", ERR_LIB_CONF, CONF_R_SSL_SECTION_NOT_FOUND},
#else
{"SSL_SECTION_NOT_FOUND", 14, 120},
#endif
#ifdef CONF_R_UNABLE_TO_CREATE_NEW_SECTION
{"UNABLE_TO_CREATE_NEW_SECTION", ERR_LIB_CONF, CONF_R_UNABLE_TO_CREATE_NEW_SECTION},
#else
{"UNABLE_TO_CREATE_NEW_SECTION", 14, 103},
#endif
#ifdef CONF_R_UNKNOWN_MODULE_NAME
{"UNKNOWN_MODULE_NAME", ERR_LIB_CONF, CONF_R_UNKNOWN_MODULE_NAME},
#else
{"UNKNOWN_MODULE_NAME", 14, 113},
#endif
#ifdef CONF_R_VARIABLE_EXPANSION_TOO_LONG
{"VARIABLE_EXPANSION_TOO_LONG", ERR_LIB_CONF, CONF_R_VARIABLE_EXPANSION_TOO_LONG},
#else
{"VARIABLE_EXPANSION_TOO_LONG", 14, 116},
#endif
#ifdef CONF_R_VARIABLE_HAS_NO_VALUE
{"VARIABLE_HAS_NO_VALUE", ERR_LIB_CONF, CONF_R_VARIABLE_HAS_NO_VALUE},
#else
{"VARIABLE_HAS_NO_VALUE", 14, 104},
#endif
#ifdef CRYPTO_R_FIPS_MODE_NOT_SUPPORTED
{"FIPS_MODE_NOT_SUPPORTED", ERR_LIB_CRYPTO, CRYPTO_R_FIPS_MODE_NOT_SUPPORTED},
#else
{"FIPS_MODE_NOT_SUPPORTED", 15, 101},
#endif
#ifdef CRYPTO_R_ILLEGAL_HEX_DIGIT
{"ILLEGAL_HEX_DIGIT", ERR_LIB_CRYPTO, CRYPTO_R_ILLEGAL_HEX_DIGIT},
#else
{"ILLEGAL_HEX_DIGIT", 15, 102},
#endif
#ifdef CRYPTO_R_ODD_NUMBER_OF_DIGITS
{"ODD_NUMBER_OF_DIGITS", ERR_LIB_CRYPTO, CRYPTO_R_ODD_NUMBER_OF_DIGITS},
#else
{"ODD_NUMBER_OF_DIGITS", 15, 103},
#endif
#ifdef CT_R_BASE64_DECODE_ERROR
{"BASE64_DECODE_ERROR", ERR_LIB_CT, CT_R_BASE64_DECODE_ERROR},
#else
{"BASE64_DECODE_ERROR", 50, 108},
#endif
#ifdef CT_R_INVALID_LOG_ID_LENGTH
{"INVALID_LOG_ID_LENGTH", ERR_LIB_CT, CT_R_INVALID_LOG_ID_LENGTH},
#else
{"INVALID_LOG_ID_LENGTH", 50, 100},
#endif
#ifdef CT_R_LOG_CONF_INVALID
{"LOG_CONF_INVALID", ERR_LIB_CT, CT_R_LOG_CONF_INVALID},
#else
{"LOG_CONF_INVALID", 50, 109},
#endif
#ifdef CT_R_LOG_CONF_INVALID_KEY
{"LOG_CONF_INVALID_KEY", ERR_LIB_CT, CT_R_LOG_CONF_INVALID_KEY},
#else
{"LOG_CONF_INVALID_KEY", 50, 110},
#endif
#ifdef CT_R_LOG_CONF_MISSING_DESCRIPTION
{"LOG_CONF_MISSING_DESCRIPTION", ERR_LIB_CT, CT_R_LOG_CONF_MISSING_DESCRIPTION},
#else
{"LOG_CONF_MISSING_DESCRIPTION", 50, 111},
#endif
#ifdef CT_R_LOG_CONF_MISSING_KEY
{"LOG_CONF_MISSING_KEY", ERR_LIB_CT, CT_R_LOG_CONF_MISSING_KEY},
#else
{"LOG_CONF_MISSING_KEY", 50, 112},
#endif
#ifdef CT_R_LOG_KEY_INVALID
{"LOG_KEY_INVALID", ERR_LIB_CT, CT_R_LOG_KEY_INVALID},
#else
{"LOG_KEY_INVALID", 50, 113},
#endif
#ifdef CT_R_SCT_FUTURE_TIMESTAMP
{"SCT_FUTURE_TIMESTAMP", ERR_LIB_CT, CT_R_SCT_FUTURE_TIMESTAMP},
#else
{"SCT_FUTURE_TIMESTAMP", 50, 116},
#endif
#ifdef CT_R_SCT_INVALID
{"SCT_INVALID", ERR_LIB_CT, CT_R_SCT_INVALID},
#else
{"SCT_INVALID", 50, 104},
#endif
#ifdef CT_R_SCT_INVALID_SIGNATURE
{"SCT_INVALID_SIGNATURE", ERR_LIB_CT, CT_R_SCT_INVALID_SIGNATURE},
#else
{"SCT_INVALID_SIGNATURE", 50, 107},
#endif
#ifdef CT_R_SCT_LIST_INVALID
{"SCT_LIST_INVALID", ERR_LIB_CT, CT_R_SCT_LIST_INVALID},
#else
{"SCT_LIST_INVALID", 50, 105},
#endif
#ifdef CT_R_SCT_LOG_ID_MISMATCH
{"SCT_LOG_ID_MISMATCH", ERR_LIB_CT, CT_R_SCT_LOG_ID_MISMATCH},
#else
{"SCT_LOG_ID_MISMATCH", 50, 114},
#endif
#ifdef CT_R_SCT_NOT_SET
{"SCT_NOT_SET", ERR_LIB_CT, CT_R_SCT_NOT_SET},
#else
{"SCT_NOT_SET", 50, 106},
#endif
#ifdef CT_R_SCT_UNSUPPORTED_VERSION
{"SCT_UNSUPPORTED_VERSION", ERR_LIB_CT, CT_R_SCT_UNSUPPORTED_VERSION},
#else
{"SCT_UNSUPPORTED_VERSION", 50, 115},
#endif
#ifdef CT_R_UNRECOGNIZED_SIGNATURE_NID
{"UNRECOGNIZED_SIGNATURE_NID", ERR_LIB_CT, CT_R_UNRECOGNIZED_SIGNATURE_NID},
#else
{"UNRECOGNIZED_SIGNATURE_NID", 50, 101},
#endif
#ifdef CT_R_UNSUPPORTED_ENTRY_TYPE
{"UNSUPPORTED_ENTRY_TYPE", ERR_LIB_CT, CT_R_UNSUPPORTED_ENTRY_TYPE},
#else
{"UNSUPPORTED_ENTRY_TYPE", 50, 102},
#endif
#ifdef CT_R_UNSUPPORTED_VERSION
{"UNSUPPORTED_VERSION", ERR_LIB_CT, CT_R_UNSUPPORTED_VERSION},
#else
{"UNSUPPORTED_VERSION", 50, 103},
#endif
#ifdef DH_R_BAD_GENERATOR
{"BAD_GENERATOR", ERR_LIB_DH, DH_R_BAD_GENERATOR},
#else
{"BAD_GENERATOR", 5, 101},
#endif
#ifdef DH_R_BN_DECODE_ERROR
{"BN_DECODE_ERROR", ERR_LIB_DH, DH_R_BN_DECODE_ERROR},
#else
{"BN_DECODE_ERROR", 5, 109},
#endif
#ifdef DH_R_BN_ERROR
{"BN_ERROR", ERR_LIB_DH, DH_R_BN_ERROR},
#else
{"BN_ERROR", 5, 106},
#endif
#ifdef DH_R_CHECK_INVALID_J_VALUE
{"CHECK_INVALID_J_VALUE", ERR_LIB_DH, DH_R_CHECK_INVALID_J_VALUE},
#else
{"CHECK_INVALID_J_VALUE", 5, 115},
#endif
#ifdef DH_R_CHECK_INVALID_Q_VALUE
{"CHECK_INVALID_Q_VALUE", ERR_LIB_DH, DH_R_CHECK_INVALID_Q_VALUE},
#else
{"CHECK_INVALID_Q_VALUE", 5, 116},
#endif
#ifdef DH_R_CHECK_PUBKEY_INVALID
{"CHECK_PUBKEY_INVALID", ERR_LIB_DH, DH_R_CHECK_PUBKEY_INVALID},
#else
{"CHECK_PUBKEY_INVALID", 5, 122},
#endif
#ifdef DH_R_CHECK_PUBKEY_TOO_LARGE
{"CHECK_PUBKEY_TOO_LARGE", ERR_LIB_DH, DH_R_CHECK_PUBKEY_TOO_LARGE},
#else
{"CHECK_PUBKEY_TOO_LARGE", 5, 123},
#endif
#ifdef DH_R_CHECK_PUBKEY_TOO_SMALL
{"CHECK_PUBKEY_TOO_SMALL", ERR_LIB_DH, DH_R_CHECK_PUBKEY_TOO_SMALL},
#else
{"CHECK_PUBKEY_TOO_SMALL", 5, 124},
#endif
#ifdef DH_R_CHECK_P_NOT_PRIME
{"CHECK_P_NOT_PRIME", ERR_LIB_DH, DH_R_CHECK_P_NOT_PRIME},
#else
{"CHECK_P_NOT_PRIME", 5, 117},
#endif
#ifdef DH_R_CHECK_P_NOT_SAFE_PRIME
{"CHECK_P_NOT_SAFE_PRIME", ERR_LIB_DH, DH_R_CHECK_P_NOT_SAFE_PRIME},
#else
{"CHECK_P_NOT_SAFE_PRIME", 5, 118},
#endif
#ifdef DH_R_CHECK_Q_NOT_PRIME
{"CHECK_Q_NOT_PRIME", ERR_LIB_DH, DH_R_CHECK_Q_NOT_PRIME},
#else
{"CHECK_Q_NOT_PRIME", 5, 119},
#endif
#ifdef DH_R_DECODE_ERROR
{"DECODE_ERROR", ERR_LIB_DH, DH_R_DECODE_ERROR},
#else
{"DECODE_ERROR", 5, 104},
#endif
#ifdef DH_R_INVALID_PARAMETER_NAME
{"INVALID_PARAMETER_NAME", ERR_LIB_DH, DH_R_INVALID_PARAMETER_NAME},
#else
{"INVALID_PARAMETER_NAME", 5, 110},
#endif
#ifdef DH_R_INVALID_PARAMETER_NID
{"INVALID_PARAMETER_NID", ERR_LIB_DH, DH_R_INVALID_PARAMETER_NID},
#else
{"INVALID_PARAMETER_NID", 5, 114},
#endif
#ifdef DH_R_INVALID_PUBKEY
{"INVALID_PUBKEY", ERR_LIB_DH, DH_R_INVALID_PUBKEY},
#else
{"INVALID_PUBKEY", 5, 102},
#endif
#ifdef DH_R_KDF_PARAMETER_ERROR
{"KDF_PARAMETER_ERROR", ERR_LIB_DH, DH_R_KDF_PARAMETER_ERROR},
#else
{"KDF_PARAMETER_ERROR", 5, 112},
#endif
#ifdef DH_R_KEYS_NOT_SET
{"KEYS_NOT_SET", ERR_LIB_DH, DH_R_KEYS_NOT_SET},
#else
{"KEYS_NOT_SET", 5, 108},
#endif
#ifdef DH_R_MISSING_PUBKEY
{"MISSING_PUBKEY", ERR_LIB_DH, DH_R_MISSING_PUBKEY},
#else
{"MISSING_PUBKEY", 5, 125},
#endif
#ifdef DH_R_MODULUS_TOO_LARGE
{"MODULUS_TOO_LARGE", ERR_LIB_DH, DH_R_MODULUS_TOO_LARGE},
#else
{"MODULUS_TOO_LARGE", 5, 103},
#endif
#ifdef DH_R_NOT_SUITABLE_GENERATOR
{"NOT_SUITABLE_GENERATOR", ERR_LIB_DH, DH_R_NOT_SUITABLE_GENERATOR},
#else
{"NOT_SUITABLE_GENERATOR", 5, 120},
#endif
#ifdef DH_R_NO_PARAMETERS_SET
{"NO_PARAMETERS_SET", ERR_LIB_DH, DH_R_NO_PARAMETERS_SET},
#else
{"NO_PARAMETERS_SET", 5, 107},
#endif
#ifdef DH_R_NO_PRIVATE_VALUE
{"NO_PRIVATE_VALUE", ERR_LIB_DH, DH_R_NO_PRIVATE_VALUE},
#else
{"NO_PRIVATE_VALUE", 5, 100},
#endif
#ifdef DH_R_PARAMETER_ENCODING_ERROR
{"PARAMETER_ENCODING_ERROR", ERR_LIB_DH, DH_R_PARAMETER_ENCODING_ERROR},
#else
{"PARAMETER_ENCODING_ERROR", 5, 105},
#endif
#ifdef DH_R_PEER_KEY_ERROR
{"PEER_KEY_ERROR", ERR_LIB_DH, DH_R_PEER_KEY_ERROR},
#else
{"PEER_KEY_ERROR", 5, 111},
#endif
#ifdef DH_R_SHARED_INFO_ERROR
{"SHARED_INFO_ERROR", ERR_LIB_DH, DH_R_SHARED_INFO_ERROR},
#else
{"SHARED_INFO_ERROR", 5, 113},
#endif
#ifdef DH_R_UNABLE_TO_CHECK_GENERATOR
{"UNABLE_TO_CHECK_GENERATOR", ERR_LIB_DH, DH_R_UNABLE_TO_CHECK_GENERATOR},
#else
{"UNABLE_TO_CHECK_GENERATOR", 5, 121},
#endif
#ifdef DSA_R_BAD_Q_VALUE
{"BAD_Q_VALUE", ERR_LIB_DSA, DSA_R_BAD_Q_VALUE},
#else
{"BAD_Q_VALUE", 10, 102},
#endif
#ifdef DSA_R_BN_DECODE_ERROR
{"BN_DECODE_ERROR", ERR_LIB_DSA, DSA_R_BN_DECODE_ERROR},
#else
{"BN_DECODE_ERROR", 10, 108},
#endif
#ifdef DSA_R_BN_ERROR
{"BN_ERROR", ERR_LIB_DSA, DSA_R_BN_ERROR},
#else
{"BN_ERROR", 10, 109},
#endif
#ifdef DSA_R_DECODE_ERROR
{"DECODE_ERROR", ERR_LIB_DSA, DSA_R_DECODE_ERROR},
#else
{"DECODE_ERROR", 10, 104},
#endif
#ifdef DSA_R_INVALID_DIGEST_TYPE
{"INVALID_DIGEST_TYPE", ERR_LIB_DSA, DSA_R_INVALID_DIGEST_TYPE},
#else
{"INVALID_DIGEST_TYPE", 10, 106},
#endif
#ifdef DSA_R_INVALID_PARAMETERS
{"INVALID_PARAMETERS", ERR_LIB_DSA, DSA_R_INVALID_PARAMETERS},
#else
{"INVALID_PARAMETERS", 10, 112},
#endif
#ifdef DSA_R_MISSING_PARAMETERS
{"MISSING_PARAMETERS", ERR_LIB_DSA, DSA_R_MISSING_PARAMETERS},
#else
{"MISSING_PARAMETERS", 10, 101},
#endif
#ifdef DSA_R_MISSING_PRIVATE_KEY
{"MISSING_PRIVATE_KEY", ERR_LIB_DSA, DSA_R_MISSING_PRIVATE_KEY},
#else
{"MISSING_PRIVATE_KEY", 10, 111},
#endif
#ifdef DSA_R_MODULUS_TOO_LARGE
{"MODULUS_TOO_LARGE", ERR_LIB_DSA, DSA_R_MODULUS_TOO_LARGE},
#else
{"MODULUS_TOO_LARGE", 10, 103},
#endif
#ifdef DSA_R_NO_PARAMETERS_SET
{"NO_PARAMETERS_SET", ERR_LIB_DSA, DSA_R_NO_PARAMETERS_SET},
#else
{"NO_PARAMETERS_SET", 10, 107},
#endif
#ifdef DSA_R_PARAMETER_ENCODING_ERROR
{"PARAMETER_ENCODING_ERROR", ERR_LIB_DSA, DSA_R_PARAMETER_ENCODING_ERROR},
#else
{"PARAMETER_ENCODING_ERROR", 10, 105},
#endif
#ifdef DSA_R_Q_NOT_PRIME
{"Q_NOT_PRIME", ERR_LIB_DSA, DSA_R_Q_NOT_PRIME},
#else
{"Q_NOT_PRIME", 10, 113},
#endif
#ifdef DSA_R_SEED_LEN_SMALL
{"SEED_LEN_SMALL", ERR_LIB_DSA, DSA_R_SEED_LEN_SMALL},
#else
{"SEED_LEN_SMALL", 10, 110},
#endif
#ifdef DSO_R_CTRL_FAILED
{"CTRL_FAILED", ERR_LIB_DSO, DSO_R_CTRL_FAILED},
#else
{"CTRL_FAILED", 37, 100},
#endif
#ifdef DSO_R_DSO_ALREADY_LOADED
{"DSO_ALREADY_LOADED", ERR_LIB_DSO, DSO_R_DSO_ALREADY_LOADED},
#else
{"DSO_ALREADY_LOADED", 37, 110},
#endif
#ifdef DSO_R_EMPTY_FILE_STRUCTURE
{"EMPTY_FILE_STRUCTURE", ERR_LIB_DSO, DSO_R_EMPTY_FILE_STRUCTURE},
#else
{"EMPTY_FILE_STRUCTURE", 37, 113},
#endif
#ifdef DSO_R_FAILURE
{"FAILURE", ERR_LIB_DSO, DSO_R_FAILURE},
#else
{"FAILURE", 37, 114},
#endif
#ifdef DSO_R_FILENAME_TOO_BIG
{"FILENAME_TOO_BIG", ERR_LIB_DSO, DSO_R_FILENAME_TOO_BIG},
#else
{"FILENAME_TOO_BIG", 37, 101},
#endif
#ifdef DSO_R_FINISH_FAILED
{"FINISH_FAILED", ERR_LIB_DSO, DSO_R_FINISH_FAILED},
#else
{"FINISH_FAILED", 37, 102},
#endif
#ifdef DSO_R_INCORRECT_FILE_SYNTAX
{"INCORRECT_FILE_SYNTAX", ERR_LIB_DSO, DSO_R_INCORRECT_FILE_SYNTAX},
#else
{"INCORRECT_FILE_SYNTAX", 37, 115},
#endif
#ifdef DSO_R_LOAD_FAILED
{"LOAD_FAILED", ERR_LIB_DSO, DSO_R_LOAD_FAILED},
#else
{"LOAD_FAILED", 37, 103},
#endif
#ifdef DSO_R_NAME_TRANSLATION_FAILED
{"NAME_TRANSLATION_FAILED", ERR_LIB_DSO, DSO_R_NAME_TRANSLATION_FAILED},
#else
{"NAME_TRANSLATION_FAILED", 37, 109},
#endif
#ifdef DSO_R_NO_FILENAME
{"NO_FILENAME", ERR_LIB_DSO, DSO_R_NO_FILENAME},
#else
{"NO_FILENAME", 37, 111},
#endif
#ifdef DSO_R_NULL_HANDLE
{"NULL_HANDLE", ERR_LIB_DSO, DSO_R_NULL_HANDLE},
#else
{"NULL_HANDLE", 37, 104},
#endif
#ifdef DSO_R_SET_FILENAME_FAILED
{"SET_FILENAME_FAILED", ERR_LIB_DSO, DSO_R_SET_FILENAME_FAILED},
#else
{"SET_FILENAME_FAILED", 37, 112},
#endif
#ifdef DSO_R_STACK_ERROR
{"STACK_ERROR", ERR_LIB_DSO, DSO_R_STACK_ERROR},
#else
{"STACK_ERROR", 37, 105},
#endif
#ifdef DSO_R_SYM_FAILURE
{"SYM_FAILURE", ERR_LIB_DSO, DSO_R_SYM_FAILURE},
#else
{"SYM_FAILURE", 37, 106},
#endif
#ifdef DSO_R_UNLOAD_FAILED
{"UNLOAD_FAILED", ERR_LIB_DSO, DSO_R_UNLOAD_FAILED},
#else
{"UNLOAD_FAILED", 37, 107},
#endif
#ifdef DSO_R_UNSUPPORTED
{"UNSUPPORTED", ERR_LIB_DSO, DSO_R_UNSUPPORTED},
#else
{"UNSUPPORTED", 37, 108},
#endif
#ifdef EC_R_ASN1_ERROR
{"ASN1_ERROR", ERR_LIB_EC, EC_R_ASN1_ERROR},
#else
{"ASN1_ERROR", 16, 115},
#endif
#ifdef EC_R_BAD_SIGNATURE
{"BAD_SIGNATURE", ERR_LIB_EC, EC_R_BAD_SIGNATURE},
#else
{"BAD_SIGNATURE", 16, 156},
#endif
#ifdef EC_R_BIGNUM_OUT_OF_RANGE
{"BIGNUM_OUT_OF_RANGE", ERR_LIB_EC, EC_R_BIGNUM_OUT_OF_RANGE},
#else
{"BIGNUM_OUT_OF_RANGE", 16, 144},
#endif
#ifdef EC_R_BUFFER_TOO_SMALL
{"BUFFER_TOO_SMALL", ERR_LIB_EC, EC_R_BUFFER_TOO_SMALL},
#else
{"BUFFER_TOO_SMALL", 16, 100},
#endif
#ifdef EC_R_CANNOT_INVERT
{"CANNOT_INVERT", ERR_LIB_EC, EC_R_CANNOT_INVERT},
#else
{"CANNOT_INVERT", 16, 165},
#endif
#ifdef EC_R_COORDINATES_OUT_OF_RANGE
{"COORDINATES_OUT_OF_RANGE", ERR_LIB_EC, EC_R_COORDINATES_OUT_OF_RANGE},
#else
{"COORDINATES_OUT_OF_RANGE", 16, 146},
#endif
#ifdef EC_R_CURVE_DOES_NOT_SUPPORT_ECDH
{"CURVE_DOES_NOT_SUPPORT_ECDH", ERR_LIB_EC, EC_R_CURVE_DOES_NOT_SUPPORT_ECDH},
#else
{"CURVE_DOES_NOT_SUPPORT_ECDH", 16, 160},
#endif
#ifdef EC_R_CURVE_DOES_NOT_SUPPORT_SIGNING
{"CURVE_DOES_NOT_SUPPORT_SIGNING", ERR_LIB_EC, EC_R_CURVE_DOES_NOT_SUPPORT_SIGNING},
#else
{"CURVE_DOES_NOT_SUPPORT_SIGNING", 16, 159},
#endif
#ifdef EC_R_D2I_ECPKPARAMETERS_FAILURE
{"D2I_ECPKPARAMETERS_FAILURE", ERR_LIB_EC, EC_R_D2I_ECPKPARAMETERS_FAILURE},
#else
{"D2I_ECPKPARAMETERS_FAILURE", 16, 117},
#endif
#ifdef EC_R_DECODE_ERROR
{"DECODE_ERROR", ERR_LIB_EC, EC_R_DECODE_ERROR},
#else
{"DECODE_ERROR", 16, 142},
#endif
#ifdef EC_R_DISCRIMINANT_IS_ZERO
{"DISCRIMINANT_IS_ZERO", ERR_LIB_EC, EC_R_DISCRIMINANT_IS_ZERO},
#else
{"DISCRIMINANT_IS_ZERO", 16, 118},
#endif
#ifdef EC_R_EC_GROUP_NEW_BY_NAME_FAILURE
{"EC_GROUP_NEW_BY_NAME_FAILURE", ERR_LIB_EC, EC_R_EC_GROUP_NEW_BY_NAME_FAILURE},
#else
{"EC_GROUP_NEW_BY_NAME_FAILURE", 16, 119},
#endif
#ifdef EC_R_FIELD_TOO_LARGE
{"FIELD_TOO_LARGE", ERR_LIB_EC, EC_R_FIELD_TOO_LARGE},
#else
{"FIELD_TOO_LARGE", 16, 143},
#endif
#ifdef EC_R_GF2M_NOT_SUPPORTED
{"GF2M_NOT_SUPPORTED", ERR_LIB_EC, EC_R_GF2M_NOT_SUPPORTED},
#else
{"GF2M_NOT_SUPPORTED", 16, 147},
#endif
#ifdef EC_R_GROUP2PKPARAMETERS_FAILURE
{"GROUP2PKPARAMETERS_FAILURE", ERR_LIB_EC, EC_R_GROUP2PKPARAMETERS_FAILURE},
#else
{"GROUP2PKPARAMETERS_FAILURE", 16, 120},
#endif
#ifdef EC_R_I2D_ECPKPARAMETERS_FAILURE
{"I2D_ECPKPARAMETERS_FAILURE", ERR_LIB_EC, EC_R_I2D_ECPKPARAMETERS_FAILURE},
#else
{"I2D_ECPKPARAMETERS_FAILURE", 16, 121},
#endif
#ifdef EC_R_INCOMPATIBLE_OBJECTS
{"INCOMPATIBLE_OBJECTS", ERR_LIB_EC, EC_R_INCOMPATIBLE_OBJECTS},
#else
{"INCOMPATIBLE_OBJECTS", 16, 101},
#endif
#ifdef EC_R_INVALID_ARGUMENT
{"INVALID_ARGUMENT", ERR_LIB_EC, EC_R_INVALID_ARGUMENT},
#else
{"INVALID_ARGUMENT", 16, 112},
#endif
#ifdef EC_R_INVALID_COMPRESSED_POINT
{"INVALID_COMPRESSED_POINT", ERR_LIB_EC, EC_R_INVALID_COMPRESSED_POINT},
#else
{"INVALID_COMPRESSED_POINT", 16, 110},
#endif
#ifdef EC_R_INVALID_COMPRESSION_BIT
{"INVALID_COMPRESSION_BIT", ERR_LIB_EC, EC_R_INVALID_COMPRESSION_BIT},
#else
{"INVALID_COMPRESSION_BIT", 16, 109},
#endif
#ifdef EC_R_INVALID_CURVE
{"INVALID_CURVE", ERR_LIB_EC, EC_R_INVALID_CURVE},
#else
{"INVALID_CURVE", 16, 141},
#endif
#ifdef EC_R_INVALID_DIGEST
{"INVALID_DIGEST", ERR_LIB_EC, EC_R_INVALID_DIGEST},
#else
{"INVALID_DIGEST", 16, 151},
#endif
#ifdef EC_R_INVALID_DIGEST_TYPE
{"INVALID_DIGEST_TYPE", ERR_LIB_EC, EC_R_INVALID_DIGEST_TYPE},
#else
{"INVALID_DIGEST_TYPE", 16, 138},
#endif
#ifdef EC_R_INVALID_ENCODING
{"INVALID_ENCODING", ERR_LIB_EC, EC_R_INVALID_ENCODING},
#else
{"INVALID_ENCODING", 16, 102},
#endif
#ifdef EC_R_INVALID_FIELD
{"INVALID_FIELD", ERR_LIB_EC, EC_R_INVALID_FIELD},
#else
{"INVALID_FIELD", 16, 103},
#endif
#ifdef EC_R_INVALID_FORM
{"INVALID_FORM", ERR_LIB_EC, EC_R_INVALID_FORM},
#else
{"INVALID_FORM", 16, 104},
#endif
#ifdef EC_R_INVALID_GROUP_ORDER
{"INVALID_GROUP_ORDER", ERR_LIB_EC, EC_R_INVALID_GROUP_ORDER},
#else
{"INVALID_GROUP_ORDER", 16, 122},
#endif
#ifdef EC_R_INVALID_KEY
{"INVALID_KEY", ERR_LIB_EC, EC_R_INVALID_KEY},
#else
{"INVALID_KEY", 16, 116},
#endif
#ifdef EC_R_INVALID_OUTPUT_LENGTH
{"INVALID_OUTPUT_LENGTH", ERR_LIB_EC, EC_R_INVALID_OUTPUT_LENGTH},
#else
{"INVALID_OUTPUT_LENGTH", 16, 161},
#endif
#ifdef EC_R_INVALID_PEER_KEY
{"INVALID_PEER_KEY", ERR_LIB_EC, EC_R_INVALID_PEER_KEY},
#else
{"INVALID_PEER_KEY", 16, 133},
#endif
#ifdef EC_R_INVALID_PENTANOMIAL_BASIS
{"INVALID_PENTANOMIAL_BASIS", ERR_LIB_EC, EC_R_INVALID_PENTANOMIAL_BASIS},
#else
{"INVALID_PENTANOMIAL_BASIS", 16, 132},
#endif
#ifdef EC_R_INVALID_PRIVATE_KEY
{"INVALID_PRIVATE_KEY", ERR_LIB_EC, EC_R_INVALID_PRIVATE_KEY},
#else
{"INVALID_PRIVATE_KEY", 16, 123},
#endif
#ifdef EC_R_INVALID_TRINOMIAL_BASIS
{"INVALID_TRINOMIAL_BASIS", ERR_LIB_EC, EC_R_INVALID_TRINOMIAL_BASIS},
#else
{"INVALID_TRINOMIAL_BASIS", 16, 137},
#endif
#ifdef EC_R_KDF_PARAMETER_ERROR
{"KDF_PARAMETER_ERROR", ERR_LIB_EC, EC_R_KDF_PARAMETER_ERROR},
#else
{"KDF_PARAMETER_ERROR", 16, 148},
#endif
#ifdef EC_R_KEYS_NOT_SET
{"KEYS_NOT_SET", ERR_LIB_EC, EC_R_KEYS_NOT_SET},
#else
{"KEYS_NOT_SET", 16, 140},
#endif
#ifdef EC_R_LADDER_POST_FAILURE
{"LADDER_POST_FAILURE", ERR_LIB_EC, EC_R_LADDER_POST_FAILURE},
#else
{"LADDER_POST_FAILURE", 16, 136},
#endif
#ifdef EC_R_LADDER_PRE_FAILURE
{"LADDER_PRE_FAILURE", ERR_LIB_EC, EC_R_LADDER_PRE_FAILURE},
#else
{"LADDER_PRE_FAILURE", 16, 153},
#endif
#ifdef EC_R_LADDER_STEP_FAILURE
{"LADDER_STEP_FAILURE", ERR_LIB_EC, EC_R_LADDER_STEP_FAILURE},
#else
{"LADDER_STEP_FAILURE", 16, 162},
#endif
#ifdef EC_R_MISSING_OID
{"MISSING_OID", ERR_LIB_EC, EC_R_MISSING_OID},
#else
{"MISSING_OID", 16, 167},
#endif
#ifdef EC_R_MISSING_PARAMETERS
{"MISSING_PARAMETERS", ERR_LIB_EC, EC_R_MISSING_PARAMETERS},
#else
{"MISSING_PARAMETERS", 16, 124},
#endif
#ifdef EC_R_MISSING_PRIVATE_KEY
{"MISSING_PRIVATE_KEY", ERR_LIB_EC, EC_R_MISSING_PRIVATE_KEY},
#else
{"MISSING_PRIVATE_KEY", 16, 125},
#endif
#ifdef EC_R_NEED_NEW_SETUP_VALUES
{"NEED_NEW_SETUP_VALUES", ERR_LIB_EC, EC_R_NEED_NEW_SETUP_VALUES},
#else
{"NEED_NEW_SETUP_VALUES", 16, 157},
#endif
#ifdef EC_R_NOT_A_NIST_PRIME
{"NOT_A_NIST_PRIME", ERR_LIB_EC, EC_R_NOT_A_NIST_PRIME},
#else
{"NOT_A_NIST_PRIME", 16, 135},
#endif
#ifdef EC_R_NOT_IMPLEMENTED
{"NOT_IMPLEMENTED", ERR_LIB_EC, EC_R_NOT_IMPLEMENTED},
#else
{"NOT_IMPLEMENTED", 16, 126},
#endif
#ifdef EC_R_NOT_INITIALIZED
{"NOT_INITIALIZED", ERR_LIB_EC, EC_R_NOT_INITIALIZED},
#else
{"NOT_INITIALIZED", 16, 111},
#endif
#ifdef EC_R_NO_PARAMETERS_SET
{"NO_PARAMETERS_SET", ERR_LIB_EC, EC_R_NO_PARAMETERS_SET},
#else
{"NO_PARAMETERS_SET", 16, 139},
#endif
#ifdef EC_R_NO_PRIVATE_VALUE
{"NO_PRIVATE_VALUE", ERR_LIB_EC, EC_R_NO_PRIVATE_VALUE},
#else
{"NO_PRIVATE_VALUE", 16, 154},
#endif
#ifdef EC_R_OPERATION_NOT_SUPPORTED
{"OPERATION_NOT_SUPPORTED", ERR_LIB_EC, EC_R_OPERATION_NOT_SUPPORTED},
#else
{"OPERATION_NOT_SUPPORTED", 16, 152},
#endif
#ifdef EC_R_PASSED_NULL_PARAMETER
{"PASSED_NULL_PARAMETER", ERR_LIB_EC, EC_R_PASSED_NULL_PARAMETER},
#else
{"PASSED_NULL_PARAMETER", 16, 134},
#endif
#ifdef EC_R_PEER_KEY_ERROR
{"PEER_KEY_ERROR", ERR_LIB_EC, EC_R_PEER_KEY_ERROR},
#else
{"PEER_KEY_ERROR", 16, 149},
#endif
#ifdef EC_R_PKPARAMETERS2GROUP_FAILURE
{"PKPARAMETERS2GROUP_FAILURE", ERR_LIB_EC, EC_R_PKPARAMETERS2GROUP_FAILURE},
#else
{"PKPARAMETERS2GROUP_FAILURE", 16, 127},
#endif
#ifdef EC_R_POINT_ARITHMETIC_FAILURE
{"POINT_ARITHMETIC_FAILURE", ERR_LIB_EC, EC_R_POINT_ARITHMETIC_FAILURE},
#else
{"POINT_ARITHMETIC_FAILURE", 16, 155},
#endif
#ifdef EC_R_POINT_AT_INFINITY
{"POINT_AT_INFINITY", ERR_LIB_EC, EC_R_POINT_AT_INFINITY},
#else
{"POINT_AT_INFINITY", 16, 106},
#endif
#ifdef EC_R_POINT_COORDINATES_BLIND_FAILURE
{"POINT_COORDINATES_BLIND_FAILURE", ERR_LIB_EC, EC_R_POINT_COORDINATES_BLIND_FAILURE},
#else
{"POINT_COORDINATES_BLIND_FAILURE", 16, 163},
#endif
#ifdef EC_R_POINT_IS_NOT_ON_CURVE
{"POINT_IS_NOT_ON_CURVE", ERR_LIB_EC, EC_R_POINT_IS_NOT_ON_CURVE},
#else
{"POINT_IS_NOT_ON_CURVE", 16, 107},
#endif
#ifdef EC_R_RANDOM_NUMBER_GENERATION_FAILED
{"RANDOM_NUMBER_GENERATION_FAILED", ERR_LIB_EC, EC_R_RANDOM_NUMBER_GENERATION_FAILED},
#else
{"RANDOM_NUMBER_GENERATION_FAILED", 16, 158},
#endif
#ifdef EC_R_SHARED_INFO_ERROR
{"SHARED_INFO_ERROR", ERR_LIB_EC, EC_R_SHARED_INFO_ERROR},
#else
{"SHARED_INFO_ERROR", 16, 150},
#endif
#ifdef EC_R_SLOT_FULL
{"SLOT_FULL", ERR_LIB_EC, EC_R_SLOT_FULL},
#else
{"SLOT_FULL", 16, 108},
#endif
#ifdef EC_R_UNDEFINED_GENERATOR
{"UNDEFINED_GENERATOR", ERR_LIB_EC, EC_R_UNDEFINED_GENERATOR},
#else
{"UNDEFINED_GENERATOR", 16, 113},
#endif
#ifdef EC_R_UNDEFINED_ORDER
{"UNDEFINED_ORDER", ERR_LIB_EC, EC_R_UNDEFINED_ORDER},
#else
{"UNDEFINED_ORDER", 16, 128},
#endif
#ifdef EC_R_UNKNOWN_COFACTOR
{"UNKNOWN_COFACTOR", ERR_LIB_EC, EC_R_UNKNOWN_COFACTOR},
#else
{"UNKNOWN_COFACTOR", 16, 164},
#endif
#ifdef EC_R_UNKNOWN_GROUP
{"UNKNOWN_GROUP", ERR_LIB_EC, EC_R_UNKNOWN_GROUP},
#else
{"UNKNOWN_GROUP", 16, 129},
#endif
#ifdef EC_R_UNKNOWN_ORDER
{"UNKNOWN_ORDER", ERR_LIB_EC, EC_R_UNKNOWN_ORDER},
#else
{"UNKNOWN_ORDER", 16, 114},
#endif
#ifdef EC_R_UNSUPPORTED_FIELD
{"UNSUPPORTED_FIELD", ERR_LIB_EC, EC_R_UNSUPPORTED_FIELD},
#else
{"UNSUPPORTED_FIELD", 16, 131},
#endif
#ifdef EC_R_WRONG_CURVE_PARAMETERS
{"WRONG_CURVE_PARAMETERS", ERR_LIB_EC, EC_R_WRONG_CURVE_PARAMETERS},
#else
{"WRONG_CURVE_PARAMETERS", 16, 145},
#endif
#ifdef EC_R_WRONG_ORDER
{"WRONG_ORDER", ERR_LIB_EC, EC_R_WRONG_ORDER},
#else
{"WRONG_ORDER", 16, 130},
#endif
#ifdef ENGINE_R_ALREADY_LOADED
{"ALREADY_LOADED", ERR_LIB_ENGINE, ENGINE_R_ALREADY_LOADED},
#else
{"ALREADY_LOADED", 38, 100},
#endif
#ifdef ENGINE_R_ARGUMENT_IS_NOT_A_NUMBER
{"ARGUMENT_IS_NOT_A_NUMBER", ERR_LIB_ENGINE, ENGINE_R_ARGUMENT_IS_NOT_A_NUMBER},
#else
{"ARGUMENT_IS_NOT_A_NUMBER", 38, 133},
#endif
#ifdef ENGINE_R_CMD_NOT_EXECUTABLE
{"CMD_NOT_EXECUTABLE", ERR_LIB_ENGINE, ENGINE_R_CMD_NOT_EXECUTABLE},
#else
{"CMD_NOT_EXECUTABLE", 38, 134},
#endif
#ifdef ENGINE_R_COMMAND_TAKES_INPUT
{"COMMAND_TAKES_INPUT", ERR_LIB_ENGINE, ENGINE_R_COMMAND_TAKES_INPUT},
#else
{"COMMAND_TAKES_INPUT", 38, 135},
#endif
#ifdef ENGINE_R_COMMAND_TAKES_NO_INPUT
{"COMMAND_TAKES_NO_INPUT", ERR_LIB_ENGINE, ENGINE_R_COMMAND_TAKES_NO_INPUT},
#else
{"COMMAND_TAKES_NO_INPUT", 38, 136},
#endif
#ifdef ENGINE_R_CONFLICTING_ENGINE_ID
{"CONFLICTING_ENGINE_ID", ERR_LIB_ENGINE, ENGINE_R_CONFLICTING_ENGINE_ID},
#else
{"CONFLICTING_ENGINE_ID", 38, 103},
#endif
#ifdef ENGINE_R_CTRL_COMMAND_NOT_IMPLEMENTED
{"CTRL_COMMAND_NOT_IMPLEMENTED", ERR_LIB_ENGINE, ENGINE_R_CTRL_COMMAND_NOT_IMPLEMENTED},
#else
{"CTRL_COMMAND_NOT_IMPLEMENTED", 38, 119},
#endif
#ifdef ENGINE_R_DSO_FAILURE
{"DSO_FAILURE", ERR_LIB_ENGINE, ENGINE_R_DSO_FAILURE},
#else
{"DSO_FAILURE", 38, 104},
#endif
#ifdef ENGINE_R_DSO_NOT_FOUND
{"DSO_NOT_FOUND", ERR_LIB_ENGINE, ENGINE_R_DSO_NOT_FOUND},
#else
{"DSO_NOT_FOUND", 38, 132},
#endif
#ifdef ENGINE_R_ENGINES_SECTION_ERROR
{"ENGINES_SECTION_ERROR", ERR_LIB_ENGINE, ENGINE_R_ENGINES_SECTION_ERROR},
#else
{"ENGINES_SECTION_ERROR", 38, 148},
#endif
#ifdef ENGINE_R_ENGINE_CONFIGURATION_ERROR
{"ENGINE_CONFIGURATION_ERROR", ERR_LIB_ENGINE, ENGINE_R_ENGINE_CONFIGURATION_ERROR},
#else
{"ENGINE_CONFIGURATION_ERROR", 38, 102},
#endif
#ifdef ENGINE_R_ENGINE_IS_NOT_IN_LIST
{"ENGINE_IS_NOT_IN_LIST", ERR_LIB_ENGINE, ENGINE_R_ENGINE_IS_NOT_IN_LIST},
#else
{"ENGINE_IS_NOT_IN_LIST", 38, 105},
#endif
#ifdef ENGINE_R_ENGINE_SECTION_ERROR
{"ENGINE_SECTION_ERROR", ERR_LIB_ENGINE, ENGINE_R_ENGINE_SECTION_ERROR},
#else
{"ENGINE_SECTION_ERROR", 38, 149},
#endif
#ifdef ENGINE_R_FAILED_LOADING_PRIVATE_KEY
{"FAILED_LOADING_PRIVATE_KEY", ERR_LIB_ENGINE, ENGINE_R_FAILED_LOADING_PRIVATE_KEY},
#else
{"FAILED_LOADING_PRIVATE_KEY", 38, 128},
#endif
#ifdef ENGINE_R_FAILED_LOADING_PUBLIC_KEY
{"FAILED_LOADING_PUBLIC_KEY", ERR_LIB_ENGINE, ENGINE_R_FAILED_LOADING_PUBLIC_KEY},
#else
{"FAILED_LOADING_PUBLIC_KEY", 38, 129},
#endif
#ifdef ENGINE_R_FINISH_FAILED
{"FINISH_FAILED", ERR_LIB_ENGINE, ENGINE_R_FINISH_FAILED},
#else
{"FINISH_FAILED", 38, 106},
#endif
#ifdef ENGINE_R_ID_OR_NAME_MISSING
{"ID_OR_NAME_MISSING", ERR_LIB_ENGINE, ENGINE_R_ID_OR_NAME_MISSING},
#else
{"ID_OR_NAME_MISSING", 38, 108},
#endif
#ifdef ENGINE_R_INIT_FAILED
{"INIT_FAILED", ERR_LIB_ENGINE, ENGINE_R_INIT_FAILED},
#else
{"INIT_FAILED", 38, 109},
#endif
#ifdef ENGINE_R_INTERNAL_LIST_ERROR
{"INTERNAL_LIST_ERROR", ERR_LIB_ENGINE, ENGINE_R_INTERNAL_LIST_ERROR},
#else
{"INTERNAL_LIST_ERROR", 38, 110},
#endif
#ifdef ENGINE_R_INVALID_ARGUMENT
{"INVALID_ARGUMENT", ERR_LIB_ENGINE, ENGINE_R_INVALID_ARGUMENT},
#else
{"INVALID_ARGUMENT", 38, 143},
#endif
#ifdef ENGINE_R_INVALID_CMD_NAME
{"INVALID_CMD_NAME", ERR_LIB_ENGINE, ENGINE_R_INVALID_CMD_NAME},
#else
{"INVALID_CMD_NAME", 38, 137},
#endif
#ifdef ENGINE_R_INVALID_CMD_NUMBER
{"INVALID_CMD_NUMBER", ERR_LIB_ENGINE, ENGINE_R_INVALID_CMD_NUMBER},
#else
{"INVALID_CMD_NUMBER", 38, 138},
#endif
#ifdef ENGINE_R_INVALID_INIT_VALUE
{"INVALID_INIT_VALUE", ERR_LIB_ENGINE, ENGINE_R_INVALID_INIT_VALUE},
#else
{"INVALID_INIT_VALUE", 38, 151},
#endif
#ifdef ENGINE_R_INVALID_STRING
{"INVALID_STRING", ERR_LIB_ENGINE, ENGINE_R_INVALID_STRING},
#else
{"INVALID_STRING", 38, 150},
#endif
#ifdef ENGINE_R_NOT_INITIALISED
{"NOT_INITIALISED", ERR_LIB_ENGINE, ENGINE_R_NOT_INITIALISED},
#else
{"NOT_INITIALISED", 38, 117},
#endif
#ifdef ENGINE_R_NOT_LOADED
{"NOT_LOADED", ERR_LIB_ENGINE, ENGINE_R_NOT_LOADED},
#else
{"NOT_LOADED", 38, 112},
#endif
#ifdef ENGINE_R_NO_CONTROL_FUNCTION
{"NO_CONTROL_FUNCTION", ERR_LIB_ENGINE, ENGINE_R_NO_CONTROL_FUNCTION},
#else
{"NO_CONTROL_FUNCTION", 38, 120},
#endif
#ifdef ENGINE_R_NO_INDEX
{"NO_INDEX", ERR_LIB_ENGINE, ENGINE_R_NO_INDEX},
#else
{"NO_INDEX", 38, 144},
#endif
#ifdef ENGINE_R_NO_LOAD_FUNCTION
{"NO_LOAD_FUNCTION", ERR_LIB_ENGINE, ENGINE_R_NO_LOAD_FUNCTION},
#else
{"NO_LOAD_FUNCTION", 38, 125},
#endif
#ifdef ENGINE_R_NO_REFERENCE
{"NO_REFERENCE", ERR_LIB_ENGINE, ENGINE_R_NO_REFERENCE},
#else
{"NO_REFERENCE", 38, 130},
#endif
#ifdef ENGINE_R_NO_SUCH_ENGINE
{"NO_SUCH_ENGINE", ERR_LIB_ENGINE, ENGINE_R_NO_SUCH_ENGINE},
#else
{"NO_SUCH_ENGINE", 38, 116},
#endif
#ifdef ENGINE_R_UNIMPLEMENTED_CIPHER
{"UNIMPLEMENTED_CIPHER", ERR_LIB_ENGINE, ENGINE_R_UNIMPLEMENTED_CIPHER},
#else
{"UNIMPLEMENTED_CIPHER", 38, 146},
#endif
#ifdef ENGINE_R_UNIMPLEMENTED_DIGEST
{"UNIMPLEMENTED_DIGEST", ERR_LIB_ENGINE, ENGINE_R_UNIMPLEMENTED_DIGEST},
#else
{"UNIMPLEMENTED_DIGEST", 38, 147},
#endif
#ifdef ENGINE_R_UNIMPLEMENTED_PUBLIC_KEY_METHOD
{"UNIMPLEMENTED_PUBLIC_KEY_METHOD", ERR_LIB_ENGINE, ENGINE_R_UNIMPLEMENTED_PUBLIC_KEY_METHOD},
#else
{"UNIMPLEMENTED_PUBLIC_KEY_METHOD", 38, 101},
#endif
#ifdef ENGINE_R_VERSION_INCOMPATIBILITY
{"VERSION_INCOMPATIBILITY", ERR_LIB_ENGINE, ENGINE_R_VERSION_INCOMPATIBILITY},
#else
{"VERSION_INCOMPATIBILITY", 38, 145},
#endif
#ifdef EVP_R_AES_KEY_SETUP_FAILED
{"AES_KEY_SETUP_FAILED", ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED},
#else
{"AES_KEY_SETUP_FAILED", 6, 143},
#endif
#ifdef EVP_R_ARIA_KEY_SETUP_FAILED
{"ARIA_KEY_SETUP_FAILED", ERR_LIB_EVP, EVP_R_ARIA_KEY_SETUP_FAILED},
#else
{"ARIA_KEY_SETUP_FAILED", 6, 176},
#endif
#ifdef EVP_R_BAD_DECRYPT
{"BAD_DECRYPT", ERR_LIB_EVP, EVP_R_BAD_DECRYPT},
#else
{"BAD_DECRYPT", 6, 100},
#endif
#ifdef EVP_R_BAD_KEY_LENGTH
{"BAD_KEY_LENGTH", ERR_LIB_EVP, EVP_R_BAD_KEY_LENGTH},
#else
{"BAD_KEY_LENGTH", 6, 195},
#endif
#ifdef EVP_R_BUFFER_TOO_SMALL
{"BUFFER_TOO_SMALL", ERR_LIB_EVP, EVP_R_BUFFER_TOO_SMALL},
#else
{"BUFFER_TOO_SMALL", 6, 155},
#endif
#ifdef EVP_R_CAMELLIA_KEY_SETUP_FAILED
{"CAMELLIA_KEY_SETUP_FAILED", ERR_LIB_EVP, EVP_R_CAMELLIA_KEY_SETUP_FAILED},
#else
{"CAMELLIA_KEY_SETUP_FAILED", 6, 157},
#endif
#ifdef EVP_R_CIPHER_PARAMETER_ERROR
{"CIPHER_PARAMETER_ERROR", ERR_LIB_EVP, EVP_R_CIPHER_PARAMETER_ERROR},
#else
{"CIPHER_PARAMETER_ERROR", 6, 122},
#endif
#ifdef EVP_R_COMMAND_NOT_SUPPORTED
{"COMMAND_NOT_SUPPORTED", ERR_LIB_EVP, EVP_R_COMMAND_NOT_SUPPORTED},
#else
{"COMMAND_NOT_SUPPORTED", 6, 147},
#endif
#ifdef EVP_R_COPY_ERROR
{"COPY_ERROR", ERR_LIB_EVP, EVP_R_COPY_ERROR},
#else
{"COPY_ERROR", 6, 173},
#endif
#ifdef EVP_R_CTRL_NOT_IMPLEMENTED
{"CTRL_NOT_IMPLEMENTED", ERR_LIB_EVP, EVP_R_CTRL_NOT_IMPLEMENTED},
#else
{"CTRL_NOT_IMPLEMENTED", 6, 132},
#endif
#ifdef EVP_R_CTRL_OPERATION_NOT_IMPLEMENTED
{"CTRL_OPERATION_NOT_IMPLEMENTED", ERR_LIB_EVP, EVP_R_CTRL_OPERATION_NOT_IMPLEMENTED},
#else
{"CTRL_OPERATION_NOT_IMPLEMENTED", 6, 133},
#endif
#ifdef EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH
{"DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH", ERR_LIB_EVP, EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH},
#else
{"DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH", 6, 138},
#endif
#ifdef EVP_R_DECODE_ERROR
{"DECODE_ERROR", ERR_LIB_EVP, EVP_R_DECODE_ERROR},
#else
{"DECODE_ERROR", 6, 114},
#endif
#ifdef EVP_R_DIFFERENT_KEY_TYPES
{"DIFFERENT_KEY_TYPES", ERR_LIB_EVP, EVP_R_DIFFERENT_KEY_TYPES},
#else
{"DIFFERENT_KEY_TYPES", 6, 101},
#endif
#ifdef EVP_R_DIFFERENT_PARAMETERS
{"DIFFERENT_PARAMETERS", ERR_LIB_EVP, EVP_R_DIFFERENT_PARAMETERS},
#else
{"DIFFERENT_PARAMETERS", 6, 153},
#endif
#ifdef EVP_R_ERROR_LOADING_SECTION
{"ERROR_LOADING_SECTION", ERR_LIB_EVP, EVP_R_ERROR_LOADING_SECTION},
#else
{"ERROR_LOADING_SECTION", 6, 165},
#endif
#ifdef EVP_R_ERROR_SETTING_FIPS_MODE
{"ERROR_SETTING_FIPS_MODE", ERR_LIB_EVP, EVP_R_ERROR_SETTING_FIPS_MODE},
#else
{"ERROR_SETTING_FIPS_MODE", 6, 166},
#endif
#ifdef EVP_R_EXPECTING_AN_HMAC_KEY
{"EXPECTING_AN_HMAC_KEY", ERR_LIB_EVP, EVP_R_EXPECTING_AN_HMAC_KEY},
#else
{"EXPECTING_AN_HMAC_KEY", 6, 174},
#endif
#ifdef EVP_R_EXPECTING_AN_RSA_KEY
{"EXPECTING_AN_RSA_KEY", ERR_LIB_EVP, EVP_R_EXPECTING_AN_RSA_KEY},
#else
{"EXPECTING_AN_RSA_KEY", 6, 127},
#endif
#ifdef EVP_R_EXPECTING_A_DH_KEY
{"EXPECTING_A_DH_KEY", ERR_LIB_EVP, EVP_R_EXPECTING_A_DH_KEY},
#else
{"EXPECTING_A_DH_KEY", 6, 128},
#endif
#ifdef EVP_R_EXPECTING_A_DSA_KEY
{"EXPECTING_A_DSA_KEY", ERR_LIB_EVP, EVP_R_EXPECTING_A_DSA_KEY},
#else
{"EXPECTING_A_DSA_KEY", 6, 129},
#endif
#ifdef EVP_R_EXPECTING_A_EC_KEY
{"EXPECTING_A_EC_KEY", ERR_LIB_EVP, EVP_R_EXPECTING_A_EC_KEY},
#else
{"EXPECTING_A_EC_KEY", 6, 142},
#endif
#ifdef EVP_R_EXPECTING_A_POLY1305_KEY
{"EXPECTING_A_POLY1305_KEY", ERR_LIB_EVP, EVP_R_EXPECTING_A_POLY1305_KEY},
#else
{"EXPECTING_A_POLY1305_KEY", 6, 164},
#endif
#ifdef EVP_R_EXPECTING_A_SIPHASH_KEY
{"EXPECTING_A_SIPHASH_KEY", ERR_LIB_EVP, EVP_R_EXPECTING_A_SIPHASH_KEY},
#else
{"EXPECTING_A_SIPHASH_KEY", 6, 175},
#endif
#ifdef EVP_R_FIPS_MODE_NOT_SUPPORTED
{"FIPS_MODE_NOT_SUPPORTED", ERR_LIB_EVP, EVP_R_FIPS_MODE_NOT_SUPPORTED},
#else
{"FIPS_MODE_NOT_SUPPORTED", 6, 167},
#endif
#ifdef EVP_R_GET_RAW_KEY_FAILED
{"GET_RAW_KEY_FAILED", ERR_LIB_EVP, EVP_R_GET_RAW_KEY_FAILED},
#else
{"GET_RAW_KEY_FAILED", 6, 182},
#endif
#ifdef EVP_R_ILLEGAL_SCRYPT_PARAMETERS
{"ILLEGAL_SCRYPT_PARAMETERS", ERR_LIB_EVP, EVP_R_ILLEGAL_SCRYPT_PARAMETERS},
#else
{"ILLEGAL_SCRYPT_PARAMETERS", 6, 171},
#endif
#ifdef EVP_R_INITIALIZATION_ERROR
{"INITIALIZATION_ERROR", ERR_LIB_EVP, EVP_R_INITIALIZATION_ERROR},
#else
{"INITIALIZATION_ERROR", 6, 134},
#endif
#ifdef EVP_R_INPUT_NOT_INITIALIZED
{"INPUT_NOT_INITIALIZED", ERR_LIB_EVP, EVP_R_INPUT_NOT_INITIALIZED},
#else
{"INPUT_NOT_INITIALIZED", 6, 111},
#endif
#ifdef EVP_R_INVALID_DIGEST
{"INVALID_DIGEST", ERR_LIB_EVP, EVP_R_INVALID_DIGEST},
#else
{"INVALID_DIGEST", 6, 152},
#endif
#ifdef EVP_R_INVALID_FIPS_MODE
{"INVALID_FIPS_MODE", ERR_LIB_EVP, EVP_R_INVALID_FIPS_MODE},
#else
{"INVALID_FIPS_MODE", 6, 168},
#endif
#ifdef EVP_R_INVALID_IV_LENGTH
{"INVALID_IV_LENGTH", ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH},
#else
{"INVALID_IV_LENGTH", 6, 194},
#endif
#ifdef EVP_R_INVALID_KEY
{"INVALID_KEY", ERR_LIB_EVP, EVP_R_INVALID_KEY},
#else
{"INVALID_KEY", 6, 163},
#endif
#ifdef EVP_R_INVALID_KEY_LENGTH
{"INVALID_KEY_LENGTH", ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH},
#else
{"INVALID_KEY_LENGTH", 6, 130},
#endif
#ifdef EVP_R_INVALID_OPERATION
{"INVALID_OPERATION", ERR_LIB_EVP, EVP_R_INVALID_OPERATION},
#else
{"INVALID_OPERATION", 6, 148},
#endif
#ifdef EVP_R_KEYGEN_FAILURE
{"KEYGEN_FAILURE", ERR_LIB_EVP, EVP_R_KEYGEN_FAILURE},
#else
{"KEYGEN_FAILURE", 6, 120},
#endif
#ifdef EVP_R_KEY_SETUP_FAILED
{"KEY_SETUP_FAILED", ERR_LIB_EVP, EVP_R_KEY_SETUP_FAILED},
#else
{"KEY_SETUP_FAILED", 6, 180},
#endif
#ifdef EVP_R_MEMORY_LIMIT_EXCEEDED
{"MEMORY_LIMIT_EXCEEDED", ERR_LIB_EVP, EVP_R_MEMORY_LIMIT_EXCEEDED},
#else
{"MEMORY_LIMIT_EXCEEDED", 6, 172},
#endif
#ifdef EVP_R_MESSAGE_DIGEST_IS_NULL
{"MESSAGE_DIGEST_IS_NULL", ERR_LIB_EVP, EVP_R_MESSAGE_DIGEST_IS_NULL},
#else
{"MESSAGE_DIGEST_IS_NULL", 6, 159},
#endif
#ifdef EVP_R_METHOD_NOT_SUPPORTED
{"METHOD_NOT_SUPPORTED", ERR_LIB_EVP, EVP_R_METHOD_NOT_SUPPORTED},
#else
{"METHOD_NOT_SUPPORTED", 6, 144},
#endif
#ifdef EVP_R_MISSING_PARAMETERS
{"MISSING_PARAMETERS", ERR_LIB_EVP, EVP_R_MISSING_PARAMETERS},
#else
{"MISSING_PARAMETERS", 6, 103},
#endif
#ifdef EVP_R_NOT_XOF_OR_INVALID_LENGTH
{"NOT_XOF_OR_INVALID_LENGTH", ERR_LIB_EVP, EVP_R_NOT_XOF_OR_INVALID_LENGTH},
#else
{"NOT_XOF_OR_INVALID_LENGTH", 6, 178},
#endif
#ifdef EVP_R_NO_CIPHER_SET
{"NO_CIPHER_SET", ERR_LIB_EVP, EVP_R_NO_CIPHER_SET},
#else
{"NO_CIPHER_SET", 6, 131},
#endif
#ifdef EVP_R_NO_DEFAULT_DIGEST
{"NO_DEFAULT_DIGEST", ERR_LIB_EVP, EVP_R_NO_DEFAULT_DIGEST},
#else
{"NO_DEFAULT_DIGEST", 6, 158},
#endif
#ifdef EVP_R_NO_DIGEST_SET
{"NO_DIGEST_SET", ERR_LIB_EVP, EVP_R_NO_DIGEST_SET},
#else
{"NO_DIGEST_SET", 6, 139},
#endif
#ifdef EVP_R_NO_KEY_SET
{"NO_KEY_SET", ERR_LIB_EVP, EVP_R_NO_KEY_SET},
#else
{"NO_KEY_SET", 6, 154},
#endif
#ifdef EVP_R_NO_OPERATION_SET
{"NO_OPERATION_SET", ERR_LIB_EVP, EVP_R_NO_OPERATION_SET},
#else
{"NO_OPERATION_SET", 6, 149},
#endif
#ifdef EVP_R_ONLY_ONESHOT_SUPPORTED
{"ONLY_ONESHOT_SUPPORTED", ERR_LIB_EVP, EVP_R_ONLY_ONESHOT_SUPPORTED},
#else
{"ONLY_ONESHOT_SUPPORTED", 6, 177},
#endif
#ifdef EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE
{"OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE", ERR_LIB_EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE},
#else
{"OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE", 6, 150},
#endif
#ifdef EVP_R_OPERATON_NOT_INITIALIZED
{"OPERATON_NOT_INITIALIZED", ERR_LIB_EVP, EVP_R_OPERATON_NOT_INITIALIZED},
#else
{"OPERATON_NOT_INITIALIZED", 6, 151},
#endif
#ifdef EVP_R_OUTPUT_WOULD_OVERFLOW
{"OUTPUT_WOULD_OVERFLOW", ERR_LIB_EVP, EVP_R_OUTPUT_WOULD_OVERFLOW},
#else
{"OUTPUT_WOULD_OVERFLOW", 6, 184},
#endif
#ifdef EVP_R_PARTIALLY_OVERLAPPING
{"PARTIALLY_OVERLAPPING", ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING},
#else
{"PARTIALLY_OVERLAPPING", 6, 162},
#endif
#ifdef EVP_R_PBKDF2_ERROR
{"PBKDF2_ERROR", ERR_LIB_EVP, EVP_R_PBKDF2_ERROR},
#else
{"PBKDF2_ERROR", 6, 181},
#endif
#ifdef EVP_R_PKEY_APPLICATION_ASN1_METHOD_ALREADY_REGISTERED
{"PKEY_APPLICATION_ASN1_METHOD_ALREADY_REGISTERED", ERR_LIB_EVP, EVP_R_PKEY_APPLICATION_ASN1_METHOD_ALREADY_REGISTERED},
#else
{"PKEY_APPLICATION_ASN1_METHOD_ALREADY_REGISTERED", 6, 179},
#endif
#ifdef EVP_R_PRIVATE_KEY_DECODE_ERROR
{"PRIVATE_KEY_DECODE_ERROR", ERR_LIB_EVP, EVP_R_PRIVATE_KEY_DECODE_ERROR},
#else
{"PRIVATE_KEY_DECODE_ERROR", 6, 145},
#endif
#ifdef EVP_R_PRIVATE_KEY_ENCODE_ERROR
{"PRIVATE_KEY_ENCODE_ERROR", ERR_LIB_EVP, EVP_R_PRIVATE_KEY_ENCODE_ERROR},
#else
{"PRIVATE_KEY_ENCODE_ERROR", 6, 146},
#endif
#ifdef EVP_R_PUBLIC_KEY_NOT_RSA
{"PUBLIC_KEY_NOT_RSA", ERR_LIB_EVP, EVP_R_PUBLIC_KEY_NOT_RSA},
#else
{"PUBLIC_KEY_NOT_RSA", 6, 106},
#endif
#ifdef EVP_R_UNKNOWN_CIPHER
{"UNKNOWN_CIPHER", ERR_LIB_EVP, EVP_R_UNKNOWN_CIPHER},
#else
{"UNKNOWN_CIPHER", 6, 160},
#endif
#ifdef EVP_R_UNKNOWN_DIGEST
{"UNKNOWN_DIGEST", ERR_LIB_EVP, EVP_R_UNKNOWN_DIGEST},
#else
{"UNKNOWN_DIGEST", 6, 161},
#endif
#ifdef EVP_R_UNKNOWN_OPTION
{"UNKNOWN_OPTION", ERR_LIB_EVP, EVP_R_UNKNOWN_OPTION},
#else
{"UNKNOWN_OPTION", 6, 169},
#endif
#ifdef EVP_R_UNKNOWN_PBE_ALGORITHM
{"UNKNOWN_PBE_ALGORITHM", ERR_LIB_EVP, EVP_R_UNKNOWN_PBE_ALGORITHM},
#else
{"UNKNOWN_PBE_ALGORITHM", 6, 121},
#endif
#ifdef EVP_R_UNSUPPORTED_ALGORITHM
{"UNSUPPORTED_ALGORITHM", ERR_LIB_EVP, EVP_R_UNSUPPORTED_ALGORITHM},
#else
{"UNSUPPORTED_ALGORITHM", 6, 156},
#endif
#ifdef EVP_R_UNSUPPORTED_CIPHER
{"UNSUPPORTED_CIPHER", ERR_LIB_EVP, EVP_R_UNSUPPORTED_CIPHER},
#else
{"UNSUPPORTED_CIPHER", 6, 107},
#endif
#ifdef EVP_R_UNSUPPORTED_KEYLENGTH
{"UNSUPPORTED_KEYLENGTH", ERR_LIB_EVP, EVP_R_UNSUPPORTED_KEYLENGTH},
#else
{"UNSUPPORTED_KEYLENGTH", 6, 123},
#endif
#ifdef EVP_R_UNSUPPORTED_KEY_DERIVATION_FUNCTION
{"UNSUPPORTED_KEY_DERIVATION_FUNCTION", ERR_LIB_EVP, EVP_R_UNSUPPORTED_KEY_DERIVATION_FUNCTION},
#else
{"UNSUPPORTED_KEY_DERIVATION_FUNCTION", 6, 124},
#endif
#ifdef EVP_R_UNSUPPORTED_KEY_SIZE
{"UNSUPPORTED_KEY_SIZE", ERR_LIB_EVP, EVP_R_UNSUPPORTED_KEY_SIZE},
#else
{"UNSUPPORTED_KEY_SIZE", 6, 108},
#endif
#ifdef EVP_R_UNSUPPORTED_NUMBER_OF_ROUNDS
{"UNSUPPORTED_NUMBER_OF_ROUNDS", ERR_LIB_EVP, EVP_R_UNSUPPORTED_NUMBER_OF_ROUNDS},
#else
{"UNSUPPORTED_NUMBER_OF_ROUNDS", 6, 135},
#endif
#ifdef EVP_R_UNSUPPORTED_PRF
{"UNSUPPORTED_PRF", ERR_LIB_EVP, EVP_R_UNSUPPORTED_PRF},
#else
{"UNSUPPORTED_PRF", 6, 125},
#endif
#ifdef EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM
{"UNSUPPORTED_PRIVATE_KEY_ALGORITHM", ERR_LIB_EVP, EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM},
#else
{"UNSUPPORTED_PRIVATE_KEY_ALGORITHM", 6, 118},
#endif
#ifdef EVP_R_UNSUPPORTED_SALT_TYPE
{"UNSUPPORTED_SALT_TYPE", ERR_LIB_EVP, EVP_R_UNSUPPORTED_SALT_TYPE},
#else
{"UNSUPPORTED_SALT_TYPE", 6, 126},
#endif
#ifdef EVP_R_WRAP_MODE_NOT_ALLOWED
{"WRAP_MODE_NOT_ALLOWED", ERR_LIB_EVP, EVP_R_WRAP_MODE_NOT_ALLOWED},
#else
{"WRAP_MODE_NOT_ALLOWED", 6, 170},
#endif
#ifdef EVP_R_WRONG_FINAL_BLOCK_LENGTH
{"WRONG_FINAL_BLOCK_LENGTH", ERR_LIB_EVP, EVP_R_WRONG_FINAL_BLOCK_LENGTH},
#else
{"WRONG_FINAL_BLOCK_LENGTH", 6, 109},
#endif
#ifdef EVP_R_XTS_DUPLICATED_KEYS
{"XTS_DUPLICATED_KEYS", ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS},
#else
{"XTS_DUPLICATED_KEYS", 6, 183},
#endif
#ifdef KDF_R_INVALID_DIGEST
{"INVALID_DIGEST", ERR_LIB_KDF, KDF_R_INVALID_DIGEST},
#else
{"INVALID_DIGEST", 52, 100},
#endif
#ifdef KDF_R_MISSING_ITERATION_COUNT
{"MISSING_ITERATION_COUNT", ERR_LIB_KDF, KDF_R_MISSING_ITERATION_COUNT},
#else
{"MISSING_ITERATION_COUNT", 52, 109},
#endif
#ifdef KDF_R_MISSING_KEY
{"MISSING_KEY", ERR_LIB_KDF, KDF_R_MISSING_KEY},
#else
{"MISSING_KEY", 52, 104},
#endif
#ifdef KDF_R_MISSING_MESSAGE_DIGEST
{"MISSING_MESSAGE_DIGEST", ERR_LIB_KDF, KDF_R_MISSING_MESSAGE_DIGEST},
#else
{"MISSING_MESSAGE_DIGEST", 52, 105},
#endif
#ifdef KDF_R_MISSING_PARAMETER
{"MISSING_PARAMETER", ERR_LIB_KDF, KDF_R_MISSING_PARAMETER},
#else
{"MISSING_PARAMETER", 52, 101},
#endif
#ifdef KDF_R_MISSING_PASS
{"MISSING_PASS", ERR_LIB_KDF, KDF_R_MISSING_PASS},
#else
{"MISSING_PASS", 52, 110},
#endif
#ifdef KDF_R_MISSING_SALT
{"MISSING_SALT", ERR_LIB_KDF, KDF_R_MISSING_SALT},
#else
{"MISSING_SALT", 52, 111},
#endif
#ifdef KDF_R_MISSING_SECRET
{"MISSING_SECRET", ERR_LIB_KDF, KDF_R_MISSING_SECRET},
#else
{"MISSING_SECRET", 52, 107},
#endif
#ifdef KDF_R_MISSING_SEED
{"MISSING_SEED", ERR_LIB_KDF, KDF_R_MISSING_SEED},
#else
{"MISSING_SEED", 52, 106},
#endif
#ifdef KDF_R_UNKNOWN_PARAMETER_TYPE
{"UNKNOWN_PARAMETER_TYPE", ERR_LIB_KDF, KDF_R_UNKNOWN_PARAMETER_TYPE},
#else
{"UNKNOWN_PARAMETER_TYPE", 52, 103},
#endif
#ifdef KDF_R_VALUE_ERROR
{"VALUE_ERROR", ERR_LIB_KDF, KDF_R_VALUE_ERROR},
#else
{"VALUE_ERROR", 52, 108},
#endif
#ifdef KDF_R_VALUE_MISSING
{"VALUE_MISSING", ERR_LIB_KDF, KDF_R_VALUE_MISSING},
#else
{"VALUE_MISSING", 52, 102},
#endif
#ifdef OBJ_R_OID_EXISTS
{"OID_EXISTS", ERR_LIB_OBJ, OBJ_R_OID_EXISTS},
#else
{"OID_EXISTS", 8, 102},
#endif
#ifdef OBJ_R_UNKNOWN_NID
{"UNKNOWN_NID", ERR_LIB_OBJ, OBJ_R_UNKNOWN_NID},
#else
{"UNKNOWN_NID", 8, 101},
#endif
#ifdef OCSP_R_CERTIFICATE_VERIFY_ERROR
{"CERTIFICATE_VERIFY_ERROR", ERR_LIB_OCSP, OCSP_R_CERTIFICATE_VERIFY_ERROR},
#else
{"CERTIFICATE_VERIFY_ERROR", 39, 101},
#endif
#ifdef OCSP_R_DIGEST_ERR
{"DIGEST_ERR", ERR_LIB_OCSP, OCSP_R_DIGEST_ERR},
#else
{"DIGEST_ERR", 39, 102},
#endif
#ifdef OCSP_R_ERROR_IN_NEXTUPDATE_FIELD
{"ERROR_IN_NEXTUPDATE_FIELD", ERR_LIB_OCSP, OCSP_R_ERROR_IN_NEXTUPDATE_FIELD},
#else
{"ERROR_IN_NEXTUPDATE_FIELD", 39, 122},
#endif
#ifdef OCSP_R_ERROR_IN_THISUPDATE_FIELD
{"ERROR_IN_THISUPDATE_FIELD", ERR_LIB_OCSP, OCSP_R_ERROR_IN_THISUPDATE_FIELD},
#else
{"ERROR_IN_THISUPDATE_FIELD", 39, 123},
#endif
#ifdef OCSP_R_ERROR_PARSING_URL
{"ERROR_PARSING_URL", ERR_LIB_OCSP, OCSP_R_ERROR_PARSING_URL},
#else
{"ERROR_PARSING_URL", 39, 121},
#endif
#ifdef OCSP_R_MISSING_OCSPSIGNING_USAGE
{"MISSING_OCSPSIGNING_USAGE", ERR_LIB_OCSP, OCSP_R_MISSING_OCSPSIGNING_USAGE},
#else
{"MISSING_OCSPSIGNING_USAGE", 39, 103},
#endif
#ifdef OCSP_R_NEXTUPDATE_BEFORE_THISUPDATE
{"NEXTUPDATE_BEFORE_THISUPDATE", ERR_LIB_OCSP, OCSP_R_NEXTUPDATE_BEFORE_THISUPDATE},
#else
{"NEXTUPDATE_BEFORE_THISUPDATE", 39, 124},
#endif
#ifdef OCSP_R_NOT_BASIC_RESPONSE
{"NOT_BASIC_RESPONSE", ERR_LIB_OCSP, OCSP_R_NOT_BASIC_RESPONSE},
#else
{"NOT_BASIC_RESPONSE", 39, 104},
#endif
#ifdef OCSP_R_NO_CERTIFICATES_IN_CHAIN
{"NO_CERTIFICATES_IN_CHAIN", ERR_LIB_OCSP, OCSP_R_NO_CERTIFICATES_IN_CHAIN},
#else
{"NO_CERTIFICATES_IN_CHAIN", 39, 105},
#endif
#ifdef OCSP_R_NO_RESPONSE_DATA
{"NO_RESPONSE_DATA", ERR_LIB_OCSP, OCSP_R_NO_RESPONSE_DATA},
#else
{"NO_RESPONSE_DATA", 39, 108},
#endif
#ifdef OCSP_R_NO_REVOKED_TIME
{"NO_REVOKED_TIME", ERR_LIB_OCSP, OCSP_R_NO_REVOKED_TIME},
#else
{"NO_REVOKED_TIME", 39, 109},
#endif
#ifdef OCSP_R_NO_SIGNER_KEY
{"NO_SIGNER_KEY", ERR_LIB_OCSP, OCSP_R_NO_SIGNER_KEY},
#else
{"NO_SIGNER_KEY", 39, 130},
#endif
#ifdef OCSP_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE
{"PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE", ERR_LIB_OCSP, OCSP_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE},
#else
{"PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE", 39, 110},
#endif
#ifdef OCSP_R_REQUEST_NOT_SIGNED
{"REQUEST_NOT_SIGNED", ERR_LIB_OCSP, OCSP_R_REQUEST_NOT_SIGNED},
#else
{"REQUEST_NOT_SIGNED", 39, 128},
#endif
#ifdef OCSP_R_RESPONSE_CONTAINS_NO_REVOCATION_DATA
{"RESPONSE_CONTAINS_NO_REVOCATION_DATA", ERR_LIB_OCSP, OCSP_R_RESPONSE_CONTAINS_NO_REVOCATION_DATA},
#else
{"RESPONSE_CONTAINS_NO_REVOCATION_DATA", 39, 111},
#endif
#ifdef OCSP_R_ROOT_CA_NOT_TRUSTED
{"ROOT_CA_NOT_TRUSTED", ERR_LIB_OCSP, OCSP_R_ROOT_CA_NOT_TRUSTED},
#else
{"ROOT_CA_NOT_TRUSTED", 39, 112},
#endif
#ifdef OCSP_R_SERVER_RESPONSE_ERROR
{"SERVER_RESPONSE_ERROR", ERR_LIB_OCSP, OCSP_R_SERVER_RESPONSE_ERROR},
#else
{"SERVER_RESPONSE_ERROR", 39, 114},
#endif
#ifdef OCSP_R_SERVER_RESPONSE_PARSE_ERROR
{"SERVER_RESPONSE_PARSE_ERROR", ERR_LIB_OCSP, OCSP_R_SERVER_RESPONSE_PARSE_ERROR},
#else
{"SERVER_RESPONSE_PARSE_ERROR", 39, 115},
#endif
#ifdef OCSP_R_SIGNATURE_FAILURE
{"SIGNATURE_FAILURE", ERR_LIB_OCSP, OCSP_R_SIGNATURE_FAILURE},
#else
{"SIGNATURE_FAILURE", 39, 117},
#endif
#ifdef OCSP_R_SIGNER_CERTIFICATE_NOT_FOUND
{"SIGNER_CERTIFICATE_NOT_FOUND", ERR_LIB_OCSP, OCSP_R_SIGNER_CERTIFICATE_NOT_FOUND},
#else
{"SIGNER_CERTIFICATE_NOT_FOUND", 39, 118},
#endif
#ifdef OCSP_R_STATUS_EXPIRED
{"STATUS_EXPIRED", ERR_LIB_OCSP, OCSP_R_STATUS_EXPIRED},
#else
{"STATUS_EXPIRED", 39, 125},
#endif
#ifdef OCSP_R_STATUS_NOT_YET_VALID
{"STATUS_NOT_YET_VALID", ERR_LIB_OCSP, OCSP_R_STATUS_NOT_YET_VALID},
#else
{"STATUS_NOT_YET_VALID", 39, 126},
#endif
#ifdef OCSP_R_STATUS_TOO_OLD
{"STATUS_TOO_OLD", ERR_LIB_OCSP, OCSP_R_STATUS_TOO_OLD},
#else
{"STATUS_TOO_OLD", 39, 127},
#endif
#ifdef OCSP_R_UNKNOWN_MESSAGE_DIGEST
{"UNKNOWN_MESSAGE_DIGEST", ERR_LIB_OCSP, OCSP_R_UNKNOWN_MESSAGE_DIGEST},
#else
{"UNKNOWN_MESSAGE_DIGEST", 39, 119},
#endif
#ifdef OCSP_R_UNKNOWN_NID
{"UNKNOWN_NID", ERR_LIB_OCSP, OCSP_R_UNKNOWN_NID},
#else
{"UNKNOWN_NID", 39, 120},
#endif
#ifdef OCSP_R_UNSUPPORTED_REQUESTORNAME_TYPE
{"UNSUPPORTED_REQUESTORNAME_TYPE", ERR_LIB_OCSP, OCSP_R_UNSUPPORTED_REQUESTORNAME_TYPE},
#else
{"UNSUPPORTED_REQUESTORNAME_TYPE", 39, 129},
#endif
#ifdef OSSL_STORE_R_AMBIGUOUS_CONTENT_TYPE
{"AMBIGUOUS_CONTENT_TYPE", ERR_LIB_OSSL_STORE, OSSL_STORE_R_AMBIGUOUS_CONTENT_TYPE},
#else
{"AMBIGUOUS_CONTENT_TYPE", 44, 107},
#endif
#ifdef OSSL_STORE_R_BAD_PASSWORD_READ
{"BAD_PASSWORD_READ", ERR_LIB_OSSL_STORE, OSSL_STORE_R_BAD_PASSWORD_READ},
#else
{"BAD_PASSWORD_READ", 44, 115},
#endif
#ifdef OSSL_STORE_R_ERROR_VERIFYING_PKCS12_MAC
{"ERROR_VERIFYING_PKCS12_MAC", ERR_LIB_OSSL_STORE, OSSL_STORE_R_ERROR_VERIFYING_PKCS12_MAC},
#else
{"ERROR_VERIFYING_PKCS12_MAC", 44, 113},
#endif
#ifdef OSSL_STORE_R_FINGERPRINT_SIZE_DOES_NOT_MATCH_DIGEST
{"FINGERPRINT_SIZE_DOES_NOT_MATCH_DIGEST", ERR_LIB_OSSL_STORE, OSSL_STORE_R_FINGERPRINT_SIZE_DOES_NOT_MATCH_DIGEST},
#else
{"FINGERPRINT_SIZE_DOES_NOT_MATCH_DIGEST", 44, 121},
#endif
#ifdef OSSL_STORE_R_INVALID_SCHEME
{"INVALID_SCHEME", ERR_LIB_OSSL_STORE, OSSL_STORE_R_INVALID_SCHEME},
#else
{"INVALID_SCHEME", 44, 106},
#endif
#ifdef OSSL_STORE_R_IS_NOT_A
{"IS_NOT_A", ERR_LIB_OSSL_STORE, OSSL_STORE_R_IS_NOT_A},
#else
{"IS_NOT_A", 44, 112},
#endif
#ifdef OSSL_STORE_R_LOADER_INCOMPLETE
{"LOADER_INCOMPLETE", ERR_LIB_OSSL_STORE, OSSL_STORE_R_LOADER_INCOMPLETE},
#else
{"LOADER_INCOMPLETE", 44, 116},
#endif
#ifdef OSSL_STORE_R_LOADING_STARTED
{"LOADING_STARTED", ERR_LIB_OSSL_STORE, OSSL_STORE_R_LOADING_STARTED},
#else
{"LOADING_STARTED", 44, 117},
#endif
#ifdef OSSL_STORE_R_NOT_A_CERTIFICATE
{"NOT_A_CERTIFICATE", ERR_LIB_OSSL_STORE, OSSL_STORE_R_NOT_A_CERTIFICATE},
#else
{"NOT_A_CERTIFICATE", 44, 100},
#endif
#ifdef OSSL_STORE_R_NOT_A_CRL
{"NOT_A_CRL", ERR_LIB_OSSL_STORE, OSSL_STORE_R_NOT_A_CRL},
#else
{"NOT_A_CRL", 44, 101},
#endif
#ifdef OSSL_STORE_R_NOT_A_KEY
{"NOT_A_KEY", ERR_LIB_OSSL_STORE, OSSL_STORE_R_NOT_A_KEY},
#else
{"NOT_A_KEY", 44, 102},
#endif
#ifdef OSSL_STORE_R_NOT_A_NAME
{"NOT_A_NAME", ERR_LIB_OSSL_STORE, OSSL_STORE_R_NOT_A_NAME},
#else
{"NOT_A_NAME", 44, 103},
#endif
#ifdef OSSL_STORE_R_NOT_PARAMETERS
{"NOT_PARAMETERS", ERR_LIB_OSSL_STORE, OSSL_STORE_R_NOT_PARAMETERS},
#else
{"NOT_PARAMETERS", 44, 104},
#endif
#ifdef OSSL_STORE_R_PASSPHRASE_CALLBACK_ERROR
{"PASSPHRASE_CALLBACK_ERROR", ERR_LIB_OSSL_STORE, OSSL_STORE_R_PASSPHRASE_CALLBACK_ERROR},
#else
{"PASSPHRASE_CALLBACK_ERROR", 44, 114},
#endif
#ifdef OSSL_STORE_R_PATH_MUST_BE_ABSOLUTE
{"PATH_MUST_BE_ABSOLUTE", ERR_LIB_OSSL_STORE, OSSL_STORE_R_PATH_MUST_BE_ABSOLUTE},
#else
{"PATH_MUST_BE_ABSOLUTE", 44, 108},
#endif
#ifdef OSSL_STORE_R_SEARCH_ONLY_SUPPORTED_FOR_DIRECTORIES
{"SEARCH_ONLY_SUPPORTED_FOR_DIRECTORIES", ERR_LIB_OSSL_STORE, OSSL_STORE_R_SEARCH_ONLY_SUPPORTED_FOR_DIRECTORIES},
#else
{"SEARCH_ONLY_SUPPORTED_FOR_DIRECTORIES", 44, 119},
#endif
#ifdef OSSL_STORE_R_UI_PROCESS_INTERRUPTED_OR_CANCELLED
{"UI_PROCESS_INTERRUPTED_OR_CANCELLED", ERR_LIB_OSSL_STORE, OSSL_STORE_R_UI_PROCESS_INTERRUPTED_OR_CANCELLED},
#else
{"UI_PROCESS_INTERRUPTED_OR_CANCELLED", 44, 109},
#endif
#ifdef OSSL_STORE_R_UNREGISTERED_SCHEME
{"UNREGISTERED_SCHEME", ERR_LIB_OSSL_STORE, OSSL_STORE_R_UNREGISTERED_SCHEME},
#else
{"UNREGISTERED_SCHEME", 44, 105},
#endif
#ifdef OSSL_STORE_R_UNSUPPORTED_CONTENT_TYPE
{"UNSUPPORTED_CONTENT_TYPE", ERR_LIB_OSSL_STORE, OSSL_STORE_R_UNSUPPORTED_CONTENT_TYPE},
#else
{"UNSUPPORTED_CONTENT_TYPE", 44, 110},
#endif
#ifdef OSSL_STORE_R_UNSUPPORTED_OPERATION
{"UNSUPPORTED_OPERATION", ERR_LIB_OSSL_STORE, OSSL_STORE_R_UNSUPPORTED_OPERATION},
#else
{"UNSUPPORTED_OPERATION", 44, 118},
#endif
#ifdef OSSL_STORE_R_UNSUPPORTED_SEARCH_TYPE
{"UNSUPPORTED_SEARCH_TYPE", ERR_LIB_OSSL_STORE, OSSL_STORE_R_UNSUPPORTED_SEARCH_TYPE},
#else
{"UNSUPPORTED_SEARCH_TYPE", 44, 120},
#endif
#ifdef OSSL_STORE_R_URI_AUTHORITY_UNSUPPORTED
{"URI_AUTHORITY_UNSUPPORTED", ERR_LIB_OSSL_STORE, OSSL_STORE_R_URI_AUTHORITY_UNSUPPORTED},
#else
{"URI_AUTHORITY_UNSUPPORTED", 44, 111},
#endif
#ifdef PEM_R_BAD_BASE64_DECODE
{"BAD_BASE64_DECODE", ERR_LIB_PEM, PEM_R_BAD_BASE64_DECODE},
#else
{"BAD_BASE64_DECODE", 9, 100},
#endif
#ifdef PEM_R_BAD_DECRYPT
{"BAD_DECRYPT", ERR_LIB_PEM, PEM_R_BAD_DECRYPT},
#else
{"BAD_DECRYPT", 9, 101},
#endif
#ifdef PEM_R_BAD_END_LINE
{"BAD_END_LINE", ERR_LIB_PEM, PEM_R_BAD_END_LINE},
#else
{"BAD_END_LINE", 9, 102},
#endif
#ifdef PEM_R_BAD_IV_CHARS
{"BAD_IV_CHARS", ERR_LIB_PEM, PEM_R_BAD_IV_CHARS},
#else
{"BAD_IV_CHARS", 9, 103},
#endif
#ifdef PEM_R_BAD_MAGIC_NUMBER
{"BAD_MAGIC_NUMBER", ERR_LIB_PEM, PEM_R_BAD_MAGIC_NUMBER},
#else
{"BAD_MAGIC_NUMBER", 9, 116},
#endif
#ifdef PEM_R_BAD_PASSWORD_READ
{"BAD_PASSWORD_READ", ERR_LIB_PEM, PEM_R_BAD_PASSWORD_READ},
#else
{"BAD_PASSWORD_READ", 9, 104},
#endif
#ifdef PEM_R_BAD_VERSION_NUMBER
{"BAD_VERSION_NUMBER", ERR_LIB_PEM, PEM_R_BAD_VERSION_NUMBER},
#else
{"BAD_VERSION_NUMBER", 9, 117},
#endif
#ifdef PEM_R_BIO_WRITE_FAILURE
{"BIO_WRITE_FAILURE", ERR_LIB_PEM, PEM_R_BIO_WRITE_FAILURE},
#else
{"BIO_WRITE_FAILURE", 9, 118},
#endif
#ifdef PEM_R_CIPHER_IS_NULL
{"CIPHER_IS_NULL", ERR_LIB_PEM, PEM_R_CIPHER_IS_NULL},
#else
{"CIPHER_IS_NULL", 9, 127},
#endif
#ifdef PEM_R_ERROR_CONVERTING_PRIVATE_KEY
{"ERROR_CONVERTING_PRIVATE_KEY", ERR_LIB_PEM, PEM_R_ERROR_CONVERTING_PRIVATE_KEY},
#else
{"ERROR_CONVERTING_PRIVATE_KEY", 9, 115},
#endif
#ifdef PEM_R_EXPECTING_PRIVATE_KEY_BLOB
{"EXPECTING_PRIVATE_KEY_BLOB", ERR_LIB_PEM, PEM_R_EXPECTING_PRIVATE_KEY_BLOB},
#else
{"EXPECTING_PRIVATE_KEY_BLOB", 9, 119},
#endif
#ifdef PEM_R_EXPECTING_PUBLIC_KEY_BLOB
{"EXPECTING_PUBLIC_KEY_BLOB", ERR_LIB_PEM, PEM_R_EXPECTING_PUBLIC_KEY_BLOB},
#else
{"EXPECTING_PUBLIC_KEY_BLOB", 9, 120},
#endif
#ifdef PEM_R_HEADER_TOO_LONG
{"HEADER_TOO_LONG", ERR_LIB_PEM, PEM_R_HEADER_TOO_LONG},
#else
{"HEADER_TOO_LONG", 9, 128},
#endif
#ifdef PEM_R_INCONSISTENT_HEADER
{"INCONSISTENT_HEADER", ERR_LIB_PEM, PEM_R_INCONSISTENT_HEADER},
#else
{"INCONSISTENT_HEADER", 9, 121},
#endif
#ifdef PEM_R_KEYBLOB_HEADER_PARSE_ERROR
{"KEYBLOB_HEADER_PARSE_ERROR", ERR_LIB_PEM, PEM_R_KEYBLOB_HEADER_PARSE_ERROR},
#else
{"KEYBLOB_HEADER_PARSE_ERROR", 9, 122},
#endif
#ifdef PEM_R_KEYBLOB_TOO_SHORT
{"KEYBLOB_TOO_SHORT", ERR_LIB_PEM, PEM_R_KEYBLOB_TOO_SHORT},
#else
{"KEYBLOB_TOO_SHORT", 9, 123},
#endif
#ifdef PEM_R_MISSING_DEK_IV
{"MISSING_DEK_IV", ERR_LIB_PEM, PEM_R_MISSING_DEK_IV},
#else
{"MISSING_DEK_IV", 9, 129},
#endif
#ifdef PEM_R_NOT_DEK_INFO
{"NOT_DEK_INFO", ERR_LIB_PEM, PEM_R_NOT_DEK_INFO},
#else
{"NOT_DEK_INFO", 9, 105},
#endif
#ifdef PEM_R_NOT_ENCRYPTED
{"NOT_ENCRYPTED", ERR_LIB_PEM, PEM_R_NOT_ENCRYPTED},
#else
{"NOT_ENCRYPTED", 9, 106},
#endif
#ifdef PEM_R_NOT_PROC_TYPE
{"NOT_PROC_TYPE", ERR_LIB_PEM, PEM_R_NOT_PROC_TYPE},
#else
{"NOT_PROC_TYPE", 9, 107},
#endif
#ifdef PEM_R_NO_START_LINE
{"NO_START_LINE", ERR_LIB_PEM, PEM_R_NO_START_LINE},
#else
{"NO_START_LINE", 9, 108},
#endif
#ifdef PEM_R_PROBLEMS_GETTING_PASSWORD
{"PROBLEMS_GETTING_PASSWORD", ERR_LIB_PEM, PEM_R_PROBLEMS_GETTING_PASSWORD},
#else
{"PROBLEMS_GETTING_PASSWORD", 9, 109},
#endif
#ifdef PEM_R_PVK_DATA_TOO_SHORT
{"PVK_DATA_TOO_SHORT", ERR_LIB_PEM, PEM_R_PVK_DATA_TOO_SHORT},
#else
{"PVK_DATA_TOO_SHORT", 9, 124},
#endif
#ifdef PEM_R_PVK_TOO_SHORT
{"PVK_TOO_SHORT", ERR_LIB_PEM, PEM_R_PVK_TOO_SHORT},
#else
{"PVK_TOO_SHORT", 9, 125},
#endif
#ifdef PEM_R_READ_KEY
{"READ_KEY", ERR_LIB_PEM, PEM_R_READ_KEY},
#else
{"READ_KEY", 9, 111},
#endif
#ifdef PEM_R_SHORT_HEADER
{"SHORT_HEADER", ERR_LIB_PEM, PEM_R_SHORT_HEADER},
#else
{"SHORT_HEADER", 9, 112},
#endif
#ifdef PEM_R_UNEXPECTED_DEK_IV
{"UNEXPECTED_DEK_IV", ERR_LIB_PEM, PEM_R_UNEXPECTED_DEK_IV},
#else
{"UNEXPECTED_DEK_IV", 9, 130},
#endif
#ifdef PEM_R_UNSUPPORTED_CIPHER
{"UNSUPPORTED_CIPHER", ERR_LIB_PEM, PEM_R_UNSUPPORTED_CIPHER},
#else
{"UNSUPPORTED_CIPHER", 9, 113},
#endif
#ifdef PEM_R_UNSUPPORTED_ENCRYPTION
{"UNSUPPORTED_ENCRYPTION", ERR_LIB_PEM, PEM_R_UNSUPPORTED_ENCRYPTION},
#else
{"UNSUPPORTED_ENCRYPTION", 9, 114},
#endif
#ifdef PEM_R_UNSUPPORTED_KEY_COMPONENTS
{"UNSUPPORTED_KEY_COMPONENTS", ERR_LIB_PEM, PEM_R_UNSUPPORTED_KEY_COMPONENTS},
#else
{"UNSUPPORTED_KEY_COMPONENTS", 9, 126},
#endif
#ifdef PEM_R_UNSUPPORTED_PUBLIC_KEY_TYPE
{"UNSUPPORTED_PUBLIC_KEY_TYPE", ERR_LIB_PEM, PEM_R_UNSUPPORTED_PUBLIC_KEY_TYPE},
#else
{"UNSUPPORTED_PUBLIC_KEY_TYPE", 9, 110},
#endif
#ifdef PKCS12_R_CANT_PACK_STRUCTURE
{"CANT_PACK_STRUCTURE", ERR_LIB_PKCS12, PKCS12_R_CANT_PACK_STRUCTURE},
#else
{"CANT_PACK_STRUCTURE", 35, 100},
#endif
#ifdef PKCS12_R_CONTENT_TYPE_NOT_DATA
{"CONTENT_TYPE_NOT_DATA", ERR_LIB_PKCS12, PKCS12_R_CONTENT_TYPE_NOT_DATA},
#else
{"CONTENT_TYPE_NOT_DATA", 35, 121},
#endif
#ifdef PKCS12_R_DECODE_ERROR
{"DECODE_ERROR", ERR_LIB_PKCS12, PKCS12_R_DECODE_ERROR},
#else
{"DECODE_ERROR", 35, 101},
#endif
#ifdef PKCS12_R_ENCODE_ERROR
{"ENCODE_ERROR", ERR_LIB_PKCS12, PKCS12_R_ENCODE_ERROR},
#else
{"ENCODE_ERROR", 35, 102},
#endif
#ifdef PKCS12_R_ENCRYPT_ERROR
{"ENCRYPT_ERROR", ERR_LIB_PKCS12, PKCS12_R_ENCRYPT_ERROR},
#else
{"ENCRYPT_ERROR", 35, 103},
#endif
#ifdef PKCS12_R_ERROR_SETTING_ENCRYPTED_DATA_TYPE
{"ERROR_SETTING_ENCRYPTED_DATA_TYPE", ERR_LIB_PKCS12, PKCS12_R_ERROR_SETTING_ENCRYPTED_DATA_TYPE},
#else
{"ERROR_SETTING_ENCRYPTED_DATA_TYPE", 35, 120},
#endif
#ifdef PKCS12_R_INVALID_NULL_ARGUMENT
{"INVALID_NULL_ARGUMENT", ERR_LIB_PKCS12, PKCS12_R_INVALID_NULL_ARGUMENT},
#else
{"INVALID_NULL_ARGUMENT", 35, 104},
#endif
#ifdef PKCS12_R_INVALID_NULL_PKCS12_POINTER
{"INVALID_NULL_PKCS12_POINTER", ERR_LIB_PKCS12, PKCS12_R_INVALID_NULL_PKCS12_POINTER},
#else
{"INVALID_NULL_PKCS12_POINTER", 35, 105},
#endif
#ifdef PKCS12_R_IV_GEN_ERROR
{"IV_GEN_ERROR", ERR_LIB_PKCS12, PKCS12_R_IV_GEN_ERROR},
#else
{"IV_GEN_ERROR", 35, 106},
#endif
#ifdef PKCS12_R_KEY_GEN_ERROR
{"KEY_GEN_ERROR", ERR_LIB_PKCS12, PKCS12_R_KEY_GEN_ERROR},
#else
{"KEY_GEN_ERROR", 35, 107},
#endif
#ifdef PKCS12_R_MAC_ABSENT
{"MAC_ABSENT", ERR_LIB_PKCS12, PKCS12_R_MAC_ABSENT},
#else
{"MAC_ABSENT", 35, 108},
#endif
#ifdef PKCS12_R_MAC_GENERATION_ERROR
{"MAC_GENERATION_ERROR", ERR_LIB_PKCS12, PKCS12_R_MAC_GENERATION_ERROR},
#else
{"MAC_GENERATION_ERROR", 35, 109},
#endif
#ifdef PKCS12_R_MAC_SETUP_ERROR
{"MAC_SETUP_ERROR", ERR_LIB_PKCS12, PKCS12_R_MAC_SETUP_ERROR},
#else
{"MAC_SETUP_ERROR", 35, 110},
#endif
#ifdef PKCS12_R_MAC_STRING_SET_ERROR
{"MAC_STRING_SET_ERROR", ERR_LIB_PKCS12, PKCS12_R_MAC_STRING_SET_ERROR},
#else
{"MAC_STRING_SET_ERROR", 35, 111},
#endif
#ifdef PKCS12_R_MAC_VERIFY_FAILURE
{"MAC_VERIFY_FAILURE", ERR_LIB_PKCS12, PKCS12_R_MAC_VERIFY_FAILURE},
#else
{"MAC_VERIFY_FAILURE", 35, 113},
#endif
#ifdef PKCS12_R_PARSE_ERROR
{"PARSE_ERROR", ERR_LIB_PKCS12, PKCS12_R_PARSE_ERROR},
#else
{"PARSE_ERROR", 35, 114},
#endif
#ifdef PKCS12_R_PKCS12_ALGOR_CIPHERINIT_ERROR
{"PKCS12_ALGOR_CIPHERINIT_ERROR", ERR_LIB_PKCS12, PKCS12_R_PKCS12_ALGOR_CIPHERINIT_ERROR},
#else
{"PKCS12_ALGOR_CIPHERINIT_ERROR", 35, 115},
#endif
#ifdef PKCS12_R_PKCS12_CIPHERFINAL_ERROR
{"PKCS12_CIPHERFINAL_ERROR", ERR_LIB_PKCS12, PKCS12_R_PKCS12_CIPHERFINAL_ERROR},
#else
{"PKCS12_CIPHERFINAL_ERROR", 35, 116},
#endif
#ifdef PKCS12_R_PKCS12_PBE_CRYPT_ERROR
{"PKCS12_PBE_CRYPT_ERROR", ERR_LIB_PKCS12, PKCS12_R_PKCS12_PBE_CRYPT_ERROR},
#else
{"PKCS12_PBE_CRYPT_ERROR", 35, 117},
#endif
#ifdef PKCS12_R_UNKNOWN_DIGEST_ALGORITHM
{"UNKNOWN_DIGEST_ALGORITHM", ERR_LIB_PKCS12, PKCS12_R_UNKNOWN_DIGEST_ALGORITHM},
#else
{"UNKNOWN_DIGEST_ALGORITHM", 35, 118},
#endif
#ifdef PKCS12_R_UNSUPPORTED_PKCS12_MODE
{"UNSUPPORTED_PKCS12_MODE", ERR_LIB_PKCS12, PKCS12_R_UNSUPPORTED_PKCS12_MODE},
#else
{"UNSUPPORTED_PKCS12_MODE", 35, 119},
#endif
#ifdef PKCS7_R_CERTIFICATE_VERIFY_ERROR
{"CERTIFICATE_VERIFY_ERROR", ERR_LIB_PKCS7, PKCS7_R_CERTIFICATE_VERIFY_ERROR},
#else
{"CERTIFICATE_VERIFY_ERROR", 33, 117},
#endif
#ifdef PKCS7_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER
{"CIPHER_HAS_NO_OBJECT_IDENTIFIER", ERR_LIB_PKCS7, PKCS7_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER},
#else
{"CIPHER_HAS_NO_OBJECT_IDENTIFIER", 33, 144},
#endif
#ifdef PKCS7_R_CIPHER_NOT_INITIALIZED
{"CIPHER_NOT_INITIALIZED", ERR_LIB_PKCS7, PKCS7_R_CIPHER_NOT_INITIALIZED},
#else
{"CIPHER_NOT_INITIALIZED", 33, 116},
#endif
#ifdef PKCS7_R_CONTENT_AND_DATA_PRESENT
{"CONTENT_AND_DATA_PRESENT", ERR_LIB_PKCS7, PKCS7_R_CONTENT_AND_DATA_PRESENT},
#else
{"CONTENT_AND_DATA_PRESENT", 33, 118},
#endif
#ifdef PKCS7_R_CTRL_ERROR
{"CTRL_ERROR", ERR_LIB_PKCS7, PKCS7_R_CTRL_ERROR},
#else
{"CTRL_ERROR", 33, 152},
#endif
#ifdef PKCS7_R_DECRYPT_ERROR
{"DECRYPT_ERROR", ERR_LIB_PKCS7, PKCS7_R_DECRYPT_ERROR},
#else
{"DECRYPT_ERROR", 33, 119},
#endif
#ifdef PKCS7_R_DIGEST_FAILURE
{"DIGEST_FAILURE", ERR_LIB_PKCS7, PKCS7_R_DIGEST_FAILURE},
#else
{"DIGEST_FAILURE", 33, 101},
#endif
#ifdef PKCS7_R_ENCRYPTION_CTRL_FAILURE
{"ENCRYPTION_CTRL_FAILURE", ERR_LIB_PKCS7, PKCS7_R_ENCRYPTION_CTRL_FAILURE},
#else
{"ENCRYPTION_CTRL_FAILURE", 33, 149},
#endif
#ifdef PKCS7_R_ENCRYPTION_NOT_SUPPORTED_FOR_THIS_KEY_TYPE
{"ENCRYPTION_NOT_SUPPORTED_FOR_THIS_KEY_TYPE", ERR_LIB_PKCS7, PKCS7_R_ENCRYPTION_NOT_SUPPORTED_FOR_THIS_KEY_TYPE},
#else
{"ENCRYPTION_NOT_SUPPORTED_FOR_THIS_KEY_TYPE", 33, 150},
#endif
#ifdef PKCS7_R_ERROR_ADDING_RECIPIENT
{"ERROR_ADDING_RECIPIENT", ERR_LIB_PKCS7, PKCS7_R_ERROR_ADDING_RECIPIENT},
#else
{"ERROR_ADDING_RECIPIENT", 33, 120},
#endif
#ifdef PKCS7_R_ERROR_SETTING_CIPHER
{"ERROR_SETTING_CIPHER", ERR_LIB_PKCS7, PKCS7_R_ERROR_SETTING_CIPHER},
#else
{"ERROR_SETTING_CIPHER", 33, 121},
#endif
#ifdef PKCS7_R_INVALID_NULL_POINTER
{"INVALID_NULL_POINTER", ERR_LIB_PKCS7, PKCS7_R_INVALID_NULL_POINTER},
#else
{"INVALID_NULL_POINTER", 33, 143},
#endif
#ifdef PKCS7_R_INVALID_SIGNED_DATA_TYPE
{"INVALID_SIGNED_DATA_TYPE", ERR_LIB_PKCS7, PKCS7_R_INVALID_SIGNED_DATA_TYPE},
#else
{"INVALID_SIGNED_DATA_TYPE", 33, 155},
#endif
#ifdef PKCS7_R_NO_CONTENT
{"NO_CONTENT", ERR_LIB_PKCS7, PKCS7_R_NO_CONTENT},
#else
{"NO_CONTENT", 33, 122},
#endif
#ifdef PKCS7_R_NO_DEFAULT_DIGEST
{"NO_DEFAULT_DIGEST", ERR_LIB_PKCS7, PKCS7_R_NO_DEFAULT_DIGEST},
#else
{"NO_DEFAULT_DIGEST", 33, 151},
#endif
#ifdef PKCS7_R_NO_MATCHING_DIGEST_TYPE_FOUND
{"NO_MATCHING_DIGEST_TYPE_FOUND", ERR_LIB_PKCS7, PKCS7_R_NO_MATCHING_DIGEST_TYPE_FOUND},
#else
{"NO_MATCHING_DIGEST_TYPE_FOUND", 33, 154},
#endif
#ifdef PKCS7_R_NO_RECIPIENT_MATCHES_CERTIFICATE
{"NO_RECIPIENT_MATCHES_CERTIFICATE", ERR_LIB_PKCS7, PKCS7_R_NO_RECIPIENT_MATCHES_CERTIFICATE},
#else
{"NO_RECIPIENT_MATCHES_CERTIFICATE", 33, 115},
#endif
#ifdef PKCS7_R_NO_SIGNATURES_ON_DATA
{"NO_SIGNATURES_ON_DATA", ERR_LIB_PKCS7, PKCS7_R_NO_SIGNATURES_ON_DATA},
#else
{"NO_SIGNATURES_ON_DATA", 33, 123},
#endif
#ifdef PKCS7_R_NO_SIGNERS
{"NO_SIGNERS", ERR_LIB_PKCS7, PKCS7_R_NO_SIGNERS},
#else
{"NO_SIGNERS", 33, 142},
#endif
#ifdef PKCS7_R_OPERATION_NOT_SUPPORTED_ON_THIS_TYPE
{"OPERATION_NOT_SUPPORTED_ON_THIS_TYPE", ERR_LIB_PKCS7, PKCS7_R_OPERATION_NOT_SUPPORTED_ON_THIS_TYPE},
#else
{"OPERATION_NOT_SUPPORTED_ON_THIS_TYPE", 33, 104},
#endif
#ifdef PKCS7_R_PKCS7_ADD_SIGNATURE_ERROR
{"PKCS7_ADD_SIGNATURE_ERROR", ERR_LIB_PKCS7, PKCS7_R_PKCS7_ADD_SIGNATURE_ERROR},
#else
{"PKCS7_ADD_SIGNATURE_ERROR", 33, 124},
#endif
#ifdef PKCS7_R_PKCS7_ADD_SIGNER_ERROR
{"PKCS7_ADD_SIGNER_ERROR", ERR_LIB_PKCS7, PKCS7_R_PKCS7_ADD_SIGNER_ERROR},
#else
{"PKCS7_ADD_SIGNER_ERROR", 33, 153},
#endif
#ifdef PKCS7_R_PKCS7_DATASIGN
{"PKCS7_DATASIGN", ERR_LIB_PKCS7, PKCS7_R_PKCS7_DATASIGN},
#else
{"PKCS7_DATASIGN", 33, 145},
#endif
#ifdef PKCS7_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE
{"PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE", ERR_LIB_PKCS7, PKCS7_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE},
#else
{"PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE", 33, 127},
#endif
#ifdef PKCS7_R_SIGNATURE_FAILURE
{"SIGNATURE_FAILURE", ERR_LIB_PKCS7, PKCS7_R_SIGNATURE_FAILURE},
#else
{"SIGNATURE_FAILURE", 33, 105},
#endif
#ifdef PKCS7_R_SIGNER_CERTIFICATE_NOT_FOUND
{"SIGNER_CERTIFICATE_NOT_FOUND", ERR_LIB_PKCS7, PKCS7_R_SIGNER_CERTIFICATE_NOT_FOUND},
#else
{"SIGNER_CERTIFICATE_NOT_FOUND", 33, 128},
#endif
#ifdef PKCS7_R_SIGNING_CTRL_FAILURE
{"SIGNING_CTRL_FAILURE", ERR_LIB_PKCS7, PKCS7_R_SIGNING_CTRL_FAILURE},
#else
{"SIGNING_CTRL_FAILURE", 33, 147},
#endif
#ifdef PKCS7_R_SIGNING_NOT_SUPPORTED_FOR_THIS_KEY_TYPE
{"SIGNING_NOT_SUPPORTED_FOR_THIS_KEY_TYPE", ERR_LIB_PKCS7, PKCS7_R_SIGNING_NOT_SUPPORTED_FOR_THIS_KEY_TYPE},
#else
{"SIGNING_NOT_SUPPORTED_FOR_THIS_KEY_TYPE", 33, 148},
#endif
#ifdef PKCS7_R_SMIME_TEXT_ERROR
{"SMIME_TEXT_ERROR", ERR_LIB_PKCS7, PKCS7_R_SMIME_TEXT_ERROR},
#else
{"SMIME_TEXT_ERROR", 33, 129},
#endif
#ifdef PKCS7_R_UNABLE_TO_FIND_CERTIFICATE
{"UNABLE_TO_FIND_CERTIFICATE", ERR_LIB_PKCS7, PKCS7_R_UNABLE_TO_FIND_CERTIFICATE},
#else
{"UNABLE_TO_FIND_CERTIFICATE", 33, 106},
#endif
#ifdef PKCS7_R_UNABLE_TO_FIND_MEM_BIO
{"UNABLE_TO_FIND_MEM_BIO", ERR_LIB_PKCS7, PKCS7_R_UNABLE_TO_FIND_MEM_BIO},
#else
{"UNABLE_TO_FIND_MEM_BIO", 33, 107},
#endif
#ifdef PKCS7_R_UNABLE_TO_FIND_MESSAGE_DIGEST
{"UNABLE_TO_FIND_MESSAGE_DIGEST", ERR_LIB_PKCS7, PKCS7_R_UNABLE_TO_FIND_MESSAGE_DIGEST},
#else
{"UNABLE_TO_FIND_MESSAGE_DIGEST", 33, 108},
#endif
#ifdef PKCS7_R_UNKNOWN_DIGEST_TYPE
{"UNKNOWN_DIGEST_TYPE", ERR_LIB_PKCS7, PKCS7_R_UNKNOWN_DIGEST_TYPE},
#else
{"UNKNOWN_DIGEST_TYPE", 33, 109},
#endif
#ifdef PKCS7_R_UNKNOWN_OPERATION
{"UNKNOWN_OPERATION", ERR_LIB_PKCS7, PKCS7_R_UNKNOWN_OPERATION},
#else
{"UNKNOWN_OPERATION", 33, 110},
#endif
#ifdef PKCS7_R_UNSUPPORTED_CIPHER_TYPE
{"UNSUPPORTED_CIPHER_TYPE", ERR_LIB_PKCS7, PKCS7_R_UNSUPPORTED_CIPHER_TYPE},
#else
{"UNSUPPORTED_CIPHER_TYPE", 33, 111},
#endif
#ifdef PKCS7_R_UNSUPPORTED_CONTENT_TYPE
{"UNSUPPORTED_CONTENT_TYPE", ERR_LIB_PKCS7, PKCS7_R_UNSUPPORTED_CONTENT_TYPE},
#else
{"UNSUPPORTED_CONTENT_TYPE", 33, 112},
#endif
#ifdef PKCS7_R_WRONG_CONTENT_TYPE
{"WRONG_CONTENT_TYPE", ERR_LIB_PKCS7, PKCS7_R_WRONG_CONTENT_TYPE},
#else
{"WRONG_CONTENT_TYPE", 33, 113},
#endif
#ifdef PKCS7_R_WRONG_PKCS7_TYPE
{"WRONG_PKCS7_TYPE", ERR_LIB_PKCS7, PKCS7_R_WRONG_PKCS7_TYPE},
#else
{"WRONG_PKCS7_TYPE", 33, 114},
#endif
#ifdef RAND_R_ADDITIONAL_INPUT_TOO_LONG
{"ADDITIONAL_INPUT_TOO_LONG", ERR_LIB_RAND, RAND_R_ADDITIONAL_INPUT_TOO_LONG},
#else
{"ADDITIONAL_INPUT_TOO_LONG", 36, 102},
#endif
#ifdef RAND_R_ALREADY_INSTANTIATED
{"ALREADY_INSTANTIATED", ERR_LIB_RAND, RAND_R_ALREADY_INSTANTIATED},
#else
{"ALREADY_INSTANTIATED", 36, 103},
#endif
#ifdef RAND_R_ARGUMENT_OUT_OF_RANGE
{"ARGUMENT_OUT_OF_RANGE", ERR_LIB_RAND, RAND_R_ARGUMENT_OUT_OF_RANGE},
#else
{"ARGUMENT_OUT_OF_RANGE", 36, 105},
#endif
#ifdef RAND_R_CANNOT_OPEN_FILE
{"CANNOT_OPEN_FILE", ERR_LIB_RAND, RAND_R_CANNOT_OPEN_FILE},
#else
{"CANNOT_OPEN_FILE", 36, 121},
#endif
#ifdef RAND_R_DRBG_ALREADY_INITIALIZED
{"DRBG_ALREADY_INITIALIZED", ERR_LIB_RAND, RAND_R_DRBG_ALREADY_INITIALIZED},
#else
{"DRBG_ALREADY_INITIALIZED", 36, 129},
#endif
#ifdef RAND_R_DRBG_NOT_INITIALISED
{"DRBG_NOT_INITIALISED", ERR_LIB_RAND, RAND_R_DRBG_NOT_INITIALISED},
#else
{"DRBG_NOT_INITIALISED", 36, 104},
#endif
#ifdef RAND_R_ENTROPY_INPUT_TOO_LONG
{"ENTROPY_INPUT_TOO_LONG", ERR_LIB_RAND, RAND_R_ENTROPY_INPUT_TOO_LONG},
#else
{"ENTROPY_INPUT_TOO_LONG", 36, 106},
#endif
#ifdef RAND_R_ENTROPY_OUT_OF_RANGE
{"ENTROPY_OUT_OF_RANGE", ERR_LIB_RAND, RAND_R_ENTROPY_OUT_OF_RANGE},
#else
{"ENTROPY_OUT_OF_RANGE", 36, 124},
#endif
#ifdef RAND_R_ERROR_ENTROPY_POOL_WAS_IGNORED
{"ERROR_ENTROPY_POOL_WAS_IGNORED", ERR_LIB_RAND, RAND_R_ERROR_ENTROPY_POOL_WAS_IGNORED},
#else
{"ERROR_ENTROPY_POOL_WAS_IGNORED", 36, 127},
#endif
#ifdef RAND_R_ERROR_INITIALISING_DRBG
{"ERROR_INITIALISING_DRBG", ERR_LIB_RAND, RAND_R_ERROR_INITIALISING_DRBG},
#else
{"ERROR_INITIALISING_DRBG", 36, 107},
#endif
#ifdef RAND_R_ERROR_INSTANTIATING_DRBG
{"ERROR_INSTANTIATING_DRBG", ERR_LIB_RAND, RAND_R_ERROR_INSTANTIATING_DRBG},
#else
{"ERROR_INSTANTIATING_DRBG", 36, 108},
#endif
#ifdef RAND_R_ERROR_RETRIEVING_ADDITIONAL_INPUT
{"ERROR_RETRIEVING_ADDITIONAL_INPUT", ERR_LIB_RAND, RAND_R_ERROR_RETRIEVING_ADDITIONAL_INPUT},
#else
{"ERROR_RETRIEVING_ADDITIONAL_INPUT", 36, 109},
#endif
#ifdef RAND_R_ERROR_RETRIEVING_ENTROPY
{"ERROR_RETRIEVING_ENTROPY", ERR_LIB_RAND, RAND_R_ERROR_RETRIEVING_ENTROPY},
#else
{"ERROR_RETRIEVING_ENTROPY", 36, 110},
#endif
#ifdef RAND_R_ERROR_RETRIEVING_NONCE
{"ERROR_RETRIEVING_NONCE", ERR_LIB_RAND, RAND_R_ERROR_RETRIEVING_NONCE},
#else
{"ERROR_RETRIEVING_NONCE", 36, 111},
#endif
#ifdef RAND_R_FAILED_TO_CREATE_LOCK
{"FAILED_TO_CREATE_LOCK", ERR_LIB_RAND, RAND_R_FAILED_TO_CREATE_LOCK},
#else
{"FAILED_TO_CREATE_LOCK", 36, 126},
#endif
#ifdef RAND_R_FUNC_NOT_IMPLEMENTED
{"FUNC_NOT_IMPLEMENTED", ERR_LIB_RAND, RAND_R_FUNC_NOT_IMPLEMENTED},
#else
{"FUNC_NOT_IMPLEMENTED", 36, 101},
#endif
#ifdef RAND_R_FWRITE_ERROR
{"FWRITE_ERROR", ERR_LIB_RAND, RAND_R_FWRITE_ERROR},
#else
{"FWRITE_ERROR", 36, 123},
#endif
#ifdef RAND_R_GENERATE_ERROR
{"GENERATE_ERROR", ERR_LIB_RAND, RAND_R_GENERATE_ERROR},
#else
{"GENERATE_ERROR", 36, 112},
#endif
#ifdef RAND_R_INTERNAL_ERROR
{"INTERNAL_ERROR", ERR_LIB_RAND, RAND_R_INTERNAL_ERROR},
#else
{"INTERNAL_ERROR", 36, 113},
#endif
#ifdef RAND_R_IN_ERROR_STATE
{"IN_ERROR_STATE", ERR_LIB_RAND, RAND_R_IN_ERROR_STATE},
#else
{"IN_ERROR_STATE", 36, 114},
#endif
#ifdef RAND_R_NOT_A_REGULAR_FILE
{"NOT_A_REGULAR_FILE", ERR_LIB_RAND, RAND_R_NOT_A_REGULAR_FILE},
#else
{"NOT_A_REGULAR_FILE", 36, 122},
#endif
#ifdef RAND_R_NOT_INSTANTIATED
{"NOT_INSTANTIATED", ERR_LIB_RAND, RAND_R_NOT_INSTANTIATED},
#else
{"NOT_INSTANTIATED", 36, 115},
#endif
#ifdef RAND_R_NO_DRBG_IMPLEMENTATION_SELECTED
{"NO_DRBG_IMPLEMENTATION_SELECTED", ERR_LIB_RAND, RAND_R_NO_DRBG_IMPLEMENTATION_SELECTED},
#else
{"NO_DRBG_IMPLEMENTATION_SELECTED", 36, 128},
#endif
#ifdef RAND_R_PARENT_LOCKING_NOT_ENABLED
{"PARENT_LOCKING_NOT_ENABLED", ERR_LIB_RAND, RAND_R_PARENT_LOCKING_NOT_ENABLED},
#else
{"PARENT_LOCKING_NOT_ENABLED", 36, 130},
#endif
#ifdef RAND_R_PARENT_STRENGTH_TOO_WEAK
{"PARENT_STRENGTH_TOO_WEAK", ERR_LIB_RAND, RAND_R_PARENT_STRENGTH_TOO_WEAK},
#else
{"PARENT_STRENGTH_TOO_WEAK", 36, 131},
#endif
#ifdef RAND_R_PERSONALISATION_STRING_TOO_LONG
{"PERSONALISATION_STRING_TOO_LONG", ERR_LIB_RAND, RAND_R_PERSONALISATION_STRING_TOO_LONG},
#else
{"PERSONALISATION_STRING_TOO_LONG", 36, 116},
#endif
#ifdef RAND_R_PREDICTION_RESISTANCE_NOT_SUPPORTED
{"PREDICTION_RESISTANCE_NOT_SUPPORTED", ERR_LIB_RAND, RAND_R_PREDICTION_RESISTANCE_NOT_SUPPORTED},
#else
{"PREDICTION_RESISTANCE_NOT_SUPPORTED", 36, 133},
#endif
#ifdef RAND_R_PRNG_NOT_SEEDED
{"PRNG_NOT_SEEDED", ERR_LIB_RAND, RAND_R_PRNG_NOT_SEEDED},
#else
{"PRNG_NOT_SEEDED", 36, 100},
#endif
#ifdef RAND_R_RANDOM_POOL_OVERFLOW
{"RANDOM_POOL_OVERFLOW", ERR_LIB_RAND, RAND_R_RANDOM_POOL_OVERFLOW},
#else
{"RANDOM_POOL_OVERFLOW", 36, 125},
#endif
#ifdef RAND_R_RANDOM_POOL_UNDERFLOW
{"RANDOM_POOL_UNDERFLOW", ERR_LIB_RAND, RAND_R_RANDOM_POOL_UNDERFLOW},
#else
{"RANDOM_POOL_UNDERFLOW", 36, 134},
#endif
#ifdef RAND_R_REQUEST_TOO_LARGE_FOR_DRBG
{"REQUEST_TOO_LARGE_FOR_DRBG", ERR_LIB_RAND, RAND_R_REQUEST_TOO_LARGE_FOR_DRBG},
#else
{"REQUEST_TOO_LARGE_FOR_DRBG", 36, 117},
#endif
#ifdef RAND_R_RESEED_ERROR
{"RESEED_ERROR", ERR_LIB_RAND, RAND_R_RESEED_ERROR},
#else
{"RESEED_ERROR", 36, 118},
#endif
#ifdef RAND_R_SELFTEST_FAILURE
{"SELFTEST_FAILURE", ERR_LIB_RAND, RAND_R_SELFTEST_FAILURE},
#else
{"SELFTEST_FAILURE", 36, 119},
#endif
#ifdef RAND_R_TOO_LITTLE_NONCE_REQUESTED
{"TOO_LITTLE_NONCE_REQUESTED", ERR_LIB_RAND, RAND_R_TOO_LITTLE_NONCE_REQUESTED},
#else
{"TOO_LITTLE_NONCE_REQUESTED", 36, 135},
#endif
#ifdef RAND_R_TOO_MUCH_NONCE_REQUESTED
{"TOO_MUCH_NONCE_REQUESTED", ERR_LIB_RAND, RAND_R_TOO_MUCH_NONCE_REQUESTED},
#else
{"TOO_MUCH_NONCE_REQUESTED", 36, 136},
#endif
#ifdef RAND_R_UNSUPPORTED_DRBG_FLAGS
{"UNSUPPORTED_DRBG_FLAGS", ERR_LIB_RAND, RAND_R_UNSUPPORTED_DRBG_FLAGS},
#else
{"UNSUPPORTED_DRBG_FLAGS", 36, 132},
#endif
#ifdef RAND_R_UNSUPPORTED_DRBG_TYPE
{"UNSUPPORTED_DRBG_TYPE", ERR_LIB_RAND, RAND_R_UNSUPPORTED_DRBG_TYPE},
#else
{"UNSUPPORTED_DRBG_TYPE", 36, 120},
#endif
#ifdef RSA_R_ALGORITHM_MISMATCH
{"ALGORITHM_MISMATCH", ERR_LIB_RSA, RSA_R_ALGORITHM_MISMATCH},
#else
{"ALGORITHM_MISMATCH", 4, 100},
#endif
#ifdef RSA_R_BAD_E_VALUE
{"BAD_E_VALUE", ERR_LIB_RSA, RSA_R_BAD_E_VALUE},
#else
{"BAD_E_VALUE", 4, 101},
#endif
#ifdef RSA_R_BAD_FIXED_HEADER_DECRYPT
{"BAD_FIXED_HEADER_DECRYPT", ERR_LIB_RSA, RSA_R_BAD_FIXED_HEADER_DECRYPT},
#else
{"BAD_FIXED_HEADER_DECRYPT", 4, 102},
#endif
#ifdef RSA_R_BAD_PAD_BYTE_COUNT
{"BAD_PAD_BYTE_COUNT", ERR_LIB_RSA, RSA_R_BAD_PAD_BYTE_COUNT},
#else
{"BAD_PAD_BYTE_COUNT", 4, 103},
#endif
#ifdef RSA_R_BAD_SIGNATURE
{"BAD_SIGNATURE", ERR_LIB_RSA, RSA_R_BAD_SIGNATURE},
#else
{"BAD_SIGNATURE", 4, 104},
#endif
#ifdef RSA_R_BLOCK_TYPE_IS_NOT_01
{"BLOCK_TYPE_IS_NOT_01", ERR_LIB_RSA, RSA_R_BLOCK_TYPE_IS_NOT_01},
#else
{"BLOCK_TYPE_IS_NOT_01", 4, 106},
#endif
#ifdef RSA_R_BLOCK_TYPE_IS_NOT_02
{"BLOCK_TYPE_IS_NOT_02", ERR_LIB_RSA, RSA_R_BLOCK_TYPE_IS_NOT_02},
#else
{"BLOCK_TYPE_IS_NOT_02", 4, 107},
#endif
#ifdef RSA_R_DATA_GREATER_THAN_MOD_LEN
{"DATA_GREATER_THAN_MOD_LEN", ERR_LIB_RSA, RSA_R_DATA_GREATER_THAN_MOD_LEN},
#else
{"DATA_GREATER_THAN_MOD_LEN", 4, 108},
#endif
#ifdef RSA_R_DATA_TOO_LARGE
{"DATA_TOO_LARGE", ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE},
#else
{"DATA_TOO_LARGE", 4, 109},
#endif
#ifdef RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE
{"DATA_TOO_LARGE_FOR_KEY_SIZE", ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE},
#else
{"DATA_TOO_LARGE_FOR_KEY_SIZE", 4, 110},
#endif
#ifdef RSA_R_DATA_TOO_LARGE_FOR_MODULUS
{"DATA_TOO_LARGE_FOR_MODULUS", ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS},
#else
{"DATA_TOO_LARGE_FOR_MODULUS", 4, 132},
#endif
#ifdef RSA_R_DATA_TOO_SMALL
{"DATA_TOO_SMALL", ERR_LIB_RSA, RSA_R_DATA_TOO_SMALL},
#else
{"DATA_TOO_SMALL", 4, 111},
#endif
#ifdef RSA_R_DATA_TOO_SMALL_FOR_KEY_SIZE
{"DATA_TOO_SMALL_FOR_KEY_SIZE", ERR_LIB_RSA, RSA_R_DATA_TOO_SMALL_FOR_KEY_SIZE},
#else
{"DATA_TOO_SMALL_FOR_KEY_SIZE", 4, 122},
#endif
#ifdef RSA_R_DIGEST_DOES_NOT_MATCH
{"DIGEST_DOES_NOT_MATCH", ERR_LIB_RSA, RSA_R_DIGEST_DOES_NOT_MATCH},
#else
{"DIGEST_DOES_NOT_MATCH", 4, 158},
#endif
#ifdef RSA_R_DIGEST_NOT_ALLOWED
{"DIGEST_NOT_ALLOWED", ERR_LIB_RSA, RSA_R_DIGEST_NOT_ALLOWED},
#else
{"DIGEST_NOT_ALLOWED", 4, 145},
#endif
#ifdef RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY
{"DIGEST_TOO_BIG_FOR_RSA_KEY", ERR_LIB_RSA, RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY},
#else
{"DIGEST_TOO_BIG_FOR_RSA_KEY", 4, 112},
#endif
#ifdef RSA_R_DMP1_NOT_CONGRUENT_TO_D
{"DMP1_NOT_CONGRUENT_TO_D", ERR_LIB_RSA, RSA_R_DMP1_NOT_CONGRUENT_TO_D},
#else
{"DMP1_NOT_CONGRUENT_TO_D", 4, 124},
#endif
#ifdef RSA_R_DMQ1_NOT_CONGRUENT_TO_D
{"DMQ1_NOT_CONGRUENT_TO_D", ERR_LIB_RSA, RSA_R_DMQ1_NOT_CONGRUENT_TO_D},
#else
{"DMQ1_NOT_CONGRUENT_TO_D", 4, 125},
#endif
#ifdef RSA_R_D_E_NOT_CONGRUENT_TO_1
{"D_E_NOT_CONGRUENT_TO_1", ERR_LIB_RSA, RSA_R_D_E_NOT_CONGRUENT_TO_1},
#else
{"D_E_NOT_CONGRUENT_TO_1", 4, 123},
#endif
#ifdef RSA_R_FIRST_OCTET_INVALID
{"FIRST_OCTET_INVALID", ERR_LIB_RSA, RSA_R_FIRST_OCTET_INVALID},
#else
{"FIRST_OCTET_INVALID", 4, 133},
#endif
#ifdef RSA_R_ILLEGAL_OR_UNSUPPORTED_PADDING_MODE
{"ILLEGAL_OR_UNSUPPORTED_PADDING_MODE", ERR_LIB_RSA, RSA_R_ILLEGAL_OR_UNSUPPORTED_PADDING_MODE},
#else
{"ILLEGAL_OR_UNSUPPORTED_PADDING_MODE", 4, 144},
#endif
#ifdef RSA_R_INVALID_DIGEST
{"INVALID_DIGEST", ERR_LIB_RSA, RSA_R_INVALID_DIGEST},
#else
{"INVALID_DIGEST", 4, 157},
#endif
#ifdef RSA_R_INVALID_DIGEST_LENGTH
{"INVALID_DIGEST_LENGTH", ERR_LIB_RSA, RSA_R_INVALID_DIGEST_LENGTH},
#else
{"INVALID_DIGEST_LENGTH", 4, 143},
#endif
#ifdef RSA_R_INVALID_HEADER
{"INVALID_HEADER", ERR_LIB_RSA, RSA_R_INVALID_HEADER},
#else
{"INVALID_HEADER", 4, 137},
#endif
#ifdef RSA_R_INVALID_LABEL
{"INVALID_LABEL", ERR_LIB_RSA, RSA_R_INVALID_LABEL},
#else
{"INVALID_LABEL", 4, 160},
#endif
#ifdef RSA_R_INVALID_MESSAGE_LENGTH
{"INVALID_MESSAGE_LENGTH", ERR_LIB_RSA, RSA_R_INVALID_MESSAGE_LENGTH},
#else
{"INVALID_MESSAGE_LENGTH", 4, 131},
#endif
#ifdef RSA_R_INVALID_MGF1_MD
{"INVALID_MGF1_MD", ERR_LIB_RSA, RSA_R_INVALID_MGF1_MD},
#else
{"INVALID_MGF1_MD", 4, 156},
#endif
#ifdef RSA_R_INVALID_MULTI_PRIME_KEY
{"INVALID_MULTI_PRIME_KEY", ERR_LIB_RSA, RSA_R_INVALID_MULTI_PRIME_KEY},
#else
{"INVALID_MULTI_PRIME_KEY", 4, 167},
#endif
#ifdef RSA_R_INVALID_OAEP_PARAMETERS
{"INVALID_OAEP_PARAMETERS", ERR_LIB_RSA, RSA_R_INVALID_OAEP_PARAMETERS},
#else
{"INVALID_OAEP_PARAMETERS", 4, 161},
#endif
#ifdef RSA_R_INVALID_PADDING
{"INVALID_PADDING", ERR_LIB_RSA, RSA_R_INVALID_PADDING},
#else
{"INVALID_PADDING", 4, 138},
#endif
#ifdef RSA_R_INVALID_PADDING_MODE
{"INVALID_PADDING_MODE", ERR_LIB_RSA, RSA_R_INVALID_PADDING_MODE},
#else
{"INVALID_PADDING_MODE", 4, 141},
#endif
#ifdef RSA_R_INVALID_PSS_PARAMETERS
{"INVALID_PSS_PARAMETERS", ERR_LIB_RSA, RSA_R_INVALID_PSS_PARAMETERS},
#else
{"INVALID_PSS_PARAMETERS", 4, 149},
#endif
#ifdef RSA_R_INVALID_PSS_SALTLEN
{"INVALID_PSS_SALTLEN", ERR_LIB_RSA, RSA_R_INVALID_PSS_SALTLEN},
#else
{"INVALID_PSS_SALTLEN", 4, 146},
#endif
#ifdef RSA_R_INVALID_SALT_LENGTH
{"INVALID_SALT_LENGTH", ERR_LIB_RSA, RSA_R_INVALID_SALT_LENGTH},
#else
{"INVALID_SALT_LENGTH", 4, 150},
#endif
#ifdef RSA_R_INVALID_TRAILER
{"INVALID_TRAILER", ERR_LIB_RSA, RSA_R_INVALID_TRAILER},
#else
{"INVALID_TRAILER", 4, 139},
#endif
#ifdef RSA_R_INVALID_X931_DIGEST
{"INVALID_X931_DIGEST", ERR_LIB_RSA, RSA_R_INVALID_X931_DIGEST},
#else
{"INVALID_X931_DIGEST", 4, 142},
#endif
#ifdef RSA_R_IQMP_NOT_INVERSE_OF_Q
{"IQMP_NOT_INVERSE_OF_Q", ERR_LIB_RSA, RSA_R_IQMP_NOT_INVERSE_OF_Q},
#else
{"IQMP_NOT_INVERSE_OF_Q", 4, 126},
#endif
#ifdef RSA_R_KEY_PRIME_NUM_INVALID
{"KEY_PRIME_NUM_INVALID", ERR_LIB_RSA, RSA_R_KEY_PRIME_NUM_INVALID},
#else
{"KEY_PRIME_NUM_INVALID", 4, 165},
#endif
#ifdef RSA_R_KEY_SIZE_TOO_SMALL
{"KEY_SIZE_TOO_SMALL", ERR_LIB_RSA, RSA_R_KEY_SIZE_TOO_SMALL},
#else
{"KEY_SIZE_TOO_SMALL", 4, 120},
#endif
#ifdef RSA_R_LAST_OCTET_INVALID
{"LAST_OCTET_INVALID", ERR_LIB_RSA, RSA_R_LAST_OCTET_INVALID},
#else
{"LAST_OCTET_INVALID", 4, 134},
#endif
#ifdef RSA_R_MGF1_DIGEST_NOT_ALLOWED
{"MGF1_DIGEST_NOT_ALLOWED", ERR_LIB_RSA, RSA_R_MGF1_DIGEST_NOT_ALLOWED},
#else
{"MGF1_DIGEST_NOT_ALLOWED", 4, 152},
#endif
#ifdef RSA_R_MISSING_PRIVATE_KEY
{"MISSING_PRIVATE_KEY", ERR_LIB_RSA, RSA_R_MISSING_PRIVATE_KEY},
#else
{"MISSING_PRIVATE_KEY", 4, 179},
#endif
#ifdef RSA_R_MODULUS_TOO_LARGE
{"MODULUS_TOO_LARGE", ERR_LIB_RSA, RSA_R_MODULUS_TOO_LARGE},
#else
{"MODULUS_TOO_LARGE", 4, 105},
#endif
#ifdef RSA_R_MP_COEFFICIENT_NOT_INVERSE_OF_R
{"MP_COEFFICIENT_NOT_INVERSE_OF_R", ERR_LIB_RSA, RSA_R_MP_COEFFICIENT_NOT_INVERSE_OF_R},
#else
{"MP_COEFFICIENT_NOT_INVERSE_OF_R", 4, 168},
#endif
#ifdef RSA_R_MP_EXPONENT_NOT_CONGRUENT_TO_D
{"MP_EXPONENT_NOT_CONGRUENT_TO_D", ERR_LIB_RSA, RSA_R_MP_EXPONENT_NOT_CONGRUENT_TO_D},
#else
{"MP_EXPONENT_NOT_CONGRUENT_TO_D", 4, 169},
#endif
#ifdef RSA_R_MP_R_NOT_PRIME
{"MP_R_NOT_PRIME", ERR_LIB_RSA, RSA_R_MP_R_NOT_PRIME},
#else
{"MP_R_NOT_PRIME", 4, 170},
#endif
#ifdef RSA_R_NO_PUBLIC_EXPONENT
{"NO_PUBLIC_EXPONENT", ERR_LIB_RSA, RSA_R_NO_PUBLIC_EXPONENT},
#else
{"NO_PUBLIC_EXPONENT", 4, 140},
#endif
#ifdef RSA_R_NULL_BEFORE_BLOCK_MISSING
{"NULL_BEFORE_BLOCK_MISSING", ERR_LIB_RSA, RSA_R_NULL_BEFORE_BLOCK_MISSING},
#else
{"NULL_BEFORE_BLOCK_MISSING", 4, 113},
#endif
#ifdef RSA_R_N_DOES_NOT_EQUAL_PRODUCT_OF_PRIMES
{"N_DOES_NOT_EQUAL_PRODUCT_OF_PRIMES", ERR_LIB_RSA, RSA_R_N_DOES_NOT_EQUAL_PRODUCT_OF_PRIMES},
#else
{"N_DOES_NOT_EQUAL_PRODUCT_OF_PRIMES", 4, 172},
#endif
#ifdef RSA_R_N_DOES_NOT_EQUAL_P_Q
{"N_DOES_NOT_EQUAL_P_Q", ERR_LIB_RSA, RSA_R_N_DOES_NOT_EQUAL_P_Q},
#else
{"N_DOES_NOT_EQUAL_P_Q", 4, 127},
#endif
#ifdef RSA_R_OAEP_DECODING_ERROR
{"OAEP_DECODING_ERROR", ERR_LIB_RSA, RSA_R_OAEP_DECODING_ERROR},
#else
{"OAEP_DECODING_ERROR", 4, 121},
#endif
#ifdef RSA_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE
{"OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE", ERR_LIB_RSA, RSA_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE},
#else
{"OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE", 4, 148},
#endif
#ifdef RSA_R_PADDING_CHECK_FAILED
{"PADDING_CHECK_FAILED", ERR_LIB_RSA, RSA_R_PADDING_CHECK_FAILED},
#else
{"PADDING_CHECK_FAILED", 4, 114},
#endif
#ifdef RSA_R_PKCS_DECODING_ERROR
{"PKCS_DECODING_ERROR", ERR_LIB_RSA, RSA_R_PKCS_DECODING_ERROR},
#else
{"PKCS_DECODING_ERROR", 4, 159},
#endif
#ifdef RSA_R_PSS_SALTLEN_TOO_SMALL
{"PSS_SALTLEN_TOO_SMALL", ERR_LIB_RSA, RSA_R_PSS_SALTLEN_TOO_SMALL},
#else
{"PSS_SALTLEN_TOO_SMALL", 4, 164},
#endif
#ifdef RSA_R_P_NOT_PRIME
{"P_NOT_PRIME", ERR_LIB_RSA, RSA_R_P_NOT_PRIME},
#else
{"P_NOT_PRIME", 4, 128},
#endif
#ifdef RSA_R_Q_NOT_PRIME
{"Q_NOT_PRIME", ERR_LIB_RSA, RSA_R_Q_NOT_PRIME},
#else
{"Q_NOT_PRIME", 4, 129},
#endif
#ifdef RSA_R_RSA_OPERATIONS_NOT_SUPPORTED
{"RSA_OPERATIONS_NOT_SUPPORTED", ERR_LIB_RSA, RSA_R_RSA_OPERATIONS_NOT_SUPPORTED},
#else
{"RSA_OPERATIONS_NOT_SUPPORTED", 4, 130},
#endif
#ifdef RSA_R_SLEN_CHECK_FAILED
{"SLEN_CHECK_FAILED", ERR_LIB_RSA, RSA_R_SLEN_CHECK_FAILED},
#else
{"SLEN_CHECK_FAILED", 4, 136},
#endif
#ifdef RSA_R_SLEN_RECOVERY_FAILED
{"SLEN_RECOVERY_FAILED", ERR_LIB_RSA, RSA_R_SLEN_RECOVERY_FAILED},
#else
{"SLEN_RECOVERY_FAILED", 4, 135},
#endif
#ifdef RSA_R_SSLV3_ROLLBACK_ATTACK
{"SSLV3_ROLLBACK_ATTACK", ERR_LIB_RSA, RSA_R_SSLV3_ROLLBACK_ATTACK},
#else
{"SSLV3_ROLLBACK_ATTACK", 4, 115},
#endif
#ifdef RSA_R_THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD
{"THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD", ERR_LIB_RSA, RSA_R_THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD},
#else
{"THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD", 4, 116},
#endif
#ifdef RSA_R_UNKNOWN_ALGORITHM_TYPE
{"UNKNOWN_ALGORITHM_TYPE", ERR_LIB_RSA, RSA_R_UNKNOWN_ALGORITHM_TYPE},
#else
{"UNKNOWN_ALGORITHM_TYPE", 4, 117},
#endif
#ifdef RSA_R_UNKNOWN_DIGEST
{"UNKNOWN_DIGEST", ERR_LIB_RSA, RSA_R_UNKNOWN_DIGEST},
#else
{"UNKNOWN_DIGEST", 4, 166},
#endif
#ifdef RSA_R_UNKNOWN_MASK_DIGEST
{"UNKNOWN_MASK_DIGEST", ERR_LIB_RSA, RSA_R_UNKNOWN_MASK_DIGEST},
#else
{"UNKNOWN_MASK_DIGEST", 4, 151},
#endif
#ifdef RSA_R_UNKNOWN_PADDING_TYPE
{"UNKNOWN_PADDING_TYPE", ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE},
#else
{"UNKNOWN_PADDING_TYPE", 4, 118},
#endif
#ifdef RSA_R_UNSUPPORTED_ENCRYPTION_TYPE
{"UNSUPPORTED_ENCRYPTION_TYPE", ERR_LIB_RSA, RSA_R_UNSUPPORTED_ENCRYPTION_TYPE},
#else
{"UNSUPPORTED_ENCRYPTION_TYPE", 4, 162},
#endif
#ifdef RSA_R_UNSUPPORTED_LABEL_SOURCE
{"UNSUPPORTED_LABEL_SOURCE", ERR_LIB_RSA, RSA_R_UNSUPPORTED_LABEL_SOURCE},
#else
{"UNSUPPORTED_LABEL_SOURCE", 4, 163},
#endif
#ifdef RSA_R_UNSUPPORTED_MASK_ALGORITHM
{"UNSUPPORTED_MASK_ALGORITHM", ERR_LIB_RSA, RSA_R_UNSUPPORTED_MASK_ALGORITHM},
#else
{"UNSUPPORTED_MASK_ALGORITHM", 4, 153},
#endif
#ifdef RSA_R_UNSUPPORTED_MASK_PARAMETER
{"UNSUPPORTED_MASK_PARAMETER", ERR_LIB_RSA, RSA_R_UNSUPPORTED_MASK_PARAMETER},
#else
{"UNSUPPORTED_MASK_PARAMETER", 4, 154},
#endif
#ifdef RSA_R_UNSUPPORTED_SIGNATURE_TYPE
{"UNSUPPORTED_SIGNATURE_TYPE", ERR_LIB_RSA, RSA_R_UNSUPPORTED_SIGNATURE_TYPE},
#else
{"UNSUPPORTED_SIGNATURE_TYPE", 4, 155},
#endif
#ifdef RSA_R_VALUE_MISSING
{"VALUE_MISSING", ERR_LIB_RSA, RSA_R_VALUE_MISSING},
#else
{"VALUE_MISSING", 4, 147},
#endif
#ifdef RSA_R_WRONG_SIGNATURE_LENGTH
{"WRONG_SIGNATURE_LENGTH", ERR_LIB_RSA, RSA_R_WRONG_SIGNATURE_LENGTH},
#else
{"WRONG_SIGNATURE_LENGTH", 4, 119},
#endif
#ifdef SM2_R_ASN1_ERROR
{"ASN1_ERROR", ERR_LIB_SM2, SM2_R_ASN1_ERROR},
#else
{"ASN1_ERROR", 53, 100},
#endif
#ifdef SM2_R_BAD_SIGNATURE
{"BAD_SIGNATURE", ERR_LIB_SM2, SM2_R_BAD_SIGNATURE},
#else
{"BAD_SIGNATURE", 53, 101},
#endif
#ifdef SM2_R_BUFFER_TOO_SMALL
{"BUFFER_TOO_SMALL", ERR_LIB_SM2, SM2_R_BUFFER_TOO_SMALL},
#else
{"BUFFER_TOO_SMALL", 53, 107},
#endif
#ifdef SM2_R_DIST_ID_TOO_LARGE
{"DIST_ID_TOO_LARGE", ERR_LIB_SM2, SM2_R_DIST_ID_TOO_LARGE},
#else
{"DIST_ID_TOO_LARGE", 53, 110},
#endif
#ifdef SM2_R_ID_NOT_SET
{"ID_NOT_SET", ERR_LIB_SM2, SM2_R_ID_NOT_SET},
#else
{"ID_NOT_SET", 53, 112},
#endif
#ifdef SM2_R_ID_TOO_LARGE
{"ID_TOO_LARGE", ERR_LIB_SM2, SM2_R_ID_TOO_LARGE},
#else
{"ID_TOO_LARGE", 53, 111},
#endif
#ifdef SM2_R_INVALID_CURVE
{"INVALID_CURVE", ERR_LIB_SM2, SM2_R_INVALID_CURVE},
#else
{"INVALID_CURVE", 53, 108},
#endif
#ifdef SM2_R_INVALID_DIGEST
{"INVALID_DIGEST", ERR_LIB_SM2, SM2_R_INVALID_DIGEST},
#else
{"INVALID_DIGEST", 53, 102},
#endif
#ifdef SM2_R_INVALID_DIGEST_TYPE
{"INVALID_DIGEST_TYPE", ERR_LIB_SM2, SM2_R_INVALID_DIGEST_TYPE},
#else
{"INVALID_DIGEST_TYPE", 53, 103},
#endif
#ifdef SM2_R_INVALID_ENCODING
{"INVALID_ENCODING", ERR_LIB_SM2, SM2_R_INVALID_ENCODING},
#else
{"INVALID_ENCODING", 53, 104},
#endif
#ifdef SM2_R_INVALID_FIELD
{"INVALID_FIELD", ERR_LIB_SM2, SM2_R_INVALID_FIELD},
#else
{"INVALID_FIELD", 53, 105},
#endif
#ifdef SM2_R_NO_PARAMETERS_SET
{"NO_PARAMETERS_SET", ERR_LIB_SM2, SM2_R_NO_PARAMETERS_SET},
#else
{"NO_PARAMETERS_SET", 53, 109},
#endif
#ifdef SM2_R_USER_ID_TOO_LARGE
{"USER_ID_TOO_LARGE", ERR_LIB_SM2, SM2_R_USER_ID_TOO_LARGE},
#else
{"USER_ID_TOO_LARGE", 53, 106},
#endif
#ifdef SSL_R_APPLICATION_DATA_AFTER_CLOSE_NOTIFY
{"APPLICATION_DATA_AFTER_CLOSE_NOTIFY", ERR_LIB_SSL, SSL_R_APPLICATION_DATA_AFTER_CLOSE_NOTIFY},
#else
{"APPLICATION_DATA_AFTER_CLOSE_NOTIFY", 20, 291},
#endif
#ifdef SSL_R_APP_DATA_IN_HANDSHAKE
{"APP_DATA_IN_HANDSHAKE", ERR_LIB_SSL, SSL_R_APP_DATA_IN_HANDSHAKE},
#else
{"APP_DATA_IN_HANDSHAKE", 20, 100},
#endif
#ifdef SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT
{"ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT", ERR_LIB_SSL, SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT},
#else
{"ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT", 20, 272},
#endif
#ifdef SSL_R_AT_LEAST_TLS_1_0_NEEDED_IN_FIPS_MODE
{"AT_LEAST_TLS_1_0_NEEDED_IN_FIPS_MODE", ERR_LIB_SSL, SSL_R_AT_LEAST_TLS_1_0_NEEDED_IN_FIPS_MODE},
#else
{"AT_LEAST_TLS_1_0_NEEDED_IN_FIPS_MODE", 20, 143},
#endif
#ifdef SSL_R_AT_LEAST_TLS_1_2_NEEDED_IN_SUITEB_MODE
{"AT_LEAST_TLS_1_2_NEEDED_IN_SUITEB_MODE", ERR_LIB_SSL, SSL_R_AT_LEAST_TLS_1_2_NEEDED_IN_SUITEB_MODE},
#else
{"AT_LEAST_TLS_1_2_NEEDED_IN_SUITEB_MODE", 20, 158},
#endif
#ifdef SSL_R_BAD_CHANGE_CIPHER_SPEC
{"BAD_CHANGE_CIPHER_SPEC", ERR_LIB_SSL, SSL_R_BAD_CHANGE_CIPHER_SPEC},
#else
{"BAD_CHANGE_CIPHER_SPEC", 20, 103},
#endif
#ifdef SSL_R_BAD_CIPHER
{"BAD_CIPHER", ERR_LIB_SSL, SSL_R_BAD_CIPHER},
#else
{"BAD_CIPHER", 20, 186},
#endif
#ifdef SSL_R_BAD_DATA
{"BAD_DATA", ERR_LIB_SSL, SSL_R_BAD_DATA},
#else
{"BAD_DATA", 20, 390},
#endif
#ifdef SSL_R_BAD_DATA_RETURNED_BY_CALLBACK
{"BAD_DATA_RETURNED_BY_CALLBACK", ERR_LIB_SSL, SSL_R_BAD_DATA_RETURNED_BY_CALLBACK},
#else
{"BAD_DATA_RETURNED_BY_CALLBACK", 20, 106},
#endif
#ifdef SSL_R_BAD_DECOMPRESSION
{"BAD_DECOMPRESSION", ERR_LIB_SSL, SSL_R_BAD_DECOMPRESSION},
#else
{"BAD_DECOMPRESSION", 20, 107},
#endif
#ifdef SSL_R_BAD_DH_VALUE
{"BAD_DH_VALUE", ERR_LIB_SSL, SSL_R_BAD_DH_VALUE},
#else
{"BAD_DH_VALUE", 20, 102},
#endif
#ifdef SSL_R_BAD_DIGEST_LENGTH
{"BAD_DIGEST_LENGTH", ERR_LIB_SSL, SSL_R_BAD_DIGEST_LENGTH},
#else
{"BAD_DIGEST_LENGTH", 20, 111},
#endif
#ifdef SSL_R_BAD_EARLY_DATA
{"BAD_EARLY_DATA", ERR_LIB_SSL, SSL_R_BAD_EARLY_DATA},
#else
{"BAD_EARLY_DATA", 20, 233},
#endif
#ifdef SSL_R_BAD_ECC_CERT
{"BAD_ECC_CERT", ERR_LIB_SSL, SSL_R_BAD_ECC_CERT},
#else
{"BAD_ECC_CERT", 20, 304},
#endif
#ifdef SSL_R_BAD_ECPOINT
{"BAD_ECPOINT", ERR_LIB_SSL, SSL_R_BAD_ECPOINT},
#else
{"BAD_ECPOINT", 20, 306},
#endif
#ifdef SSL_R_BAD_EXTENSION
{"BAD_EXTENSION", ERR_LIB_SSL, SSL_R_BAD_EXTENSION},
#else
{"BAD_EXTENSION", 20, 110},
#endif
#ifdef SSL_R_BAD_HANDSHAKE_LENGTH
{"BAD_HANDSHAKE_LENGTH", ERR_LIB_SSL, SSL_R_BAD_HANDSHAKE_LENGTH},
#else
{"BAD_HANDSHAKE_LENGTH", 20, 332},
#endif
#ifdef SSL_R_BAD_HANDSHAKE_STATE
{"BAD_HANDSHAKE_STATE", ERR_LIB_SSL, SSL_R_BAD_HANDSHAKE_STATE},
#else
{"BAD_HANDSHAKE_STATE", 20, 236},
#endif
#ifdef SSL_R_BAD_HELLO_REQUEST
{"BAD_HELLO_REQUEST", ERR_LIB_SSL, SSL_R_BAD_HELLO_REQUEST},
#else
{"BAD_HELLO_REQUEST", 20, 105},
#endif
#ifdef SSL_R_BAD_HRR_VERSION
{"BAD_HRR_VERSION", ERR_LIB_SSL, SSL_R_BAD_HRR_VERSION},
#else
{"BAD_HRR_VERSION", 20, 263},
#endif
#ifdef SSL_R_BAD_KEY_SHARE
{"BAD_KEY_SHARE", ERR_LIB_SSL, SSL_R_BAD_KEY_SHARE},
#else
{"BAD_KEY_SHARE", 20, 108},
#endif
#ifdef SSL_R_BAD_KEY_UPDATE
{"BAD_KEY_UPDATE", ERR_LIB_SSL, SSL_R_BAD_KEY_UPDATE},
#else
{"BAD_KEY_UPDATE", 20, 122},
#endif
#ifdef SSL_R_BAD_LEGACY_VERSION
{"BAD_LEGACY_VERSION", ERR_LIB_SSL, SSL_R_BAD_LEGACY_VERSION},
#else
{"BAD_LEGACY_VERSION", 20, 292},
#endif
#ifdef SSL_R_BAD_LENGTH
{"BAD_LENGTH", ERR_LIB_SSL, SSL_R_BAD_LENGTH},
#else
{"BAD_LENGTH", 20, 271},
#endif
#ifdef SSL_R_BAD_PACKET
{"BAD_PACKET", ERR_LIB_SSL, SSL_R_BAD_PACKET},
#else
{"BAD_PACKET", 20, 240},
#endif
#ifdef SSL_R_BAD_PACKET_LENGTH
{"BAD_PACKET_LENGTH", ERR_LIB_SSL, SSL_R_BAD_PACKET_LENGTH},
#else
{"BAD_PACKET_LENGTH", 20, 115},
#endif
#ifdef SSL_R_BAD_PROTOCOL_VERSION_NUMBER
{"BAD_PROTOCOL_VERSION_NUMBER", ERR_LIB_SSL, SSL_R_BAD_PROTOCOL_VERSION_NUMBER},
#else
{"BAD_PROTOCOL_VERSION_NUMBER", 20, 116},
#endif
#ifdef SSL_R_BAD_PSK
{"BAD_PSK", ERR_LIB_SSL, SSL_R_BAD_PSK},
#else
{"BAD_PSK", 20, 219},
#endif
#ifdef SSL_R_BAD_PSK_IDENTITY
{"BAD_PSK_IDENTITY", ERR_LIB_SSL, SSL_R_BAD_PSK_IDENTITY},
#else
{"BAD_PSK_IDENTITY", 20, 114},
#endif
#ifdef SSL_R_BAD_RECORD_TYPE
{"BAD_RECORD_TYPE", ERR_LIB_SSL, SSL_R_BAD_RECORD_TYPE},
#else
{"BAD_RECORD_TYPE", 20, 443},
#endif
#ifdef SSL_R_BAD_RSA_ENCRYPT
{"BAD_RSA_ENCRYPT", ERR_LIB_SSL, SSL_R_BAD_RSA_ENCRYPT},
#else
{"BAD_RSA_ENCRYPT", 20, 119},
#endif
#ifdef SSL_R_BAD_SIGNATURE
{"BAD_SIGNATURE", ERR_LIB_SSL, SSL_R_BAD_SIGNATURE},
#else
{"BAD_SIGNATURE", 20, 123},
#endif
#ifdef SSL_R_BAD_SRP_A_LENGTH
{"BAD_SRP_A_LENGTH", ERR_LIB_SSL, SSL_R_BAD_SRP_A_LENGTH},
#else
{"BAD_SRP_A_LENGTH", 20, 347},
#endif
#ifdef SSL_R_BAD_SRP_PARAMETERS
{"BAD_SRP_PARAMETERS", ERR_LIB_SSL, SSL_R_BAD_SRP_PARAMETERS},
#else
{"BAD_SRP_PARAMETERS", 20, 371},
#endif
#ifdef SSL_R_BAD_SRTP_MKI_VALUE
{"BAD_SRTP_MKI_VALUE", ERR_LIB_SSL, SSL_R_BAD_SRTP_MKI_VALUE},
#else
{"BAD_SRTP_MKI_VALUE", 20, 352},
#endif
#ifdef SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST
{"BAD_SRTP_PROTECTION_PROFILE_LIST", ERR_LIB_SSL, SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST},
#else
{"BAD_SRTP_PROTECTION_PROFILE_LIST", 20, 353},
#endif
#ifdef SSL_R_BAD_SSL_FILETYPE
{"BAD_SSL_FILETYPE", ERR_LIB_SSL, SSL_R_BAD_SSL_FILETYPE},
#else
{"BAD_SSL_FILETYPE", 20, 124},
#endif
#ifdef SSL_R_BAD_VALUE
{"BAD_VALUE", ERR_LIB_SSL, SSL_R_BAD_VALUE},
#else
{"BAD_VALUE", 20, 384},
#endif
#ifdef SSL_R_BAD_WRITE_RETRY
{"BAD_WRITE_RETRY", ERR_LIB_SSL, SSL_R_BAD_WRITE_RETRY},
#else
{"BAD_WRITE_RETRY", 20, 127},
#endif
#ifdef SSL_R_BINDER_DOES_NOT_VERIFY
{"BINDER_DOES_NOT_VERIFY", ERR_LIB_SSL, SSL_R_BINDER_DOES_NOT_VERIFY},
#else
{"BINDER_DOES_NOT_VERIFY", 20, 253},
#endif
#ifdef SSL_R_BIO_NOT_SET
{"BIO_NOT_SET", ERR_LIB_SSL, SSL_R_BIO_NOT_SET},
#else
{"BIO_NOT_SET", 20, 128},
#endif
#ifdef SSL_R_BLOCK_CIPHER_PAD_IS_WRONG
{"BLOCK_CIPHER_PAD_IS_WRONG", ERR_LIB_SSL, SSL_R_BLOCK_CIPHER_PAD_IS_WRONG},
#else
{"BLOCK_CIPHER_PAD_IS_WRONG", 20, 129},
#endif
#ifdef SSL_R_BN_LIB
{"BN_LIB", ERR_LIB_SSL, SSL_R_BN_LIB},
#else
{"BN_LIB", 20, 130},
#endif
#ifdef SSL_R_CALLBACK_FAILED
{"CALLBACK_FAILED", ERR_LIB_SSL, SSL_R_CALLBACK_FAILED},
#else
{"CALLBACK_FAILED", 20, 234},
#endif
#ifdef SSL_R_CANNOT_CHANGE_CIPHER
{"CANNOT_CHANGE_CIPHER", ERR_LIB_SSL, SSL_R_CANNOT_CHANGE_CIPHER},
#else
{"CANNOT_CHANGE_CIPHER", 20, 109},
#endif
#ifdef SSL_R_CA_DN_LENGTH_MISMATCH
{"CA_DN_LENGTH_MISMATCH", ERR_LIB_SSL, SSL_R_CA_DN_LENGTH_MISMATCH},
#else
{"CA_DN_LENGTH_MISMATCH", 20, 131},
#endif
#ifdef SSL_R_CA_KEY_TOO_SMALL
{"CA_KEY_TOO_SMALL", ERR_LIB_SSL, SSL_R_CA_KEY_TOO_SMALL},
#else
{"CA_KEY_TOO_SMALL", 20, 397},
#endif
#ifdef SSL_R_CA_MD_TOO_WEAK
{"CA_MD_TOO_WEAK", ERR_LIB_SSL, SSL_R_CA_MD_TOO_WEAK},
#else
{"CA_MD_TOO_WEAK", 20, 398},
#endif
#ifdef SSL_R_CCS_RECEIVED_EARLY
{"CCS_RECEIVED_EARLY", ERR_LIB_SSL, SSL_R_CCS_RECEIVED_EARLY},
#else
{"CCS_RECEIVED_EARLY", 20, 133},
#endif
#ifdef SSL_R_CERTIFICATE_VERIFY_FAILED
{"CERTIFICATE_VERIFY_FAILED", ERR_LIB_SSL, SSL_R_CERTIFICATE_VERIFY_FAILED},
#else
{"CERTIFICATE_VERIFY_FAILED", 20, 134},
#endif
#ifdef SSL_R_CERT_CB_ERROR
{"CERT_CB_ERROR", ERR_LIB_SSL, SSL_R_CERT_CB_ERROR},
#else
{"CERT_CB_ERROR", 20, 377},
#endif
#ifdef SSL_R_CERT_LENGTH_MISMATCH
{"CERT_LENGTH_MISMATCH", ERR_LIB_SSL, SSL_R_CERT_LENGTH_MISMATCH},
#else
{"CERT_LENGTH_MISMATCH", 20, 135},
#endif
#ifdef SSL_R_CIPHERSUITE_DIGEST_HAS_CHANGED
{"CIPHERSUITE_DIGEST_HAS_CHANGED", ERR_LIB_SSL, SSL_R_CIPHERSUITE_DIGEST_HAS_CHANGED},
#else
{"CIPHERSUITE_DIGEST_HAS_CHANGED", 20, 218},
#endif
#ifdef SSL_R_CIPHER_CODE_WRONG_LENGTH
{"CIPHER_CODE_WRONG_LENGTH", ERR_LIB_SSL, SSL_R_CIPHER_CODE_WRONG_LENGTH},
#else
{"CIPHER_CODE_WRONG_LENGTH", 20, 137},
#endif
#ifdef SSL_R_CIPHER_OR_HASH_UNAVAILABLE
{"CIPHER_OR_HASH_UNAVAILABLE", ERR_LIB_SSL, SSL_R_CIPHER_OR_HASH_UNAVAILABLE},
#else
{"CIPHER_OR_HASH_UNAVAILABLE", 20, 138},
#endif
#ifdef SSL_R_CLIENTHELLO_TLSEXT
{"CLIENTHELLO_TLSEXT", ERR_LIB_SSL, SSL_R_CLIENTHELLO_TLSEXT},
#else
{"CLIENTHELLO_TLSEXT", 20, 226},
#endif
#ifdef SSL_R_COMPRESSED_LENGTH_TOO_LONG
{"COMPRESSED_LENGTH_TOO_LONG", ERR_LIB_SSL, SSL_R_COMPRESSED_LENGTH_TOO_LONG},
#else
{"COMPRESSED_LENGTH_TOO_LONG", 20, 140},
#endif
#ifdef SSL_R_COMPRESSION_DISABLED
{"COMPRESSION_DISABLED", ERR_LIB_SSL, SSL_R_COMPRESSION_DISABLED},
#else
{"COMPRESSION_DISABLED", 20, 343},
#endif
#ifdef SSL_R_COMPRESSION_FAILURE
{"COMPRESSION_FAILURE", ERR_LIB_SSL, SSL_R_COMPRESSION_FAILURE},
#else
{"COMPRESSION_FAILURE", 20, 141},
#endif
#ifdef SSL_R_COMPRESSION_ID_NOT_WITHIN_PRIVATE_RANGE
{"COMPRESSION_ID_NOT_WITHIN_PRIVATE_RANGE", ERR_LIB_SSL, SSL_R_COMPRESSION_ID_NOT_WITHIN_PRIVATE_RANGE},
#else
{"COMPRESSION_ID_NOT_WITHIN_PRIVATE_RANGE", 20, 307},
#endif
#ifdef SSL_R_COMPRESSION_LIBRARY_ERROR
{"COMPRESSION_LIBRARY_ERROR", ERR_LIB_SSL, SSL_R_COMPRESSION_LIBRARY_ERROR},
#else
{"COMPRESSION_LIBRARY_ERROR", 20, 142},
#endif
#ifdef SSL_R_CONNECTION_TYPE_NOT_SET
{"CONNECTION_TYPE_NOT_SET", ERR_LIB_SSL, SSL_R_CONNECTION_TYPE_NOT_SET},
#else
{"CONNECTION_TYPE_NOT_SET", 20, 144},
#endif
#ifdef SSL_R_CONTEXT_NOT_DANE_ENABLED
{"CONTEXT_NOT_DANE_ENABLED", ERR_LIB_SSL, SSL_R_CONTEXT_NOT_DANE_ENABLED},
#else
{"CONTEXT_NOT_DANE_ENABLED", 20, 167},
#endif
#ifdef SSL_R_COOKIE_GEN_CALLBACK_FAILURE
{"COOKIE_GEN_CALLBACK_FAILURE", ERR_LIB_SSL, SSL_R_COOKIE_GEN_CALLBACK_FAILURE},
#else
{"COOKIE_GEN_CALLBACK_FAILURE", 20, 400},
#endif
#ifdef SSL_R_COOKIE_MISMATCH
{"COOKIE_MISMATCH", ERR_LIB_SSL, SSL_R_COOKIE_MISMATCH},
#else
{"COOKIE_MISMATCH", 20, 308},
#endif
#ifdef SSL_R_CUSTOM_EXT_HANDLER_ALREADY_INSTALLED
{"CUSTOM_EXT_HANDLER_ALREADY_INSTALLED", ERR_LIB_SSL, SSL_R_CUSTOM_EXT_HANDLER_ALREADY_INSTALLED},
#else
{"CUSTOM_EXT_HANDLER_ALREADY_INSTALLED", 20, 206},
#endif
#ifdef SSL_R_DANE_ALREADY_ENABLED
{"DANE_ALREADY_ENABLED", ERR_LIB_SSL, SSL_R_DANE_ALREADY_ENABLED},
#else
{"DANE_ALREADY_ENABLED", 20, 172},
#endif
#ifdef SSL_R_DANE_CANNOT_OVERRIDE_MTYPE_FULL
{"DANE_CANNOT_OVERRIDE_MTYPE_FULL", ERR_LIB_SSL, SSL_R_DANE_CANNOT_OVERRIDE_MTYPE_FULL},
#else
{"DANE_CANNOT_OVERRIDE_MTYPE_FULL", 20, 173},
#endif
#ifdef SSL_R_DANE_NOT_ENABLED
{"DANE_NOT_ENABLED", ERR_LIB_SSL, SSL_R_DANE_NOT_ENABLED},
#else
{"DANE_NOT_ENABLED", 20, 175},
#endif
#ifdef SSL_R_DANE_TLSA_BAD_CERTIFICATE
{"DANE_TLSA_BAD_CERTIFICATE", ERR_LIB_SSL, SSL_R_DANE_TLSA_BAD_CERTIFICATE},
#else
{"DANE_TLSA_BAD_CERTIFICATE", 20, 180},
#endif
#ifdef SSL_R_DANE_TLSA_BAD_CERTIFICATE_USAGE
{"DANE_TLSA_BAD_CERTIFICATE_USAGE", ERR_LIB_SSL, SSL_R_DANE_TLSA_BAD_CERTIFICATE_USAGE},
#else
{"DANE_TLSA_BAD_CERTIFICATE_USAGE", 20, 184},
#endif
#ifdef SSL_R_DANE_TLSA_BAD_DATA_LENGTH
{"DANE_TLSA_BAD_DATA_LENGTH", ERR_LIB_SSL, SSL_R_DANE_TLSA_BAD_DATA_LENGTH},
#else
{"DANE_TLSA_BAD_DATA_LENGTH", 20, 189},
#endif
#ifdef SSL_R_DANE_TLSA_BAD_DIGEST_LENGTH
{"DANE_TLSA_BAD_DIGEST_LENGTH", ERR_LIB_SSL, SSL_R_DANE_TLSA_BAD_DIGEST_LENGTH},
#else
{"DANE_TLSA_BAD_DIGEST_LENGTH", 20, 192},
#endif
#ifdef SSL_R_DANE_TLSA_BAD_MATCHING_TYPE
{"DANE_TLSA_BAD_MATCHING_TYPE", ERR_LIB_SSL, SSL_R_DANE_TLSA_BAD_MATCHING_TYPE},
#else
{"DANE_TLSA_BAD_MATCHING_TYPE", 20, 200},
#endif
#ifdef SSL_R_DANE_TLSA_BAD_PUBLIC_KEY
{"DANE_TLSA_BAD_PUBLIC_KEY", ERR_LIB_SSL, SSL_R_DANE_TLSA_BAD_PUBLIC_KEY},
#else
{"DANE_TLSA_BAD_PUBLIC_KEY", 20, 201},
#endif
#ifdef SSL_R_DANE_TLSA_BAD_SELECTOR
{"DANE_TLSA_BAD_SELECTOR", ERR_LIB_SSL, SSL_R_DANE_TLSA_BAD_SELECTOR},
#else
{"DANE_TLSA_BAD_SELECTOR", 20, 202},
#endif
#ifdef SSL_R_DANE_TLSA_NULL_DATA
{"DANE_TLSA_NULL_DATA", ERR_LIB_SSL, SSL_R_DANE_TLSA_NULL_DATA},
#else
{"DANE_TLSA_NULL_DATA", 20, 203},
#endif
#ifdef SSL_R_DATA_BETWEEN_CCS_AND_FINISHED
{"DATA_BETWEEN_CCS_AND_FINISHED", ERR_LIB_SSL, SSL_R_DATA_BETWEEN_CCS_AND_FINISHED},
#else
{"DATA_BETWEEN_CCS_AND_FINISHED", 20, 145},
#endif
#ifdef SSL_R_DATA_LENGTH_TOO_LONG
{"DATA_LENGTH_TOO_LONG", ERR_LIB_SSL, SSL_R_DATA_LENGTH_TOO_LONG},
#else
{"DATA_LENGTH_TOO_LONG", 20, 146},
#endif
#ifdef SSL_R_DECRYPTION_FAILED
{"DECRYPTION_FAILED", ERR_LIB_SSL, SSL_R_DECRYPTION_FAILED},
#else
{"DECRYPTION_FAILED", 20, 147},
#endif
#ifdef SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC
{"DECRYPTION_FAILED_OR_BAD_RECORD_MAC", ERR_LIB_SSL, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC},
#else
{"DECRYPTION_FAILED_OR_BAD_RECORD_MAC", 20, 281},
#endif
#ifdef SSL_R_DH_KEY_TOO_SMALL
{"DH_KEY_TOO_SMALL", ERR_LIB_SSL, SSL_R_DH_KEY_TOO_SMALL},
#else
{"DH_KEY_TOO_SMALL", 20, 394},
#endif
#ifdef SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG
{"DH_PUBLIC_VALUE_LENGTH_IS_WRONG", ERR_LIB_SSL, SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG},
#else
{"DH_PUBLIC_VALUE_LENGTH_IS_WRONG", 20, 148},
#endif
#ifdef SSL_R_DIGEST_CHECK_FAILED
{"DIGEST_CHECK_FAILED", ERR_LIB_SSL, SSL_R_DIGEST_CHECK_FAILED},
#else
{"DIGEST_CHECK_FAILED", 20, 149},
#endif
#ifdef SSL_R_DTLS_MESSAGE_TOO_BIG
{"DTLS_MESSAGE_TOO_BIG", ERR_LIB_SSL, SSL_R_DTLS_MESSAGE_TOO_BIG},
#else
{"DTLS_MESSAGE_TOO_BIG", 20, 334},
#endif
#ifdef SSL_R_DUPLICATE_COMPRESSION_ID
{"DUPLICATE_COMPRESSION_ID", ERR_LIB_SSL, SSL_R_DUPLICATE_COMPRESSION_ID},
#else
{"DUPLICATE_COMPRESSION_ID", 20, 309},
#endif
#ifdef SSL_R_ECC_CERT_NOT_FOR_SIGNING
{"ECC_CERT_NOT_FOR_SIGNING", ERR_LIB_SSL, SSL_R_ECC_CERT_NOT_FOR_SIGNING},
#else
{"ECC_CERT_NOT_FOR_SIGNING", 20, 318},
#endif
#ifdef SSL_R_ECDH_REQUIRED_FOR_SUITEB_MODE
{"ECDH_REQUIRED_FOR_SUITEB_MODE", ERR_LIB_SSL, SSL_R_ECDH_REQUIRED_FOR_SUITEB_MODE},
#else
{"ECDH_REQUIRED_FOR_SUITEB_MODE", 20, 374},
#endif
#ifdef SSL_R_EE_KEY_TOO_SMALL
{"EE_KEY_TOO_SMALL", ERR_LIB_SSL, SSL_R_EE_KEY_TOO_SMALL},
#else
{"EE_KEY_TOO_SMALL", 20, 399},
#endif
#ifdef SSL_R_EMPTY_SRTP_PROTECTION_PROFILE_LIST
{"EMPTY_SRTP_PROTECTION_PROFILE_LIST", ERR_LIB_SSL, SSL_R_EMPTY_SRTP_PROTECTION_PROFILE_LIST},
#else
{"EMPTY_SRTP_PROTECTION_PROFILE_LIST", 20, 354},
#endif
#ifdef SSL_R_ENCRYPTED_LENGTH_TOO_LONG
{"ENCRYPTED_LENGTH_TOO_LONG", ERR_LIB_SSL, SSL_R_ENCRYPTED_LENGTH_TOO_LONG},
#else
{"ENCRYPTED_LENGTH_TOO_LONG", 20, 150},
#endif
#ifdef SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST
{"ERROR_IN_RECEIVED_CIPHER_LIST", ERR_LIB_SSL, SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST},
#else
{"ERROR_IN_RECEIVED_CIPHER_LIST", 20, 151},
#endif
#ifdef SSL_R_ERROR_SETTING_TLSA_BASE_DOMAIN
{"ERROR_SETTING_TLSA_BASE_DOMAIN", ERR_LIB_SSL, SSL_R_ERROR_SETTING_TLSA_BASE_DOMAIN},
#else
{"ERROR_SETTING_TLSA_BASE_DOMAIN", 20, 204},
#endif
#ifdef SSL_R_EXCEEDS_MAX_FRAGMENT_SIZE
{"EXCEEDS_MAX_FRAGMENT_SIZE", ERR_LIB_SSL, SSL_R_EXCEEDS_MAX_FRAGMENT_SIZE},
#else
{"EXCEEDS_MAX_FRAGMENT_SIZE", 20, 194},
#endif
#ifdef SSL_R_EXCESSIVE_MESSAGE_SIZE
{"EXCESSIVE_MESSAGE_SIZE", ERR_LIB_SSL, SSL_R_EXCESSIVE_MESSAGE_SIZE},
#else
{"EXCESSIVE_MESSAGE_SIZE", 20, 152},
#endif
#ifdef SSL_R_EXTENSION_NOT_RECEIVED
{"EXTENSION_NOT_RECEIVED", ERR_LIB_SSL, SSL_R_EXTENSION_NOT_RECEIVED},
#else
{"EXTENSION_NOT_RECEIVED", 20, 279},
#endif
#ifdef SSL_R_EXTRA_DATA_IN_MESSAGE
{"EXTRA_DATA_IN_MESSAGE", ERR_LIB_SSL, SSL_R_EXTRA_DATA_IN_MESSAGE},
#else
{"EXTRA_DATA_IN_MESSAGE", 20, 153},
#endif
#ifdef SSL_R_EXT_LENGTH_MISMATCH
{"EXT_LENGTH_MISMATCH", ERR_LIB_SSL, SSL_R_EXT_LENGTH_MISMATCH},
#else
{"EXT_LENGTH_MISMATCH", 20, 163},
#endif
#ifdef SSL_R_FAILED_TO_INIT_ASYNC
{"FAILED_TO_INIT_ASYNC", ERR_LIB_SSL, SSL_R_FAILED_TO_INIT_ASYNC},
#else
{"FAILED_TO_INIT_ASYNC", 20, 405},
#endif
#ifdef SSL_R_FRAGMENTED_CLIENT_HELLO
{"FRAGMENTED_CLIENT_HELLO", ERR_LIB_SSL, SSL_R_FRAGMENTED_CLIENT_HELLO},
#else
{"FRAGMENTED_CLIENT_HELLO", 20, 401},
#endif
#ifdef SSL_R_GOT_A_FIN_BEFORE_A_CCS
{"GOT_A_FIN_BEFORE_A_CCS", ERR_LIB_SSL, SSL_R_GOT_A_FIN_BEFORE_A_CCS},
#else
{"GOT_A_FIN_BEFORE_A_CCS", 20, 154},
#endif
#ifdef SSL_R_HTTPS_PROXY_REQUEST
{"HTTPS_PROXY_REQUEST", ERR_LIB_SSL, SSL_R_HTTPS_PROXY_REQUEST},
#else
{"HTTPS_PROXY_REQUEST", 20, 155},
#endif
#ifdef SSL_R_HTTP_REQUEST
{"HTTP_REQUEST", ERR_LIB_SSL, SSL_R_HTTP_REQUEST},
#else
{"HTTP_REQUEST", 20, 156},
#endif
#ifdef SSL_R_ILLEGAL_POINT_COMPRESSION
{"ILLEGAL_POINT_COMPRESSION", ERR_LIB_SSL, SSL_R_ILLEGAL_POINT_COMPRESSION},
#else
{"ILLEGAL_POINT_COMPRESSION", 20, 162},
#endif
#ifdef SSL_R_ILLEGAL_SUITEB_DIGEST
{"ILLEGAL_SUITEB_DIGEST", ERR_LIB_SSL, SSL_R_ILLEGAL_SUITEB_DIGEST},
#else
{"ILLEGAL_SUITEB_DIGEST", 20, 380},
#endif
#ifdef SSL_R_INAPPROPRIATE_FALLBACK
{"INAPPROPRIATE_FALLBACK", ERR_LIB_SSL, SSL_R_INAPPROPRIATE_FALLBACK},
#else
{"INAPPROPRIATE_FALLBACK", 20, 373},
#endif
#ifdef SSL_R_INCONSISTENT_COMPRESSION
{"INCONSISTENT_COMPRESSION", ERR_LIB_SSL, SSL_R_INCONSISTENT_COMPRESSION},
#else
{"INCONSISTENT_COMPRESSION", 20, 340},
#endif
#ifdef SSL_R_INCONSISTENT_EARLY_DATA_ALPN
{"INCONSISTENT_EARLY_DATA_ALPN", ERR_LIB_SSL, SSL_R_INCONSISTENT_EARLY_DATA_ALPN},
#else
{"INCONSISTENT_EARLY_DATA_ALPN", 20, 222},
#endif
#ifdef SSL_R_INCONSISTENT_EARLY_DATA_SNI
{"INCONSISTENT_EARLY_DATA_SNI", ERR_LIB_SSL, SSL_R_INCONSISTENT_EARLY_DATA_SNI},
#else
{"INCONSISTENT_EARLY_DATA_SNI", 20, 231},
#endif
#ifdef SSL_R_INCONSISTENT_EXTMS
{"INCONSISTENT_EXTMS", ERR_LIB_SSL, SSL_R_INCONSISTENT_EXTMS},
#else
{"INCONSISTENT_EXTMS", 20, 104},
#endif
#ifdef SSL_R_INSUFFICIENT_SECURITY
{"INSUFFICIENT_SECURITY", ERR_LIB_SSL, SSL_R_INSUFFICIENT_SECURITY},
#else
{"INSUFFICIENT_SECURITY", 20, 241},
#endif
#ifdef SSL_R_INVALID_ALERT
{"INVALID_ALERT", ERR_LIB_SSL, SSL_R_INVALID_ALERT},
#else
{"INVALID_ALERT", 20, 205},
#endif
#ifdef SSL_R_INVALID_CCS_MESSAGE
{"INVALID_CCS_MESSAGE", ERR_LIB_SSL, SSL_R_INVALID_CCS_MESSAGE},
#else
{"INVALID_CCS_MESSAGE", 20, 260},
#endif
#ifdef SSL_R_INVALID_CERTIFICATE_OR_ALG
{"INVALID_CERTIFICATE_OR_ALG", ERR_LIB_SSL, SSL_R_INVALID_CERTIFICATE_OR_ALG},
#else
{"INVALID_CERTIFICATE_OR_ALG", 20, 238},
#endif
#ifdef SSL_R_INVALID_COMMAND
{"INVALID_COMMAND", ERR_LIB_SSL, SSL_R_INVALID_COMMAND},
#else
{"INVALID_COMMAND", 20, 280},
#endif
#ifdef SSL_R_INVALID_COMPRESSION_ALGORITHM
{"INVALID_COMPRESSION_ALGORITHM", ERR_LIB_SSL, SSL_R_INVALID_COMPRESSION_ALGORITHM},
#else
{"INVALID_COMPRESSION_ALGORITHM", 20, 341},
#endif
#ifdef SSL_R_INVALID_CONFIG
{"INVALID_CONFIG", ERR_LIB_SSL, SSL_R_INVALID_CONFIG},
#else
{"INVALID_CONFIG", 20, 283},
#endif
#ifdef SSL_R_INVALID_CONFIGURATION_NAME
{"INVALID_CONFIGURATION_NAME", ERR_LIB_SSL, SSL_R_INVALID_CONFIGURATION_NAME},
#else
{"INVALID_CONFIGURATION_NAME", 20, 113},
#endif
#ifdef SSL_R_INVALID_CONTEXT
{"INVALID_CONTEXT", ERR_LIB_SSL, SSL_R_INVALID_CONTEXT},
#else
{"INVALID_CONTEXT", 20, 282},
#endif
#ifdef SSL_R_INVALID_CT_VALIDATION_TYPE
{"INVALID_CT_VALIDATION_TYPE", ERR_LIB_SSL, SSL_R_INVALID_CT_VALIDATION_TYPE},
#else
{"INVALID_CT_VALIDATION_TYPE", 20, 212},
#endif
#ifdef SSL_R_INVALID_KEY_UPDATE_TYPE
{"INVALID_KEY_UPDATE_TYPE", ERR_LIB_SSL, SSL_R_INVALID_KEY_UPDATE_TYPE},
#else
{"INVALID_KEY_UPDATE_TYPE", 20, 120},
#endif
#ifdef SSL_R_INVALID_MAX_EARLY_DATA
{"INVALID_MAX_EARLY_DATA", ERR_LIB_SSL, SSL_R_INVALID_MAX_EARLY_DATA},
#else
{"INVALID_MAX_EARLY_DATA", 20, 174},
#endif
#ifdef SSL_R_INVALID_NULL_CMD_NAME
{"INVALID_NULL_CMD_NAME", ERR_LIB_SSL, SSL_R_INVALID_NULL_CMD_NAME},
#else
{"INVALID_NULL_CMD_NAME", 20, 385},
#endif
#ifdef SSL_R_INVALID_SEQUENCE_NUMBER
{"INVALID_SEQUENCE_NUMBER", ERR_LIB_SSL, SSL_R_INVALID_SEQUENCE_NUMBER},
#else
{"INVALID_SEQUENCE_NUMBER", 20, 402},
#endif
#ifdef SSL_R_INVALID_SERVERINFO_DATA
{"INVALID_SERVERINFO_DATA", ERR_LIB_SSL, SSL_R_INVALID_SERVERINFO_DATA},
#else
{"INVALID_SERVERINFO_DATA", 20, 388},
#endif
#ifdef SSL_R_INVALID_SESSION_ID
{"INVALID_SESSION_ID", ERR_LIB_SSL, SSL_R_INVALID_SESSION_ID},
#else
{"INVALID_SESSION_ID", 20, 999},
#endif
#ifdef SSL_R_INVALID_SRP_USERNAME
{"INVALID_SRP_USERNAME", ERR_LIB_SSL, SSL_R_INVALID_SRP_USERNAME},
#else
{"INVALID_SRP_USERNAME", 20, 357},
#endif
#ifdef SSL_R_INVALID_STATUS_RESPONSE
{"INVALID_STATUS_RESPONSE", ERR_LIB_SSL, SSL_R_INVALID_STATUS_RESPONSE},
#else
{"INVALID_STATUS_RESPONSE", 20, 328},
#endif
#ifdef SSL_R_INVALID_TICKET_KEYS_LENGTH
{"INVALID_TICKET_KEYS_LENGTH", ERR_LIB_SSL, SSL_R_INVALID_TICKET_KEYS_LENGTH},
#else
{"INVALID_TICKET_KEYS_LENGTH", 20, 325},
#endif
#ifdef SSL_R_LENGTH_MISMATCH
{"LENGTH_MISMATCH", ERR_LIB_SSL, SSL_R_LENGTH_MISMATCH},
#else
{"LENGTH_MISMATCH", 20, 159},
#endif
#ifdef SSL_R_LENGTH_TOO_LONG
{"LENGTH_TOO_LONG", ERR_LIB_SSL, SSL_R_LENGTH_TOO_LONG},
#else
{"LENGTH_TOO_LONG", 20, 404},
#endif
#ifdef SSL_R_LENGTH_TOO_SHORT
{"LENGTH_TOO_SHORT", ERR_LIB_SSL, SSL_R_LENGTH_TOO_SHORT},
#else
{"LENGTH_TOO_SHORT", 20, 160},
#endif
#ifdef SSL_R_LIBRARY_BUG
{"LIBRARY_BUG", ERR_LIB_SSL, SSL_R_LIBRARY_BUG},
#else
{"LIBRARY_BUG", 20, 274},
#endif
#ifdef SSL_R_LIBRARY_HAS_NO_CIPHERS
{"LIBRARY_HAS_NO_CIPHERS", ERR_LIB_SSL, SSL_R_LIBRARY_HAS_NO_CIPHERS},
#else
{"LIBRARY_HAS_NO_CIPHERS", 20, 161},
#endif
#ifdef SSL_R_MISSING_DSA_SIGNING_CERT
{"MISSING_DSA_SIGNING_CERT", ERR_LIB_SSL, SSL_R_MISSING_DSA_SIGNING_CERT},
#else
{"MISSING_DSA_SIGNING_CERT", 20, 165},
#endif
#ifdef SSL_R_MISSING_ECDSA_SIGNING_CERT
{"MISSING_ECDSA_SIGNING_CERT", ERR_LIB_SSL, SSL_R_MISSING_ECDSA_SIGNING_CERT},
#else
{"MISSING_ECDSA_SIGNING_CERT", 20, 381},
#endif
#ifdef SSL_R_MISSING_FATAL
{"MISSING_FATAL", ERR_LIB_SSL, SSL_R_MISSING_FATAL},
#else
{"MISSING_FATAL", 20, 256},
#endif
#ifdef SSL_R_MISSING_PARAMETERS
{"MISSING_PARAMETERS", ERR_LIB_SSL, SSL_R_MISSING_PARAMETERS},
#else
{"MISSING_PARAMETERS", 20, 290},
#endif
#ifdef SSL_R_MISSING_PSK_KEX_MODES_EXTENSION
{"MISSING_PSK_KEX_MODES_EXTENSION", ERR_LIB_SSL, SSL_R_MISSING_PSK_KEX_MODES_EXTENSION},
#else
{"MISSING_PSK_KEX_MODES_EXTENSION", 20, 310},
#endif
#ifdef SSL_R_MISSING_RSA_CERTIFICATE
{"MISSING_RSA_CERTIFICATE", ERR_LIB_SSL, SSL_R_MISSING_RSA_CERTIFICATE},
#else
{"MISSING_RSA_CERTIFICATE", 20, 168},
#endif
#ifdef SSL_R_MISSING_RSA_ENCRYPTING_CERT
{"MISSING_RSA_ENCRYPTING_CERT", ERR_LIB_SSL, SSL_R_MISSING_RSA_ENCRYPTING_CERT},
#else
{"MISSING_RSA_ENCRYPTING_CERT", 20, 169},
#endif
#ifdef SSL_R_MISSING_RSA_SIGNING_CERT
{"MISSING_RSA_SIGNING_CERT", ERR_LIB_SSL, SSL_R_MISSING_RSA_SIGNING_CERT},
#else
{"MISSING_RSA_SIGNING_CERT", 20, 170},
#endif
#ifdef SSL_R_MISSING_SIGALGS_EXTENSION
{"MISSING_SIGALGS_EXTENSION", ERR_LIB_SSL, SSL_R_MISSING_SIGALGS_EXTENSION},
#else
{"MISSING_SIGALGS_EXTENSION", 20, 112},
#endif
#ifdef SSL_R_MISSING_SIGNING_CERT
{"MISSING_SIGNING_CERT", ERR_LIB_SSL, SSL_R_MISSING_SIGNING_CERT},
#else
{"MISSING_SIGNING_CERT", 20, 221},
#endif
#ifdef SSL_R_MISSING_SRP_PARAM
{"MISSING_SRP_PARAM", ERR_LIB_SSL, SSL_R_MISSING_SRP_PARAM},
#else
{"MISSING_SRP_PARAM", 20, 358},
#endif
#ifdef SSL_R_MISSING_SUPPORTED_GROUPS_EXTENSION
{"MISSING_SUPPORTED_GROUPS_EXTENSION", ERR_LIB_SSL, SSL_R_MISSING_SUPPORTED_GROUPS_EXTENSION},
#else
{"MISSING_SUPPORTED_GROUPS_EXTENSION", 20, 209},
#endif
#ifdef SSL_R_MISSING_TMP_DH_KEY
{"MISSING_TMP_DH_KEY", ERR_LIB_SSL, SSL_R_MISSING_TMP_DH_KEY},
#else
{"MISSING_TMP_DH_KEY", 20, 171},
#endif
#ifdef SSL_R_MISSING_TMP_ECDH_KEY
{"MISSING_TMP_ECDH_KEY", ERR_LIB_SSL, SSL_R_MISSING_TMP_ECDH_KEY},
#else
{"MISSING_TMP_ECDH_KEY", 20, 311},
#endif
#ifdef SSL_R_MIXED_HANDSHAKE_AND_NON_HANDSHAKE_DATA
{"MIXED_HANDSHAKE_AND_NON_HANDSHAKE_DATA", ERR_LIB_SSL, SSL_R_MIXED_HANDSHAKE_AND_NON_HANDSHAKE_DATA},
#else
{"MIXED_HANDSHAKE_AND_NON_HANDSHAKE_DATA", 20, 293},
#endif
#ifdef SSL_R_NOT_ON_RECORD_BOUNDARY
{"NOT_ON_RECORD_BOUNDARY", ERR_LIB_SSL, SSL_R_NOT_ON_RECORD_BOUNDARY},
#else
{"NOT_ON_RECORD_BOUNDARY", 20, 182},
#endif
#ifdef SSL_R_NOT_REPLACING_CERTIFICATE
{"NOT_REPLACING_CERTIFICATE", ERR_LIB_SSL, SSL_R_NOT_REPLACING_CERTIFICATE},
#else
{"NOT_REPLACING_CERTIFICATE", 20, 289},
#endif
#ifdef SSL_R_NOT_SERVER
{"NOT_SERVER", ERR_LIB_SSL, SSL_R_NOT_SERVER},
#else
{"NOT_SERVER", 20, 284},
#endif
#ifdef SSL_R_NO_APPLICATION_PROTOCOL
{"NO_APPLICATION_PROTOCOL", ERR_LIB_SSL, SSL_R_NO_APPLICATION_PROTOCOL},
#else
{"NO_APPLICATION_PROTOCOL", 20, 235},
#endif
#ifdef SSL_R_NO_CERTIFICATES_RETURNED
{"NO_CERTIFICATES_RETURNED", ERR_LIB_SSL, SSL_R_NO_CERTIFICATES_RETURNED},
#else
{"NO_CERTIFICATES_RETURNED", 20, 176},
#endif
#ifdef SSL_R_NO_CERTIFICATE_ASSIGNED
{"NO_CERTIFICATE_ASSIGNED", ERR_LIB_SSL, SSL_R_NO_CERTIFICATE_ASSIGNED},
#else
{"NO_CERTIFICATE_ASSIGNED", 20, 177},
#endif
#ifdef SSL_R_NO_CERTIFICATE_SET
{"NO_CERTIFICATE_SET", ERR_LIB_SSL, SSL_R_NO_CERTIFICATE_SET},
#else
{"NO_CERTIFICATE_SET", 20, 179},
#endif
#ifdef SSL_R_NO_CHANGE_FOLLOWING_HRR
{"NO_CHANGE_FOLLOWING_HRR", ERR_LIB_SSL, SSL_R_NO_CHANGE_FOLLOWING_HRR},
#else
{"NO_CHANGE_FOLLOWING_HRR", 20, 214},
#endif
#ifdef SSL_R_NO_CIPHERS_AVAILABLE
{"NO_CIPHERS_AVAILABLE", ERR_LIB_SSL, SSL_R_NO_CIPHERS_AVAILABLE},
#else
{"NO_CIPHERS_AVAILABLE", 20, 181},
#endif
#ifdef SSL_R_NO_CIPHERS_SPECIFIED
{"NO_CIPHERS_SPECIFIED", ERR_LIB_SSL, SSL_R_NO_CIPHERS_SPECIFIED},
#else
{"NO_CIPHERS_SPECIFIED", 20, 183},
#endif
#ifdef SSL_R_NO_CIPHER_MATCH
{"NO_CIPHER_MATCH", ERR_LIB_SSL, SSL_R_NO_CIPHER_MATCH},
#else
{"NO_CIPHER_MATCH", 20, 185},
#endif
#ifdef SSL_R_NO_CLIENT_CERT_METHOD
{"NO_CLIENT_CERT_METHOD", ERR_LIB_SSL, SSL_R_NO_CLIENT_CERT_METHOD},
#else
{"NO_CLIENT_CERT_METHOD", 20, 331},
#endif
#ifdef SSL_R_NO_COMPRESSION_SPECIFIED
{"NO_COMPRESSION_SPECIFIED", ERR_LIB_SSL, SSL_R_NO_COMPRESSION_SPECIFIED},
#else
{"NO_COMPRESSION_SPECIFIED", 20, 187},
#endif
#ifdef SSL_R_NO_COOKIE_CALLBACK_SET
{"NO_COOKIE_CALLBACK_SET", ERR_LIB_SSL, SSL_R_NO_COOKIE_CALLBACK_SET},
#else
{"NO_COOKIE_CALLBACK_SET", 20, 287},
#endif
#ifdef SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER
{"NO_GOST_CERTIFICATE_SENT_BY_PEER", ERR_LIB_SSL, SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER},
#else
{"NO_GOST_CERTIFICATE_SENT_BY_PEER", 20, 330},
#endif
#ifdef SSL_R_NO_METHOD_SPECIFIED
{"NO_METHOD_SPECIFIED", ERR_LIB_SSL, SSL_R_NO_METHOD_SPECIFIED},
#else
{"NO_METHOD_SPECIFIED", 20, 188},
#endif
#ifdef SSL_R_NO_PEM_EXTENSIONS
{"NO_PEM_EXTENSIONS", ERR_LIB_SSL, SSL_R_NO_PEM_EXTENSIONS},
#else
{"NO_PEM_EXTENSIONS", 20, 389},
#endif
#ifdef SSL_R_NO_PRIVATE_KEY_ASSIGNED
{"NO_PRIVATE_KEY_ASSIGNED", ERR_LIB_SSL, SSL_R_NO_PRIVATE_KEY_ASSIGNED},
#else
{"NO_PRIVATE_KEY_ASSIGNED", 20, 190},
#endif
#ifdef SSL_R_NO_PROTOCOLS_AVAILABLE
{"NO_PROTOCOLS_AVAILABLE", ERR_LIB_SSL, SSL_R_NO_PROTOCOLS_AVAILABLE},
#else
{"NO_PROTOCOLS_AVAILABLE", 20, 191},
#endif
#ifdef SSL_R_NO_RENEGOTIATION
{"NO_RENEGOTIATION", ERR_LIB_SSL, SSL_R_NO_RENEGOTIATION},
#else
{"NO_RENEGOTIATION", 20, 339},
#endif
#ifdef SSL_R_NO_REQUIRED_DIGEST
{"NO_REQUIRED_DIGEST", ERR_LIB_SSL, SSL_R_NO_REQUIRED_DIGEST},
#else
{"NO_REQUIRED_DIGEST", 20, 324},
#endif
#ifdef SSL_R_NO_SHARED_CIPHER
{"NO_SHARED_CIPHER", ERR_LIB_SSL, SSL_R_NO_SHARED_CIPHER},
#else
{"NO_SHARED_CIPHER", 20, 193},
#endif
#ifdef SSL_R_NO_SHARED_GROUPS
{"NO_SHARED_GROUPS", ERR_LIB_SSL, SSL_R_NO_SHARED_GROUPS},
#else
{"NO_SHARED_GROUPS", 20, 410},
#endif
#ifdef SSL_R_NO_SHARED_SIGNATURE_ALGORITHMS
{"NO_SHARED_SIGNATURE_ALGORITHMS", ERR_LIB_SSL, SSL_R_NO_SHARED_SIGNATURE_ALGORITHMS},
#else
{"NO_SHARED_SIGNATURE_ALGORITHMS", 20, 376},
#endif
#ifdef SSL_R_NO_SRTP_PROFILES
{"NO_SRTP_PROFILES", ERR_LIB_SSL, SSL_R_NO_SRTP_PROFILES},
#else
{"NO_SRTP_PROFILES", 20, 359},
#endif
#ifdef SSL_R_NO_SUITABLE_KEY_SHARE
{"NO_SUITABLE_KEY_SHARE", ERR_LIB_SSL, SSL_R_NO_SUITABLE_KEY_SHARE},
#else
{"NO_SUITABLE_KEY_SHARE", 20, 101},
#endif
#ifdef SSL_R_NO_SUITABLE_SIGNATURE_ALGORITHM
{"NO_SUITABLE_SIGNATURE_ALGORITHM", ERR_LIB_SSL, SSL_R_NO_SUITABLE_SIGNATURE_ALGORITHM},
#else
{"NO_SUITABLE_SIGNATURE_ALGORITHM", 20, 118},
#endif
#ifdef SSL_R_NO_VALID_SCTS
{"NO_VALID_SCTS", ERR_LIB_SSL, SSL_R_NO_VALID_SCTS},
#else
{"NO_VALID_SCTS", 20, 216},
#endif
#ifdef SSL_R_NO_VERIFY_COOKIE_CALLBACK
{"NO_VERIFY_COOKIE_CALLBACK", ERR_LIB_SSL, SSL_R_NO_VERIFY_COOKIE_CALLBACK},
#else
{"NO_VERIFY_COOKIE_CALLBACK", 20, 403},
#endif
#ifdef SSL_R_NULL_SSL_CTX
{"NULL_SSL_CTX", ERR_LIB_SSL, SSL_R_NULL_SSL_CTX},
#else
{"NULL_SSL_CTX", 20, 195},
#endif
#ifdef SSL_R_NULL_SSL_METHOD_PASSED
{"NULL_SSL_METHOD_PASSED", ERR_LIB_SSL, SSL_R_NULL_SSL_METHOD_PASSED},
#else
{"NULL_SSL_METHOD_PASSED", 20, 196},
#endif
#ifdef SSL_R_OCSP_CALLBACK_FAILURE
{"OCSP_CALLBACK_FAILURE", ERR_LIB_SSL, SSL_R_OCSP_CALLBACK_FAILURE},
#else
{"OCSP_CALLBACK_FAILURE", 20, 294},
#endif
#ifdef SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED
{"OLD_SESSION_CIPHER_NOT_RETURNED", ERR_LIB_SSL, SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED},
#else
{"OLD_SESSION_CIPHER_NOT_RETURNED", 20, 197},
#endif
#ifdef SSL_R_OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED
{"OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED", ERR_LIB_SSL, SSL_R_OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED},
#else
{"OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED", 20, 344},
#endif
#ifdef SSL_R_OVERFLOW_ERROR
{"OVERFLOW_ERROR", ERR_LIB_SSL, SSL_R_OVERFLOW_ERROR},
#else
{"OVERFLOW_ERROR", 20, 237},
#endif
#ifdef SSL_R_PACKET_LENGTH_TOO_LONG
{"PACKET_LENGTH_TOO_LONG", ERR_LIB_SSL, SSL_R_PACKET_LENGTH_TOO_LONG},
#else
{"PACKET_LENGTH_TOO_LONG", 20, 198},
#endif
#ifdef SSL_R_PARSE_TLSEXT
{"PARSE_TLSEXT", ERR_LIB_SSL, SSL_R_PARSE_TLSEXT},
#else
{"PARSE_TLSEXT", 20, 227},
#endif
#ifdef SSL_R_PATH_TOO_LONG
{"PATH_TOO_LONG", ERR_LIB_SSL, SSL_R_PATH_TOO_LONG},
#else
{"PATH_TOO_LONG", 20, 270},
#endif
#ifdef SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE
{"PEER_DID_NOT_RETURN_A_CERTIFICATE", ERR_LIB_SSL, SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE},
#else
{"PEER_DID_NOT_RETURN_A_CERTIFICATE", 20, 199},
#endif
#ifdef SSL_R_PEM_NAME_BAD_PREFIX
{"PEM_NAME_BAD_PREFIX", ERR_LIB_SSL, SSL_R_PEM_NAME_BAD_PREFIX},
#else
{"PEM_NAME_BAD_PREFIX", 20, 391},
#endif
#ifdef SSL_R_PEM_NAME_TOO_SHORT
{"PEM_NAME_TOO_SHORT", ERR_LIB_SSL, SSL_R_PEM_NAME_TOO_SHORT},
#else
{"PEM_NAME_TOO_SHORT", 20, 392},
#endif
#ifdef SSL_R_PIPELINE_FAILURE
{"PIPELINE_FAILURE", ERR_LIB_SSL, SSL_R_PIPELINE_FAILURE},
#else
{"PIPELINE_FAILURE", 20, 406},
#endif
#ifdef SSL_R_POST_HANDSHAKE_AUTH_ENCODING_ERR
{"POST_HANDSHAKE_AUTH_ENCODING_ERR", ERR_LIB_SSL, SSL_R_POST_HANDSHAKE_AUTH_ENCODING_ERR},
#else
{"POST_HANDSHAKE_AUTH_ENCODING_ERR", 20, 278},
#endif
#ifdef SSL_R_PRIVATE_KEY_MISMATCH
{"PRIVATE_KEY_MISMATCH", ERR_LIB_SSL, SSL_R_PRIVATE_KEY_MISMATCH},
#else
{"PRIVATE_KEY_MISMATCH", 20, 288},
#endif
#ifdef SSL_R_PROTOCOL_IS_SHUTDOWN
{"PROTOCOL_IS_SHUTDOWN", ERR_LIB_SSL, SSL_R_PROTOCOL_IS_SHUTDOWN},
#else
{"PROTOCOL_IS_SHUTDOWN", 20, 207},
#endif
#ifdef SSL_R_PSK_IDENTITY_NOT_FOUND
{"PSK_IDENTITY_NOT_FOUND", ERR_LIB_SSL, SSL_R_PSK_IDENTITY_NOT_FOUND},
#else
{"PSK_IDENTITY_NOT_FOUND", 20, 223},
#endif
#ifdef SSL_R_PSK_NO_CLIENT_CB
{"PSK_NO_CLIENT_CB", ERR_LIB_SSL, SSL_R_PSK_NO_CLIENT_CB},
#else
{"PSK_NO_CLIENT_CB", 20, 224},
#endif
#ifdef SSL_R_PSK_NO_SERVER_CB
{"PSK_NO_SERVER_CB", ERR_LIB_SSL, SSL_R_PSK_NO_SERVER_CB},
#else
{"PSK_NO_SERVER_CB", 20, 225},
#endif
#ifdef SSL_R_READ_BIO_NOT_SET
{"READ_BIO_NOT_SET", ERR_LIB_SSL, SSL_R_READ_BIO_NOT_SET},
#else
{"READ_BIO_NOT_SET", 20, 211},
#endif
#ifdef SSL_R_READ_TIMEOUT_EXPIRED
{"READ_TIMEOUT_EXPIRED", ERR_LIB_SSL, SSL_R_READ_TIMEOUT_EXPIRED},
#else
{"READ_TIMEOUT_EXPIRED", 20, 312},
#endif
#ifdef SSL_R_RECORD_LENGTH_MISMATCH
{"RECORD_LENGTH_MISMATCH", ERR_LIB_SSL, SSL_R_RECORD_LENGTH_MISMATCH},
#else
{"RECORD_LENGTH_MISMATCH", 20, 213},
#endif
#ifdef SSL_R_RECORD_TOO_SMALL
{"RECORD_TOO_SMALL", ERR_LIB_SSL, SSL_R_RECORD_TOO_SMALL},
#else
{"RECORD_TOO_SMALL", 20, 298},
#endif
#ifdef SSL_R_RENEGOTIATE_EXT_TOO_LONG
{"RENEGOTIATE_EXT_TOO_LONG", ERR_LIB_SSL, SSL_R_RENEGOTIATE_EXT_TOO_LONG},
#else
{"RENEGOTIATE_EXT_TOO_LONG", 20, 335},
#endif
#ifdef SSL_R_RENEGOTIATION_ENCODING_ERR
{"RENEGOTIATION_ENCODING_ERR", ERR_LIB_SSL, SSL_R_RENEGOTIATION_ENCODING_ERR},
#else
{"RENEGOTIATION_ENCODING_ERR", 20, 336},
#endif
#ifdef SSL_R_RENEGOTIATION_MISMATCH
{"RENEGOTIATION_MISMATCH", ERR_LIB_SSL, SSL_R_RENEGOTIATION_MISMATCH},
#else
{"RENEGOTIATION_MISMATCH", 20, 337},
#endif
#ifdef SSL_R_REQUEST_PENDING
{"REQUEST_PENDING", ERR_LIB_SSL, SSL_R_REQUEST_PENDING},
#else
{"REQUEST_PENDING", 20, 285},
#endif
#ifdef SSL_R_REQUEST_SENT
{"REQUEST_SENT", ERR_LIB_SSL, SSL_R_REQUEST_SENT},
#else
{"REQUEST_SENT", 20, 286},
#endif
#ifdef SSL_R_REQUIRED_CIPHER_MISSING
{"REQUIRED_CIPHER_MISSING", ERR_LIB_SSL, SSL_R_REQUIRED_CIPHER_MISSING},
#else
{"REQUIRED_CIPHER_MISSING", 20, 215},
#endif
#ifdef SSL_R_REQUIRED_COMPRESSION_ALGORITHM_MISSING
{"REQUIRED_COMPRESSION_ALGORITHM_MISSING", ERR_LIB_SSL, SSL_R_REQUIRED_COMPRESSION_ALGORITHM_MISSING},
#else
{"REQUIRED_COMPRESSION_ALGORITHM_MISSING", 20, 342},
#endif
#ifdef SSL_R_SCSV_RECEIVED_WHEN_RENEGOTIATING
{"SCSV_RECEIVED_WHEN_RENEGOTIATING", ERR_LIB_SSL, SSL_R_SCSV_RECEIVED_WHEN_RENEGOTIATING},
#else
{"SCSV_RECEIVED_WHEN_RENEGOTIATING", 20, 345},
#endif
#ifdef SSL_R_SCT_VERIFICATION_FAILED
{"SCT_VERIFICATION_FAILED", ERR_LIB_SSL, SSL_R_SCT_VERIFICATION_FAILED},
#else
{"SCT_VERIFICATION_FAILED", 20, 208},
#endif
#ifdef SSL_R_SERVERHELLO_TLSEXT
{"SERVERHELLO_TLSEXT", ERR_LIB_SSL, SSL_R_SERVERHELLO_TLSEXT},
#else
{"SERVERHELLO_TLSEXT", 20, 275},
#endif
#ifdef SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED
{"SESSION_ID_CONTEXT_UNINITIALIZED", ERR_LIB_SSL, SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED},
#else
{"SESSION_ID_CONTEXT_UNINITIALIZED", 20, 277},
#endif
#ifdef SSL_R_SHUTDOWN_WHILE_IN_INIT
{"SHUTDOWN_WHILE_IN_INIT", ERR_LIB_SSL, SSL_R_SHUTDOWN_WHILE_IN_INIT},
#else
{"SHUTDOWN_WHILE_IN_INIT", 20, 407},
#endif
#ifdef SSL_R_SIGNATURE_ALGORITHMS_ERROR
{"SIGNATURE_ALGORITHMS_ERROR", ERR_LIB_SSL, SSL_R_SIGNATURE_ALGORITHMS_ERROR},
#else
{"SIGNATURE_ALGORITHMS_ERROR", 20, 360},
#endif
#ifdef SSL_R_SIGNATURE_FOR_NON_SIGNING_CERTIFICATE
{"SIGNATURE_FOR_NON_SIGNING_CERTIFICATE", ERR_LIB_SSL, SSL_R_SIGNATURE_FOR_NON_SIGNING_CERTIFICATE},
#else
{"SIGNATURE_FOR_NON_SIGNING_CERTIFICATE", 20, 220},
#endif
#ifdef SSL_R_SRP_A_CALC
{"SRP_A_CALC", ERR_LIB_SSL, SSL_R_SRP_A_CALC},
#else
{"SRP_A_CALC", 20, 361},
#endif
#ifdef SSL_R_SRTP_COULD_NOT_ALLOCATE_PROFILES
{"SRTP_COULD_NOT_ALLOCATE_PROFILES", ERR_LIB_SSL, SSL_R_SRTP_COULD_NOT_ALLOCATE_PROFILES},
#else
{"SRTP_COULD_NOT_ALLOCATE_PROFILES", 20, 362},
#endif
#ifdef SSL_R_SRTP_PROTECTION_PROFILE_LIST_TOO_LONG
{"SRTP_PROTECTION_PROFILE_LIST_TOO_LONG", ERR_LIB_SSL, SSL_R_SRTP_PROTECTION_PROFILE_LIST_TOO_LONG},
#else
{"SRTP_PROTECTION_PROFILE_LIST_TOO_LONG", 20, 363},
#endif
#ifdef SSL_R_SRTP_UNKNOWN_PROTECTION_PROFILE
{"SRTP_UNKNOWN_PROTECTION_PROFILE", ERR_LIB_SSL, SSL_R_SRTP_UNKNOWN_PROTECTION_PROFILE},
#else
{"SRTP_UNKNOWN_PROTECTION_PROFILE", 20, 364},
#endif
#ifdef SSL_R_SSL3_EXT_INVALID_MAX_FRAGMENT_LENGTH
{"SSL3_EXT_INVALID_MAX_FRAGMENT_LENGTH", ERR_LIB_SSL, SSL_R_SSL3_EXT_INVALID_MAX_FRAGMENT_LENGTH},
#else
{"SSL3_EXT_INVALID_MAX_FRAGMENT_LENGTH", 20, 232},
#endif
#ifdef SSL_R_SSL3_EXT_INVALID_SERVERNAME
{"SSL3_EXT_INVALID_SERVERNAME", ERR_LIB_SSL, SSL_R_SSL3_EXT_INVALID_SERVERNAME},
#else
{"SSL3_EXT_INVALID_SERVERNAME", 20, 319},
#endif
#ifdef SSL_R_SSL3_EXT_INVALID_SERVERNAME_TYPE
{"SSL3_EXT_INVALID_SERVERNAME_TYPE", ERR_LIB_SSL, SSL_R_SSL3_EXT_INVALID_SERVERNAME_TYPE},
#else
{"SSL3_EXT_INVALID_SERVERNAME_TYPE", 20, 320},
#endif
#ifdef SSL_R_SSL3_SESSION_ID_TOO_LONG
{"SSL3_SESSION_ID_TOO_LONG", ERR_LIB_SSL, SSL_R_SSL3_SESSION_ID_TOO_LONG},
#else
{"SSL3_SESSION_ID_TOO_LONG", 20, 300},
#endif
#ifdef SSL_R_SSLV3_ALERT_BAD_CERTIFICATE
{"SSLV3_ALERT_BAD_CERTIFICATE", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_BAD_CERTIFICATE},
#else
{"SSLV3_ALERT_BAD_CERTIFICATE", 20, 1042},
#endif
#ifdef SSL_R_SSLV3_ALERT_BAD_RECORD_MAC
{"SSLV3_ALERT_BAD_RECORD_MAC", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_BAD_RECORD_MAC},
#else
{"SSLV3_ALERT_BAD_RECORD_MAC", 20, 1020},
#endif
#ifdef SSL_R_SSLV3_ALERT_CERTIFICATE_EXPIRED
{"SSLV3_ALERT_CERTIFICATE_EXPIRED", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_CERTIFICATE_EXPIRED},
#else
{"SSLV3_ALERT_CERTIFICATE_EXPIRED", 20, 1045},
#endif
#ifdef SSL_R_SSLV3_ALERT_CERTIFICATE_REVOKED
{"SSLV3_ALERT_CERTIFICATE_REVOKED", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_CERTIFICATE_REVOKED},
#else
{"SSLV3_ALERT_CERTIFICATE_REVOKED", 20, 1044},
#endif
#ifdef SSL_R_SSLV3_ALERT_CERTIFICATE_UNKNOWN
{"SSLV3_ALERT_CERTIFICATE_UNKNOWN", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_CERTIFICATE_UNKNOWN},
#else
{"SSLV3_ALERT_CERTIFICATE_UNKNOWN", 20, 1046},
#endif
#ifdef SSL_R_SSLV3_ALERT_DECOMPRESSION_FAILURE
{"SSLV3_ALERT_DECOMPRESSION_FAILURE", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_DECOMPRESSION_FAILURE},
#else
{"SSLV3_ALERT_DECOMPRESSION_FAILURE", 20, 1030},
#endif
#ifdef SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE
{"SSLV3_ALERT_HANDSHAKE_FAILURE", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE},
#else
{"SSLV3_ALERT_HANDSHAKE_FAILURE", 20, 1040},
#endif
#ifdef SSL_R_SSLV3_ALERT_ILLEGAL_PARAMETER
{"SSLV3_ALERT_ILLEGAL_PARAMETER", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_ILLEGAL_PARAMETER},
#else
{"SSLV3_ALERT_ILLEGAL_PARAMETER", 20, 1047},
#endif
#ifdef SSL_R_SSLV3_ALERT_NO_CERTIFICATE
{"SSLV3_ALERT_NO_CERTIFICATE", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_NO_CERTIFICATE},
#else
{"SSLV3_ALERT_NO_CERTIFICATE", 20, 1041},
#endif
#ifdef SSL_R_SSLV3_ALERT_UNEXPECTED_MESSAGE
{"SSLV3_ALERT_UNEXPECTED_MESSAGE", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_UNEXPECTED_MESSAGE},
#else
{"SSLV3_ALERT_UNEXPECTED_MESSAGE", 20, 1010},
#endif
#ifdef SSL_R_SSLV3_ALERT_UNSUPPORTED_CERTIFICATE
{"SSLV3_ALERT_UNSUPPORTED_CERTIFICATE", ERR_LIB_SSL, SSL_R_SSLV3_ALERT_UNSUPPORTED_CERTIFICATE},
#else
{"SSLV3_ALERT_UNSUPPORTED_CERTIFICATE", 20, 1043},
#endif
#ifdef SSL_R_SSL_COMMAND_SECTION_EMPTY
{"SSL_COMMAND_SECTION_EMPTY", ERR_LIB_SSL, SSL_R_SSL_COMMAND_SECTION_EMPTY},
#else
{"SSL_COMMAND_SECTION_EMPTY", 20, 117},
#endif
#ifdef SSL_R_SSL_COMMAND_SECTION_NOT_FOUND
{"SSL_COMMAND_SECTION_NOT_FOUND", ERR_LIB_SSL, SSL_R_SSL_COMMAND_SECTION_NOT_FOUND},
#else
{"SSL_COMMAND_SECTION_NOT_FOUND", 20, 125},
#endif
#ifdef SSL_R_SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION
{"SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION", ERR_LIB_SSL, SSL_R_SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION},
#else
{"SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION", 20, 228},
#endif
#ifdef SSL_R_SSL_HANDSHAKE_FAILURE
{"SSL_HANDSHAKE_FAILURE", ERR_LIB_SSL, SSL_R_SSL_HANDSHAKE_FAILURE},
#else
{"SSL_HANDSHAKE_FAILURE", 20, 229},
#endif
#ifdef SSL_R_SSL_LIBRARY_HAS_NO_CIPHERS
{"SSL_LIBRARY_HAS_NO_CIPHERS", ERR_LIB_SSL, SSL_R_SSL_LIBRARY_HAS_NO_CIPHERS},
#else
{"SSL_LIBRARY_HAS_NO_CIPHERS", 20, 230},
#endif
#ifdef SSL_R_SSL_NEGATIVE_LENGTH
{"SSL_NEGATIVE_LENGTH", ERR_LIB_SSL, SSL_R_SSL_NEGATIVE_LENGTH},
#else
{"SSL_NEGATIVE_LENGTH", 20, 372},
#endif
#ifdef SSL_R_SSL_SECTION_EMPTY
{"SSL_SECTION_EMPTY", ERR_LIB_SSL, SSL_R_SSL_SECTION_EMPTY},
#else
{"SSL_SECTION_EMPTY", 20, 126},
#endif
#ifdef SSL_R_SSL_SECTION_NOT_FOUND
{"SSL_SECTION_NOT_FOUND", ERR_LIB_SSL, SSL_R_SSL_SECTION_NOT_FOUND},
#else
{"SSL_SECTION_NOT_FOUND", 20, 136},
#endif
#ifdef SSL_R_SSL_SESSION_ID_CALLBACK_FAILED
{"SSL_SESSION_ID_CALLBACK_FAILED", ERR_LIB_SSL, SSL_R_SSL_SESSION_ID_CALLBACK_FAILED},
#else
{"SSL_SESSION_ID_CALLBACK_FAILED", 20, 301},
#endif
#ifdef SSL_R_SSL_SESSION_ID_CONFLICT
{"SSL_SESSION_ID_CONFLICT", ERR_LIB_SSL, SSL_R_SSL_SESSION_ID_CONFLICT},
#else
{"SSL_SESSION_ID_CONFLICT", 20, 302},
#endif
#ifdef SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG
{"SSL_SESSION_ID_CONTEXT_TOO_LONG", ERR_LIB_SSL, SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG},
#else
{"SSL_SESSION_ID_CONTEXT_TOO_LONG", 20, 273},
#endif
#ifdef SSL_R_SSL_SESSION_ID_HAS_BAD_LENGTH
{"SSL_SESSION_ID_HAS_BAD_LENGTH", ERR_LIB_SSL, SSL_R_SSL_SESSION_ID_HAS_BAD_LENGTH},
#else
{"SSL_SESSION_ID_HAS_BAD_LENGTH", 20, 303},
#endif
#ifdef SSL_R_SSL_SESSION_ID_TOO_LONG
{"SSL_SESSION_ID_TOO_LONG", ERR_LIB_SSL, SSL_R_SSL_SESSION_ID_TOO_LONG},
#else
{"SSL_SESSION_ID_TOO_LONG", 20, 408},
#endif
#ifdef SSL_R_SSL_SESSION_VERSION_MISMATCH
{"SSL_SESSION_VERSION_MISMATCH", ERR_LIB_SSL, SSL_R_SSL_SESSION_VERSION_MISMATCH},
#else
{"SSL_SESSION_VERSION_MISMATCH", 20, 210},
#endif
#ifdef SSL_R_STILL_IN_INIT
{"STILL_IN_INIT", ERR_LIB_SSL, SSL_R_STILL_IN_INIT},
#else
{"STILL_IN_INIT", 20, 121},
#endif
#ifdef SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED
{"TLSV13_ALERT_CERTIFICATE_REQUIRED", ERR_LIB_SSL, SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED},
#else
{"TLSV13_ALERT_CERTIFICATE_REQUIRED", 20, 1116},
#endif
#ifdef SSL_R_TLSV13_ALERT_MISSING_EXTENSION
{"TLSV13_ALERT_MISSING_EXTENSION", ERR_LIB_SSL, SSL_R_TLSV13_ALERT_MISSING_EXTENSION},
#else
{"TLSV13_ALERT_MISSING_EXTENSION", 20, 1109},
#endif
#ifdef SSL_R_TLSV1_ALERT_ACCESS_DENIED
{"TLSV1_ALERT_ACCESS_DENIED", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_ACCESS_DENIED},
#else
{"TLSV1_ALERT_ACCESS_DENIED", 20, 1049},
#endif
#ifdef SSL_R_TLSV1_ALERT_DECODE_ERROR
{"TLSV1_ALERT_DECODE_ERROR", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_DECODE_ERROR},
#else
{"TLSV1_ALERT_DECODE_ERROR", 20, 1050},
#endif
#ifdef SSL_R_TLSV1_ALERT_DECRYPTION_FAILED
{"TLSV1_ALERT_DECRYPTION_FAILED", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_DECRYPTION_FAILED},
#else
{"TLSV1_ALERT_DECRYPTION_FAILED", 20, 1021},
#endif
#ifdef SSL_R_TLSV1_ALERT_DECRYPT_ERROR
{"TLSV1_ALERT_DECRYPT_ERROR", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_DECRYPT_ERROR},
#else
{"TLSV1_ALERT_DECRYPT_ERROR", 20, 1051},
#endif
#ifdef SSL_R_TLSV1_ALERT_EXPORT_RESTRICTION
{"TLSV1_ALERT_EXPORT_RESTRICTION", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_EXPORT_RESTRICTION},
#else
{"TLSV1_ALERT_EXPORT_RESTRICTION", 20, 1060},
#endif
#ifdef SSL_R_TLSV1_ALERT_INAPPROPRIATE_FALLBACK
{"TLSV1_ALERT_INAPPROPRIATE_FALLBACK", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_INAPPROPRIATE_FALLBACK},
#else
{"TLSV1_ALERT_INAPPROPRIATE_FALLBACK", 20, 1086},
#endif
#ifdef SSL_R_TLSV1_ALERT_INSUFFICIENT_SECURITY
{"TLSV1_ALERT_INSUFFICIENT_SECURITY", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_INSUFFICIENT_SECURITY},
#else
{"TLSV1_ALERT_INSUFFICIENT_SECURITY", 20, 1071},
#endif
#ifdef SSL_R_TLSV1_ALERT_INTERNAL_ERROR
{"TLSV1_ALERT_INTERNAL_ERROR", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_INTERNAL_ERROR},
#else
{"TLSV1_ALERT_INTERNAL_ERROR", 20, 1080},
#endif
#ifdef SSL_R_TLSV1_ALERT_NO_RENEGOTIATION
{"TLSV1_ALERT_NO_RENEGOTIATION", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_NO_RENEGOTIATION},
#else
{"TLSV1_ALERT_NO_RENEGOTIATION", 20, 1100},
#endif
#ifdef SSL_R_TLSV1_ALERT_PROTOCOL_VERSION
{"TLSV1_ALERT_PROTOCOL_VERSION", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_PROTOCOL_VERSION},
#else
{"TLSV1_ALERT_PROTOCOL_VERSION", 20, 1070},
#endif
#ifdef SSL_R_TLSV1_ALERT_RECORD_OVERFLOW
{"TLSV1_ALERT_RECORD_OVERFLOW", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_RECORD_OVERFLOW},
#else
{"TLSV1_ALERT_RECORD_OVERFLOW", 20, 1022},
#endif
#ifdef SSL_R_TLSV1_ALERT_UNKNOWN_CA
{"TLSV1_ALERT_UNKNOWN_CA", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_UNKNOWN_CA},
#else
{"TLSV1_ALERT_UNKNOWN_CA", 20, 1048},
#endif
#ifdef SSL_R_TLSV1_ALERT_USER_CANCELLED
{"TLSV1_ALERT_USER_CANCELLED", ERR_LIB_SSL, SSL_R_TLSV1_ALERT_USER_CANCELLED},
#else
{"TLSV1_ALERT_USER_CANCELLED", 20, 1090},
#endif
#ifdef SSL_R_TLSV1_BAD_CERTIFICATE_HASH_VALUE
{"TLSV1_BAD_CERTIFICATE_HASH_VALUE", ERR_LIB_SSL, SSL_R_TLSV1_BAD_CERTIFICATE_HASH_VALUE},
#else
{"TLSV1_BAD_CERTIFICATE_HASH_VALUE", 20, 1114},
#endif
#ifdef SSL_R_TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE
{"TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE", ERR_LIB_SSL, SSL_R_TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE},
#else
{"TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE", 20, 1113},
#endif
#ifdef SSL_R_TLSV1_CERTIFICATE_UNOBTAINABLE
{"TLSV1_CERTIFICATE_UNOBTAINABLE", ERR_LIB_SSL, SSL_R_TLSV1_CERTIFICATE_UNOBTAINABLE},
#else
{"TLSV1_CERTIFICATE_UNOBTAINABLE", 20, 1111},
#endif
#ifdef SSL_R_TLSV1_UNRECOGNIZED_NAME
{"TLSV1_UNRECOGNIZED_NAME", ERR_LIB_SSL, SSL_R_TLSV1_UNRECOGNIZED_NAME},
#else
{"TLSV1_UNRECOGNIZED_NAME", 20, 1112},
#endif
#ifdef SSL_R_TLSV1_UNSUPPORTED_EXTENSION
{"TLSV1_UNSUPPORTED_EXTENSION", ERR_LIB_SSL, SSL_R_TLSV1_UNSUPPORTED_EXTENSION},
#else
{"TLSV1_UNSUPPORTED_EXTENSION", 20, 1110},
#endif
#ifdef SSL_R_TLS_HEARTBEAT_PEER_DOESNT_ACCEPT
{"TLS_HEARTBEAT_PEER_DOESNT_ACCEPT", ERR_LIB_SSL, SSL_R_TLS_HEARTBEAT_PEER_DOESNT_ACCEPT},
#else
{"TLS_HEARTBEAT_PEER_DOESNT_ACCEPT", 20, 365},
#endif
#ifdef SSL_R_TLS_HEARTBEAT_PENDING
{"TLS_HEARTBEAT_PENDING", ERR_LIB_SSL, SSL_R_TLS_HEARTBEAT_PENDING},
#else
{"TLS_HEARTBEAT_PENDING", 20, 366},
#endif
#ifdef SSL_R_TLS_ILLEGAL_EXPORTER_LABEL
{"TLS_ILLEGAL_EXPORTER_LABEL", ERR_LIB_SSL, SSL_R_TLS_ILLEGAL_EXPORTER_LABEL},
#else
{"TLS_ILLEGAL_EXPORTER_LABEL", 20, 367},
#endif
#ifdef SSL_R_TLS_INVALID_ECPOINTFORMAT_LIST
{"TLS_INVALID_ECPOINTFORMAT_LIST", ERR_LIB_SSL, SSL_R_TLS_INVALID_ECPOINTFORMAT_LIST},
#else
{"TLS_INVALID_ECPOINTFORMAT_LIST", 20, 157},
#endif
#ifdef SSL_R_TOO_MANY_KEY_UPDATES
{"TOO_MANY_KEY_UPDATES", ERR_LIB_SSL, SSL_R_TOO_MANY_KEY_UPDATES},
#else
{"TOO_MANY_KEY_UPDATES", 20, 132},
#endif
#ifdef SSL_R_TOO_MANY_WARN_ALERTS
{"TOO_MANY_WARN_ALERTS", ERR_LIB_SSL, SSL_R_TOO_MANY_WARN_ALERTS},
#else
{"TOO_MANY_WARN_ALERTS", 20, 409},
#endif
#ifdef SSL_R_TOO_MUCH_EARLY_DATA
{"TOO_MUCH_EARLY_DATA", ERR_LIB_SSL, SSL_R_TOO_MUCH_EARLY_DATA},
#else
{"TOO_MUCH_EARLY_DATA", 20, 164},
#endif
#ifdef SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS
{"UNABLE_TO_FIND_ECDH_PARAMETERS", ERR_LIB_SSL, SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS},
#else
{"UNABLE_TO_FIND_ECDH_PARAMETERS", 20, 314},
#endif
#ifdef SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS
{"UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS", ERR_LIB_SSL, SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS},
#else
{"UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS", 20, 239},
#endif
#ifdef SSL_R_UNABLE_TO_LOAD_SSL3_MD5_ROUTINES
{"UNABLE_TO_LOAD_SSL3_MD5_ROUTINES", ERR_LIB_SSL, SSL_R_UNABLE_TO_LOAD_SSL3_MD5_ROUTINES},
#else
{"UNABLE_TO_LOAD_SSL3_MD5_ROUTINES", 20, 242},
#endif
#ifdef SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES
{"UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES", ERR_LIB_SSL, SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES},
#else
{"UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES", 20, 243},
#endif
#ifdef SSL_R_UNEXPECTED_CCS_MESSAGE
{"UNEXPECTED_CCS_MESSAGE", ERR_LIB_SSL, SSL_R_UNEXPECTED_CCS_MESSAGE},
#else
{"UNEXPECTED_CCS_MESSAGE", 20, 262},
#endif
#ifdef SSL_R_UNEXPECTED_END_OF_EARLY_DATA
{"UNEXPECTED_END_OF_EARLY_DATA", ERR_LIB_SSL, SSL_R_UNEXPECTED_END_OF_EARLY_DATA},
#else
{"UNEXPECTED_END_OF_EARLY_DATA", 20, 178},
#endif
#ifdef SSL_R_UNEXPECTED_MESSAGE
{"UNEXPECTED_MESSAGE", ERR_LIB_SSL, SSL_R_UNEXPECTED_MESSAGE},
#else
{"UNEXPECTED_MESSAGE", 20, 244},
#endif
#ifdef SSL_R_UNEXPECTED_RECORD
{"UNEXPECTED_RECORD", ERR_LIB_SSL, SSL_R_UNEXPECTED_RECORD},
#else
{"UNEXPECTED_RECORD", 20, 245},
#endif
#ifdef SSL_R_UNINITIALIZED
{"UNINITIALIZED", ERR_LIB_SSL, SSL_R_UNINITIALIZED},
#else
{"UNINITIALIZED", 20, 276},
#endif
#ifdef SSL_R_UNKNOWN_ALERT_TYPE
{"UNKNOWN_ALERT_TYPE", ERR_LIB_SSL, SSL_R_UNKNOWN_ALERT_TYPE},
#else
{"UNKNOWN_ALERT_TYPE", 20, 246},
#endif
#ifdef SSL_R_UNKNOWN_CERTIFICATE_TYPE
{"UNKNOWN_CERTIFICATE_TYPE", ERR_LIB_SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE},
#else
{"UNKNOWN_CERTIFICATE_TYPE", 20, 247},
#endif
#ifdef SSL_R_UNKNOWN_CIPHER_RETURNED
{"UNKNOWN_CIPHER_RETURNED", ERR_LIB_SSL, SSL_R_UNKNOWN_CIPHER_RETURNED},
#else
{"UNKNOWN_CIPHER_RETURNED", 20, 248},
#endif
#ifdef SSL_R_UNKNOWN_CIPHER_TYPE
{"UNKNOWN_CIPHER_TYPE", ERR_LIB_SSL, SSL_R_UNKNOWN_CIPHER_TYPE},
#else
{"UNKNOWN_CIPHER_TYPE", 20, 249},
#endif
#ifdef SSL_R_UNKNOWN_CMD_NAME
{"UNKNOWN_CMD_NAME", ERR_LIB_SSL, SSL_R_UNKNOWN_CMD_NAME},
#else
{"UNKNOWN_CMD_NAME", 20, 386},
#endif
#ifdef SSL_R_UNKNOWN_COMMAND
{"UNKNOWN_COMMAND", ERR_LIB_SSL, SSL_R_UNKNOWN_COMMAND},
#else
{"UNKNOWN_COMMAND", 20, 139},
#endif
#ifdef SSL_R_UNKNOWN_DIGEST
{"UNKNOWN_DIGEST", ERR_LIB_SSL, SSL_R_UNKNOWN_DIGEST},
#else
{"UNKNOWN_DIGEST", 20, 368},
#endif
#ifdef SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE
{"UNKNOWN_KEY_EXCHANGE_TYPE", ERR_LIB_SSL, SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE},
#else
{"UNKNOWN_KEY_EXCHANGE_TYPE", 20, 250},
#endif
#ifdef SSL_R_UNKNOWN_PKEY_TYPE
{"UNKNOWN_PKEY_TYPE", ERR_LIB_SSL, SSL_R_UNKNOWN_PKEY_TYPE},
#else
{"UNKNOWN_PKEY_TYPE", 20, 251},
#endif
#ifdef SSL_R_UNKNOWN_PROTOCOL
{"UNKNOWN_PROTOCOL", ERR_LIB_SSL, SSL_R_UNKNOWN_PROTOCOL},
#else
{"UNKNOWN_PROTOCOL", 20, 252},
#endif
#ifdef SSL_R_UNKNOWN_SSL_VERSION
{"UNKNOWN_SSL_VERSION", ERR_LIB_SSL, SSL_R_UNKNOWN_SSL_VERSION},
#else
{"UNKNOWN_SSL_VERSION", 20, 254},
#endif
#ifdef SSL_R_UNKNOWN_STATE
{"UNKNOWN_STATE", ERR_LIB_SSL, SSL_R_UNKNOWN_STATE},
#else
{"UNKNOWN_STATE", 20, 255},
#endif
#ifdef SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED
{"UNSAFE_LEGACY_RENEGOTIATION_DISABLED", ERR_LIB_SSL, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED},
#else
{"UNSAFE_LEGACY_RENEGOTIATION_DISABLED", 20, 338},
#endif
#ifdef SSL_R_UNSOLICITED_EXTENSION
{"UNSOLICITED_EXTENSION", ERR_LIB_SSL, SSL_R_UNSOLICITED_EXTENSION},
#else
{"UNSOLICITED_EXTENSION", 20, 217},
#endif
#ifdef SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM
{"UNSUPPORTED_COMPRESSION_ALGORITHM", ERR_LIB_SSL, SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM},
#else
{"UNSUPPORTED_COMPRESSION_ALGORITHM", 20, 257},
#endif
#ifdef SSL_R_UNSUPPORTED_ELLIPTIC_CURVE
{"UNSUPPORTED_ELLIPTIC_CURVE", ERR_LIB_SSL, SSL_R_UNSUPPORTED_ELLIPTIC_CURVE},
#else
{"UNSUPPORTED_ELLIPTIC_CURVE", 20, 315},
#endif
#ifdef SSL_R_UNSUPPORTED_PROTOCOL
{"UNSUPPORTED_PROTOCOL", ERR_LIB_SSL, SSL_R_UNSUPPORTED_PROTOCOL},
#else
{"UNSUPPORTED_PROTOCOL", 20, 258},
#endif
#ifdef SSL_R_UNSUPPORTED_SSL_VERSION
{"UNSUPPORTED_SSL_VERSION", ERR_LIB_SSL, SSL_R_UNSUPPORTED_SSL_VERSION},
#else
{"UNSUPPORTED_SSL_VERSION", 20, 259},
#endif
#ifdef SSL_R_UNSUPPORTED_STATUS_TYPE
{"UNSUPPORTED_STATUS_TYPE", ERR_LIB_SSL, SSL_R_UNSUPPORTED_STATUS_TYPE},
#else
{"UNSUPPORTED_STATUS_TYPE", 20, 329},
#endif
#ifdef SSL_R_USE_SRTP_NOT_NEGOTIATED
{"USE_SRTP_NOT_NEGOTIATED", ERR_LIB_SSL, SSL_R_USE_SRTP_NOT_NEGOTIATED},
#else
{"USE_SRTP_NOT_NEGOTIATED", 20, 369},
#endif
#ifdef SSL_R_VERSION_TOO_HIGH
{"VERSION_TOO_HIGH", ERR_LIB_SSL, SSL_R_VERSION_TOO_HIGH},
#else
{"VERSION_TOO_HIGH", 20, 166},
#endif
#ifdef SSL_R_VERSION_TOO_LOW
{"VERSION_TOO_LOW", ERR_LIB_SSL, SSL_R_VERSION_TOO_LOW},
#else
{"VERSION_TOO_LOW", 20, 396},
#endif
#ifdef SSL_R_WRONG_CERTIFICATE_TYPE
{"WRONG_CERTIFICATE_TYPE", ERR_LIB_SSL, SSL_R_WRONG_CERTIFICATE_TYPE},
#else
{"WRONG_CERTIFICATE_TYPE", 20, 383},
#endif
#ifdef SSL_R_WRONG_CIPHER_RETURNED
{"WRONG_CIPHER_RETURNED", ERR_LIB_SSL, SSL_R_WRONG_CIPHER_RETURNED},
#else
{"WRONG_CIPHER_RETURNED", 20, 261},
#endif
#ifdef SSL_R_WRONG_CURVE
{"WRONG_CURVE", ERR_LIB_SSL, SSL_R_WRONG_CURVE},
#else
{"WRONG_CURVE", 20, 378},
#endif
#ifdef SSL_R_WRONG_SIGNATURE_LENGTH
{"WRONG_SIGNATURE_LENGTH", ERR_LIB_SSL, SSL_R_WRONG_SIGNATURE_LENGTH},
#else
{"WRONG_SIGNATURE_LENGTH", 20, 264},
#endif
#ifdef SSL_R_WRONG_SIGNATURE_SIZE
{"WRONG_SIGNATURE_SIZE", ERR_LIB_SSL, SSL_R_WRONG_SIGNATURE_SIZE},
#else
{"WRONG_SIGNATURE_SIZE", 20, 265},
#endif
#ifdef SSL_R_WRONG_SIGNATURE_TYPE
{"WRONG_SIGNATURE_TYPE", ERR_LIB_SSL, SSL_R_WRONG_SIGNATURE_TYPE},
#else
{"WRONG_SIGNATURE_TYPE", 20, 370},
#endif
#ifdef SSL_R_WRONG_SSL_VERSION
{"WRONG_SSL_VERSION", ERR_LIB_SSL, SSL_R_WRONG_SSL_VERSION},
#else
{"WRONG_SSL_VERSION", 20, 266},
#endif
#ifdef SSL_R_WRONG_VERSION_NUMBER
{"WRONG_VERSION_NUMBER", ERR_LIB_SSL, SSL_R_WRONG_VERSION_NUMBER},
#else
{"WRONG_VERSION_NUMBER", 20, 267},
#endif
#ifdef SSL_R_X509_LIB
{"X509_LIB", ERR_LIB_SSL, SSL_R_X509_LIB},
#else
{"X509_LIB", 20, 268},
#endif
#ifdef SSL_R_X509_VERIFICATION_SETUP_PROBLEMS
{"X509_VERIFICATION_SETUP_PROBLEMS", ERR_LIB_SSL, SSL_R_X509_VERIFICATION_SETUP_PROBLEMS},
#else
{"X509_VERIFICATION_SETUP_PROBLEMS", 20, 269},
#endif
#ifdef TS_R_BAD_PKCS7_TYPE
{"BAD_PKCS7_TYPE", ERR_LIB_TS, TS_R_BAD_PKCS7_TYPE},
#else
{"BAD_PKCS7_TYPE", 47, 132},
#endif
#ifdef TS_R_BAD_TYPE
{"BAD_TYPE", ERR_LIB_TS, TS_R_BAD_TYPE},
#else
{"BAD_TYPE", 47, 133},
#endif
#ifdef TS_R_CANNOT_LOAD_CERT
{"CANNOT_LOAD_CERT", ERR_LIB_TS, TS_R_CANNOT_LOAD_CERT},
#else
{"CANNOT_LOAD_CERT", 47, 137},
#endif
#ifdef TS_R_CANNOT_LOAD_KEY
{"CANNOT_LOAD_KEY", ERR_LIB_TS, TS_R_CANNOT_LOAD_KEY},
#else
{"CANNOT_LOAD_KEY", 47, 138},
#endif
#ifdef TS_R_CERTIFICATE_VERIFY_ERROR
{"CERTIFICATE_VERIFY_ERROR", ERR_LIB_TS, TS_R_CERTIFICATE_VERIFY_ERROR},
#else
{"CERTIFICATE_VERIFY_ERROR", 47, 100},
#endif
#ifdef TS_R_COULD_NOT_SET_ENGINE
{"COULD_NOT_SET_ENGINE", ERR_LIB_TS, TS_R_COULD_NOT_SET_ENGINE},
#else
{"COULD_NOT_SET_ENGINE", 47, 127},
#endif
#ifdef TS_R_COULD_NOT_SET_TIME
{"COULD_NOT_SET_TIME", ERR_LIB_TS, TS_R_COULD_NOT_SET_TIME},
#else
{"COULD_NOT_SET_TIME", 47, 115},
#endif
#ifdef TS_R_DETACHED_CONTENT
{"DETACHED_CONTENT", ERR_LIB_TS, TS_R_DETACHED_CONTENT},
#else
{"DETACHED_CONTENT", 47, 134},
#endif
#ifdef TS_R_ESS_ADD_SIGNING_CERT_ERROR
{"ESS_ADD_SIGNING_CERT_ERROR", ERR_LIB_TS, TS_R_ESS_ADD_SIGNING_CERT_ERROR},
#else
{"ESS_ADD_SIGNING_CERT_ERROR", 47, 116},
#endif
#ifdef TS_R_ESS_ADD_SIGNING_CERT_V2_ERROR
{"ESS_ADD_SIGNING_CERT_V2_ERROR", ERR_LIB_TS, TS_R_ESS_ADD_SIGNING_CERT_V2_ERROR},
#else
{"ESS_ADD_SIGNING_CERT_V2_ERROR", 47, 139},
#endif
#ifdef TS_R_ESS_SIGNING_CERTIFICATE_ERROR
{"ESS_SIGNING_CERTIFICATE_ERROR", ERR_LIB_TS, TS_R_ESS_SIGNING_CERTIFICATE_ERROR},
#else
{"ESS_SIGNING_CERTIFICATE_ERROR", 47, 101},
#endif
#ifdef TS_R_INVALID_NULL_POINTER
{"INVALID_NULL_POINTER", ERR_LIB_TS, TS_R_INVALID_NULL_POINTER},
#else
{"INVALID_NULL_POINTER", 47, 102},
#endif
#ifdef TS_R_INVALID_SIGNER_CERTIFICATE_PURPOSE
{"INVALID_SIGNER_CERTIFICATE_PURPOSE", ERR_LIB_TS, TS_R_INVALID_SIGNER_CERTIFICATE_PURPOSE},
#else
{"INVALID_SIGNER_CERTIFICATE_PURPOSE", 47, 117},
#endif
#ifdef TS_R_MESSAGE_IMPRINT_MISMATCH
{"MESSAGE_IMPRINT_MISMATCH", ERR_LIB_TS, TS_R_MESSAGE_IMPRINT_MISMATCH},
#else
{"MESSAGE_IMPRINT_MISMATCH", 47, 103},
#endif
#ifdef TS_R_NONCE_MISMATCH
{"NONCE_MISMATCH", ERR_LIB_TS, TS_R_NONCE_MISMATCH},
#else
{"NONCE_MISMATCH", 47, 104},
#endif
#ifdef TS_R_NONCE_NOT_RETURNED
{"NONCE_NOT_RETURNED", ERR_LIB_TS, TS_R_NONCE_NOT_RETURNED},
#else
{"NONCE_NOT_RETURNED", 47, 105},
#endif
#ifdef TS_R_NO_CONTENT
{"NO_CONTENT", ERR_LIB_TS, TS_R_NO_CONTENT},
#else
{"NO_CONTENT", 47, 106},
#endif
#ifdef TS_R_NO_TIME_STAMP_TOKEN
{"NO_TIME_STAMP_TOKEN", ERR_LIB_TS, TS_R_NO_TIME_STAMP_TOKEN},
#else
{"NO_TIME_STAMP_TOKEN", 47, 107},
#endif
#ifdef TS_R_PKCS7_ADD_SIGNATURE_ERROR
{"PKCS7_ADD_SIGNATURE_ERROR", ERR_LIB_TS, TS_R_PKCS7_ADD_SIGNATURE_ERROR},
#else
{"PKCS7_ADD_SIGNATURE_ERROR", 47, 118},
#endif
#ifdef TS_R_PKCS7_ADD_SIGNED_ATTR_ERROR
{"PKCS7_ADD_SIGNED_ATTR_ERROR", ERR_LIB_TS, TS_R_PKCS7_ADD_SIGNED_ATTR_ERROR},
#else
{"PKCS7_ADD_SIGNED_ATTR_ERROR", 47, 119},
#endif
#ifdef TS_R_PKCS7_TO_TS_TST_INFO_FAILED
{"PKCS7_TO_TS_TST_INFO_FAILED", ERR_LIB_TS, TS_R_PKCS7_TO_TS_TST_INFO_FAILED},
#else
{"PKCS7_TO_TS_TST_INFO_FAILED", 47, 129},
#endif
#ifdef TS_R_POLICY_MISMATCH
{"POLICY_MISMATCH", ERR_LIB_TS, TS_R_POLICY_MISMATCH},
#else
{"POLICY_MISMATCH", 47, 108},
#endif
#ifdef TS_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE
{"PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE", ERR_LIB_TS, TS_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE},
#else
{"PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE", 47, 120},
#endif
#ifdef TS_R_RESPONSE_SETUP_ERROR
{"RESPONSE_SETUP_ERROR", ERR_LIB_TS, TS_R_RESPONSE_SETUP_ERROR},
#else
{"RESPONSE_SETUP_ERROR", 47, 121},
#endif
#ifdef TS_R_SIGNATURE_FAILURE
{"SIGNATURE_FAILURE", ERR_LIB_TS, TS_R_SIGNATURE_FAILURE},
#else
{"SIGNATURE_FAILURE", 47, 109},
#endif
#ifdef TS_R_THERE_MUST_BE_ONE_SIGNER
{"THERE_MUST_BE_ONE_SIGNER", ERR_LIB_TS, TS_R_THERE_MUST_BE_ONE_SIGNER},
#else
{"THERE_MUST_BE_ONE_SIGNER", 47, 110},
#endif
#ifdef TS_R_TIME_SYSCALL_ERROR
{"TIME_SYSCALL_ERROR", ERR_LIB_TS, TS_R_TIME_SYSCALL_ERROR},
#else
{"TIME_SYSCALL_ERROR", 47, 122},
#endif
#ifdef TS_R_TOKEN_NOT_PRESENT
{"TOKEN_NOT_PRESENT", ERR_LIB_TS, TS_R_TOKEN_NOT_PRESENT},
#else
{"TOKEN_NOT_PRESENT", 47, 130},
#endif
#ifdef TS_R_TOKEN_PRESENT
{"TOKEN_PRESENT", ERR_LIB_TS, TS_R_TOKEN_PRESENT},
#else
{"TOKEN_PRESENT", 47, 131},
#endif
#ifdef TS_R_TSA_NAME_MISMATCH
{"TSA_NAME_MISMATCH", ERR_LIB_TS, TS_R_TSA_NAME_MISMATCH},
#else
{"TSA_NAME_MISMATCH", 47, 111},
#endif
#ifdef TS_R_TSA_UNTRUSTED
{"TSA_UNTRUSTED", ERR_LIB_TS, TS_R_TSA_UNTRUSTED},
#else
{"TSA_UNTRUSTED", 47, 112},
#endif
#ifdef TS_R_TST_INFO_SETUP_ERROR
{"TST_INFO_SETUP_ERROR", ERR_LIB_TS, TS_R_TST_INFO_SETUP_ERROR},
#else
{"TST_INFO_SETUP_ERROR", 47, 123},
#endif
#ifdef TS_R_TS_DATASIGN
{"TS_DATASIGN", ERR_LIB_TS, TS_R_TS_DATASIGN},
#else
{"TS_DATASIGN", 47, 124},
#endif
#ifdef TS_R_UNACCEPTABLE_POLICY
{"UNACCEPTABLE_POLICY", ERR_LIB_TS, TS_R_UNACCEPTABLE_POLICY},
#else
{"UNACCEPTABLE_POLICY", 47, 125},
#endif
#ifdef TS_R_UNSUPPORTED_MD_ALGORITHM
{"UNSUPPORTED_MD_ALGORITHM", ERR_LIB_TS, TS_R_UNSUPPORTED_MD_ALGORITHM},
#else
{"UNSUPPORTED_MD_ALGORITHM", 47, 126},
#endif
#ifdef TS_R_UNSUPPORTED_VERSION
{"UNSUPPORTED_VERSION", ERR_LIB_TS, TS_R_UNSUPPORTED_VERSION},
#else
{"UNSUPPORTED_VERSION", 47, 113},
#endif
#ifdef TS_R_VAR_BAD_VALUE
{"VAR_BAD_VALUE", ERR_LIB_TS, TS_R_VAR_BAD_VALUE},
#else
{"VAR_BAD_VALUE", 47, 135},
#endif
#ifdef TS_R_VAR_LOOKUP_FAILURE
{"VAR_LOOKUP_FAILURE", ERR_LIB_TS, TS_R_VAR_LOOKUP_FAILURE},
#else
{"VAR_LOOKUP_FAILURE", 47, 136},
#endif
#ifdef TS_R_WRONG_CONTENT_TYPE
{"WRONG_CONTENT_TYPE", ERR_LIB_TS, TS_R_WRONG_CONTENT_TYPE},
#else
{"WRONG_CONTENT_TYPE", 47, 114},
#endif
#ifdef UI_R_COMMON_OK_AND_CANCEL_CHARACTERS
{"COMMON_OK_AND_CANCEL_CHARACTERS", ERR_LIB_UI, UI_R_COMMON_OK_AND_CANCEL_CHARACTERS},
#else
{"COMMON_OK_AND_CANCEL_CHARACTERS", 40, 104},
#endif
#ifdef UI_R_INDEX_TOO_LARGE
{"INDEX_TOO_LARGE", ERR_LIB_UI, UI_R_INDEX_TOO_LARGE},
#else
{"INDEX_TOO_LARGE", 40, 102},
#endif
#ifdef UI_R_INDEX_TOO_SMALL
{"INDEX_TOO_SMALL", ERR_LIB_UI, UI_R_INDEX_TOO_SMALL},
#else
{"INDEX_TOO_SMALL", 40, 103},
#endif
#ifdef UI_R_NO_RESULT_BUFFER
{"NO_RESULT_BUFFER", ERR_LIB_UI, UI_R_NO_RESULT_BUFFER},
#else
{"NO_RESULT_BUFFER", 40, 105},
#endif
#ifdef UI_R_PROCESSING_ERROR
{"PROCESSING_ERROR", ERR_LIB_UI, UI_R_PROCESSING_ERROR},
#else
{"PROCESSING_ERROR", 40, 107},
#endif
#ifdef UI_R_RESULT_TOO_LARGE
{"RESULT_TOO_LARGE", ERR_LIB_UI, UI_R_RESULT_TOO_LARGE},
#else
{"RESULT_TOO_LARGE", 40, 100},
#endif
#ifdef UI_R_RESULT_TOO_SMALL
{"RESULT_TOO_SMALL", ERR_LIB_UI, UI_R_RESULT_TOO_SMALL},
#else
{"RESULT_TOO_SMALL", 40, 101},
#endif
#ifdef UI_R_SYSASSIGN_ERROR
{"SYSASSIGN_ERROR", ERR_LIB_UI, UI_R_SYSASSIGN_ERROR},
#else
{"SYSASSIGN_ERROR", 40, 109},
#endif
#ifdef UI_R_SYSDASSGN_ERROR
{"SYSDASSGN_ERROR", ERR_LIB_UI, UI_R_SYSDASSGN_ERROR},
#else
{"SYSDASSGN_ERROR", 40, 110},
#endif
#ifdef UI_R_SYSQIOW_ERROR
{"SYSQIOW_ERROR", ERR_LIB_UI, UI_R_SYSQIOW_ERROR},
#else
{"SYSQIOW_ERROR", 40, 111},
#endif
#ifdef UI_R_UNKNOWN_CONTROL_COMMAND
{"UNKNOWN_CONTROL_COMMAND", ERR_LIB_UI, UI_R_UNKNOWN_CONTROL_COMMAND},
#else
{"UNKNOWN_CONTROL_COMMAND", 40, 106},
#endif
#ifdef UI_R_UNKNOWN_TTYGET_ERRNO_VALUE
{"UNKNOWN_TTYGET_ERRNO_VALUE", ERR_LIB_UI, UI_R_UNKNOWN_TTYGET_ERRNO_VALUE},
#else
{"UNKNOWN_TTYGET_ERRNO_VALUE", 40, 108},
#endif
#ifdef UI_R_USER_DATA_DUPLICATION_UNSUPPORTED
{"USER_DATA_DUPLICATION_UNSUPPORTED", ERR_LIB_UI, UI_R_USER_DATA_DUPLICATION_UNSUPPORTED},
#else
{"USER_DATA_DUPLICATION_UNSUPPORTED", 40, 112},
#endif
#ifdef X509V3_R_BAD_IP_ADDRESS
{"BAD_IP_ADDRESS", ERR_LIB_X509V3, X509V3_R_BAD_IP_ADDRESS},
#else
{"BAD_IP_ADDRESS", 34, 118},
#endif
#ifdef X509V3_R_BAD_OBJECT
{"BAD_OBJECT", ERR_LIB_X509V3, X509V3_R_BAD_OBJECT},
#else
{"BAD_OBJECT", 34, 119},
#endif
#ifdef X509V3_R_BN_DEC2BN_ERROR
{"BN_DEC2BN_ERROR", ERR_LIB_X509V3, X509V3_R_BN_DEC2BN_ERROR},
#else
{"BN_DEC2BN_ERROR", 34, 100},
#endif
#ifdef X509V3_R_BN_TO_ASN1_INTEGER_ERROR
{"BN_TO_ASN1_INTEGER_ERROR", ERR_LIB_X509V3, X509V3_R_BN_TO_ASN1_INTEGER_ERROR},
#else
{"BN_TO_ASN1_INTEGER_ERROR", 34, 101},
#endif
#ifdef X509V3_R_DIRNAME_ERROR
{"DIRNAME_ERROR", ERR_LIB_X509V3, X509V3_R_DIRNAME_ERROR},
#else
{"DIRNAME_ERROR", 34, 149},
#endif
#ifdef X509V3_R_DISTPOINT_ALREADY_SET
{"DISTPOINT_ALREADY_SET", ERR_LIB_X509V3, X509V3_R_DISTPOINT_ALREADY_SET},
#else
{"DISTPOINT_ALREADY_SET", 34, 160},
#endif
#ifdef X509V3_R_DUPLICATE_ZONE_ID
{"DUPLICATE_ZONE_ID", ERR_LIB_X509V3, X509V3_R_DUPLICATE_ZONE_ID},
#else
{"DUPLICATE_ZONE_ID", 34, 133},
#endif
#ifdef X509V3_R_ERROR_CONVERTING_ZONE
{"ERROR_CONVERTING_ZONE", ERR_LIB_X509V3, X509V3_R_ERROR_CONVERTING_ZONE},
#else
{"ERROR_CONVERTING_ZONE", 34, 131},
#endif
#ifdef X509V3_R_ERROR_CREATING_EXTENSION
{"ERROR_CREATING_EXTENSION", ERR_LIB_X509V3, X509V3_R_ERROR_CREATING_EXTENSION},
#else
{"ERROR_CREATING_EXTENSION", 34, 144},
#endif
#ifdef X509V3_R_ERROR_IN_EXTENSION
{"ERROR_IN_EXTENSION", ERR_LIB_X509V3, X509V3_R_ERROR_IN_EXTENSION},
#else
{"ERROR_IN_EXTENSION", 34, 128},
#endif
#ifdef X509V3_R_EXPECTED_A_SECTION_NAME
{"EXPECTED_A_SECTION_NAME", ERR_LIB_X509V3, X509V3_R_EXPECTED_A_SECTION_NAME},
#else
{"EXPECTED_A_SECTION_NAME", 34, 137},
#endif
#ifdef X509V3_R_EXTENSION_EXISTS
{"EXTENSION_EXISTS", ERR_LIB_X509V3, X509V3_R_EXTENSION_EXISTS},
#else
{"EXTENSION_EXISTS", 34, 145},
#endif
#ifdef X509V3_R_EXTENSION_NAME_ERROR
{"EXTENSION_NAME_ERROR", ERR_LIB_X509V3, X509V3_R_EXTENSION_NAME_ERROR},
#else
{"EXTENSION_NAME_ERROR", 34, 115},
#endif
#ifdef X509V3_R_EXTENSION_NOT_FOUND
{"EXTENSION_NOT_FOUND", ERR_LIB_X509V3, X509V3_R_EXTENSION_NOT_FOUND},
#else
{"EXTENSION_NOT_FOUND", 34, 102},
#endif
#ifdef X509V3_R_EXTENSION_SETTING_NOT_SUPPORTED
{"EXTENSION_SETTING_NOT_SUPPORTED", ERR_LIB_X509V3, X509V3_R_EXTENSION_SETTING_NOT_SUPPORTED},
#else
{"EXTENSION_SETTING_NOT_SUPPORTED", 34, 103},
#endif
#ifdef X509V3_R_EXTENSION_VALUE_ERROR
{"EXTENSION_VALUE_ERROR", ERR_LIB_X509V3, X509V3_R_EXTENSION_VALUE_ERROR},
#else
{"EXTENSION_VALUE_ERROR", 34, 116},
#endif
#ifdef X509V3_R_ILLEGAL_EMPTY_EXTENSION
{"ILLEGAL_EMPTY_EXTENSION", ERR_LIB_X509V3, X509V3_R_ILLEGAL_EMPTY_EXTENSION},
#else
{"ILLEGAL_EMPTY_EXTENSION", 34, 151},
#endif
#ifdef X509V3_R_INCORRECT_POLICY_SYNTAX_TAG
{"INCORRECT_POLICY_SYNTAX_TAG", ERR_LIB_X509V3, X509V3_R_INCORRECT_POLICY_SYNTAX_TAG},
#else
{"INCORRECT_POLICY_SYNTAX_TAG", 34, 152},
#endif
#ifdef X509V3_R_INVALID_ASNUMBER
{"INVALID_ASNUMBER", ERR_LIB_X509V3, X509V3_R_INVALID_ASNUMBER},
#else
{"INVALID_ASNUMBER", 34, 162},
#endif
#ifdef X509V3_R_INVALID_ASRANGE
{"INVALID_ASRANGE", ERR_LIB_X509V3, X509V3_R_INVALID_ASRANGE},
#else
{"INVALID_ASRANGE", 34, 163},
#endif
#ifdef X509V3_R_INVALID_BOOLEAN_STRING
{"INVALID_BOOLEAN_STRING", ERR_LIB_X509V3, X509V3_R_INVALID_BOOLEAN_STRING},
#else
{"INVALID_BOOLEAN_STRING", 34, 104},
#endif
#ifdef X509V3_R_INVALID_EXTENSION_STRING
{"INVALID_EXTENSION_STRING", ERR_LIB_X509V3, X509V3_R_INVALID_EXTENSION_STRING},
#else
{"INVALID_EXTENSION_STRING", 34, 105},
#endif
#ifdef X509V3_R_INVALID_INHERITANCE
{"INVALID_INHERITANCE", ERR_LIB_X509V3, X509V3_R_INVALID_INHERITANCE},
#else
{"INVALID_INHERITANCE", 34, 165},
#endif
#ifdef X509V3_R_INVALID_IPADDRESS
{"INVALID_IPADDRESS", ERR_LIB_X509V3, X509V3_R_INVALID_IPADDRESS},
#else
{"INVALID_IPADDRESS", 34, 166},
#endif
#ifdef X509V3_R_INVALID_MULTIPLE_RDNS
{"INVALID_MULTIPLE_RDNS", ERR_LIB_X509V3, X509V3_R_INVALID_MULTIPLE_RDNS},
#else
{"INVALID_MULTIPLE_RDNS", 34, 161},
#endif
#ifdef X509V3_R_INVALID_NAME
{"INVALID_NAME", ERR_LIB_X509V3, X509V3_R_INVALID_NAME},
#else
{"INVALID_NAME", 34, 106},
#endif
#ifdef X509V3_R_INVALID_NULL_ARGUMENT
{"INVALID_NULL_ARGUMENT", ERR_LIB_X509V3, X509V3_R_INVALID_NULL_ARGUMENT},
#else
{"INVALID_NULL_ARGUMENT", 34, 107},
#endif
#ifdef X509V3_R_INVALID_NULL_NAME
{"INVALID_NULL_NAME", ERR_LIB_X509V3, X509V3_R_INVALID_NULL_NAME},
#else
{"INVALID_NULL_NAME", 34, 108},
#endif
#ifdef X509V3_R_INVALID_NULL_VALUE
{"INVALID_NULL_VALUE", ERR_LIB_X509V3, X509V3_R_INVALID_NULL_VALUE},
#else
{"INVALID_NULL_VALUE", 34, 109},
#endif
#ifdef X509V3_R_INVALID_NUMBER
{"INVALID_NUMBER", ERR_LIB_X509V3, X509V3_R_INVALID_NUMBER},
#else
{"INVALID_NUMBER", 34, 140},
#endif
#ifdef X509V3_R_INVALID_NUMBERS
{"INVALID_NUMBERS", ERR_LIB_X509V3, X509V3_R_INVALID_NUMBERS},
#else
{"INVALID_NUMBERS", 34, 141},
#endif
#ifdef X509V3_R_INVALID_OBJECT_IDENTIFIER
{"INVALID_OBJECT_IDENTIFIER", ERR_LIB_X509V3, X509V3_R_INVALID_OBJECT_IDENTIFIER},
#else
{"INVALID_OBJECT_IDENTIFIER", 34, 110},
#endif
#ifdef X509V3_R_INVALID_OPTION
{"INVALID_OPTION", ERR_LIB_X509V3, X509V3_R_INVALID_OPTION},
#else
{"INVALID_OPTION", 34, 138},
#endif
#ifdef X509V3_R_INVALID_POLICY_IDENTIFIER
{"INVALID_POLICY_IDENTIFIER", ERR_LIB_X509V3, X509V3_R_INVALID_POLICY_IDENTIFIER},
#else
{"INVALID_POLICY_IDENTIFIER", 34, 134},
#endif
#ifdef X509V3_R_INVALID_PROXY_POLICY_SETTING
{"INVALID_PROXY_POLICY_SETTING", ERR_LIB_X509V3, X509V3_R_INVALID_PROXY_POLICY_SETTING},
#else
{"INVALID_PROXY_POLICY_SETTING", 34, 153},
#endif
#ifdef X509V3_R_INVALID_PURPOSE
{"INVALID_PURPOSE", ERR_LIB_X509V3, X509V3_R_INVALID_PURPOSE},
#else
{"INVALID_PURPOSE", 34, 146},
#endif
#ifdef X509V3_R_INVALID_SAFI
{"INVALID_SAFI", ERR_LIB_X509V3, X509V3_R_INVALID_SAFI},
#else
{"INVALID_SAFI", 34, 164},
#endif
#ifdef X509V3_R_INVALID_SECTION
{"INVALID_SECTION", ERR_LIB_X509V3, X509V3_R_INVALID_SECTION},
#else
{"INVALID_SECTION", 34, 135},
#endif
#ifdef X509V3_R_INVALID_SYNTAX
{"INVALID_SYNTAX", ERR_LIB_X509V3, X509V3_R_INVALID_SYNTAX},
#else
{"INVALID_SYNTAX", 34, 143},
#endif
#ifdef X509V3_R_ISSUER_DECODE_ERROR
{"ISSUER_DECODE_ERROR", ERR_LIB_X509V3, X509V3_R_ISSUER_DECODE_ERROR},
#else
{"ISSUER_DECODE_ERROR", 34, 126},
#endif
#ifdef X509V3_R_MISSING_VALUE
{"MISSING_VALUE", ERR_LIB_X509V3, X509V3_R_MISSING_VALUE},
#else
{"MISSING_VALUE", 34, 124},
#endif
#ifdef X509V3_R_NEED_ORGANIZATION_AND_NUMBERS
{"NEED_ORGANIZATION_AND_NUMBERS", ERR_LIB_X509V3, X509V3_R_NEED_ORGANIZATION_AND_NUMBERS},
#else
{"NEED_ORGANIZATION_AND_NUMBERS", 34, 142},
#endif
#ifdef X509V3_R_NO_CONFIG_DATABASE
{"NO_CONFIG_DATABASE", ERR_LIB_X509V3, X509V3_R_NO_CONFIG_DATABASE},
#else
{"NO_CONFIG_DATABASE", 34, 136},
#endif
#ifdef X509V3_R_NO_ISSUER_CERTIFICATE
{"NO_ISSUER_CERTIFICATE", ERR_LIB_X509V3, X509V3_R_NO_ISSUER_CERTIFICATE},
#else
{"NO_ISSUER_CERTIFICATE", 34, 121},
#endif
#ifdef X509V3_R_NO_ISSUER_DETAILS
{"NO_ISSUER_DETAILS", ERR_LIB_X509V3, X509V3_R_NO_ISSUER_DETAILS},
#else
{"NO_ISSUER_DETAILS", 34, 127},
#endif
#ifdef X509V3_R_NO_POLICY_IDENTIFIER
{"NO_POLICY_IDENTIFIER", ERR_LIB_X509V3, X509V3_R_NO_POLICY_IDENTIFIER},
#else
{"NO_POLICY_IDENTIFIER", 34, 139},
#endif
#ifdef X509V3_R_NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED
{"NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED", ERR_LIB_X509V3, X509V3_R_NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED},
#else
{"NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED", 34, 154},
#endif
#ifdef X509V3_R_NO_PUBLIC_KEY
{"NO_PUBLIC_KEY", ERR_LIB_X509V3, X509V3_R_NO_PUBLIC_KEY},
#else
{"NO_PUBLIC_KEY", 34, 114},
#endif
#ifdef X509V3_R_NO_SUBJECT_DETAILS
{"NO_SUBJECT_DETAILS", ERR_LIB_X509V3, X509V3_R_NO_SUBJECT_DETAILS},
#else
{"NO_SUBJECT_DETAILS", 34, 125},
#endif
#ifdef X509V3_R_OPERATION_NOT_DEFINED
{"OPERATION_NOT_DEFINED", ERR_LIB_X509V3, X509V3_R_OPERATION_NOT_DEFINED},
#else
{"OPERATION_NOT_DEFINED", 34, 148},
#endif
#ifdef X509V3_R_OTHERNAME_ERROR
{"OTHERNAME_ERROR", ERR_LIB_X509V3, X509V3_R_OTHERNAME_ERROR},
#else
{"OTHERNAME_ERROR", 34, 147},
#endif
#ifdef X509V3_R_POLICY_LANGUAGE_ALREADY_DEFINED
{"POLICY_LANGUAGE_ALREADY_DEFINED", ERR_LIB_X509V3, X509V3_R_POLICY_LANGUAGE_ALREADY_DEFINED},
#else
{"POLICY_LANGUAGE_ALREADY_DEFINED", 34, 155},
#endif
#ifdef X509V3_R_POLICY_PATH_LENGTH
{"POLICY_PATH_LENGTH", ERR_LIB_X509V3, X509V3_R_POLICY_PATH_LENGTH},
#else
{"POLICY_PATH_LENGTH", 34, 156},
#endif
#ifdef X509V3_R_POLICY_PATH_LENGTH_ALREADY_DEFINED
{"POLICY_PATH_LENGTH_ALREADY_DEFINED", ERR_LIB_X509V3, X509V3_R_POLICY_PATH_LENGTH_ALREADY_DEFINED},
#else
{"POLICY_PATH_LENGTH_ALREADY_DEFINED", 34, 157},
#endif
#ifdef X509V3_R_POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY
{"POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY", ERR_LIB_X509V3, X509V3_R_POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY},
#else
{"POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY", 34, 159},
#endif
#ifdef X509V3_R_SECTION_NOT_FOUND
{"SECTION_NOT_FOUND", ERR_LIB_X509V3, X509V3_R_SECTION_NOT_FOUND},
#else
{"SECTION_NOT_FOUND", 34, 150},
#endif
#ifdef X509V3_R_UNABLE_TO_GET_ISSUER_DETAILS
{"UNABLE_TO_GET_ISSUER_DETAILS", ERR_LIB_X509V3, X509V3_R_UNABLE_TO_GET_ISSUER_DETAILS},
#else
{"UNABLE_TO_GET_ISSUER_DETAILS", 34, 122},
#endif
#ifdef X509V3_R_UNABLE_TO_GET_ISSUER_KEYID
{"UNABLE_TO_GET_ISSUER_KEYID", ERR_LIB_X509V3, X509V3_R_UNABLE_TO_GET_ISSUER_KEYID},
#else
{"UNABLE_TO_GET_ISSUER_KEYID", 34, 123},
#endif
#ifdef X509V3_R_UNKNOWN_BIT_STRING_ARGUMENT
{"UNKNOWN_BIT_STRING_ARGUMENT", ERR_LIB_X509V3, X509V3_R_UNKNOWN_BIT_STRING_ARGUMENT},
#else
{"UNKNOWN_BIT_STRING_ARGUMENT", 34, 111},
#endif
#ifdef X509V3_R_UNKNOWN_EXTENSION
{"UNKNOWN_EXTENSION", ERR_LIB_X509V3, X509V3_R_UNKNOWN_EXTENSION},
#else
{"UNKNOWN_EXTENSION", 34, 129},
#endif
#ifdef X509V3_R_UNKNOWN_EXTENSION_NAME
{"UNKNOWN_EXTENSION_NAME", ERR_LIB_X509V3, X509V3_R_UNKNOWN_EXTENSION_NAME},
#else
{"UNKNOWN_EXTENSION_NAME", 34, 130},
#endif
#ifdef X509V3_R_UNKNOWN_OPTION
{"UNKNOWN_OPTION", ERR_LIB_X509V3, X509V3_R_UNKNOWN_OPTION},
#else
{"UNKNOWN_OPTION", 34, 120},
#endif
#ifdef X509V3_R_UNSUPPORTED_OPTION
{"UNSUPPORTED_OPTION", ERR_LIB_X509V3, X509V3_R_UNSUPPORTED_OPTION},
#else
{"UNSUPPORTED_OPTION", 34, 117},
#endif
#ifdef X509V3_R_UNSUPPORTED_TYPE
{"UNSUPPORTED_TYPE", ERR_LIB_X509V3, X509V3_R_UNSUPPORTED_TYPE},
#else
{"UNSUPPORTED_TYPE", 34, 167},
#endif
#ifdef X509V3_R_USER_TOO_LONG
{"USER_TOO_LONG", ERR_LIB_X509V3, X509V3_R_USER_TOO_LONG},
#else
{"USER_TOO_LONG", 34, 132},
#endif
#ifdef X509_R_AKID_MISMATCH
{"AKID_MISMATCH", ERR_LIB_X509, X509_R_AKID_MISMATCH},
#else
{"AKID_MISMATCH", 11, 110},
#endif
#ifdef X509_R_BAD_SELECTOR
{"BAD_SELECTOR", ERR_LIB_X509, X509_R_BAD_SELECTOR},
#else
{"BAD_SELECTOR", 11, 133},
#endif
#ifdef X509_R_BAD_X509_FILETYPE
{"BAD_X509_FILETYPE", ERR_LIB_X509, X509_R_BAD_X509_FILETYPE},
#else
{"BAD_X509_FILETYPE", 11, 100},
#endif
#ifdef X509_R_BASE64_DECODE_ERROR
{"BASE64_DECODE_ERROR", ERR_LIB_X509, X509_R_BASE64_DECODE_ERROR},
#else
{"BASE64_DECODE_ERROR", 11, 118},
#endif
#ifdef X509_R_CANT_CHECK_DH_KEY
{"CANT_CHECK_DH_KEY", ERR_LIB_X509, X509_R_CANT_CHECK_DH_KEY},
#else
{"CANT_CHECK_DH_KEY", 11, 114},
#endif
#ifdef X509_R_CERT_ALREADY_IN_HASH_TABLE
{"CERT_ALREADY_IN_HASH_TABLE", ERR_LIB_X509, X509_R_CERT_ALREADY_IN_HASH_TABLE},
#else
{"CERT_ALREADY_IN_HASH_TABLE", 11, 101},
#endif
#ifdef X509_R_CRL_ALREADY_DELTA
{"CRL_ALREADY_DELTA", ERR_LIB_X509, X509_R_CRL_ALREADY_DELTA},
#else
{"CRL_ALREADY_DELTA", 11, 127},
#endif
#ifdef X509_R_CRL_VERIFY_FAILURE
{"CRL_VERIFY_FAILURE", ERR_LIB_X509, X509_R_CRL_VERIFY_FAILURE},
#else
{"CRL_VERIFY_FAILURE", 11, 131},
#endif
#ifdef X509_R_IDP_MISMATCH
{"IDP_MISMATCH", ERR_LIB_X509, X509_R_IDP_MISMATCH},
#else
{"IDP_MISMATCH", 11, 128},
#endif
#ifdef X509_R_INVALID_ATTRIBUTES
{"INVALID_ATTRIBUTES", ERR_LIB_X509, X509_R_INVALID_ATTRIBUTES},
#else
{"INVALID_ATTRIBUTES", 11, 138},
#endif
#ifdef X509_R_INVALID_DIRECTORY
{"INVALID_DIRECTORY", ERR_LIB_X509, X509_R_INVALID_DIRECTORY},
#else
{"INVALID_DIRECTORY", 11, 113},
#endif
#ifdef X509_R_INVALID_FIELD_NAME
{"INVALID_FIELD_NAME", ERR_LIB_X509, X509_R_INVALID_FIELD_NAME},
#else
{"INVALID_FIELD_NAME", 11, 119},
#endif
#ifdef X509_R_INVALID_TRUST
{"INVALID_TRUST", ERR_LIB_X509, X509_R_INVALID_TRUST},
#else
{"INVALID_TRUST", 11, 123},
#endif
#ifdef X509_R_ISSUER_MISMATCH
{"ISSUER_MISMATCH", ERR_LIB_X509, X509_R_ISSUER_MISMATCH},
#else
{"ISSUER_MISMATCH", 11, 129},
#endif
#ifdef X509_R_KEY_TYPE_MISMATCH
{"KEY_TYPE_MISMATCH", ERR_LIB_X509, X509_R_KEY_TYPE_MISMATCH},
#else
{"KEY_TYPE_MISMATCH", 11, 115},
#endif
#ifdef X509_R_KEY_VALUES_MISMATCH
{"KEY_VALUES_MISMATCH", ERR_LIB_X509, X509_R_KEY_VALUES_MISMATCH},
#else
{"KEY_VALUES_MISMATCH", 11, 116},
#endif
#ifdef X509_R_LOADING_CERT_DIR
{"LOADING_CERT_DIR", ERR_LIB_X509, X509_R_LOADING_CERT_DIR},
#else
{"LOADING_CERT_DIR", 11, 103},
#endif
#ifdef X509_R_LOADING_DEFAULTS
{"LOADING_DEFAULTS", ERR_LIB_X509, X509_R_LOADING_DEFAULTS},
#else
{"LOADING_DEFAULTS", 11, 104},
#endif
#ifdef X509_R_METHOD_NOT_SUPPORTED
{"METHOD_NOT_SUPPORTED", ERR_LIB_X509, X509_R_METHOD_NOT_SUPPORTED},
#else
{"METHOD_NOT_SUPPORTED", 11, 124},
#endif
#ifdef X509_R_NAME_TOO_LONG
{"NAME_TOO_LONG", ERR_LIB_X509, X509_R_NAME_TOO_LONG},
#else
{"NAME_TOO_LONG", 11, 134},
#endif
#ifdef X509_R_NEWER_CRL_NOT_NEWER
{"NEWER_CRL_NOT_NEWER", ERR_LIB_X509, X509_R_NEWER_CRL_NOT_NEWER},
#else
{"NEWER_CRL_NOT_NEWER", 11, 132},
#endif
#ifdef X509_R_NO_CERTIFICATE_FOUND
{"NO_CERTIFICATE_FOUND", ERR_LIB_X509, X509_R_NO_CERTIFICATE_FOUND},
#else
{"NO_CERTIFICATE_FOUND", 11, 135},
#endif
#ifdef X509_R_NO_CERTIFICATE_OR_CRL_FOUND
{"NO_CERTIFICATE_OR_CRL_FOUND", ERR_LIB_X509, X509_R_NO_CERTIFICATE_OR_CRL_FOUND},
#else
{"NO_CERTIFICATE_OR_CRL_FOUND", 11, 136},
#endif
#ifdef X509_R_NO_CERT_SET_FOR_US_TO_VERIFY
{"NO_CERT_SET_FOR_US_TO_VERIFY", ERR_LIB_X509, X509_R_NO_CERT_SET_FOR_US_TO_VERIFY},
#else
{"NO_CERT_SET_FOR_US_TO_VERIFY", 11, 105},
#endif
#ifdef X509_R_NO_CRL_FOUND
{"NO_CRL_FOUND", ERR_LIB_X509, X509_R_NO_CRL_FOUND},
#else
{"NO_CRL_FOUND", 11, 137},
#endif
#ifdef X509_R_NO_CRL_NUMBER
{"NO_CRL_NUMBER", ERR_LIB_X509, X509_R_NO_CRL_NUMBER},
#else
{"NO_CRL_NUMBER", 11, 130},
#endif
#ifdef X509_R_PUBLIC_KEY_DECODE_ERROR
{"PUBLIC_KEY_DECODE_ERROR", ERR_LIB_X509, X509_R_PUBLIC_KEY_DECODE_ERROR},
#else
{"PUBLIC_KEY_DECODE_ERROR", 11, 125},
#endif
#ifdef X509_R_PUBLIC_KEY_ENCODE_ERROR
{"PUBLIC_KEY_ENCODE_ERROR", ERR_LIB_X509, X509_R_PUBLIC_KEY_ENCODE_ERROR},
#else
{"PUBLIC_KEY_ENCODE_ERROR", 11, 126},
#endif
#ifdef X509_R_SHOULD_RETRY
{"SHOULD_RETRY", ERR_LIB_X509, X509_R_SHOULD_RETRY},
#else
{"SHOULD_RETRY", 11, 106},
#endif
#ifdef X509_R_UNABLE_TO_FIND_PARAMETERS_IN_CHAIN
{"UNABLE_TO_FIND_PARAMETERS_IN_CHAIN", ERR_LIB_X509, X509_R_UNABLE_TO_FIND_PARAMETERS_IN_CHAIN},
#else
{"UNABLE_TO_FIND_PARAMETERS_IN_CHAIN", 11, 107},
#endif
#ifdef X509_R_UNABLE_TO_GET_CERTS_PUBLIC_KEY
{"UNABLE_TO_GET_CERTS_PUBLIC_KEY", ERR_LIB_X509, X509_R_UNABLE_TO_GET_CERTS_PUBLIC_KEY},
#else
{"UNABLE_TO_GET_CERTS_PUBLIC_KEY", 11, 108},
#endif
#ifdef X509_R_UNKNOWN_KEY_TYPE
{"UNKNOWN_KEY_TYPE", ERR_LIB_X509, X509_R_UNKNOWN_KEY_TYPE},
#else
{"UNKNOWN_KEY_TYPE", 11, 117},
#endif
#ifdef X509_R_UNKNOWN_NID
{"UNKNOWN_NID", ERR_LIB_X509, X509_R_UNKNOWN_NID},
#else
{"UNKNOWN_NID", 11, 109},
#endif
#ifdef X509_R_UNKNOWN_PURPOSE_ID
{"UNKNOWN_PURPOSE_ID", ERR_LIB_X509, X509_R_UNKNOWN_PURPOSE_ID},
#else
{"UNKNOWN_PURPOSE_ID", 11, 121},
#endif
#ifdef X509_R_UNKNOWN_TRUST_ID
{"UNKNOWN_TRUST_ID", ERR_LIB_X509, X509_R_UNKNOWN_TRUST_ID},
#else
{"UNKNOWN_TRUST_ID", 11, 120},
#endif
#ifdef X509_R_UNSUPPORTED_ALGORITHM
{"UNSUPPORTED_ALGORITHM", ERR_LIB_X509, X509_R_UNSUPPORTED_ALGORITHM},
#else
{"UNSUPPORTED_ALGORITHM", 11, 111},
#endif
#ifdef X509_R_WRONG_LOOKUP_TYPE
{"WRONG_LOOKUP_TYPE", ERR_LIB_X509, X509_R_WRONG_LOOKUP_TYPE},
#else
{"WRONG_LOOKUP_TYPE", 11, 112},
#endif
#ifdef X509_R_WRONG_TYPE
{"WRONG_TYPE", ERR_LIB_X509, X509_R_WRONG_TYPE},
#else
{"WRONG_TYPE", 11, 122},
#endif
{NULL, 0, 0} /* sentinel */
}; | c | github | https://github.com/python/cpython | Modules/_ssl_data_111.h |
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
# Apache Hadoop Changelog
## Release 0.20.3 - Unreleased (as of 2018-09-01)
### INCOMPATIBLE CHANGES:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-6382](https://issues.apache.org/jira/browse/HADOOP-6382) | publish hadoop jars to apache mvn repo. | Major | build | Giridharan Kesavan | Giridharan Kesavan |
| [HDFS-132](https://issues.apache.org/jira/browse/HDFS-132) | Namenode in Safemode reports to Simon non-zero number of deleted files during startup | Minor | namenode | Hairong Kuang | Suresh Srinivas |
| [HADOOP-6701](https://issues.apache.org/jira/browse/HADOOP-6701) | Incorrect exit codes for "dfs -chown", "dfs -chgrp" | Minor | fs | Ravi Phulari | Ravi Phulari |
### IMPROVEMENTS:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [MAPREDUCE-1832](https://issues.apache.org/jira/browse/MAPREDUCE-1832) | Support for file sizes less than 1MB in DFSIO benchmark. | Major | benchmarks | Konstantin Shvachko | Konstantin Shvachko |
| [HADOOP-7240](https://issues.apache.org/jira/browse/HADOOP-7240) | Update eclipse .classpath template | Major | . | Aaron T. Myers | Aaron T. Myers |
| [MAPREDUCE-1734](https://issues.apache.org/jira/browse/MAPREDUCE-1734) | Un-deprecate the old MapReduce API in the 0.20 branch | Blocker | documentation | Tom White | Todd Lipcon |
| [HDFS-1013](https://issues.apache.org/jira/browse/HDFS-1013) | Miscellaneous improvements to HTML markup for web UIs | Minor | . | Todd Lipcon | Eugene Koontz |
| [HADOOP-6882](https://issues.apache.org/jira/browse/HADOOP-6882) | Update the patch level of Jetty | Major | . | Owen O'Malley | Owen O'Malley |
### BUG FIXES:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HDFS-15](https://issues.apache.org/jira/browse/HDFS-15) | Rack replication policy can be violated for over replicated blocks | Critical | . | Hairong Kuang | Jitendra Nath Pandey |
| [MAPREDUCE-1522](https://issues.apache.org/jira/browse/MAPREDUCE-1522) | FileInputFormat may change the file system of an input path | Blocker | . | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [MAPREDUCE-1407](https://issues.apache.org/jira/browse/MAPREDUCE-1407) | Invalid example in the documentation of org.apache.hadoop.mapreduce.{Mapper,Reducer} | Trivial | documentation | Benoit Sigoure | Benoit Sigoure |
| [HDFS-955](https://issues.apache.org/jira/browse/HDFS-955) | FSImage.saveFSImage can lose edits | Blocker | namenode | Todd Lipcon | Konstantin Shvachko |
| [HDFS-1041](https://issues.apache.org/jira/browse/HDFS-1041) | DFSClient does not retry in getFileChecksum(..) | Major | hdfs-client | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
| [HDFS-909](https://issues.apache.org/jira/browse/HDFS-909) | Race condition between rollEditLog or rollFSImage and FSEditsLog.write operations corrupts edits log | Blocker | namenode | Cosmin Lehene | Todd Lipcon |
| [HADOOP-6702](https://issues.apache.org/jira/browse/HADOOP-6702) | Incorrect exit codes for "dfs -chown", "dfs -chgrp" when input is given in wildcard format. | Minor | fs | Ravi Phulari | Ravi Phulari |
| [HADOOP-6760](https://issues.apache.org/jira/browse/HADOOP-6760) | WebServer shouldn't increase port number in case of negative port setting caused by Jetty's race | Major | . | Konstantin Boudnik | Konstantin Boudnik |
| [MAPREDUCE-1372](https://issues.apache.org/jira/browse/MAPREDUCE-1372) | ConcurrentModificationException in JobInProgress | Blocker | jobtracker | Amareshwari Sriramadasu | Dick King |
| [MAPREDUCE-118](https://issues.apache.org/jira/browse/MAPREDUCE-118) | Job.getJobID() will always return null | Blocker | client | Amar Kamat | Amareshwari Sriramadasu |
| [MAPREDUCE-1880](https://issues.apache.org/jira/browse/MAPREDUCE-1880) | "java.lang.ArithmeticException: Non-terminating decimal expansion; no exact representable decimal result." while running "hadoop jar hadoop-0.20.1+169.89-examples.jar pi 4 30" | Minor | examples | Victor Pakhomov | Tsz Wo Nicholas Sze |
| [HDFS-1258](https://issues.apache.org/jira/browse/HDFS-1258) | Clearing namespace quota on "/" corrupts FS image | Blocker | namenode | Aaron T. Myers | Aaron T. Myers |
| [HADOOP-6881](https://issues.apache.org/jira/browse/HADOOP-6881) | The efficient comparators aren't always used except for BytesWritable and Text | Major | . | Owen O'Malley | Owen O'Malley |
| [HADOOP-6833](https://issues.apache.org/jira/browse/HADOOP-6833) | IPC leaks call parameters when exceptions thrown | Blocker | . | Todd Lipcon | Todd Lipcon |
| [HADOOP-6928](https://issues.apache.org/jira/browse/HADOOP-6928) | Fix BooleanWritable comparator in 0.20 | Major | io | Owen O'Malley | Johannes Zillmann |
| [HDFS-1404](https://issues.apache.org/jira/browse/HDFS-1404) | TestNodeCount logic incorrect in branch-0.20 | Minor | namenode, test | Todd Lipcon | Todd Lipcon |
| [MAPREDUCE-1280](https://issues.apache.org/jira/browse/MAPREDUCE-1280) | Eclipse Plugin does not work with Eclipse Ganymede (3.4) | Major | . | Aaron Kimball | Alex Kozlov |
| [HADOOP-6724](https://issues.apache.org/jira/browse/HADOOP-6724) | IPC doesn't properly handle IOEs thrown by socket factory | Major | ipc | Todd Lipcon | Todd Lipcon |
| [HDFS-1240](https://issues.apache.org/jira/browse/HDFS-1240) | TestDFSShell failing in branch-20 | Critical | test | Todd Lipcon | Todd Lipcon |
| [HDFS-727](https://issues.apache.org/jira/browse/HDFS-727) | bug setting block size hdfsOpenFile | Blocker | libhdfs | Eli Collins | Eli Collins |
| [HDFS-908](https://issues.apache.org/jira/browse/HDFS-908) | TestDistributedFileSystem fails with Wrong FS on weird hosts | Minor | test | Todd Lipcon | Todd Lipcon |
| [HDFS-1377](https://issues.apache.org/jira/browse/HDFS-1377) | Quota bug for partial blocks allows quotas to be violated | Blocker | namenode | Eli Collins | Eli Collins |
| [HDFS-1406](https://issues.apache.org/jira/browse/HDFS-1406) | TestCLI fails on Ubuntu with default /etc/hosts | Minor | . | Todd Lipcon | Konstantin Boudnik |
| [MAPREDUCE-2262](https://issues.apache.org/jira/browse/MAPREDUCE-2262) | Capacity Scheduler unit tests fail with class not found | Major | capacity-sched | Owen O'Malley | Owen O'Malley |
| [HADOOP-6923](https://issues.apache.org/jira/browse/HADOOP-6923) | Native Libraries do not load if a different platform signature is returned from org.apache.hadoop.util.PlatformName | Major | native | Stephen Watt | Stephen Watt |
| [HDFS-1543](https://issues.apache.org/jira/browse/HDFS-1543) | Reduce dev. cycle time by moving system testing artifacts from default build and push to maven for HDFS | Major | . | Arun C Murthy | Luke Lu |
| [HDFS-1836](https://issues.apache.org/jira/browse/HDFS-1836) | Thousand of CLOSE\_WAIT socket | Major | hdfs-client | Dennis Cheung | Bharath Mundlapudi |
| [HADOOP-7116](https://issues.apache.org/jira/browse/HADOOP-7116) | raise contrib junit test jvm memory size to 512mb | Major | test | Owen O'Malley | Owen O'Malley |
### TESTS:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-6637](https://issues.apache.org/jira/browse/HADOOP-6637) | Benchmark overhead of RPC session establishment | Major | benchmarks | Konstantin Shvachko | Konstantin Shvachko |
### OTHER:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HDFS-1286](https://issues.apache.org/jira/browse/HDFS-1286) | Dry entropy pool on Hudson boxes causing test timeouts | Major | test | Todd Lipcon | Konstantin Boudnik |
| [HADOOP-7372](https://issues.apache.org/jira/browse/HADOOP-7372) | Remove ref of 20.3 release from branch-0.20 CHANGES.txt | Major | documentation | Eli Collins | Eli Collins | | unknown | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/CHANGELOG.0.20.3.md |
# epydoc -- Command line interface
#
# Copyright (C) 2005 Edward Loper
# Author: Edward Loper <edloper@loper.org>
# URL: <http://epydoc.sf.net>
#
# $Id: cli.py 1678 2008-01-29 17:21:29Z edloper $
"""
Command-line interface for epydoc. Abbreviated Usage::
epydoc [options] NAMES...
NAMES... The Python modules to document.
--html Generate HTML output (default).
--latex Generate LaTeX output.
--pdf Generate pdf output, via LaTeX.
-o DIR, --output DIR The output directory.
--inheritance STYLE The format for showing inherited objects.
-V, --version Print the version of epydoc.
-h, --help Display a usage message.
Run \"epydoc --help\" for a complete option list. See the epydoc(1)
man page for more information.
Config Files
============
Configuration files can be specified with the C{--config} option.
These files are read using U{ConfigParser
<http://docs.python.org/lib/module-ConfigParser.html>}. Configuration
files may set options or add names of modules to document. Option
names are (usually) identical to the long names of command line
options. To specify names to document, use any of the following
option names::
module modules value values object objects
A simple example of a config file is::
[epydoc]
modules: sys, os, os.path, re, %(MYSANDBOXPATH)/utilities.py
name: Example
graph: classtree
introspect: no
All ConfigParser interpolations are done using local values and the
environment variables.
Verbosity Levels
================
The C{-v} and C{-q} options increase and decrease verbosity,
respectively. The default verbosity level is zero. The verbosity
levels are currently defined as follows::
Progress Markup warnings Warnings Errors
-3 none no no no
-2 none no no yes
-1 none no yes yes
0 (default) bar no yes yes
1 bar yes yes yes
2 list yes yes yes
"""
__docformat__ = 'epytext en'
import sys, os, time, re, pickle, textwrap
from glob import glob
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
import optparse
import epydoc
from epydoc import log
from epydoc.util import wordwrap, run_subprocess, RunSubprocessError
from epydoc.util import plaintext_to_html
from epydoc.apidoc import UNKNOWN
from epydoc.compat import *
import ConfigParser
from epydoc.docwriter.html_css import STYLESHEETS as CSS_STYLESHEETS
# xlink provides API cross-linking; it is only importable when Docutils
# is installed, so fall back to None and let callers feature-test it.
try:
    from epydoc.docwriter import xlink
except ImportError:
    # Catch only ImportError: a bare "except:" would also hide real
    # failures (e.g. a SyntaxError in the module) and would swallow
    # KeyboardInterrupt/SystemExit.
    xlink = None
#: Valid values for the --inheritance option (see HELP_TOPICS['inheritance']).
INHERITANCE_STYLES = ('grouped', 'listed', 'included')
#: Graph types that may be requested with the --graph option.
GRAPH_TYPES = ('classtree', 'callgraph', 'umlclasstree')
#: The output actions epydoc can perform; exactly one is selected per run.
ACTIONS = ('html', 'text', 'latex', 'dvi', 'ps', 'pdf', 'check')
#: Markup language assumed for docstrings when a module declares none.
DEFAULT_DOCFORMAT = 'epytext'
PROFILER = 'profile' #: Which profiler to use: 'hotshot' or 'profile'
######################################################################
#{ Help Topics
######################################################################
DOCFORMATS = ('epytext', 'plaintext', 'restructuredtext', 'javadoc')
HELP_TOPICS = {
'docformat': textwrap.dedent('''\
__docformat__ is a module variable that specifies the markup
language for the docstrings in a module. Its value is a
string, consisting the name of a markup language, optionally
followed by a language code (such as "en" for English). Epydoc
currently recognizes the following markup language names:
''' + ', '.join(DOCFORMATS)),
'inheritance': textwrap.dedent('''\
The following inheritance formats are currently supported:
- grouped: inherited objects are gathered into groups,
based on what class they were inherited from.
- listed: inherited objects are listed in a short list
at the end of their section.
- included: inherited objects are mixed in with
non-inherited objects.'''),
'css': textwrap.dedent(
'The following built-in CSS stylesheets are available:\n' +
'\n'.join([' %10s: %s' % (key, descr)
for (key, (sheet, descr))
in CSS_STYLESHEETS.items()])),
#'checks': textwrap.dedent('''\
#
# '''),
}
HELP_TOPICS['topics'] = wordwrap(
'Epydoc can provide additional help for the following topics: ' +
', '.join(['%r' % topic for topic in HELP_TOPICS.keys()]))
######################################################################
#{ Argument & Config File Parsing
######################################################################
#: Default value for every command-line option, keyed by the optparse
#: "dest" name.  parse_arguments() seeds the OptionParser with these, so
#: an option left unset by the user (and by any config file) gets the
#: value listed here.
OPTION_DEFAULTS = {
    'action': 'html',
    'show_frames': True,
    'docformat': DEFAULT_DOCFORMAT,
    'show_private': True,
    'show_imports': False,
    'inheritance': 'listed',
    'verbose': 0,
    'quiet': 0,
    'load_pickle': False,
    'parse': True,
    'introspect': True,
    'debug': epydoc.DEBUG,
    'profile': False,
    'graphs': [],
    'list_classes_separately': False,
    'graph_font': None,
    'graph_font_size': None,
    'include_source_code': True,
    'pstat_files': [],
    'simple_term': False,
    'fail_on': None,
    'exclude': [],
    'exclude_parse': [],
    'exclude_introspect': [],
    'external_api': [],
    'external_api_file': [],
    'external_api_root': [],
    'redundant_details': False,
    'src_code_tab_width': 8,
    }
def parse_arguments():
    """Parse epydoc's command-line arguments.

    Builds the optparse parser (general options plus the Actions,
    Generation, Output, external-API, Graph, and Return-Value groups),
    applies C{OPTION_DEFAULTS}, handles C{--help}/C{--version}
    directly, merges in any C{--config} files, validates the result,
    and returns the C{(options, names)} pair.  Exits (via
    C{optparser.error()} or C{sys.exit()}) on bad input or after
    printing help/version information.
    """
    # Construct the option parser.
    usage = '%prog [ACTION] [options] NAMES...'
    version = "Epydoc, version %s" % epydoc.__version__
    optparser = OptionParser(usage=usage, add_help_option=False)
    optparser.add_option('--config',
        action='append', dest="configfiles", metavar='FILE',
        help=("A configuration file, specifying additional OPTIONS "
              "and/or NAMES. This option may be repeated."))
    optparser.add_option("--output", "-o",
        dest="target", metavar="PATH",
        help="The output directory. If PATH does not exist, then "
             "it will be created.")
    optparser.add_option("--quiet", "-q",
        action="count", dest="quiet",
        help="Decrease the verbosity.")
    optparser.add_option("--verbose", "-v",
        action="count", dest="verbose",
        help="Increase the verbosity.")
    optparser.add_option("--debug",
        action="store_true", dest="debug",
        help="Show full tracebacks for internal errors.")
    optparser.add_option("--simple-term",
        action="store_true", dest="simple_term",
        help="Do not try to use color or cursor control when displaying "
             "the progress bar, warnings, or errors.")
    action_group = OptionGroup(optparser, 'Actions')
    optparser.add_option_group(action_group)
    action_group.add_option("--html",
        action="store_const", dest="action", const="html",
        help="Write HTML output.")
    action_group.add_option("--text",
        action="store_const", dest="action", const="text",
        help="Write plaintext output. (not implemented yet)")
    action_group.add_option("--latex",
        action="store_const", dest="action", const="latex",
        help="Write LaTeX output.")
    action_group.add_option("--dvi",
        action="store_const", dest="action", const="dvi",
        help="Write DVI output.")
    action_group.add_option("--ps",
        action="store_const", dest="action", const="ps",
        help="Write Postscript output.")
    action_group.add_option("--pdf",
        action="store_const", dest="action", const="pdf",
        help="Write PDF output.")
    action_group.add_option("--check",
        action="store_const", dest="action", const="check",
        help="Check completeness of docs.")
    action_group.add_option("--pickle",
        action="store_const", dest="action", const="pickle",
        help="Write the documentation to a pickle file.")
    # Provide our own --help and --version options.
    action_group.add_option("--version",
        action="store_const", dest="action", const="version",
        help="Show epydoc's version number and exit.")
    action_group.add_option("-h", "--help",
        action="store_const", dest="action", const="help",
        help="Show this message and exit. For help on specific "
             "topics, use \"--help TOPIC\". Use \"--help topics\" for a "
             "list of available help topics")
    generation_group = OptionGroup(optparser, 'Generation Options')
    optparser.add_option_group(generation_group)
    generation_group.add_option("--docformat",
        dest="docformat", metavar="NAME",
        help="The default markup language for docstrings. Defaults "
             "to \"%s\"." % DEFAULT_DOCFORMAT)
    generation_group.add_option("--parse-only",
        action="store_false", dest="introspect",
        help="Get all information from parsing (don't introspect)")
    generation_group.add_option("--introspect-only",
        action="store_false", dest="parse",
        help="Get all information from introspecting (don't parse)")
    generation_group.add_option("--exclude",
        dest="exclude", metavar="PATTERN", action="append",
        help="Exclude modules whose dotted name matches "
             "the regular expression PATTERN")
    generation_group.add_option("--exclude-introspect",
        dest="exclude_introspect", metavar="PATTERN", action="append",
        help="Exclude introspection of modules whose dotted name matches "
             "the regular expression PATTERN")
    generation_group.add_option("--exclude-parse",
        dest="exclude_parse", metavar="PATTERN", action="append",
        help="Exclude parsing of modules whose dotted name matches "
             "the regular expression PATTERN")
    generation_group.add_option("--inheritance",
        dest="inheritance", metavar="STYLE",
        help="The format for showing inheritance objects. STYLE "
             "should be one of: %s." % ', '.join(INHERITANCE_STYLES))
    generation_group.add_option("--show-private",
        action="store_true", dest="show_private",
        help="Include private variables in the output. (default)")
    generation_group.add_option("--no-private",
        action="store_false", dest="show_private",
        help="Do not include private variables in the output.")
    generation_group.add_option("--show-imports",
        action="store_true", dest="show_imports",
        help="List each module's imports.")
    generation_group.add_option("--no-imports",
        action="store_false", dest="show_imports",
        help="Do not list each module's imports. (default)")
    generation_group.add_option('--show-sourcecode',
        action='store_true', dest='include_source_code',
        help=("Include source code with syntax highlighting in the "
              "HTML output. (default)"))
    generation_group.add_option('--no-sourcecode',
        action='store_false', dest='include_source_code',
        help=("Do not include source code with syntax highlighting in the "
              "HTML output."))
    generation_group.add_option('--include-log',
        action='store_true', dest='include_log',
        help=("Include a page with the process log (epydoc-log.html)"))
    generation_group.add_option(
        '--redundant-details',
        action='store_true', dest='redundant_details',
        help=("Include values in the details lists even if all info "
              "about them is already provided by the summary table."))
    output_group = OptionGroup(optparser, 'Output Options')
    optparser.add_option_group(output_group)
    output_group.add_option("--name", "-n",
        dest="prj_name", metavar="NAME",
        help="The documented project's name (for the navigation bar).")
    output_group.add_option("--css", "-c",
        dest="css", metavar="STYLESHEET",
        help="The CSS stylesheet. STYLESHEET can be either a "
             "builtin stylesheet or the name of a CSS file.")
    output_group.add_option("--url", "-u",
        dest="prj_url", metavar="URL",
        help="The documented project's URL (for the navigation bar).")
    output_group.add_option("--navlink",
        dest="prj_link", metavar="HTML",
        help="HTML code for a navigation link to place in the "
             "navigation bar.")
    output_group.add_option("--top",
        dest="top_page", metavar="PAGE",
        help="The \"top\" page for the HTML documentation. PAGE can "
             "be a URL, the name of a module or class, or one of the "
             "special names \"trees.html\", \"indices.html\", or \"help.html\"")
    output_group.add_option("--help-file",
        dest="help_file", metavar="FILE",
        help="An alternate help file. FILE should contain the body "
             "of an HTML file -- navigation bars will be added to it.")
    output_group.add_option("--show-frames",
        action="store_true", dest="show_frames",
        help="Include frames in the HTML output. (default)")
    output_group.add_option("--no-frames",
        action="store_false", dest="show_frames",
        help="Do not include frames in the HTML output.")
    output_group.add_option('--separate-classes',
        action='store_true', dest='list_classes_separately',
        help=("When generating LaTeX or PDF output, list each class in "
              "its own section, instead of listing them under their "
              "containing module."))
    output_group.add_option('--src-code-tab-width',
        action='store', type='int', dest='src_code_tab_width',
        help=("When generating HTML output, sets the number of spaces "
              "each tab in source code listings is replaced with."))
    # The group of external API options.
    # Skip if the module couldn't be imported (usually missing docutils)
    if xlink is not None:
        link_group = OptionGroup(optparser,
                                 xlink.ApiLinkReader.settings_spec[0])
        optparser.add_option_group(link_group)
        # Re-use the docutils settings spec as optparse options.
        for help, names, opts in xlink.ApiLinkReader.settings_spec[2]:
            opts = opts.copy()
            opts['help'] = help
            link_group.add_option(*names, **opts)
    graph_group = OptionGroup(optparser, 'Graph Options')
    optparser.add_option_group(graph_group)
    graph_group.add_option('--graph',
        action='append', dest='graphs', metavar='GRAPHTYPE',
        help=("Include graphs of type GRAPHTYPE in the generated output. "
              "Graphs are generated using the Graphviz dot executable. "
              "If this executable is not on the path, then use --dotpath "
              "to specify its location. This option may be repeated to "
              "include multiple graph types in the output. GRAPHTYPE "
              "should be one of: all, %s." % ', '.join(GRAPH_TYPES)))
    graph_group.add_option("--dotpath",
        dest="dotpath", metavar='PATH',
        help="The path to the Graphviz 'dot' executable.")
    graph_group.add_option('--graph-font',
        dest='graph_font', metavar='FONT',
        help=("Specify the font used to generate Graphviz graphs. (e.g., "
              "helvetica or times)."))
    graph_group.add_option('--graph-font-size',
        dest='graph_font_size', metavar='SIZE',
        help=("Specify the font size used to generate Graphviz graphs, "
              "in points."))
    graph_group.add_option('--pstat',
        action='append', dest='pstat_files', metavar='FILE',
        help="A pstat output file, to be used in generating call graphs.")
    # this option is for developers, not users.
    # (SUPPRESS_HELP is a truthy marker string, so the "or ..." fallback
    # text below is never actually shown; the option is hidden.)
    graph_group.add_option("--profile-epydoc",
        action="store_true", dest="profile",
        help=SUPPRESS_HELP or
             ("Run the hotshot profiler on epydoc itself. Output "
              "will be written to profile.out."))
    return_group = OptionGroup(optparser, 'Return Value Options')
    optparser.add_option_group(return_group)
    return_group.add_option("--fail-on-error",
        action="store_const", dest="fail_on", const=log.ERROR,
        help="Return a non-zero exit status, indicating failure, if any "
             "errors are encountered.")
    return_group.add_option("--fail-on-warning",
        action="store_const", dest="fail_on", const=log.WARNING,
        help="Return a non-zero exit status, indicating failure, if any "
             "errors or warnings are encountered (not including docstring "
             "warnings).")
    return_group.add_option("--fail-on-docstring-warning",
        action="store_const", dest="fail_on", const=log.DOCSTRING_WARNING,
        help="Return a non-zero exit status, indicating failure, if any "
             "errors or warnings are encountered (including docstring "
             "warnings).")
    # Set the option parser's defaults.
    optparser.set_defaults(**OPTION_DEFAULTS)
    # Parse the arguments.
    options, names = optparser.parse_args()
    # Print help message, if requested. We also provide support for
    # --help [topic]
    if options.action == 'help':
        names = set([n.lower() for n in names])
        for (topic, msg) in HELP_TOPICS.items():
            if topic.lower() in names:
                print '\n' + msg.rstrip() + '\n'
                sys.exit(0)
        optparser.print_help()
        sys.exit(0)
    # Print version message, if requested.
    if options.action == 'version':
        print version
        sys.exit(0)
    # Process any config files.
    if options.configfiles:
        try:
            parse_configfiles(options.configfiles, options, names)
        except (KeyboardInterrupt,SystemExit): raise
        except Exception, e:
            if len(options.configfiles) == 1:
                cf_name = 'config file %s' % options.configfiles[0]
            else:
                cf_name = 'config files %s' % ', '.join(options.configfiles)
            optparser.error('Error reading %s:\n    %s' % (cf_name, e))
    # Check if the input file is a pickle file.
    for name in names:
        if name.endswith('.pickle'):
            if len(names) != 1:
                optparser.error("When a pickle file is specified, no other "
                                "input files may be specified.")
            options.load_pickle = True
    # Check to make sure all options are valid.
    if len(names) == 0:
        optparser.error("No names specified.")
    # perform shell expansion.
    # (Iterate in reverse so replacing names[i:i+1] with multiple glob
    # matches doesn't shift the indices of entries not yet visited.)
    for i, name in reversed(list(enumerate(names[:]))):
        if '?' in name or '*' in name:
            names[i:i+1] = glob(name)
    if options.inheritance not in INHERITANCE_STYLES:
        optparser.error("Bad inheritance style. Valid options are " +
                        ",".join(INHERITANCE_STYLES))
    if not options.parse and not options.introspect:
        optparser.error("Invalid option combination: --parse-only "
                        "and --introspect-only.")
    if options.action == 'text' and len(names) > 1:
        optparser.error("--text option takes only one name.")
    # Check the list of requested graph types to make sure they're
    # acceptable.
    options.graphs = [graph_type.lower() for graph_type in options.graphs]
    for graph_type in options.graphs:
        if graph_type == 'callgraph' and not options.pstat_files:
            optparser.error('"callgraph" graph type may only be used if '
                            'one or more pstat files are specified.')
        # If it's 'all', then add everything (but don't add callgraph if
        # we don't have any profiling info to base them on).
        if graph_type == 'all':
            if options.pstat_files:
                options.graphs = GRAPH_TYPES
            else:
                options.graphs = [g for g in GRAPH_TYPES if g != 'callgraph']
            break
        elif graph_type not in GRAPH_TYPES:
            optparser.error("Invalid graph type %s." % graph_type)
    # Calculate verbosity.
    # (A config file may already have set options.verbosity; -v/-q
    # adjust it relative to that base.)
    verbosity = getattr(options, 'verbosity', 0)
    options.verbosity = verbosity + options.verbose - options.quiet
    # The target default depends on the action.
    if options.target is None:
        options.target = options.action
    # Return parsed args.
    options.names = names
    return options, names
def parse_configfiles(configfiles, options, names):
    """Update C{options} and C{names} in place from the C{[epydoc]}
    section of each config file in C{configfiles}.

    @raise IOError: If a config file cannot be opened.
    @raise ValueError: For an unknown option name, or a value that
        fails validation.
    """
    configparser = ConfigParser.ConfigParser()
    # ConfigParser.read() silently ignores errors, so open the files
    # manually (since we want to notify the user of any errors).
    for configfile in configfiles:
        fp = open(configfile, 'r') # may raise IOError.
        configparser.readfp(fp, configfile)
        fp.close()
    for optname in configparser.options('epydoc'):
        # vars=os.environ lets config values interpolate environment
        # variables via ConfigParser's %(name)s syntax.
        val = configparser.get('epydoc', optname, vars=os.environ).strip()
        optname = optname.lower().strip()
        if optname in ('modules', 'objects', 'values',
                       'module', 'object', 'value'):
            names.extend(_str_to_list(val))
        elif optname == 'target':
            options.target = val
        elif optname == 'output':
            if val.lower() not in ACTIONS:
                raise ValueError('"%s" expected one of: %s' %
                                 (optname, ', '.join(ACTIONS)))
            options.action = val.lower()
        elif optname == 'verbosity':
            options.verbosity = _str_to_int(val, optname)
        elif optname == 'debug':
            options.debug = _str_to_bool(val, optname)
        elif optname in ('simple-term', 'simple_term'):
            options.simple_term = _str_to_bool(val, optname)
        # Generation options
        elif optname == 'docformat':
            options.docformat = val
        elif optname == 'parse':
            options.parse = _str_to_bool(val, optname)
        elif optname == 'introspect':
            options.introspect = _str_to_bool(val, optname)
        elif optname == 'exclude':
            options.exclude.extend(_str_to_list(val))
        elif optname in ('exclude-parse', 'exclude_parse'):
            options.exclude_parse.extend(_str_to_list(val))
        elif optname in ('exclude-introspect', 'exclude_introspect'):
            options.exclude_introspect.extend(_str_to_list(val))
        elif optname == 'inheritance':
            if val.lower() not in INHERITANCE_STYLES:
                raise ValueError('"%s" expected one of: %s.' %
                                 (optname, ', '.join(INHERITANCE_STYLES)))
            options.inheritance = val.lower()
        elif optname =='private':
            options.show_private = _str_to_bool(val, optname)
        elif optname =='imports':
            options.show_imports = _str_to_bool(val, optname)
        elif optname == 'sourcecode':
            options.include_source_code = _str_to_bool(val, optname)
        elif optname in ('include-log', 'include_log'):
            options.include_log = _str_to_bool(val, optname)
        elif optname in ('redundant-details', 'redundant_details'):
            options.redundant_details = _str_to_bool(val, optname)
        # Output options
        elif optname == 'name':
            options.prj_name = val
        elif optname == 'css':
            options.css = val
        elif optname == 'url':
            options.prj_url = val
        elif optname == 'link':
            options.prj_link = val
        elif optname == 'top':
            options.top_page = val
        elif optname == 'help':
            options.help_file = val
        elif optname =='frames':
            options.show_frames = _str_to_bool(val, optname)
        elif optname in ('separate-classes', 'separate_classes'):
            options.list_classes_separately = _str_to_bool(val, optname)
        elif optname in ('src-code-tab-width', 'src_code_tab_width'):
            options.src_code_tab_width = _str_to_int(val, optname)
        # External API
        elif optname in ('external-api', 'external_api'):
            options.external_api.extend(_str_to_list(val))
        elif optname in ('external-api-file', 'external_api_file'):
            options.external_api_file.extend(_str_to_list(val))
        elif optname in ('external-api-root', 'external_api_root'):
            options.external_api_root.extend(_str_to_list(val))
        # Graph options
        elif optname == 'graph':
            graphtypes = _str_to_list(val)
            for graphtype in graphtypes:
                if graphtype not in GRAPH_TYPES + ('all',):
                    raise ValueError('"%s" expected one of: all, %s.' %
                                     (optname, ', '.join(GRAPH_TYPES)))
            options.graphs.extend(graphtypes)
        elif optname == 'dotpath':
            options.dotpath = val
        elif optname in ('graph-font', 'graph_font'):
            options.graph_font = val
        elif optname in ('graph-font-size', 'graph_font_size'):
            options.graph_font_size = _str_to_int(val, optname)
        elif optname == 'pstat':
            options.pstat_files.extend(_str_to_list(val))
        # Return value options
        elif optname in ('failon', 'fail-on', 'fail_on'):
            if val.lower().strip() in ('error', 'errors'):
                options.fail_on = log.ERROR
            elif val.lower().strip() in ('warning', 'warnings'):
                options.fail_on = log.WARNING
            elif val.lower().strip() in ('docstring_warning',
                                         'docstring_warnings'):
                options.fail_on = log.DOCSTRING_WARNING
            else:
                raise ValueError("%r expected one of: error, warning, "
                                 "docstring_warning" % optname)
        else:
            raise ValueError('Unknown option %s' % optname)
def _str_to_bool(val, optname):
if val.lower() in ('0', 'no', 'false', 'n', 'f', 'hide'):
return False
elif val.lower() in ('1', 'yes', 'true', 'y', 't', 'show'):
return True
else:
raise ValueError('"%s" option expected a boolean' % optname)
def _str_to_int(val, optname):
try:
return int(val)
except ValueError:
raise ValueError('"%s" option expected an int' % optname)
def _str_to_list(val):
return val.replace(',', ' ').split()
######################################################################
#{ Interface
######################################################################
def main(options, names):
    """Perform the action requested by the parsed command-line options:
    set up logging, build (or unpickle) the documentation index, and
    dispatch to the appropriate output writer.

    @param options: The options object returned by parse_arguments().
    @param names: The list of module/object names to document (or a
        single pickle file name, if options.load_pickle is set).
    """
    # Set the debug flag, if '--debug' was specified.
    if options.debug:
        epydoc.DEBUG = True
    ## [XX] Did this serve a purpose? Commenting out for now:
    #if options.action == 'text':
    #    if options.parse and options.introspect:
    #        options.parse = False
    # Set up the logger
    if options.simple_term:
        TerminalController.FORCE_SIMPLE_TERM = True
    if options.action == 'text':
        logger = None # no logger for text output.
    elif options.verbosity > 1:
        logger = ConsoleLogger(options.verbosity)
        log.register_logger(logger)
    else:
        # Each number is a rough approximation of how long we spend on
        # that task, used to divide up the unified progress bar.
        stages = [40, # Building documentation
                  7, # Merging parsed & introspected information
                  1, # Linking imported variables
                  3, # Indexing documentation
                  1, # Checking for overridden methods
                  30, # Parsing Docstrings
                  1, # Inheriting documentation
                  2] # Sorting & Grouping
        if options.load_pickle:
            stages = [30] # Loading pickled documentation
        # Add a final stage whose weight depends on the output action.
        if options.action == 'html': stages += [100]
        elif options.action == 'text': stages += [30]
        elif options.action == 'latex': stages += [60]
        elif options.action == 'dvi': stages += [60,30]
        elif options.action == 'ps': stages += [60,40]
        elif options.action == 'pdf': stages += [60,50]
        elif options.action == 'check': stages += [10]
        elif options.action == 'pickle': stages += [10]
        else: raise ValueError, '%r not supported' % options.action
        # Drop stages that won't run under --parse-only/--introspect-only.
        if options.parse and not options.introspect:
            del stages[1] # no merging
        if options.introspect and not options.parse:
            del stages[1:3] # no merging or linking
        logger = UnifiedProgressConsoleLogger(options.verbosity, stages)
        log.register_logger(logger)
    # check the output directory.
    if options.action not in ('text', 'check', 'pickle'):
        if os.path.exists(options.target):
            if not os.path.isdir(options.target):
                log.error("%s is not a directory" % options.target)
                sys.exit(1)
    if options.include_log:
        if options.action == 'html':
            if not os.path.exists(options.target):
                os.mkdir(options.target)
            log.register_logger(HTMLLogger(options.target, options))
        else:
            log.warning("--include-log requires --html")
    # Set the default docformat
    from epydoc import docstringparser
    docstringparser.DEFAULT_DOCFORMAT = options.docformat
    # Configure the external API linking
    if xlink is not None:
        try:
            xlink.ApiLinkReader.read_configuration(options, problematic=False)
        except Exception, exc:
            log.error("Error while configuring external API linking: %s: %s"
                      % (exc.__class__.__name__, exc))
    # Set the dot path
    if options.dotpath:
        from epydoc.docwriter import dotgraph
        dotgraph.DOT_COMMAND = options.dotpath
    # Set the default graph font & size
    if options.graph_font:
        from epydoc.docwriter import dotgraph
        fontname = options.graph_font
        dotgraph.DotGraph.DEFAULT_NODE_DEFAULTS['fontname'] = fontname
        dotgraph.DotGraph.DEFAULT_EDGE_DEFAULTS['fontname'] = fontname
    if options.graph_font_size:
        from epydoc.docwriter import dotgraph
        fontsize = options.graph_font_size
        dotgraph.DotGraph.DEFAULT_NODE_DEFAULTS['fontsize'] = fontsize
        dotgraph.DotGraph.DEFAULT_EDGE_DEFAULTS['fontsize'] = fontsize
    # If the input name is a pickle file, then read the docindex that
    # it contains. Otherwise, build the docs for the input names.
    if options.load_pickle:
        assert len(names) == 1
        log.start_progress('Deserializing')
        log.progress(0.1, 'Loading %r' % names[0])
        t0 = time.time()
        unpickler = pickle.Unpickler(open(names[0], 'rb'))
        # Restore the apidoc.UNKNOWN singleton by identity.
        unpickler.persistent_load = pickle_persistent_load
        docindex = unpickler.load()
        log.debug('deserialization time: %.1f sec' % (time.time()-t0))
        log.end_progress()
    else:
        # Build docs for the named values.
        from epydoc.docbuilder import build_doc_index
        exclude_parse = '|'.join(options.exclude_parse+options.exclude)
        exclude_introspect = '|'.join(options.exclude_introspect+
                                      options.exclude)
        docindex = build_doc_index(names, options.introspect, options.parse,
                                   add_submodules=(options.action!='text'),
                                   exclude_introspect=exclude_introspect,
                                   exclude_parse=exclude_parse)
    if docindex is None:
        if log.ERROR in logger.reported_message_levels:
            sys.exit(1)
        else:
            return # docbuilder already logged an error.
    # Load profile information, if it was given.
    if options.pstat_files:
        try: import pstats
        except ImportError:
            log.error("Could not import pstats -- ignoring pstat files.")
        # NOTE(review): if the import above failed, the Stats() call
        # below raises NameError, which the broad except catches and
        # reports -- it works, but only by accident.
        try:
            profile_stats = pstats.Stats(options.pstat_files[0])
            for filename in options.pstat_files[1:]:
                profile_stats.add(filename)
        except KeyboardInterrupt: raise
        except Exception, e:
            log.error("Error reading pstat file: %s" % e)
            profile_stats = None
        if profile_stats is not None:
            docindex.read_profiling_info(profile_stats)
    # Perform the specified action.
    if options.action == 'html':
        write_html(docindex, options)
    elif options.action in ('latex', 'dvi', 'ps', 'pdf'):
        write_latex(docindex, options, options.action)
    elif options.action == 'text':
        write_text(docindex, options)
    elif options.action == 'check':
        check_docs(docindex, options)
    elif options.action == 'pickle':
        write_pickle(docindex, options)
    else:
        print >>sys.stderr, '\nUnsupported action %s!' % options.action
    # If we suppressed docstring warnings, then let the user know.
    if logger is not None and logger.suppressed_docstring_warning:
        if logger.suppressed_docstring_warning == 1:
            prefix = '1 markup error was found'
        else:
            prefix = ('%d markup errors were found' %
                      logger.suppressed_docstring_warning)
        log.warning("%s while processing docstrings. Use the verbose "
                    "switch (-v) to display markup errors." % prefix)
    # Basic timing breakdown:
    if options.verbosity >= 2 and logger is not None:
        logger.print_times()
    # If we encountered any message types that we were requested to
    # fail on, then exit with status 2.
    if options.fail_on is not None:
        max_reported_message_level = max(logger.reported_message_levels)
        if max_reported_message_level >= options.fail_on:
            sys.exit(2)
def write_html(docindex, options):
    """Generate HTML documentation for docindex into options.target,
    reporting progress through the global log."""
    from epydoc.docwriter.html import HTMLWriter
    html_writer = HTMLWriter(docindex, **options.__dict__)
    # Only name the target directory in the progress header when the
    # user asked for verbose output.
    if options.verbose > 0:
        progress_header = 'Writing HTML docs to %r' % options.target
    else:
        progress_header = 'Writing HTML docs'
    log.start_progress(progress_header)
    html_writer.write(options.target)
    log.end_progress()
def write_pickle(docindex, options):
    """Helper for writing output to a pickle file, which can then be
    read in at a later time. But loading the pickle is only marginally
    faster than building the docs from scratch, so this has pretty
    limited application."""
    # Normalize the target filename so it always ends in '.pickle'.
    if options.target == 'pickle':
        options.target = 'api.pickle'
    elif not options.target.endswith('.pickle'):
        options.target += '.pickle'
    log.start_progress('Serializing output')
    log.progress(0.2, 'Writing %r' % options.target)
    outfile = open(options.target, 'wb')
    # Close the file even if pickling fails (the original leaked the
    # handle on error).
    try:
        pickler = pickle.Pickler(outfile, protocol=0)
        # Save the apidoc.UNKNOWN singleton by name, not by value.
        pickler.persistent_id = pickle_persistent_id
        pickler.dump(docindex)
    finally:
        outfile.close()
    log.end_progress()
def pickle_persistent_id(obj):
    """Helper for pickling, which allows us to save and restore UNKNOWN,
    which is required to be identical to apidoc.UNKNOWN."""
    if obj is UNKNOWN:
        return 'UNKNOWN'
    return None
def pickle_persistent_load(identifier):
    """Helper for pickling, which allows us to save and restore UNKNOWN,
    which is required to be identical to apidoc.UNKNOWN."""
    if identifier != 'UNKNOWN':
        raise pickle.UnpicklingError('Invalid persistent id')
    return UNKNOWN
#: Matches the warning that latex prints (somewhere in its output log)
#: when labels may have changed and another latex run is needed.
_RERUN_LATEX_RE = re.compile(r'(?im)^LaTeX\s+Warning:\s+Label\(s\)\s+may'
                             r'\s+have\s+changed.\s+Rerun')
def write_latex(docindex, options, format):
from epydoc.docwriter.latex import LatexWriter
latex_writer = LatexWriter(docindex, **options.__dict__)
log.start_progress('Writing LaTeX docs')
latex_writer.write(options.target)
log.end_progress()
# If we're just generating the latex, and not any output format,
# then we're done.
if format == 'latex': return
if format == 'dvi': steps = 4
elif format == 'ps': steps = 5
elif format == 'pdf': steps = 6
log.start_progress('Processing LaTeX docs')
oldpath = os.path.abspath(os.curdir)
running = None # keep track of what we're doing.
try:
try:
os.chdir(options.target)
# Clear any old files out of the way.
for ext in 'tex aux log out idx ilg toc ind'.split():
if os.path.exists('apidoc.%s' % ext):
os.remove('apidoc.%s' % ext)
# The first pass generates index files.
running = 'latex'
log.progress(0./steps, 'LaTeX: First pass')
run_subprocess('latex api.tex')
# Build the index.
running = 'makeindex'
log.progress(1./steps, 'LaTeX: Build index')
run_subprocess('makeindex api.idx')
# The second pass generates our output.
running = 'latex'
log.progress(2./steps, 'LaTeX: Second pass')
out, err = run_subprocess('latex api.tex')
# The third pass is only necessary if the second pass
# changed what page some things are on.
running = 'latex'
if _RERUN_LATEX_RE.match(out):
log.progress(3./steps, 'LaTeX: Third pass')
out, err = run_subprocess('latex api.tex')
# A fourth path should (almost?) never be necessary.
running = 'latex'
if _RERUN_LATEX_RE.match(out):
log.progress(3./steps, 'LaTeX: Fourth pass')
run_subprocess('latex api.tex')
# If requested, convert to postscript.
if format in ('ps', 'pdf'):
running = 'dvips'
log.progress(4./steps, 'dvips')
run_subprocess('dvips api.dvi -o api.ps -G0 -Ppdf')
# If requested, convert to pdf.
if format in ('pdf'):
running = 'ps2pdf'
log.progress(5./steps, 'ps2pdf')
run_subprocess(
'ps2pdf -sPAPERSIZE#letter -dMaxSubsetPct#100 '
'-dSubsetFonts#true -dCompatibilityLevel#1.2 '
'-dEmbedAllFonts#true api.ps api.pdf')
except RunSubprocessError, e:
if running == 'latex':
e.out = re.sub(r'(?sm)\A.*?!( LaTeX Error:)?', r'', e.out)
e.out = re.sub(r'(?sm)\s*Type X to quit.*', '', e.out)
e.out = re.sub(r'(?sm)^! Emergency stop.*', '', e.out)
log.error("%s failed: %s" % (running, (e.out+e.err).lstrip()))
except OSError, e:
log.error("%s failed: %s" % (running, e))
finally:
os.chdir(oldpath)
log.end_progress()
def write_text(docindex, options):
    """Write plaintext documentation for every root-level value in
    docindex to stdout (encoding unicode to ascii with
    backslashreplace, so output never fails on non-ascii text)."""
    log.start_progress('Writing output')
    from epydoc.docwriter.plaintext import PlaintextWriter
    plaintext_writer = PlaintextWriter()
    s = ''
    for apidoc in docindex.root:
        s += plaintext_writer.write(apidoc)
    log.end_progress()
    if isinstance(s, unicode):
        s = s.encode('ascii', 'backslashreplace')
    print s
def check_docs(docindex, options):
    """Run the documentation completeness checker over docindex."""
    from epydoc.checker import DocChecker
    checker = DocChecker(docindex)
    checker.check()
def cli():
    """Command-line entry point: parse arguments, run main() (or the
    profiler, for --profile-epydoc), and convert any uncaught
    exception into a friendly error message plus exit status 3
    (unless --debug was given, in which case it propagates)."""
    # Parse command-line arguments.
    options, names = parse_arguments()
    try:
        try:
            if options.profile:
                _profile()
            else:
                main(options, names)
        finally:
            # Flush/close all registered loggers, whatever happened.
            log.close()
    except SystemExit:
        raise
    except KeyboardInterrupt:
        print '\n\n'
        print >>sys.stderr, 'Keyboard interrupt.'
    except:
        if options.debug: raise
        print '\n\n'
        exc_info = sys.exc_info()
        # Old-style string exceptions put the message in exc_info[0].
        if isinstance(exc_info[0], basestring): e = exc_info[0]
        else: e = exc_info[1]
        print >>sys.stderr, ('\nUNEXPECTED ERROR:\n'
                             '%s\n' % (str(e) or e.__class__.__name__))
        print >>sys.stderr, 'Use --debug to see trace information.'
        sys.exit(3)
def _profile():
    """Run epydoc's main() under a profiler (selected by the
    module-level PROFILER constant) and write the collected statistics
    to profile.out."""
    # Hotshot profiler.
    if PROFILER == 'hotshot':
        try: import hotshot, hotshot.stats
        except ImportError:
            print >>sys.stderr, "Could not import profile module!"
            return
        try:
            prof = hotshot.Profile('hotshot.out')
            prof = prof.runctx('main(*parse_arguments())', globals(), {})
        except SystemExit:
            pass
        prof.close()
        # Convert profile.hotshot -> profile.out
        print 'Consolidating hotshot profiling info...'
        hotshot.stats.load('hotshot.out').dump_stats('profile.out')
    # Standard 'profile' profiler.
    elif PROFILER == 'profile':
        # cProfile module was added in Python 2.5 -- use it if its'
        # available, since it's faster.
        try: from cProfile import Profile
        except ImportError:
            try: from profile import Profile
            except ImportError:
                print >>sys.stderr, "Could not import profile module!"
                return
        # There was a bug in Python 2.4's profiler. Check if it's
        # present, and if so, fix it. (Bug was fixed in 2.4maint:
        # <http://mail.python.org/pipermail/python-checkins/
        # 2005-September/047099.html>)
        if (hasattr(Profile, 'dispatch') and
            Profile.dispatch['c_exception'] is
            Profile.trace_dispatch_exception.im_func):
            trace_dispatch_return = Profile.trace_dispatch_return.im_func
            Profile.dispatch['c_exception'] = trace_dispatch_return
        try:
            prof = Profile()
            prof = prof.runctx('main(*parse_arguments())', globals(), {})
        except SystemExit:
            pass
        prof.dump_stats('profile.out')
    else:
        print >>sys.stderr, 'Unknown profiler %s' % PROFILER
        return
######################################################################
#{ Logging
######################################################################
class TerminalController:
"""
A class that can be used to portably generate formatted output to
a terminal. See
U{http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475116}
for documentation. (This is a somewhat stripped-down version.)
"""
BOL = '' #: Move the cursor to the beginning of the line
UP = '' #: Move the cursor up one line
DOWN = '' #: Move the cursor down one line
LEFT = '' #: Move the cursor left one char
RIGHT = '' #: Move the cursor right one char
CLEAR_EOL = '' #: Clear to the end of the line.
CLEAR_LINE = '' #: Clear the current line; cursor to BOL.
BOLD = '' #: Turn on bold mode
NORMAL = '' #: Turn off all modes
COLS = 75 #: Width of the terminal (default to 75)
BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
_STRING_CAPABILITIES = """
BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
CLEAR_EOL=el BOLD=bold UNDERLINE=smul NORMAL=sgr0""".split()
_COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
_ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
#: If this is set to true, then new TerminalControllers will
#: assume that the terminal is not capable of doing manipulation
#: of any kind.
FORCE_SIMPLE_TERM = False
def __init__(self, term_stream=sys.stdout):
# If the stream isn't a tty, then assume it has no capabilities.
if not term_stream.isatty(): return
if self.FORCE_SIMPLE_TERM: return
# Curses isn't available on all platforms
try: import curses
except:
# If it's not available, then try faking enough to get a
# simple progress bar.
self.BOL = '\r'
self.CLEAR_LINE = '\r' + ' '*self.COLS + '\r'
# Check the terminal type. If we fail, then assume that the
# terminal has no capabilities.
try: curses.setupterm()
except: return
# Look up numeric capabilities.
self.COLS = curses.tigetnum('cols')
# Look up string capabilities.
for capability in self._STRING_CAPABILITIES:
(attrib, cap_name) = capability.split('=')
setattr(self, attrib, self._tigetstr(cap_name) or '')
if self.BOL and self.CLEAR_EOL:
self.CLEAR_LINE = self.BOL+self.CLEAR_EOL
# Colors
set_fg = self._tigetstr('setf')
if set_fg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, color, curses.tparm(set_fg, i) or '')
set_fg_ansi = self._tigetstr('setaf')
if set_fg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
    def _tigetstr(self, cap_name):
        """Look up terminfo string capability *cap_name*; '' when absent.

        NOTE(review): written for Python 2 -- under Python 3,
        curses.tigetstr returns bytes, so the str regex below would need
        a bytes pattern. Confirm target interpreter before porting.
        """
        # String capabilities can include "delays" of the form "$<2>".
        # For any modern terminal, we should be able to just ignore
        # these, so strip them out.
        import curses
        cap = curses.tigetstr(cap_name) or ''
        return re.sub(r'\$<\d+>[/*]?', '', cap)
class ConsoleLogger(log.Logger):
    """Logger that writes messages and a progress bar to the console,
    using the capabilities probed by TerminalController. Python 2 only
    (uses `print` statements)."""
    def __init__(self, verbosity, progress_mode=None):
        self._verbosity = verbosity
        self._progress = None
        self._message_blocks = []
        # For ETA display:
        self._progress_start_time = None
        # For per-task times:
        self._task_times = []
        self._progress_header = None
        self.reported_message_levels = set()
        """This set contains all the message levels (WARNING, ERROR,
        etc) that have been reported. It is used by the options
        --fail-on-warning etc to determine the return value."""
        self.suppressed_docstring_warning = 0
        """This variable will be incremented once every time a
        docstring warning is reported tothe logger, but the verbosity
        level is too low for it to be displayed."""
        self.term = TerminalController()
        # Set the progress bar mode.
        if verbosity >= 2: self._progress_mode = 'list'
        elif verbosity >= 0:
            if progress_mode is not None:
                self._progress_mode = progress_mode
            elif self.term.COLS < 15:
                self._progress_mode = 'simple-bar'
            elif self.term.BOL and self.term.CLEAR_EOL and self.term.UP:
                self._progress_mode = 'multiline-bar'
            elif self.term.BOL and self.term.CLEAR_LINE:
                self._progress_mode = 'bar'
            else:
                self._progress_mode = 'simple-bar'
        else: self._progress_mode = 'hide'
    def start_block(self, header):
        # Messages logged until end_block() are collected under `header`.
        self._message_blocks.append( (header, []) )
    def end_block(self):
        # Pop the current block; render it only if it captured messages.
        header, messages = self._message_blocks.pop()
        if messages:
            width = self.term.COLS - 5 - 2*len(self._message_blocks)
            prefix = self.term.CYAN+self.term.BOLD+'| '+self.term.NORMAL
            divider = (self.term.CYAN+self.term.BOLD+'+'+'-'*(width-1)+
                       self.term.NORMAL)
            # Mark up the header:
            header = wordwrap(header, right=width-2, splitchars='\\/').rstrip()
            header = '\n'.join([prefix+self.term.CYAN+l+self.term.NORMAL
                                for l in header.split('\n')])
            # Construct the body:
            body = ''
            for message in messages:
                if message.endswith('\n'): body += message
                else: body += message+'\n'
            # Indent the body:
            body = '\n'.join([prefix+'  '+l for l in body.split('\n')])
            # Put it all together:
            message = divider + '\n' + header + '\n' + body + '\n'
            self._report(message)
    def _format(self, prefix, message, color):
        """
        Rewrap the message; but preserve newlines, and don't touch any
        lines that begin with spaces.
        """
        lines = message.split('\n')
        startindex = indent = len(prefix)
        for i in range(len(lines)):
            if lines[i].startswith(' '):
                lines[i] = ' '*(indent-startindex) + lines[i] + '\n'
            else:
                width = self.term.COLS - 5 - 4*len(self._message_blocks)
                lines[i] = wordwrap(lines[i], indent, width, startindex, '\\/')
                startindex = 0
        return color+prefix+self.term.NORMAL+''.join(lines)
    def log(self, level, message):
        # Record the level even when the message itself is suppressed.
        self.reported_message_levels.add(level)
        if self._verbosity >= -2 and level >= log.ERROR:
            message = self._format('  Error: ', message, self.term.RED)
        elif self._verbosity >= -1 and level >= log.WARNING:
            message = self._format('Warning: ', message, self.term.YELLOW)
        elif self._verbosity >= 1 and level >= log.DOCSTRING_WARNING:
            message = self._format('Warning: ', message, self.term.YELLOW)
        elif self._verbosity >= 3 and level >= log.INFO:
            message = self._format('   Info: ', message, self.term.NORMAL)
        elif epydoc.DEBUG and level == log.DEBUG:
            message = self._format('  Debug: ', message, self.term.CYAN)
        else:
            # Message filtered out; only count suppressed docstring warnings.
            if level >= log.DOCSTRING_WARNING:
                self.suppressed_docstring_warning += 1
            return
        self._report(message)
    def _report(self, message):
        # Route to the current block if one is open; otherwise write
        # directly, clearing any progress bar that is on screen first.
        if not message.endswith('\n'): message += '\n'
        if self._message_blocks:
            self._message_blocks[-1][-1].append(message)
        else:
            # If we're in the middle of displaying a progress bar,
            # then make room for the message.
            if self._progress_mode == 'simple-bar':
                if self._progress is not None:
                    print
                    self._progress = None
            if self._progress_mode == 'bar':
                sys.stdout.write(self.term.CLEAR_LINE)
            if self._progress_mode == 'multiline-bar':
                sys.stdout.write((self.term.CLEAR_EOL + '\n')*2 +
                                 self.term.CLEAR_EOL + self.term.UP*2)
            # Display the message message.
            sys.stdout.write(message)
            sys.stdout.flush()
    def progress(self, percent, message=''):
        # Render progress in whichever mode __init__ selected.
        percent = min(1.0, percent)
        message = '%s' % message
        if self._progress_mode == 'list':
            if message:
                print '[%3d%%] %s' % (100*percent, message)
                sys.stdout.flush()
        elif self._progress_mode == 'bar':
            # Single-line bar; cursor returns to BOL so it can be redrawn.
            dots = int((self.term.COLS/2-8)*percent)
            background = '-'*(self.term.COLS/2-8)
            if len(message) > self.term.COLS/2:
                message = message[:self.term.COLS/2-3]+'...'
            sys.stdout.write(self.term.CLEAR_LINE + '%3d%% '%(100*percent) +
                             self.term.GREEN + '[' + self.term.BOLD +
                             '='*dots + background[dots:] + self.term.NORMAL +
                             self.term.GREEN + '] ' + self.term.NORMAL +
                             message + self.term.BOL)
            sys.stdout.flush()
            self._progress = percent
        elif self._progress_mode == 'multiline-bar':
            # Three-line display: elapsed/ETA, the bar, and the message.
            dots = int((self.term.COLS-10)*percent)
            background = '-'*(self.term.COLS-10)
            if len(message) > self.term.COLS-10:
                message = message[:self.term.COLS-10-3]+'...'
            else:
                message = message.center(self.term.COLS-10)
            time_elapsed = time.time()-self._progress_start_time
            if percent > 0:
                time_remain = (time_elapsed / percent) * (1-percent)
            else:
                time_remain = 0
            sys.stdout.write(
                # Line 1:
                self.term.CLEAR_EOL + '  ' +
                '%-8s' % self._timestr(time_elapsed) +
                self.term.BOLD + 'Progress:'.center(self.term.COLS-26) +
                self.term.NORMAL + '%8s' % self._timestr(time_remain) + '\n' +
                # Line 2:
                self.term.CLEAR_EOL + ('%3d%% ' % (100*percent)) +
                self.term.GREEN + '[' + self.term.BOLD + '='*dots +
                background[dots:] + self.term.NORMAL + self.term.GREEN +
                ']' + self.term.NORMAL + '\n' +
                # Line 3:
                self.term.CLEAR_EOL + '  ' + message + self.term.BOL +
                self.term.UP + self.term.UP)
            sys.stdout.flush()
            self._progress = percent
        elif self._progress_mode == 'simple-bar':
            # Append-only dots; works on terminals with no cursor control.
            if self._progress is None:
                sys.stdout.write('  [')
                self._progress = 0.0
            dots = int((self.term.COLS-2)*percent)
            progress_dots = int((self.term.COLS-2)*self._progress)
            if dots > progress_dots:
                sys.stdout.write('.'*(dots-progress_dots))
                sys.stdout.flush()
                self._progress = percent
    def _timestr(self, dt):
        # Format a duration in seconds as H:MM:SS or MM:SS.
        dt = int(dt)
        if dt >= 3600:
            return '%d:%02d:%02d' % (dt/3600, dt%3600/60, dt%60)
        else:
            return '%02d:%02d' % (dt/60, dt%60)
    def start_progress(self, header=None):
        if self._progress is not None:
            raise ValueError
        # NOTE(review): this assignment is a no-op (the check above
        # guarantees _progress is already None).
        self._progress = None
        self._progress_start_time = time.time()
        self._progress_header = header
        if self._progress_mode != 'hide' and header:
            print self.term.BOLD + header + self.term.NORMAL
    def end_progress(self):
        # Draw the bar at 100%, erase it, and record the task's duration.
        self.progress(1.)
        if self._progress_mode == 'bar':
            sys.stdout.write(self.term.CLEAR_LINE)
        if self._progress_mode == 'multiline-bar':
            sys.stdout.write((self.term.CLEAR_EOL + '\n')*2 +
                             self.term.CLEAR_EOL + self.term.UP*2)
        if self._progress_mode == 'simple-bar':
            print ']'
        self._progress = None
        self._task_times.append( (time.time()-self._progress_start_time,
                                  self._progress_header) )
    def print_times(self):
        # Print per-task durations with a proportional '=' bar.
        # NOTE(review): the loop variable `time` shadows the time module
        # within this method; `total` is computed but never used.
        print
        print 'Timing summary:'
        total = sum([time for (time, task) in self._task_times])
        max_t = max([time for (time, task) in self._task_times])
        for (time, task) in self._task_times:
            task = task[:31]
            print '  %s%s %7.1fs' % (task, '.'*(35-len(task)), time),
            if self.term.COLS > 55:
                print '|'+'=' * int((self.term.COLS-53) * time / max_t)
            else:
                print
        print
class UnifiedProgressConsoleLogger(ConsoleLogger):
    """A ConsoleLogger that renders several weighted stages as a single
    progress bar.

    ``stages`` is a sequence of weights; the reported percentage is the
    weighted position across all stages combined.
    """
    def __init__(self, verbosity, stages, progress_mode=None):
        self.stage = 0
        self.stages = stages
        self.task = None
        ConsoleLogger.__init__(self, verbosity, progress_mode)

    def progress(self, percent, message=''):
        # Weighted overall position: completed stages' weights plus the
        # fraction of the current stage, normalized by the total weight.
        idx = self.stage - 1
        completed = sum(self.stages[:idx])
        overall = (completed + percent * self.stages[idx]) / float(sum(self.stages))
        if message is UNKNOWN:
            message = None
        if message:
            message = '%s: %s' % (self.task, message)
        ConsoleLogger.progress(self, overall, message)

    def start_progress(self, header=None):
        # Only open the underlying bar once, for the very first stage.
        self.task = header
        if self.stage == 0:
            ConsoleLogger.start_progress(self)
        self.stage += 1

    def end_progress(self):
        # Only close the bar when the final stage has finished.
        if self.stage == len(self.stages):
            ConsoleLogger.end_progress(self)

    def print_times(self):
        # Per-stage timing is meaningless for a unified bar; suppress it.
        pass
class HTMLLogger(log.Logger):
    """
    A logger used to generate a log of all warnings and messages to an
    HTML file.
    """
    #: Name of the log file, created inside the output directory.
    FILENAME = "epydoc-log.html"
    HEADER = textwrap.dedent('''\
        <?xml version="1.0" encoding="ascii"?>
        <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
          "DTD/xhtml1-transitional.dtd">
        <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
        <head>
        <title>Epydoc Log</title>
        <link rel="stylesheet" href="epydoc.css" type="text/css" />
        </head>
        <body bgcolor="white" text="black" link="blue" vlink="#204080"
        alink="#204080">
        <h1 class="epydoc">Epydoc Log</h1>
        <p class="log">Epydoc started at %s</p>''')
    START_BLOCK = '<div class="log-block"><h2 class="log-hdr">%s</h2>'
    MESSAGE = ('<div class="log-%s"><b>%s</b>: \n'
               '%s</div>\n')
    END_BLOCK = '</div>'
    FOOTER = "</body>\n</html>\n"

    def __init__(self, directory, options):
        """Open ``epydoc-log.html`` inside *directory* and write the
        HTML header (stamped with the start time)."""
        self.start_time = time.time()
        self.out = open(os.path.join(directory, self.FILENAME), 'w')
        self.out.write(self.HEADER % time.ctime(self.start_time))
        self.is_empty = True
        self.options = options

    def write_options(self, options):
        """Dump every option value as an HTML table, with defaults sorted
        before (and styled differently from) changed options."""
        self.out.write(self.START_BLOCK % 'Epydoc Options')
        msg = '<table border="0" cellpadding="0" cellspacing="0">\n'
        opts = [(key, getattr(options, key)) for key in dir(options)
                if key not in dir(optparse.Values)]
        # Tag each option with whether it still has its default value.
        opts = [(val==OPTION_DEFAULTS.get(key), key, val)
                for (key, val) in opts]
        for is_default, key, val in sorted(opts):
            css = is_default and 'opt-default' or 'opt-changed'
            msg += ('<tr valign="top" class="%s"><td valign="top">%s</td>'
                    '<td valign="top"><tt> = </tt></td>'
                    '<td valign="top"><tt>%s</tt></td></tr>' %
                    (css, key, plaintext_to_html(repr(val))))
        msg += '</table>\n'
        self.out.write('<div class="log-info">\n%s</div>\n' % msg)
        self.out.write(self.END_BLOCK)

    def start_block(self, header):
        self.out.write(self.START_BLOCK % header)

    def end_block(self):
        self.out.write(self.END_BLOCK)

    def log(self, level, message):
        """Record *message* if it is at least a docstring warning."""
        # This hint is only useful on the console; skip it in the HTML log.
        if message.endswith("(-v) to display markup errors."): return
        if level >= log.ERROR:
            self.out.write(self._message('error', message))
        elif level >= log.WARNING:
            self.out.write(self._message('warning', message))
        elif level >= log.DOCSTRING_WARNING:
            self.out.write(self._message('docstring warning', message))

    def _message(self, level, message):
        """Render one message as an HTML div; multi-line messages are
        wrapped in <pre> to preserve their layout."""
        self.is_empty = False
        message = plaintext_to_html(message)
        if '\n' in message:
            message = '<pre class="log">%s</pre>' % message
        hdr = ' '.join([w.capitalize() for w in level.split()])
        return self.MESSAGE % (level.split()[-1], hdr, message)

    def close(self):
        """Write the footer (and the options table) and close the file."""
        if self.is_empty:
            self.out.write('<div class="log-info">'
                           'No warnings or errors!</div>')
        self.write_options(self.options)
        self.out.write('<p class="log">Epydoc finished at %s</p>\n'
                       '<p class="log">(Elapsed time: %s)</p>' %
                       (time.ctime(), self._elapsed_time()))
        self.out.write(self.FOOTER)
        self.out.close()

    def _elapsed_time(self):
        """Return a human-readable string for the time since __init__."""
        secs = int(time.time()-self.start_time)
        if secs < 60:
            return '%d seconds' % secs
        if secs < 3600:
            return '%d minutes, %d seconds' % (secs//60, secs%60)
        else:
            # BUG FIX: the remainder of an hour is in *seconds* and must
            # be divided by 60 to give minutes (previously `secs%3600`
            # was reported raw, e.g. 3700s -> "1 hours, 100 minutes").
            return '%d hours, %d minutes' % (secs//3600, (secs%3600)//60)
######################################################################
## main
######################################################################
if __name__ == '__main__':
cli() | unknown | codeparrot/codeparrot-clean | ||
from ..Qt import QtGui, QtCore
from .. import functions as fn
__all__ = ['PathButton']
class PathButton(QtGui.QPushButton):
    """Simple PushButton extension that paints a QPainterPath centered on its face.
    """
    def __init__(self, parent=None, path=None, pen='default', brush=None, size=(30,30), margin=7):
        """
        ============== =====================================================
        **Arguments:**
        parent         Optional parent widget.
        path           QPainterPath to draw on the button face (optional).
        pen            Pen for the path outline; 'default' selects black.
        brush          Brush used to fill the path.
        size           (width, height) fixed size, or None for no fixed size.
        margin         Pixels of padding between the path and button edge.
        ============== =====================================================
        """
        QtGui.QPushButton.__init__(self, parent)
        self.margin = margin
        self.path = None
        if pen == 'default':
            pen = 'k'
        self.setPen(pen)
        self.setBrush(brush)
        if path is not None:
            self.setPath(path)
        if size is not None:
            self.setFixedWidth(size[0])
            self.setFixedHeight(size[1])

    def setBrush(self, brush):
        """Set the fill brush (any argument accepted by functions.mkBrush)."""
        self.brush = fn.mkBrush(brush)

    def setPen(self, *args, **kwargs):
        """Set the outline pen (any arguments accepted by functions.mkPen)."""
        self.pen = fn.mkPen(*args, **kwargs)

    def setPath(self, path):
        """Set the QPainterPath to draw and schedule a repaint."""
        self.path = path
        self.update()

    def paintEvent(self, ev):
        QtGui.QPushButton.paintEvent(self, ev)
        # BUG FIX: __init__ leaves self.path = None when no path is given,
        # so painting before setPath() raised AttributeError on
        # `None.boundingRect()`. Draw only the plain button in that case.
        if self.path is None:
            return
        margin = self.margin
        geom = QtCore.QRectF(0, 0, self.width(), self.height()).adjusted(margin, margin, -margin, -margin)
        rect = self.path.boundingRect()
        # Uniform scale so the path fits the padded button face.
        # NOTE(review): a degenerate path (zero-width/height bounding
        # rect) would still divide by zero here -- confirm callers only
        # supply non-empty paths.
        scale = min(geom.width() / float(rect.width()), geom.height() / float(rect.height()))
        p = QtGui.QPainter(self)
        p.setRenderHint(p.Antialiasing)
        p.translate(geom.center())
        p.scale(scale, scale)
        p.translate(-rect.center())
        p.setPen(self.pen)
        p.setBrush(self.brush)
        p.drawPath(self.path)
        p.end()
# Copyright (c) 2014 Quobyte Inc.
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from cinder import compute
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume.drivers import remotefs as remotefs_drv
VERSION = '1.0'
LOG = logging.getLogger(__name__)
# Driver options; registered with oslo.config (CONF) below so they can be
# set per-backend in cinder.conf.
volume_opts = [
    cfg.StrOpt('quobyte_volume_url',
               default=None,
               help=('URL to the Quobyte volume e.g.,'
                     ' quobyte://<DIR host>/<volume name>')),
    cfg.StrOpt('quobyte_client_cfg',
               default=None,
               help=('Path to a Quobyte Client configuration file.')),
    cfg.BoolOpt('quobyte_sparsed_volumes',
                default=True,
                help=('Create volumes as sparse files which take no space.'
                      ' If set to False, volume is created as regular file.'
                      'In such case volume creation takes a lot of time.')),
    cfg.BoolOpt('quobyte_qcow2_volumes',
                default=True,
                help=('Create volumes as QCOW2 files rather than raw files.')),
    cfg.StrOpt('quobyte_mount_point_base',
               default='$state_path/mnt',
               help=('Base dir containing the mount point'
                     ' for the Quobyte volume.')),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
    """Cinder driver for Quobyte USP.
    Volumes are stored as files on the mounted Quobyte volume. The hypervisor
    will expose them as block devices.
    Unlike other similar drivers, this driver uses exactly one Quobyte volume
    because Quobyte USP is a distributed storage system. To add or remove
    capacity, administrators can add or remove storage servers to/from the
    volume.
    For different types of volumes e.g., SSD vs. rotating disks,
    use multiple backends in Cinder.
    Note: To be compliant with the inherited RemoteFSSnapDriver, Quobyte
    volumes are also referred to as shares.
    Version history:
        1.0 - Initial driver.
    """
    driver_volume_type = 'quobyte'
    driver_prefix = 'quobyte'
    volume_backend_name = 'Quobyte'
    VERSION = VERSION
    def __init__(self, execute=processutils.execute, *args, **kwargs):
        # NOTE(review): `execute` is accepted here but never forwarded to
        # the superclass -- confirm whether it should be passed on.
        super(QuobyteDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(volume_opts)
        # Used to manage snapshots which are currently attached to a VM.
        self._nova = None
    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        self.set_nas_security_options(is_new_cinder_install=False)
        super(QuobyteDriver, self).do_setup(context)
        self.shares = {} # address : options
        self._nova = compute.API()
    def check_for_setup_error(self):
        # Fail fast if the volume URL is unset or the Quobyte mount helper
        # is not installed on this host.
        if not self.configuration.quobyte_volume_url:
            msg = (_("There's no Quobyte volume configured (%s). Example:"
                     " quobyte://<DIR host>/<volume name>") %
                   'quobyte_volume_url')
            LOG.warning(msg)
            raise exception.VolumeDriverException(msg)
        # Check if mount.quobyte is installed
        try:
            self._execute('mount.quobyte', check_exit_code=False,
                          run_as_root=False)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise exception.VolumeDriverException(
                    'mount.quobyte is not installed')
            else:
                raise
    def set_nas_security_options(self, is_new_cinder_install):
        # Quobyte always runs with secure file operations/permissions and
        # never executes as root.
        self.configuration.nas_secure_file_operations = 'true'
        self.configuration.nas_secure_file_permissions = 'true'
        self._execute_as_root = False
    def _qemu_img_info(self, path, volume_name):
        # Delegate to the base helper, anchored at our mount point base.
        return super(QuobyteDriver, self)._qemu_img_info_base(
            path, volume_name, self.configuration.quobyte_mount_point_base)
    @utils.synchronized('quobyte', external=False)
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        self._create_cloned_volume(volume, src_vref)
    @utils.synchronized('quobyte', external=False)
    def create_volume(self, volume):
        return super(QuobyteDriver, self).create_volume(volume)
    @utils.synchronized('quobyte', external=False)
    def create_volume_from_snapshot(self, volume, snapshot):
        return self._create_volume_from_snapshot(volume, snapshot)
    def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
        """Copy data from snapshot to destination volume.
        This is done with a qemu-img convert to raw/qcow2 from the snapshot
        qcow2.
        """
        # NOTE(review): the format string below omits %(size)s although
        # 'size' is supplied in the dict -- harmless, but likely a
        # truncated log message.
        LOG.debug("snapshot: %(snap)s, volume: %(vol)s, ",
                  {'snap': snapshot['id'],
                   'vol': volume['id'],
                   'size': volume_size})
        info_path = self._local_path_volume_info(snapshot['volume'])
        snap_info = self._read_info_file(info_path)
        vol_path = self._local_volume_dir(snapshot['volume'])
        forward_file = snap_info[snapshot['id']]
        forward_path = os.path.join(vol_path, forward_file)
        # Find the file which backs this file, which represents the point
        # when this snapshot was created.
        img_info = self._qemu_img_info(forward_path,
                                       snapshot['volume']['name'])
        path_to_snap_img = os.path.join(vol_path, img_info.backing_file)
        path_to_new_vol = self._local_path_volume(volume)
        LOG.debug("will copy from snapshot at %s", path_to_snap_img)
        if self.configuration.quobyte_qcow2_volumes:
            out_format = 'qcow2'
        else:
            out_format = 'raw'
        image_utils.convert_image(path_to_snap_img,
                                  path_to_new_vol,
                                  out_format,
                                  run_as_root=self._execute_as_root)
        self._set_rw_permissions_for_all(path_to_new_vol)
    @utils.synchronized('quobyte', external=False)
    def delete_volume(self, volume):
        """Deletes a logical volume."""
        if not volume['provider_location']:
            LOG.warning(_LW('Volume %s does not have provider_location '
                            'specified, skipping'), volume['name'])
            return
        self._ensure_share_mounted(volume['provider_location'])
        volume_dir = self._local_volume_dir(volume)
        mounted_path = os.path.join(volume_dir,
                                    self.get_active_image_from_info(volume))
        self._execute('rm', '-f', mounted_path,
                      run_as_root=self._execute_as_root)
        # If an exception (e.g. timeout) occurred during delete_snapshot, the
        # base volume may linger around, so just delete it if it exists
        base_volume_path = self._local_path_volume(volume)
        fileutils.delete_if_exists(base_volume_path)
        info_path = self._local_path_volume_info(volume)
        fileutils.delete_if_exists(info_path)
    @utils.synchronized('quobyte', external=False)
    def create_snapshot(self, snapshot):
        """Apply locking to the create snapshot operation."""
        return self._create_snapshot(snapshot)
    @utils.synchronized('quobyte', external=False)
    def delete_snapshot(self, snapshot):
        """Apply locking to the delete snapshot operation."""
        self._delete_snapshot(snapshot)
    @utils.synchronized('quobyte', external=False)
    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        # Find active qcow2 file
        active_file = self.get_active_image_from_info(volume)
        path = '%s/%s/%s' % (self.configuration.quobyte_mount_point_base,
                             self._get_hash_str(volume['provider_location']),
                             active_file)
        data = {'export': volume['provider_location'],
                'name': active_file}
        if volume['provider_location'] in self.shares:
            data['options'] = self.shares[volume['provider_location']]
        # Test file for raw vs. qcow2 format
        info = self._qemu_img_info(path, volume['name'])
        data['format'] = info.file_format
        if data['format'] not in ['raw', 'qcow2']:
            msg = _('%s must be a valid raw or qcow2 image.') % path
            raise exception.InvalidVolume(msg)
        return {
            'driver_volume_type': 'quobyte',
            'data': data,
            'mount_point_base': self.configuration.quobyte_mount_point_base
        }
    @utils.synchronized('quobyte', external=False)
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        self._copy_volume_to_image(context, volume, image_service,
                                   image_meta)
    @utils.synchronized('quobyte', external=False)
    def extend_volume(self, volume, size_gb):
        volume_path = self.local_path(volume)
        volume_filename = os.path.basename(volume_path)
        # Ensure no snapshots exist for the volume
        active_image = self.get_active_image_from_info(volume)
        if volume_filename != active_image:
            msg = _('Extend volume is only supported for this'
                    ' driver when no snapshots exist.')
            raise exception.InvalidVolume(msg)
        info = self._qemu_img_info(volume_path, volume['name'])
        backing_fmt = info.file_format
        if backing_fmt not in ['raw', 'qcow2']:
            msg = _('Unrecognized backing format: %s')
            raise exception.InvalidVolume(msg % backing_fmt)
        # qemu-img can resize both raw and qcow2 files
        image_utils.resize_image(volume_path, size_gb)
    def _do_create_volume(self, volume):
        """Create a volume on given Quobyte volume.
        :param volume: volume reference
        """
        volume_path = self.local_path(volume)
        volume_size = volume['size']
        # qcow2 takes precedence over sparse/regular raw files.
        if self.configuration.quobyte_qcow2_volumes:
            self._create_qcow2_file(volume_path, volume_size)
        else:
            if self.configuration.quobyte_sparsed_volumes:
                self._create_sparsed_file(volume_path, volume_size)
            else:
                self._create_regular_file(volume_path, volume_size)
        self._set_rw_permissions_for_all(volume_path)
    def _load_shares_config(self, share_file=None):
        """Put 'quobyte_volume_url' into the 'shares' list.
        :param share_file: string, Not used because the user has to specify the
                           the Quobyte volume directly.
        """
        self.shares = {}
        url = self.configuration.quobyte_volume_url
        # Strip quobyte:// from the URL
        protocol = self.driver_volume_type + "://"
        if url.startswith(protocol):
            url = url[len(protocol):]
        self.shares[url] = None # None = No extra mount options.
        LOG.debug("Quobyte Volume URL set to: %s", self.shares)
    def _ensure_share_mounted(self, quobyte_volume):
        """Mount Quobyte volume.
        :param quobyte_volume: string
        """
        mount_path = self._get_mount_point_for_share(quobyte_volume)
        self._mount_quobyte(quobyte_volume, mount_path, ensure=True)
    @utils.synchronized('quobyte_ensure', external=False)
    def _ensure_shares_mounted(self):
        """Mount the Quobyte volume.
        Used for example by RemoteFsDriver._update_volume_stats
        """
        self._mounted_shares = []
        self._load_shares_config()
        for share in self.shares.keys():
            try:
                self._ensure_share_mounted(share)
                self._mounted_shares.append(share)
            except Exception as exc:
                # Best effort: a share that fails to mount is logged and
                # simply left out of _mounted_shares.
                LOG.warning(_LW('Exception during mounting %s'), exc)
        LOG.debug('Available shares %s', self._mounted_shares)
    def _find_share(self, volume_size_in_gib):
        """Returns the mounted Quobyte volume.
        Multiple shares are not supported because the virtualization of
        multiple storage devices is taken care of at the level of Quobyte USP.
        For different types of volumes e.g., SSD vs. rotating disks, use
        multiple backends in Cinder.
        :param volume_size_in_gib: int size in GB. Ignored by this driver.
        """
        if not self._mounted_shares:
            raise exception.NotFound()
        assert len(self._mounted_shares) == 1, 'There must be exactly' \
            ' one Quobyte volume.'
        target_volume = self._mounted_shares[0]
        LOG.debug('Selected %s as target Quobyte volume.', target_volume)
        return target_volume
    def _get_mount_point_for_share(self, quobyte_volume):
        """Return mount point for Quobyte volume.
        :param quobyte_volume: Example: storage-host/openstack-volumes
        """
        return os.path.join(self.configuration.quobyte_mount_point_base,
                            self._get_hash_str(quobyte_volume))
    # open() wrapper to mock reading from /proc/mount.
    @staticmethod
    def read_proc_mount(): # pragma: no cover
        return open('/proc/mounts')
    def _mount_quobyte(self, quobyte_volume, mount_path, ensure=False):
        """Mount Quobyte volume to mount path."""
        # Check /proc/mounts first to see whether we are already mounted.
        mounted = False
        for l in QuobyteDriver.read_proc_mount():
            if l.split()[1] == mount_path:
                mounted = True
                break
        if mounted:
            # A stale FUSE mount stats as ENOTCONN; unmount and remount.
            try:
                os.stat(mount_path)
            except OSError as exc:
                if exc.errno == errno.ENOTCONN:
                    mounted = False
                    try:
                        LOG.info(_LI('Fixing previous mount %s which was not'
                                     ' unmounted correctly.'), mount_path)
                        self._execute('umount.quobyte', mount_path,
                                      run_as_root=False)
                    except processutils.ProcessExecutionError as exc:
                        LOG.warning(_LW("Failed to unmount previous mount: "
                                        "%s"), exc)
                else:
                    # TODO(quobyte): Extend exc analysis in here?
                    LOG.warning(_LW("Unknown error occurred while checking "
                                    "mount point: %s Trying to continue."),
                                exc)
        if not mounted:
            if not os.path.isdir(mount_path):
                self._execute('mkdir', '-p', mount_path)
            command = ['mount.quobyte', quobyte_volume, mount_path]
            if self.configuration.quobyte_client_cfg:
                command.extend(['-c', self.configuration.quobyte_client_cfg])
            try:
                LOG.info(_LI('Mounting volume: %s ...'), quobyte_volume)
                self._execute(*command, run_as_root=False)
                LOG.info(_LI('Mounting volume: %s succeeded'), quobyte_volume)
                mounted = True
            except processutils.ProcessExecutionError as exc:
                # Racing mounts are tolerated when `ensure` is requested.
                if ensure and 'already mounted' in exc.stderr:
                    LOG.warning(_LW("%s is already mounted"), quobyte_volume)
                else:
                    raise
        if mounted:
            self._validate_volume(mount_path)
    def _validate_volume(self, mount_path):
        """Wraps execute calls for checking validity of a Quobyte volume"""
        command = ['getfattr', "-n", "quobyte.info", mount_path]
        try:
            self._execute(*command, run_as_root=False)
        except processutils.ProcessExecutionError as exc:
            msg = (_("The mount %(mount_path)s is not a valid"
                     " Quobyte USP volume. Error: %(exc)s")
                   % {'mount_path': mount_path, 'exc': exc})
            raise exception.VolumeDriverException(msg)
        if not os.access(mount_path, os.W_OK | os.X_OK):
            LOG.warning(_LW("Volume is not writable. Please broaden the file"
                            " permissions. Mount: %s"), mount_path)
#
# (c) 2017 Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
import pytest
boto3 = pytest.importorskip("boto3")
import json
import copy
from ansible.module_utils._text import to_bytes
from ansible.module_utils import basic
from ansible.compat.tests.mock import MagicMock, Mock, patch
# lambda is a keyword so we have to hack this.
_temp = __import__("ansible.modules.cloud.amazon.lambda")
lda = getattr(_temp.modules.cloud.amazon,"lambda")
def set_module_args(args):
    """Serialize *args* the way Ansible passes module parameters and
    stash them where AnsibleModule's argument parsing will find them."""
    wrapped = {'ANSIBLE_MODULE_ARGS': args}
    basic._ANSIBLE_ARGS = to_bytes(json.dumps(wrapped))
# Canonical AWS-side configuration for an existing lambda; variants below
# tweak single fields to simulate drift against the module arguments.
base_lambda_config={
    'FunctionName' : 'lambda_name',
    'Role' : 'arn:aws:iam::987654321012:role/lambda_basic_execution',
    'Handler' : 'lambda_python.my_handler',
    'Description' : 'this that the other',
    'Timeout' : 3,
    'MemorySize' : 128,
    'Runtime' : 'python2.7',
    'CodeSha256' : 'AqMZ+xptM7aC9VXu+5jyp1sqO+Nj4WFMNzQxtPMP2n8=',
}
# One configuration field differs (Timeout).
one_change_lambda_config=copy.copy(base_lambda_config)
one_change_lambda_config['Timeout']=4
# Two configuration fields differ (Timeout + Role).
two_change_lambda_config=copy.copy(one_change_lambda_config)
two_change_lambda_config['Role']='arn:aws:iam::987654321012:role/lambda_advanced_execution'
# Only the code hash differs -- simulates changed source with same config.
code_change_lambda_config=copy.copy(base_lambda_config)
code_change_lambda_config['CodeSha256']='P+Zy8U4T4RiiHWElhL10VBKj9jw4rSJ5bm/TiW+4Rts='
# Module arguments fed to lda.main() via set_module_args; mirrors
# base_lambda_config so that by default no change is detected.
base_module_args={
    "region": "us-west-1",
    "name": "lambda_name",
    "state": "present",
    "zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
    "runtime": 'python2.7',
    "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
    "memory_size": 128,
    "timeout" : 3,
    "handler": 'lambda_python.my_handler'
}
# Same arguments plus an environment variable block.
module_args_with_environment=dict(base_module_args, environment_variables={
    "variable_name": "variable_value"
})
def make_mock_no_connection_connection(config):
    """return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
    # Simulate a lambda that does not exist yet: get_function reports
    # nothing found, while any update would claim version 1.
    api_client = MagicMock()
    api_client.get_function.configure_mock(return_value=False)
    api_client.update_function_configuration.configure_mock(
        return_value={'Version' : 1})
    connection_factory = Mock(return_value=api_client)
    return (connection_factory, api_client)
def make_mock_connection(config):
    """return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
    # Simulate an existing lambda whose AWS-side configuration is `config`.
    api_client = MagicMock()
    api_client.get_function.configure_mock(
        return_value={'Configuration' : config})
    api_client.update_function_configuration.configure_mock(
        return_value={'Version' : 1})
    connection_factory = Mock(return_value=api_client)
    return (connection_factory, api_client)
class AnsibleFailJson(Exception):
    """Raised by fail_json_double in place of AnsibleModule.fail_json."""


def fail_json_double(*args, **kwargs):
    """works like fail_json but returns module results inside exception instead of stdout"""
    # Carry the would-be module result (with failed=True) on the exception.
    results = dict(kwargs, failed=True)
    raise AnsibleFailJson(results)
#TODO: def test_handle_different_types_in_config_params():
def test_create_lambda_if_not_exist():
    """main() against a non-existent function must call create_function
    (with keyword args and no Environment) and never update anything."""
    set_module_args(base_module_args)
    (boto3_conn_double, lambda_client_double)=make_mock_no_connection_connection(code_change_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "unexpectedly updated lambda configuration when should have only created"
    assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
        "update lambda function code when function should have been created only"
    assert(len(lambda_client_double.create_function.mock_calls) > 0), \
        "failed to call create_function "
    (create_args, create_kwargs)=lambda_client_double.create_function.call_args
    assert (len(create_kwargs) > 0), "expected create called with keyword args, none found"
    try:
        # For now I assume that we should NOT send an empty environment. It might
        # be okay / better to explicitly send an empty environment. However `None'
        # is not acceptable - mikedlr
        create_kwargs["Environment"]
        raise(Exception("Environment sent to boto when none expected"))
    except KeyError:
        pass #We are happy, no environment is fine
def test_update_lambda_if_code_changed():
    """main() against an existing function whose CodeSha256 differs must
    push new code but leave the configuration untouched."""
    set_module_args(base_module_args)
    (boto3_conn_double, lambda_client_double)=make_mock_connection(code_change_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "unexpectedly updatede lambda configuration when only code changed"
    # NOTE(review): this `< 2` check is already implied by the `== 0`
    # assertion above; it looks like a copy/paste leftover.
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
        "lambda function update called multiple times when only one time should be needed"
    assert(len(lambda_client_double.update_function_code.mock_calls) > 1), \
        "failed to update lambda function when code changed"
    # 3 because after uploading we call into the return from mock to try to find what function version
    # was returned so the MagicMock actually sees two calls for one update.
    assert(len(lambda_client_double.update_function_code.mock_calls) < 3), \
        "lambda function code update called multiple times when only one time should be needed"
def test_update_lambda_if_config_changed():
    """Configuration changed (two fields): expect one config update, no code upload."""
    set_module_args(base_module_args)
    boto3_conn_double, lambda_client_double = make_mock_connection(two_change_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    config_calls = len(lambda_client_double.update_function_configuration.mock_calls)
    code_calls = len(lambda_client_double.update_function_code.mock_calls)
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert len(boto3_conn_double.mock_calls) == 1, "multiple boto connections used unexpectedly"
    assert config_calls > 0, "failed to update lambda function when configuration changed"
    assert config_calls < 2, "lambda function update called multiple times when only one time should be needed"
    assert code_calls == 0, "updated lambda code when no change should have happened"
def test_update_lambda_if_only_one_config_item_changed():
    """A single changed configuration field still triggers exactly one config update."""
    set_module_args(base_module_args)
    boto3_conn_double, lambda_client_double = make_mock_connection(one_change_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    config_calls = len(lambda_client_double.update_function_configuration.mock_calls)
    code_calls = len(lambda_client_double.update_function_code.mock_calls)
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert len(boto3_conn_double.mock_calls) == 1, "multiple boto connections used unexpectedly"
    assert config_calls > 0, "failed to update lambda function when configuration changed"
    assert config_calls < 2, "lambda function update called multiple times when only one time should be needed"
    assert code_calls == 0, "updated lambda code when no change should have happened"
def test_update_lambda_if_added_environment_variable():
    """Adding environment variables triggers a config update carrying them."""
    set_module_args(module_args_with_environment)
    boto3_conn_double, lambda_client_double = make_mock_connection(base_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    config_mock = lambda_client_double.update_function_configuration
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert len(boto3_conn_double.mock_calls) == 1, "multiple boto connections used unexpectedly"
    assert len(config_mock.mock_calls) > 0, "failed to update lambda function when configuration changed"
    assert len(config_mock.mock_calls) < 2, "lambda function update called multiple times when only one time should be needed"
    assert len(lambda_client_double.update_function_code.mock_calls) == 0, \
        "updated lambda code when no change should have happened"
    # the environment must have been passed through as keyword arguments
    _, update_kwargs = config_mock.call_args
    assert len(update_kwargs) > 0, "expected update configuration called with keyword args, none found"
    assert update_kwargs['Environment']['Variables'] == module_args_with_environment['environment_variables']
def test_dont_update_lambda_if_nothing_changed():
    """No change at all: neither configuration nor code may be updated."""
    set_module_args(base_module_args)
    boto3_conn_double, lambda_client_double = make_mock_connection(base_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    config_calls = len(lambda_client_double.update_function_configuration.mock_calls)
    code_calls = len(lambda_client_double.update_function_code.mock_calls)
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert len(boto3_conn_double.mock_calls) == 1, "multiple boto connections used unexpectedly"
    assert config_calls == 0, "updated lambda function when no configuration changed"
    assert code_calls == 0, "updated lambda code when no change should have happened"
def test_warn_region_not_specified():
    """main() must fail with a clear message when no region is supplied.

    Bug fix: previously the assertion lived only inside the `except` clause,
    so if lda.main() raised nothing the test silently passed. An `else`
    branch now makes that case an explicit failure.
    """
    set_module_args({
        "name": "lambda_name",
        "state": "present",
        # Module is called without a region causing error
        # "region": "us-east-1",
        "zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
        "runtime": 'python2.7',
        "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
        "handler": 'lambda_python.my_handler'})
    get_aws_connection_info_double = Mock(return_value=(None, None, None))
    with patch.object(lda, 'get_aws_connection_info', get_aws_connection_info_double):
        with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
            try:
                lda.main()
            except AnsibleFailJson as e:
                result = e.args[0]
                assert "region must be specified" in result['msg']
            else:
                raise AssertionError("lda.main() did not fail although region was missing")
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
import (
"bytes"
"context"
clientv3 "go.etcd.io/etcd/client/v3"
)
// leasePrefix wraps a Lease so that keys attached to a lease are
// reported relative to a fixed namespace prefix (see TimeToLive).
type leasePrefix struct {
	clientv3.Lease
	pfx []byte // namespace prefix stripped from / required on attached keys
}
// NewLease wraps a Lease interface to filter for only keys with a prefix
// and remove that prefix when fetching attached keys through TimeToLive.
func NewLease(l clientv3.Lease, prefix string) clientv3.Lease {
	return &leasePrefix{
		Lease: l,
		pfx:   []byte(prefix),
	}
}
func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
resp, err := l.Lease.TimeToLive(ctx, id, opts...)
if err != nil {
return nil, err
}
if len(resp.Keys) > 0 {
var outKeys [][]byte
for i := range resp.Keys {
if len(resp.Keys[i]) < len(l.pfx) {
// too short
continue
}
if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) {
// doesn't match prefix
continue
}
// strip prefix
outKeys = append(outKeys, resp.Keys[i][len(l.pfx):])
}
resp.Keys = outKeys
}
return resp, nil
} | go | github | https://github.com/etcd-io/etcd | client/v3/namespace/lease.go |
//===--- ObjCBridge.h - Swift Language Objective-C Bridging ABI -*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// Swift ABI for interacting with Objective-C.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_ABI_OBJCBRIDGE_H
#define SWIFT_ABI_OBJCBRIDGE_H
#include "swift/Runtime/Config.h"
#include <cstdint>
struct objc_class;
namespace swift {
template <typename Runtime> struct TargetMetadata;
using Metadata = TargetMetadata<InProcess>;
struct HeapObject;
} // end namespace swift
#if SWIFT_OBJC_INTEROP
#include <objc/objc.h>
#include <objc/runtime.h>
#include <objc/objc-api.h>
// Redeclare APIs from the Objective-C runtime.
// These functions are not available through public headers, but are guaranteed
// to exist on OS X >= 10.9 and iOS >= 7.0.
OBJC_EXPORT id objc_retain(id);
OBJC_EXPORT void objc_release(id);
OBJC_EXPORT id _objc_rootAutorelease(id);
OBJC_EXPORT void objc_moveWeak(id*, id*);
OBJC_EXPORT void objc_copyWeak(id*, id*);
OBJC_EXPORT id objc_initWeak(id*, id);
OBJC_EXPORT void objc_destroyWeak(id*);
OBJC_EXPORT id objc_loadWeakRetained(id*);
// Description of an Objective-C image.
// __DATA,__objc_imageinfo stores one of these.
typedef struct objc_image_info {
  uint32_t version; // currently 0
  uint32_t flags;   // NOTE(review): flag semantics defined by the ObjC runtime — not specified here
} objc_image_info;
// Class and metaclass construction from a compiler-generated memory image.
// cls and cls->isa must each be OBJC_MAX_CLASS_SIZE bytes.
// Extra bytes not used the metadata must be zero.
// info is the same objc_image_info that would be emitted by a static compiler.
// Returns nil if a class with the same name already exists.
// Returns nil if the superclass is nil and the class is not marked as a root.
// Returns nil if the superclass is under construction.
// Do not call objc_registerClassPair().
OBJC_EXPORT Class objc_readClassPair(Class cls,
const struct objc_image_info *info)
__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0);
// Magic symbol whose _address_ is the runtime's isa mask.
OBJC_EXPORT const struct { char c; } objc_absolute_packed_isa_class_mask;
namespace swift {
// Root -dealloc implementation for classes with Swift reference counting.
// This function should be used to implement -dealloc in a root class with
// Swift reference counting. [super dealloc] MUST NOT be called after this,
// for the object will have already been deallocated by the time
// this function returns.
SWIFT_RUNTIME_EXPORT
void swift_rootObjCDealloc(HeapObject *self);
// Uses Swift bridging to box a C string into an NSString without introducing
// a link-time dependency on NSString.
SWIFT_CC(swift) SWIFT_RUNTIME_STDLIB_API
id swift_stdlib_NSStringFromUTF8(const char *cstr, int len);
}
#endif // SWIFT_OBJC_INTEROP
#endif // SWIFT_ABI_OBJCBRIDGE_H | c | github | https://github.com/apple/swift | include/swift/Runtime/ObjCBridge.h |
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from lms.djangoapps.course_blocks.api import get_course_blocks
from openedx.core.djangolib.testing.utils import get_mock_request
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from ..course_data import CourseData
from ..subsection_grade_factory import SubsectionGradeFactory
class GradeTestBase(SharedModuleStoreTestCase):
    """
    Base class for some Grades tests.
    """
    @classmethod
    def setUpClass(cls):
        # Build a small course tree shared by all tests in the class:
        # course -> chapter -> two graded "Homework" sequentials, each
        # holding one multiple-choice problem; a second chapter re-parents
        # the first sequential to create a DAG (see note below).
        super(GradeTestBase, cls).setUpClass()
        cls.course = CourseFactory.create()
        with cls.store.bulk_operations(cls.course.id):
            cls.chapter = ItemFactory.create(
                parent=cls.course,
                category="chapter",
                display_name="Test Chapter"
            )
            cls.sequence = ItemFactory.create(
                parent=cls.chapter,
                category='sequential',
                display_name="Test Sequential X",
                graded=True,
                format="Homework"
            )
            cls.vertical = ItemFactory.create(
                parent=cls.sequence,
                category='vertical',
                display_name='Test Vertical 1'
            )
            # one multiple-choice problem where choice_2 is correct
            problem_xml = MultipleChoiceResponseXMLFactory().build_xml(
                question_text='The correct answer is Choice 3',
                choices=[False, False, True, False],
                choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
            )
            cls.problem = ItemFactory.create(
                parent=cls.vertical,
                category="problem",
                display_name="Test Problem",
                data=problem_xml
            )
            cls.sequence2 = ItemFactory.create(
                parent=cls.chapter,
                category='sequential',
                display_name="Test Sequential A",
                graded=True,
                format="Homework"
            )
            cls.problem2 = ItemFactory.create(
                parent=cls.sequence2,
                category="problem",
                display_name="Test Problem",
                data=problem_xml
            )
            # AED 2017-06-19: make cls.sequence belong to multiple parents,
            # so we can test that DAGs with this shape are handled correctly.
            cls.chapter_2 = ItemFactory.create(
                parent=cls.course,
                category='chapter',
                display_name='Test Chapter 2'
            )
            cls.chapter_2.children.append(cls.sequence.location)
            cls.store.update_item(cls.chapter_2, UserFactory().id)
    def setUp(self):
        # Per-test state: a logged-in enrolled user, the course structure as
        # that user sees it, and a subsection grade factory over it.
        super(GradeTestBase, self).setUp()
        self.request = get_mock_request(UserFactory())
        self.client.login(username=self.request.user.username, password="test")
        self._set_grading_policy()
        self.course_structure = get_course_blocks(self.request.user, self.course.location)
        self.course_data = CourseData(self.request.user, structure=self.course_structure)
        self.subsection_grade_factory = SubsectionGradeFactory(self.request.user, self.course, self.course_structure)
        CourseEnrollment.enroll(self.request.user, self.course.id)
    def _set_grading_policy(self, passing=0.5):
        """
        Updates the course's grading policy.
        """
        # Homework carries full weight; NoCredit exists but counts for nothing.
        self.grading_policy = {
            "GRADER": [
                {
                    "type": "Homework",
                    "min_count": 1,
                    "drop_count": 0,
                    "short_label": "HW",
                    "weight": 1.0,
                },
                {
                    "type": "NoCredit",
                    "min_count": 0,
                    "drop_count": 0,
                    "short_label": "NC",
                    "weight": 0.0,
                },
            ],
            "GRADE_CUTOFFS": {
                "Pass": passing,
            },
        }
        self.course.set_grading_policy(self.grading_policy)
        self.store.update_item(self.course, 0)
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:thenault@gmail.com
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the classes for "scoped" node, i.e. which are opening a
new local scope in the language definition : Module, Class, Function (and
Lambda, GenExpr, DictComp and SetComp to some extent).
"""
from __future__ import with_statement
__doctype__ = "restructuredtext en"
import sys
from itertools import chain
from logilab.common.compat import builtins
from logilab.common.decorators import cached
from logilab.astng import BUILTINS_MODULE
from logilab.astng.exceptions import NotFoundError, NoDefault, \
ASTNGBuildingException, InferenceError
from logilab.astng.node_classes import Const, DelName, DelAttr, \
Dict, From, List, Name, Pass, Raise, Return, Tuple, Yield, \
are_exclusive, LookupMixIn, const_factory as cf, unpack_infer
from logilab.astng.bases import NodeNG, InferenceContext, Instance,\
YES, Generator, UnboundMethod, BoundMethod, _infer_stmts, copy_context, \
BUILTINS_NAME
from logilab.astng.mixins import FilterStmtsMixin
from logilab.astng.bases import Statement
from logilab.astng.manager import ASTNGManager
def remove_nodes(func, cls):
    """Wrap `func` so that nodes of type `cls` are stripped from its result.

    Raises NotFoundError when nothing survives the filtering.
    """
    def wrapper(*args, **kwargs):
        surviving = [node for node in func(*args, **kwargs)
                     if not isinstance(node, cls)]
        if not surviving:
            raise NotFoundError()
        return surviving
    return wrapper
def function_to_method(n, klass):
    """Turn a Function node into the appropriate (un)bound method of `klass`.

    Non-Function nodes and staticmethods are returned unchanged.
    """
    if not isinstance(n, Function):
        return n
    if n.type == 'classmethod':
        return BoundMethod(n, klass)
    if n.type == 'staticmethod':
        return n
    return UnboundMethod(n)
def std_special_attributes(self, name, add_locals=True):
    """Return the nodes for a standard special attribute of `self`.

    Handles __name__, __doc__ and __dict__; local definitions are appended
    when `add_locals` is true. Raises NotFoundError for any other name.
    """
    extra = self.locals if add_locals else {}
    if name == '__name__':
        special = [cf(self.name)]
    elif name == '__doc__':
        special = [cf(self.doc)]
    elif name == '__dict__':
        special = [Dict()]
    else:
        raise NotFoundError(name)
    return special + extra.get(name, [])
MANAGER = ASTNGManager()
def builtin_lookup(name):
    """lookup a name into the builtin module
    return the list of matching statements and the astng for the builtin
    module
    """
    builtin_astng = MANAGER.astng_from_module(builtins)
    if name == '__dict__':
        return builtin_astng, ()
    # missing names yield an empty tuple of statements
    return builtin_astng, builtin_astng.locals.get(name, ())
# TODO move this Mixin to mixins.py; problem: 'Function' in _scope_lookup
class LocalsDictNodeNG(LookupMixIn, NodeNG):
    """ this class provides locals handling common to Module, Function
    and Class nodes, including a dict like interface for direct access
    to locals information
    """
    # attributes below are set by the builder module or by raw factories
    # dictionary of locals with name as key and node defining the local as
    # value
    def qname(self):
        """return the 'qualified' name of the node, eg module.name,
        module.class.name ...
        """
        if self.parent is None:
            return self.name
        return '%s.%s' % (self.parent.frame().qname(), self.name)
    def frame(self):
        """return the first parent frame node (i.e. Module, Function or Class)
        """
        return self
    def scope(self):
        """return the first node defining a new scope (i.e. Module,
        Function, Class, Lambda but also GenExpr, DictComp and SetComp)
        """
        return self
    def _scope_lookup(self, node, name, offset=0):
        """XXX method for interfacing the scope lookup

        Look up `name` as seen from `node`: filter this scope's matching
        statements, otherwise climb to the parent scope (functions chain
        lexically, everything else jumps to the module) and finally to
        builtins. `offset` adjusts the line used for statement filtering.
        """
        try:
            stmts = node._filter_stmts(self.locals[name], self, offset)
        except KeyError:
            stmts = ()
        if stmts:
            return self, stmts
        if self.parent: # i.e. not Module
            # nested scope: if parent scope is a function, that's fine
            # else jump to the module
            pscope = self.parent.scope()
            if not pscope.is_function:
                pscope = pscope.root()
            return pscope.scope_lookup(node, name)
        return builtin_lookup(name) # Module
    def set_local(self, name, stmt):
        """define <name> in locals (<stmt> is the node defining the name)
        if the node is a Module node (i.e. has globals), add the name to
        globals
        if the name is already defined, ignore it
        """
        #assert not stmt in self.locals.get(name, ()), (self, stmt)
        self.locals.setdefault(name, []).append(stmt)
    __setitem__ = set_local
    def _append_node(self, child):
        """append a child, linking it in the tree"""
        self.body.append(child)
        child.parent = self
    def add_local_node(self, child_node, name=None):
        """append a child which should alter locals to the given node"""
        if name != '__class__':
            # add __class__ node as a child will cause infinite recursion later!
            self._append_node(child_node)
        self.set_local(name or child_node.name, child_node)
    def __getitem__(self, item):
        """method from the `dict` interface returning the first node
        associated with the given name in the locals dictionary
        :type item: str
        :param item: the name of the locally defined object
        :raises KeyError: if the name is not defined
        """
        return self.locals[item][0]
    def __iter__(self):
        """method from the `dict` interface returning an iterator on
        `self.keys()`
        """
        return iter(self.keys())
    def keys(self):
        """method from the `dict` interface returning a tuple containing
        locally defined names
        """
        return self.locals.keys()
    def values(self):
        """method from the `dict` interface returning a tuple containing
        locally defined nodes which are instance of `Function` or `Class`
        """
        return [self[key] for key in self.keys()]
    def items(self):
        """method from the `dict` interface returning a list of tuple
        containing each locally defined name with its associated node,
        which is an instance of `Function` or `Class`
        """
        return zip(self.keys(), self.values())
    def __contains__(self, name):
        # membership means "defined in this scope's locals"
        return name in self.locals
    has_key = __contains__
# Module #####################################################################
class Module(LocalsDictNodeNG):
    """class representing a Module node"""
    _astng_fields = ('body',)
    fromlineno = 0
    lineno = 0
    # attributes below are set by the builder module or by raw factories
    # the file from which as been extracted the astng representation. It may
    # be None if the representation has been built from a built-in module
    file = None
    # the module name
    name = None
    # boolean for astng built from source (i.e. ast)
    pure_python = None
    # boolean for package module
    package = None
    # dictionary of globals with name as key and node defining the global
    # as value
    globals = None
    # names of python special attributes (handled by getattr impl.)
    special_attributes = set(('__name__', '__doc__', '__file__', '__path__',
                              '__dict__'))
    # names of module attributes available through the global scope
    scope_attrs = set(('__name__', '__doc__', '__file__', '__path__'))
    def __init__(self, name, doc, pure_python=True):
        self.name = name
        self.doc = doc
        self.pure_python = pure_python
        # a module's locals ARE its globals
        self.locals = self.globals = {}
        self.body = []
    @property
    def file_stream(self):
        # NOTE: relies on the python 2 `file` builtin; None when built from
        # a built-in module (no source file)
        if self.file is not None:
            return file(self.file)
        return None
    def block_range(self, lineno):
        """return block line numbers.
        start from the beginning whatever the given lineno
        """
        return self.fromlineno, self.tolineno
    def scope_lookup(self, node, name, offset=0):
        # module-level special attributes win over builtins but not over
        # explicit local definitions
        if name in self.scope_attrs and not name in self.locals:
            try:
                return self, self.getattr(name)
            except NotFoundError:
                return self, ()
        return self._scope_lookup(node, name, offset)
    def pytype(self):
        return '%s.module' % BUILTINS_MODULE
    def display_type(self):
        return 'Module'
    def getattr(self, name, context=None, ignore_locals=False):
        if name in self.special_attributes:
            if name == '__file__':
                return [cf(self.file)] + self.locals.get(name, [])
            if name == '__path__' and self.package:
                return [List()] + self.locals.get(name, [])
            return std_special_attributes(self, name)
        if not ignore_locals and name in self.locals:
            return self.locals[name]
        if self.package:
            # for a package, an attribute may be a sub-module
            try:
                return [self.import_module(name, relative_only=True)]
            except ASTNGBuildingException:
                raise NotFoundError(name)
            except Exception:# XXX pylint tests never pass here; do we need it?
                import traceback
                traceback.print_exc()
        raise NotFoundError(name)
    # filter out DelName nodes (deleted names) from getattr results
    getattr = remove_nodes(getattr, DelName)
    def igetattr(self, name, context=None):
        """inferred getattr"""
        # set lookup name since this is necessary to infer on import nodes for
        # instance
        context = copy_context(context)
        context.lookupname = name
        try:
            return _infer_stmts(self.getattr(name, context), context, frame=self)
        except NotFoundError:
            raise InferenceError(name)
    def fully_defined(self):
        """return True if this module has been built from a .py file
        and so contains a complete representation including the code
        """
        return self.file is not None and self.file.endswith('.py')
    def statement(self):
        """return the first parent node marked as statement node
        consider a module as a statement...
        """
        return self
    def previous_sibling(self):
        """module has no sibling"""
        return
    def next_sibling(self):
        """module has no sibling"""
        return
    if sys.version_info < (2, 8):
        # python 2: absolute import is opt-in via the __future__ import
        def absolute_import_activated(self):
            for stmt in self.locals.get('absolute_import', ()):
                if isinstance(stmt, From) and stmt.modname == '__future__':
                    return True
            return False
    else:
        # python 3: absolute import is always active
        absolute_import_activated = lambda self: True
    def import_module(self, modname, relative_only=False, level=None):
        """import the given module considering self as context"""
        if relative_only and level is None:
            level = 0
        absmodname = self.relative_to_absolute_name(modname, level)
        try:
            return MANAGER.astng_from_module_name(absmodname)
        except ASTNGBuildingException:
            # we only want to import a sub module or package of this module,
            # skip here
            if relative_only:
                raise
        return MANAGER.astng_from_module_name(modname)
    def relative_to_absolute_name(self, modname, level):
        """return the absolute module name for a relative import.
        The relative import can be implicit or explicit.
        """
        # XXX this returns non sens when called on an absolute import
        # like 'pylint.checkers.logilab.astng.utils'
        # XXX doesn't return absolute name if self.name isn't absolute name
        if self.absolute_import_activated() and level is None:
            return modname
        if level:
            if self.package:
                level = level - 1
            package_name = self.name.rsplit('.', level)[0]
        elif self.package:
            package_name = self.name
        else:
            package_name = self.name.rsplit('.', 1)[0]
        if package_name:
            if not modname:
                return package_name
            return '%s.%s' % (package_name, modname)
        return modname
    def wildcard_import_names(self):
        """return the list of imported names when this module is 'wildcard
        imported'
        It doesn't include the '__builtins__' name which is added by the
        current CPython implementation of wildcard imports.
        """
        # take advantage of a living module if it exists
        try:
            living = sys.modules[self.name]
        except KeyError:
            pass
        else:
            try:
                return living.__all__
            except AttributeError:
                return [name for name in living.__dict__.keys()
                        if not name.startswith('_')]
        # else lookup the astng
        #
        # We separate the different steps of lookup in try/excepts
        # to avoid catching too many Exceptions
        # However, we can not analyse dynamically constructed __all__
        try:
            all = self['__all__']
        except KeyError:
            return [name for name in self.keys() if not name.startswith('_')]
        try:
            explicit = all.assigned_stmts().next()
        except InferenceError:
            return [name for name in self.keys() if not name.startswith('_')]
        except AttributeError:
            # not an assignment node
            # XXX infer?
            return [name for name in self.keys() if not name.startswith('_')]
        try:
            # should be a Tuple/List of constant string / 1 string not allowed
            return [const.value for const in explicit.elts]
        except AttributeError:
            return [name for name in self.keys() if not name.startswith('_')]
class ComprehensionScope(LocalsDictNodeNG):
    """base class for nodes introducing a comprehension scope"""
    def frame(self):
        # a comprehension is a scope but not a frame: delegate upward
        return self.parent.frame()
    scope_lookup = LocalsDictNodeNG._scope_lookup
class GenExpr(ComprehensionScope):
    """class representing a GenExpr (generator expression) node"""
    _astng_fields = ('elt', 'generators')
    def __init__(self):
        self.locals = {}
        self.elt = None
        self.generators = []
class DictComp(ComprehensionScope):
    """class representing a DictComp (dict comprehension) node"""
    _astng_fields = ('key', 'value', 'generators')
    def __init__(self):
        self.locals = {}
        self.key = None
        self.value = None
        self.generators = []
class SetComp(ComprehensionScope):
    """class representing a SetComp (set comprehension) node"""
    _astng_fields = ('elt', 'generators')
    def __init__(self):
        self.locals = {}
        self.elt = None
        self.generators = []
class _ListComp(NodeNG):
    """class representing a ListComp node"""
    _astng_fields = ('elt', 'generators')
    # set by the builder
    elt = None
    generators = None
# only python 3 list comprehensions introduce their own scope, hence the
# version-dependent base classes
if sys.version_info >= (3, 0):
    class ListComp(_ListComp, ComprehensionScope):
        """class representing a ListComp node"""
        def __init__(self):
            self.locals = {}
else:
    class ListComp(_ListComp):
        """class representing a ListComp node"""
# Function ###################################################################
class Lambda(LocalsDictNodeNG, FilterStmtsMixin):
    """class representing a Lambda node; also the base class of Function"""
    _astng_fields = ('args', 'body',)
    name = '<lambda>'
    # function's type, 'function' | 'method' | 'staticmethod' | 'classmethod'
    type = 'function'
    def __init__(self):
        self.locals = {}
        self.args = []
        self.body = []
    def pytype(self):
        if 'method' in self.type:
            return '%s.instancemethod' % BUILTINS_MODULE
        return '%s.function' % BUILTINS_MODULE
    def display_type(self):
        if 'method' in self.type:
            return 'Method'
        return 'Function'
    def callable(self):
        return True
    def argnames(self):
        """return a list of argument names"""
        if self.args.args: # maybe None with builtin functions
            names = _rec_get_names(self.args.args)
        else:
            names = []
        if self.args.vararg:
            names.append(self.args.vararg)
        if self.args.kwarg:
            names.append(self.args.kwarg)
        return names
    def infer_call_result(self, caller, context=None):
        """infer what a function is returning when called"""
        # a lambda's body is a single expression
        return self.body.infer(context)
    def scope_lookup(self, node, name, offset=0):
        if node in self.args.defaults:
            frame = self.parent.frame()
            # line offset to avoid that def func(f=func) resolve the default
            # value to the defined function
            offset = -1
        else:
            # check this is not used in function decorators
            frame = self
        return frame._scope_lookup(node, name, offset)
class Function(Statement, Lambda):
    """class representing a Function (def statement) node"""
    _astng_fields = ('decorators', 'args', 'body')
    special_attributes = set(('__name__', '__doc__', '__dict__'))
    is_function = True
    # attributes below are set by the builder module or by raw factories
    blockstart_tolineno = None
    decorators = None
    def __init__(self, name, doc):
        self.locals = {}
        self.args = []
        self.body = []
        self.decorators = None
        self.name = name
        self.doc = doc
        self.extra_decorators = []
        self.instance_attrs = {}
    def set_line_info(self, lastchild):
        self.fromlineno = self.lineno
        # lineno is the line number of the first decorator, we want the def statement lineno
        if self.decorators is not None:
            self.fromlineno += len(self.decorators.nodes)
        self.tolineno = lastchild.tolineno
        self.blockstart_tolineno = self.args.tolineno
    def block_range(self, lineno):
        """return block line numbers.
        start from the "def" position whatever the given lineno
        """
        return self.fromlineno, self.tolineno
    def getattr(self, name, context=None):
        """this method doesn't look in the instance_attrs dictionary since it's
        done by an Instance proxy at inference time.
        """
        if name == '__module__':
            return [cf(self.root().qname())]
        if name in self.instance_attrs:
            return self.instance_attrs[name]
        return std_special_attributes(self, name, False)
    def is_method(self):
        """return true if the function node should be considered as a method"""
        # check we are defined in a Class, because this is usually expected
        # (e.g. pylint...) when is_method() return True
        return self.type != 'function' and isinstance(self.parent.frame(), Class)
    def decoratornames(self):
        """return a list of decorator qualified names"""
        result = set()
        decoratornodes = []
        if self.decorators is not None:
            decoratornodes += self.decorators.nodes
        decoratornodes += self.extra_decorators
        for decnode in decoratornodes:
            for infnode in decnode.infer():
                result.add(infnode.qname())
        return result
    decoratornames = cached(decoratornames)
    def is_bound(self):
        """return true if the function is bound to an Instance or a class"""
        return self.type == 'classmethod'
    def is_abstract(self, pass_is_abstract=True):
        """return true if the method is abstract
        It's considered as abstract if the only statement is a raise of
        NotImplementError, or, if pass_is_abstract, a pass statement
        """
        for child_node in self.body:
            if isinstance(child_node, Raise):
                if child_node.raises_not_implemented():
                    return True
            if pass_is_abstract and isinstance(child_node, Pass):
                return True
            # NOTE(review): this `return False` is inside the loop, so only
            # the first statement of the body is ever inspected — kept as-is
            return False
        # empty function is the same as function with a single "pass" statement
        if pass_is_abstract:
            return True
    def is_generator(self):
        """return true if this is a generator function"""
        # XXX should be flagged, not computed
        try:
            return self.nodes_of_class(Yield, skip_klass=Function).next()
        except StopIteration:
            return False
    def infer_call_result(self, caller, context=None):
        """infer what a function is returning when called"""
        if self.is_generator():
            yield Generator(self)
            return
        returns = self.nodes_of_class(Return, skip_klass=Function)
        for returnnode in returns:
            if returnnode.value is None:
                yield Const(None)
            else:
                try:
                    for infered in returnnode.value.infer(context):
                        yield infered
                except InferenceError:
                    yield YES
def _rec_get_names(args, names=None):
    """Recursively collect argument names, flattening Tuple arguments."""
    collected = [] if names is None else names
    for arg in args:
        if isinstance(arg, Tuple):
            # python 2 tuple-unpacking parameter: recurse into its elements
            _rec_get_names(arg.elts, collected)
        else:
            collected.append(arg.name)
    return collected
# Class ######################################################################
def _class_type(klass, ancestors=None):
    """return a Class node type to differ metaclass, interface and exception
    from 'regular' classes

    Classification is by name ('type', '*Interface', '*Exception'), falling
    back to the first non-'class' type found among direct ancestors.
    The result is cached on klass._type.
    """
    # XXX we have to store ancestors in case we have a ancestor loop
    if klass._type is not None:
        return klass._type
    if klass.name == 'type':
        klass._type = 'metaclass'
    elif klass.name.endswith('Interface'):
        klass._type = 'interface'
    elif klass.name.endswith('Exception'):
        klass._type = 'exception'
    else:
        if ancestors is None:
            ancestors = set()
        if klass in ancestors:
            # XXX we are in loop ancestors, and have found no type
            klass._type = 'class'
            return 'class'
        ancestors.add(klass)
        # print >> sys.stderr, '_class_type', repr(klass)
        for base in klass.ancestors(recurs=False):
            if _class_type(base, ancestors) != 'class':
                klass._type = base.type
                break
    if klass._type is None:
        klass._type = 'class'
    return klass._type
def _iface_hdlr(iface_node):
"""a handler function used by interfaces to handle suspicious
interface nodes
"""
return True
class Class(Statement, LocalsDictNodeNG, FilterStmtsMixin):
# some of the attributes below are set by the builder module or
# by a raw factories
# a dictionary of class instances attributes
_astng_fields = ('decorators', 'bases', 'body') # name
decorators = None
special_attributes = set(('__name__', '__doc__', '__dict__', '__module__',
'__bases__', '__mro__', '__subclasses__'))
blockstart_tolineno = None
_type = None
type = property(_class_type,
doc="class'type, possible values are 'class' | "
"'metaclass' | 'interface' | 'exception'")
    def __init__(self, name, doc):
        # containers filled in later by the builder
        self.instance_attrs = {}
        self.locals = {}
        self.bases = []
        self.body = []
        self.name = name
        self.doc = doc
    def _newstyle_impl(self, context=None):
        """Compute and cache whether this is a new style class.

        When the flag was not set by the builder, a class is new style as
        soon as one of its direct ancestors is.
        """
        if context is None:
            context = InferenceContext()
        if self._newstyle is not None:
            # already computed (or set by the builder)
            return self._newstyle
        for base in self.ancestors(recurs=False, context=context):
            if base._newstyle_impl(context):
                self._newstyle = True
                break
        if self._newstyle is None:
            # no new style ancestor found: old style class
            self._newstyle = False
        return self._newstyle
_newstyle = None
newstyle = property(_newstyle_impl,
doc="boolean indicating if it's a new style class"
"or not")
    def set_line_info(self, lastchild):
        """Set fromlineno / blockstart_tolineno / tolineno from children.

        The class "header" is considered to end on the line of the last
        base-class expression, or on the ``class`` line itself when there
        are no bases.
        """
        self.fromlineno = self.lineno
        self.blockstart_tolineno = self.bases and self.bases[-1].tolineno or self.fromlineno
        if lastchild is not None:
            self.tolineno = lastchild.tolineno
        # else this is a class with only a docstring, then tolineno is (should be) already ok
def block_range(self, lineno):
"""return block line numbers.
start from the "class" position whatever the given lineno
"""
return self.fromlineno, self.tolineno
def pytype(self):
if self.newstyle:
return '%s.type' % BUILTINS_MODULE
return '%s.classobj' % BUILTINS_MODULE
def display_type(self):
return 'Class'
def callable(self):
return True
    def infer_call_result(self, caller, context=None):
        """infer what a class is returning when called: an Instance of it"""
        yield Instance(self)
def scope_lookup(self, node, name, offset=0):
if node in self.bases:
frame = self.parent.frame()
# line offset to avoid that class A(A) resolve the ancestor to
# the defined class
offset = -1
else:
frame = self
return frame._scope_lookup(node, name, offset)
    # list of parent class as a list of string (i.e. names as they appear
    # in the class definition) XXX bw compat
    def basenames(self):
        """Return the source text of each direct base-class expression."""
        return [bnode.as_string() for bnode in self.bases]
    # exposed as a read-only property; the function itself is shadowed
    basenames = property(basenames)
    def ancestors(self, recurs=True, context=None):
        """return an iterator on the node base classes in a prefixed
        depth first order

        :param recurs:
          boolean indicating if it should recurse or return direct
          ancestors only

        Each class is yielded at most once, even with diamond or cyclic
        inheritance (see the ``yielded`` set below).
        """
        # FIXME: should be possible to choose the resolution order
        # XXX inference make infinite loops possible here (see BaseTransformer
        # manipulation in the builder module for instance)
        yielded = set([self])
        if context is None:
            context = InferenceContext()
        for stmt in self.bases:
            with context.restore_path():
                try:
                    for baseobj in stmt.infer(context):
                        if not isinstance(baseobj, Class):
                            # base expression did not infer to a class: skip
                            continue
                        if baseobj in yielded:
                            continue # cf xxx above
                        yielded.add(baseobj)
                        yield baseobj
                        if recurs:
                            for grandpa in baseobj.ancestors(True, context):
                                if grandpa in yielded:
                                    continue # cf xxx above
                                yielded.add(grandpa)
                                yield grandpa
                except InferenceError:
                    # XXX log error ?
                    continue
def local_attr_ancestors(self, name, context=None):
"""return an iterator on astng representation of parent classes
which have <name> defined in their locals
"""
for astng in self.ancestors(context=context):
if name in astng:
yield astng
def instance_attr_ancestors(self, name, context=None):
"""return an iterator on astng representation of parent classes
which have <name> defined in their instance attribute dictionary
"""
for astng in self.ancestors(context=context):
if name in astng.instance_attrs:
yield astng
    def has_base(self, node):
        """Return True if `node` is among the direct base-class nodes."""
        return node in self.bases
    def local_attr(self, name, context=None):
        """return the list of assign node associated to name in this class
        locals or in its parents

        :raises `NotFoundError`:
          if no attribute with this name has been find in this class or
          its parent classes
        """
        try:
            return self.locals[name]
        except KeyError:
            # get it from the first parent implementing it, if any
            for class_node in self.local_attr_ancestors(name, context):
                return class_node.locals[name]
            raise NotFoundError(name)
    # wrap to filter out Del* nodes from the returned list (bw compat)
    local_attr = remove_nodes(local_attr, DelAttr)
    def instance_attr(self, name, context=None):
        """return the astng nodes associated to name in this class instance
        attributes dictionary and in its parents

        :raises `NotFoundError`:
          if no attribute with this name has been find in this class or
          its parent classes
        """
        # values assigned in this class body first
        values = self.instance_attrs.get(name, [])
        # get all values from parents
        for class_node in self.instance_attr_ancestors(name, context):
            values += class_node.instance_attrs[name]
        if not values:
            raise NotFoundError(name)
        return values
    # wrap to filter out Del* nodes from the returned list (bw compat)
    instance_attr = remove_nodes(instance_attr, DelAttr)
    def instanciate_class(self):
        """return an Instance proxy wrapping this Class node"""
        return Instance(self)
    def getattr(self, name, context=None):
        """Return the nodes bound to `name` on the class or its ancestors.

        This method doesn't look in the instance_attrs dictionary since it's
        done by an Instance proxy at inference time.  It may return a YES
        object if the attribute has not been actually found but a
        __getattr__ or __getattribute__ method is defined.

        :raises NotFoundError: when no value is found for `name`
        """
        values = self.locals.get(name, [])
        if name in self.special_attributes:
            if name == '__module__':
                return [cf(self.root().qname())] + values
            # FIXME : what is expected by passing the list of ancestors to cf:
            # you can just do [cf(tuple())] + values without breaking any test
            # this is ticket http://www.logilab.org/ticket/52785
            if name == '__bases__':
                return [cf(tuple(self.ancestors(recurs=False, context=context)))] + values
            # XXX need proper meta class handling + MRO implementation
            if name == '__mro__' and self.newstyle:
                # XXX mro is read-only but that's not our job to detect that
                return [cf(tuple(self.ancestors(recurs=True, context=context)))] + values
            return std_special_attributes(self, name)
        # don't modify the list in self.locals!
        values = list(values)
        for classnode in self.ancestors(recurs=True, context=context):
            values += classnode.locals.get(name, [])
        if not values:
            raise NotFoundError(name)
        return values
    def igetattr(self, name, context=None):
        """inferred getattr, need special treatment in class to handle
        descriptors

        :raises InferenceError: when `name` cannot be found nor handled
          by a dynamic getattr
        """
        # set lookup name since this is necessary to infer on import nodes for
        # instance
        context = copy_context(context)
        context.lookupname = name
        try:
            for infered in _infer_stmts(self.getattr(name, context), context,
                                        frame=self):
                # yield YES object instead of descriptors when necessary
                if not isinstance(infered, Const) and isinstance(infered, Instance):
                    try:
                        infered._proxied.getattr('__get__', context)
                    except NotFoundError:
                        # not a descriptor: yield the value itself
                        yield infered
                    else:
                        yield YES
                else:
                    yield function_to_method(infered, self)
        except NotFoundError:
            if not name.startswith('__') and self.has_dynamic_getattr(context):
                # class handle some dynamic attributes, return a YES object
                yield YES
            else:
                raise InferenceError(name)
    def has_dynamic_getattr(self, context=None):
        """return True if the class has a custom __getattr__ or
        __getattribute__ method
        """
        # need to explicitly handle optparse.Values (setattr is not detected)
        if self.name == 'Values' and self.root().name == 'optparse':
            return True
        try:
            self.getattr('__getattr__', context)
            return True
        except NotFoundError:
            #if self.newstyle: XXX cause an infinite recursion error
            try:
                getattribute = self.getattr('__getattribute__', context)[0]
                if getattribute.root().name != BUILTINS_NAME:
                    # class has a custom __getattribute__ defined
                    return True
            except NotFoundError:
                pass
        return False
def methods(self):
"""return an iterator on all methods defined in the class and
its ancestors
"""
done = {}
for astng in chain(iter((self,)), self.ancestors()):
for meth in astng.mymethods():
if meth.name in done:
continue
done[meth.name] = None
yield meth
def mymethods(self):
"""return an iterator on all methods defined in the class"""
for member in self.values():
if isinstance(member, Function):
yield member
def interfaces(self, herited=True, handler_func=_iface_hdlr):
"""return an iterator on interfaces implemented by the given
class node
"""
# FIXME: what if __implements__ = (MyIFace, MyParent.__implements__)...
try:
implements = Instance(self).getattr('__implements__')[0]
except NotFoundError:
return
if not herited and not implements.frame() is self:
return
found = set()
missing = False
for iface in unpack_infer(implements):
if iface is YES:
missing = True
continue
if not iface in found and handler_func(iface):
found.add(iface)
yield iface
if missing:
raise InferenceError() | unknown | codeparrot/codeparrot-clean | ||
"""Test utilities for writing Pulp tests
Part of functionalities of Pulp are defined in this module
and have utilities of single repository synchronization, single
sequential repository sync, sequential repository re-sync.
"""
import logging
from robottelo import ssh
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.repository import Repository
LOGGER = logging.getLogger(__name__)
class Pulp(object):
    """Performance Measurement of RH Satellite 6
    Pulp Synchronization functionality
    """

    @classmethod
    def repository_single_sync(cls, repo_id, repo_name, thread_id):
        """Synchronize a single repository.

        :param str repo_id: Repository id to be synchronized
        :param str repo_name: Repository name (logging only)
        :param thread_id: identifier of the calling thread/iteration
        :return: elapsed wall-clock time of the sync, 0 on failure
        :rtype: float
        """
        LOGGER.info(
            'Synchronize {0} by thread-{1}:'
            .format(repo_name, thread_id)
        )
        result = Repository.synchronize(
            {'id': repo_id},
            return_raw_response=True
        )
        if result.return_code != 0:
            LOGGER.error(
                'Sync repository {0} by thread-{1} failed!'
                .format(repo_name, thread_id)
            )
            return 0
        LOGGER.info(
            'Sync repository {0} by thread-{1} successful!'
            .format(repo_name, thread_id)
        )
        # timing information is written by `time` on stderr
        return cls.get_elapsed_time(result.stderr)

    @staticmethod
    def get_elapsed_time(stderr):
        """Retrieve the elapsed ("real") time from ``time -p`` output.

        Only the last ``real`` line is kept, as a single sync produces a
        single time point.  Returns 0 when no ``real`` line is present.
        """
        real_time = ''
        for line in stderr.split('\n'):
            if line.startswith('real'):
                real_time = line
        return 0 if real_time == '' else float(real_time.split(' ')[1])

    @staticmethod
    def get_enabled_repos(org_id):
        """Get all enabled repositories ids and names

        :return map_repo_name_id: The dictionary contains all enabled
            repositories in Satellite. Map repo-name as key, repo-id as value
        :raises ``RuntimeError`` if there's no enabled repository in
            default organization denoted by ``org_id``
        """
        LOGGER.info('Searching for enabled repositories by hammer CLI:')
        try:
            result = Repository.list(
                {'organization-id': org_id},
                per_page=False
            )
        except CLIReturnCodeError:
            raise RuntimeError(
                'No enabled repository found in organization {0}!'
                .format(org_id)
            )
        # map repository name with id
        map_repo_name_id = {}
        for repo in result:
            map_repo_name_id[repo['name']] = repo['id']
        return map_repo_name_id

    @classmethod
    def repositories_sequential_sync(
            cls,
            repo_names_list,
            map_repo_name_id,
            sync_iterations,
            savepoint=None):
        """Sync all repositories linearly, and repeat X times

        :param list repo_names_list: A list of targeting repository names
        :param dict map_repo_name_id: Maps repository name to repository id
        :param int sync_iterations: The number of times to repeat sync
        :param savepoint: db savepoint restored after each iteration;
            when None (re-sync case) no restore is performed
        :return time_result_dict_sync
        :rtype: dict
        """
        # Create a dictionary to store all timing results from each sync
        time_result_dict_sync = {}
        # repeat sequential sync X times
        for i in range(sync_iterations):
            # note: name key by thread to adapt to graph module
            key = 'thread-{0}'.format(i)
            time_result_dict_sync[key] = []
            # Sync each repo one-by-one and collect timing data
            for repo_name in repo_names_list:
                repo_id = map_repo_name_id.get(repo_name, None)
                if repo_id is None:
                    LOGGER.warning(
                        'Invalid repository name {}!'.format(repo_name)
                    )
                    continue
                LOGGER.debug(
                    'Sequential Sync {0} attempt {1}:'.format(repo_name, i)
                )
                # sync repository once at a time
                time_result_dict_sync[key].append(
                    cls.repository_single_sync(repo_id, repo_name, 'linear')
                )
            # BUG FIX: the original returned None here (a bare ``return``)
            # when ``savepoint`` was None, aborting after the first
            # iteration and discarding the promised timing dict.  For the
            # re-sync case we simply skip the db restore instead.
            if savepoint is not None:
                # restore database at the end of each iteration
                cls._restore_from_savepoint(savepoint)
        return time_result_dict_sync

    @staticmethod
    def _restore_from_savepoint(savepoint):
        """Restore the database from a named backup under /home/backup."""
        if savepoint == '':
            LOGGER.warning('No savepoint while continuing test!')
            return
        LOGGER.info('Reset db from /home/backup/{0}'.format(savepoint))
        ssh.command('./reset-db.sh /home/backup/{0}'.format(savepoint))
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import math
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name):  # pylint: disable=invalid-name
  """Import and return module `name`; log a warning and return None when
  the module is unavailable."""
  try:
    return importlib.import_module(name)
  except ImportError as e:
    tf_logging.warning("Could not import %s: %s" % (name, str(e)))
    return None
stats = try_import("scipy.stats")
class NormalTest(test.TestCase):
  """Tests for the Normal distribution (loc/scale parameterization)."""

  def setUp(self):
    # fixed seed so the sample-based tolerances below are reproducible
    self._rng = np.random.RandomState(123)

  def assertAllFinite(self, tensor):
    """Assert that every element of `tensor` is finite (no NaN/inf)."""
    is_finite = np.isfinite(self.evaluate(tensor))
    # NOTE(review): np.bool was removed in NumPy 1.24+; np.bool_ would be
    # needed on newer NumPy — confirm the pinned numpy version.
    all_true = np.ones_like(is_finite, dtype=np.bool)
    self.assertAllEqual(all_true, is_finite)

  def _testParamShapes(self, sample_shape, expected):
    """Check dynamic param_shapes and that sampling matches `expected`."""
    param_shapes = normal_lib.Normal.param_shapes(sample_shape)
    mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
    self.assertAllEqual(expected, self.evaluate(mu_shape))
    self.assertAllEqual(expected, self.evaluate(sigma_shape))
    mu = array_ops.zeros(mu_shape)
    sigma = array_ops.ones(sigma_shape)
    self.assertAllEqual(
        expected,
        self.evaluate(array_ops.shape(normal_lib.Normal(mu, sigma).sample())))

  def _testParamStaticShapes(self, sample_shape, expected):
    """Check static param_static_shapes against `expected`."""
    param_shapes = normal_lib.Normal.param_static_shapes(sample_shape)
    mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
    self.assertEqual(expected, mu_shape)
    self.assertEqual(expected, sigma_shape)

  @test_util.run_in_graph_and_eager_modes
  def testSampleLikeArgsGetDistDType(self):
    dist = normal_lib.Normal(0., 1.)
    self.assertEqual(dtypes.float32, dist.dtype)
    for method in ("log_prob", "prob", "log_cdf", "cdf",
                   "log_survival_function", "survival_function", "quantile"):
      self.assertEqual(dtypes.float32, getattr(dist, method)(1).dtype)

  @test_util.run_in_graph_and_eager_modes
  def testParamShapes(self):
    sample_shape = [10, 3, 4]
    self._testParamShapes(sample_shape, sample_shape)
    self._testParamShapes(constant_op.constant(sample_shape), sample_shape)

  @test_util.run_in_graph_and_eager_modes
  def testParamStaticShapes(self):
    sample_shape = [10, 3, 4]
    self._testParamStaticShapes(sample_shape, sample_shape)
    self._testParamStaticShapes(
        tensor_shape.TensorShape(sample_shape), sample_shape)

  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testNormalWithSoftplusScale(self):
    mu = array_ops.zeros((10, 3))
    rho = array_ops.ones((10, 3)) * -2.
    normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)
    self.assertAllEqual(self.evaluate(mu), self.evaluate(normal.loc))
    self.assertAllEqual(
        self.evaluate(nn_ops.softplus(rho)), self.evaluate(normal.scale))

  @test_util.run_in_graph_and_eager_modes
  def testNormalLogPDF(self):
    batch_size = 6
    mu = constant_op.constant([3.0] * batch_size)
    sigma = constant_op.constant([math.sqrt(10.0)] * batch_size)
    x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    log_pdf = normal.log_prob(x)
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(log_pdf).shape)
    self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
    pdf = normal.prob(x)
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(pdf).shape)
    self.assertAllEqual(normal.batch_shape, pdf.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(pdf).shape)
    # reference values need scipy, which is optional
    if not stats:
      return
    expected_log_pdf = stats.norm(self.evaluate(mu),
                                  self.evaluate(sigma)).logpdf(x)
    self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
    self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))

  @test_util.run_in_graph_and_eager_modes
  def testNormalLogPDFMultidimensional(self):
    batch_size = 6
    mu = constant_op.constant([[3.0, -3.0]] * batch_size)
    sigma = constant_op.constant(
        [[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
    x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    log_pdf = normal.log_prob(x)
    log_pdf_values = self.evaluate(log_pdf)
    self.assertEqual(log_pdf.get_shape(), (6, 2))
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(log_pdf).shape)
    self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
    pdf = normal.prob(x)
    pdf_values = self.evaluate(pdf)
    self.assertEqual(pdf.get_shape(), (6, 2))
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), pdf_values.shape)
    self.assertAllEqual(normal.batch_shape, pdf.get_shape())
    self.assertAllEqual(normal.batch_shape, pdf_values.shape)
    if not stats:
      return
    expected_log_pdf = stats.norm(self.evaluate(mu),
                                  self.evaluate(sigma)).logpdf(x)
    self.assertAllClose(expected_log_pdf, log_pdf_values)
    self.assertAllClose(np.exp(expected_log_pdf), pdf_values)

  @test_util.run_in_graph_and_eager_modes
  def testNormalCDF(self):
    batch_size = 50
    mu = self._rng.randn(batch_size)
    sigma = self._rng.rand(batch_size) + 1.0
    x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    cdf = normal.cdf(x)
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(cdf).shape)
    self.assertAllEqual(normal.batch_shape, cdf.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)
    if not stats:
      return
    expected_cdf = stats.norm(mu, sigma).cdf(x)
    self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0)

  @test_util.run_in_graph_and_eager_modes
  def testNormalSurvivalFunction(self):
    batch_size = 50
    mu = self._rng.randn(batch_size)
    sigma = self._rng.rand(batch_size) + 1.0
    x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    sf = normal.survival_function(x)
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), sf.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(sf).shape)
    self.assertAllEqual(normal.batch_shape, sf.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)
    if not stats:
      return
    expected_sf = stats.norm(mu, sigma).sf(x)
    self.assertAllClose(expected_sf, self.evaluate(sf), atol=0)

  @test_util.run_in_graph_and_eager_modes
  def testNormalLogCDF(self):
    batch_size = 50
    mu = self._rng.randn(batch_size)
    sigma = self._rng.rand(batch_size) + 1.0
    # far-left tail values exercise the log-CDF's numerical stability
    x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    cdf = normal.log_cdf(x)
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(cdf).shape)
    self.assertAllEqual(normal.batch_shape, cdf.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)
    if not stats:
      return
    expected_cdf = stats.norm(mu, sigma).logcdf(x)
    self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0, rtol=1e-3)

  def testFiniteGradientAtDifficultPoints(self):
    for dtype in [np.float32, np.float64]:
      g = ops.Graph()
      with g.as_default():
        mu = variables.Variable(dtype(0.0))
        sigma = variables.Variable(dtype(1.0))
        dist = normal_lib.Normal(loc=mu, scale=sigma)
        # extreme tail points where naive formulas under/overflow
        x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
        for func in [
            dist.cdf, dist.log_cdf, dist.survival_function,
            dist.log_survival_function, dist.log_prob, dist.prob
        ]:
          value = func(x)
          grads = gradients_impl.gradients(value, [mu, sigma])
          with self.session(graph=g):
            variables.global_variables_initializer().run()
            self.assertAllFinite(value)
            self.assertAllFinite(grads[0])
            self.assertAllFinite(grads[1])

  @test_util.run_in_graph_and_eager_modes
  def testNormalLogSurvivalFunction(self):
    batch_size = 50
    mu = self._rng.randn(batch_size)
    sigma = self._rng.rand(batch_size) + 1.0
    x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    sf = normal.log_survival_function(x)
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), sf.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(sf).shape)
    self.assertAllEqual(normal.batch_shape, sf.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)
    if not stats:
      return
    expected_sf = stats.norm(mu, sigma).logsf(x)
    self.assertAllClose(expected_sf, self.evaluate(sf), atol=0, rtol=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def testNormalEntropyWithScalarInputs(self):
    # Scipy.stats.norm cannot deal with the shapes in the other test.
    mu_v = 2.34
    sigma_v = 4.56
    normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
    entropy = normal.entropy()
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(entropy).shape)
    self.assertAllEqual(normal.batch_shape, entropy.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)
    # scipy.stats.norm cannot deal with these shapes.
    if not stats:
      return
    expected_entropy = stats.norm(mu_v, sigma_v).entropy()
    self.assertAllClose(expected_entropy, self.evaluate(entropy))

  @test_util.run_in_graph_and_eager_modes
  def testNormalEntropy(self):
    mu_v = np.array([1.0, 1.0, 1.0])
    sigma_v = np.array([[1.0, 2.0, 3.0]]).T
    normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
    # scipy.stats.norm cannot deal with these shapes.
    sigma_broadcast = mu_v * sigma_v
    # closed-form entropy of a normal: 0.5*log(2*pi*e*sigma^2)
    expected_entropy = 0.5 * np.log(2 * np.pi * np.exp(1) * sigma_broadcast**2)
    entropy = normal.entropy()
    np.testing.assert_allclose(expected_entropy, self.evaluate(entropy))
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(entropy).shape)
    self.assertAllEqual(normal.batch_shape, entropy.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)

  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testNormalMeanAndMode(self):
    # Mu will be broadcast to [7, 7, 7].
    mu = [7.]
    sigma = [11., 12., 13.]
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    self.assertAllEqual((3,), normal.mean().get_shape())
    self.assertAllEqual([7., 7, 7], self.evaluate(normal.mean()))
    self.assertAllEqual((3,), normal.mode().get_shape())
    self.assertAllEqual([7., 7, 7], self.evaluate(normal.mode()))

  @test_util.run_in_graph_and_eager_modes
  def testNormalQuantile(self):
    batch_size = 52
    mu = self._rng.randn(batch_size)
    sigma = self._rng.rand(batch_size) + 1.0
    p = np.linspace(0., 1.0, batch_size - 2).astype(np.float64)
    # Quantile performs piecewise rational approximation so adding some
    # special input values to make sure we hit all the pieces.
    p = np.hstack((p, np.exp(-33), 1. - np.exp(-33)))
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    x = normal.quantile(p)
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()), x.get_shape())
    self.assertAllEqual(
        self.evaluate(normal.batch_shape_tensor()),
        self.evaluate(x).shape)
    self.assertAllEqual(normal.batch_shape, x.get_shape())
    self.assertAllEqual(normal.batch_shape, self.evaluate(x).shape)
    if not stats:
      return
    expected_x = stats.norm(mu, sigma).ppf(p)
    self.assertAllClose(expected_x, self.evaluate(x), atol=0.)

  def _baseQuantileFiniteGradientAtDifficultPoints(self, dtype):
    """Shared body: quantile gradients stay finite at 0/1 boundaries."""
    g = ops.Graph()
    with g.as_default():
      mu = variables.Variable(dtype(0.0))
      sigma = variables.Variable(dtype(1.0))
      dist = normal_lib.Normal(loc=mu, scale=sigma)
      p = variables.Variable(
          np.array([0.,
                    np.exp(-32.), np.exp(-2.),
                    1. - np.exp(-2.), 1. - np.exp(-32.),
                    1.]).astype(dtype))
      value = dist.quantile(p)
      grads = gradients_impl.gradients(value, [mu, p])
      with self.cached_session(graph=g):
        variables.global_variables_initializer().run()
        self.assertAllFinite(grads[0])
        self.assertAllFinite(grads[1])

  def testQuantileFiniteGradientAtDifficultPointsFloat32(self):
    self._baseQuantileFiniteGradientAtDifficultPoints(np.float32)

  def testQuantileFiniteGradientAtDifficultPointsFloat64(self):
    self._baseQuantileFiniteGradientAtDifficultPoints(np.float64)

  @test_util.run_in_graph_and_eager_modes
  def testNormalVariance(self):
    # sigma will be broadcast to [7, 7, 7]
    mu = [1., 2., 3.]
    sigma = [7.]
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    self.assertAllEqual((3,), normal.variance().get_shape())
    self.assertAllEqual([49., 49, 49], self.evaluate(normal.variance()))

  @test_util.run_in_graph_and_eager_modes
  def testNormalStandardDeviation(self):
    # sigma will be broadcast to [7, 7, 7]
    mu = [1., 2., 3.]
    sigma = [7.]
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    self.assertAllEqual((3,), normal.stddev().get_shape())
    self.assertAllEqual([7., 7, 7], self.evaluate(normal.stddev()))

  @test_util.run_in_graph_and_eager_modes
  def testNormalSample(self):
    mu = constant_op.constant(3.0)
    sigma = constant_op.constant(math.sqrt(3.0))
    mu_v = 3.0
    sigma_v = np.sqrt(3.0)
    n = constant_op.constant(100000)
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    samples = normal.sample(n)
    sample_values = self.evaluate(samples)
    # Note that the standard error for the sample mean is ~ sigma / sqrt(n).
    # The sample variance similarly is dependent on sigma and n.
    # Thus, the tolerances below are very sensitive to number of samples
    # as well as the variances chosen.
    self.assertEqual(sample_values.shape, (100000,))
    self.assertAllClose(sample_values.mean(), mu_v, atol=1e-1)
    self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)
    expected_samples_shape = tensor_shape.TensorShape(
        [self.evaluate(n)]).concatenate(
            tensor_shape.TensorShape(
                self.evaluate(normal.batch_shape_tensor())))
    self.assertAllEqual(expected_samples_shape, samples.get_shape())
    self.assertAllEqual(expected_samples_shape, sample_values.shape)
    expected_samples_shape = (
        tensor_shape.TensorShape([self.evaluate(n)]).concatenate(
            normal.batch_shape))
    self.assertAllEqual(expected_samples_shape, samples.get_shape())
    self.assertAllEqual(expected_samples_shape, sample_values.shape)

  def testNormalFullyReparameterized(self):
    mu = constant_op.constant(4.0)
    sigma = constant_op.constant(3.0)
    with backprop.GradientTape() as tape:
      tape.watch(mu)
      tape.watch(sigma)
      normal = normal_lib.Normal(loc=mu, scale=sigma)
      samples = normal.sample(100)
    grad_mu, grad_sigma = tape.gradient(samples, [mu, sigma])
    # reparameterized sampling must propagate gradients to both params
    self.assertIsNotNone(grad_mu)
    self.assertIsNotNone(grad_sigma)

  @test_util.run_in_graph_and_eager_modes
  def testNormalSampleMultiDimensional(self):
    batch_size = 2
    mu = constant_op.constant([[3.0, -3.0]] * batch_size)
    sigma = constant_op.constant(
        [[math.sqrt(2.0), math.sqrt(3.0)]] * batch_size)
    mu_v = [3.0, -3.0]
    sigma_v = [np.sqrt(2.0), np.sqrt(3.0)]
    n = constant_op.constant(100000)
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    samples = normal.sample(n)
    sample_values = self.evaluate(samples)
    # Note that the standard error for the sample mean is ~ sigma / sqrt(n).
    # The sample variance similarly is dependent on sigma and n.
    # Thus, the tolerances below are very sensitive to number of samples
    # as well as the variances chosen.
    self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
    self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-1)
    self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
    self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-1)
    self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)
    expected_samples_shape = tensor_shape.TensorShape(
        [self.evaluate(n)]).concatenate(
            tensor_shape.TensorShape(
                self.evaluate(normal.batch_shape_tensor())))
    self.assertAllEqual(expected_samples_shape, samples.get_shape())
    self.assertAllEqual(expected_samples_shape, sample_values.shape)
    expected_samples_shape = (
        tensor_shape.TensorShape([self.evaluate(n)]).concatenate(
            normal.batch_shape))
    self.assertAllEqual(expected_samples_shape, samples.get_shape())
    self.assertAllEqual(expected_samples_shape, sample_values.shape)

  @test_util.run_in_graph_and_eager_modes
  def testNegativeSigmaFails(self):
    with self.assertRaisesOpError("Condition x > 0 did not hold"):
      normal = normal_lib.Normal(
          loc=[1.], scale=[-5.], validate_args=True, name="G")
      self.evaluate(normal.mean())

  @test_util.run_in_graph_and_eager_modes
  def testNormalShape(self):
    mu = constant_op.constant([-3.0] * 5)
    sigma = constant_op.constant(11.0)
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    self.assertEqual(self.evaluate(normal.batch_shape_tensor()), [5])
    self.assertEqual(normal.batch_shape, tensor_shape.TensorShape([5]))
    self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])
    self.assertEqual(normal.event_shape, tensor_shape.TensorShape([]))

  @test_util.run_deprecated_v1
  def testNormalShapeWithPlaceholders(self):
    mu = array_ops.placeholder(dtype=dtypes.float32)
    sigma = array_ops.placeholder(dtype=dtypes.float32)
    normal = normal_lib.Normal(loc=mu, scale=sigma)
    with self.cached_session() as sess:
      # get_batch_shape should return an "<unknown>" tensor.
      self.assertEqual(normal.batch_shape, tensor_shape.TensorShape(None))
      self.assertEqual(normal.event_shape, ())
      self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])
      self.assertAllEqual(
          sess.run(normal.batch_shape_tensor(),
                   feed_dict={mu: 5.0,
                              sigma: [1.0, 2.0]}), [2])

  @test_util.run_in_graph_and_eager_modes
  def testNormalNormalKL(self):
    batch_size = 6
    mu_a = np.array([3.0] * batch_size)
    sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])
    mu_b = np.array([-3.0] * batch_size)
    sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
    n_a = normal_lib.Normal(loc=mu_a, scale=sigma_a)
    n_b = normal_lib.Normal(loc=mu_b, scale=sigma_b)
    kl = kullback_leibler.kl_divergence(n_a, n_b)
    kl_val = self.evaluate(kl)
    # closed-form KL between two univariate normals
    kl_expected = ((mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (
        (sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))
    self.assertEqual(kl.get_shape(), (batch_size,))
    self.assertAllClose(kl_val, kl_expected)
# Fix: the original final line carried dataset-export residue
# ("| unknown | ...") fused after the call, which broke the syntax.
if __name__ == "__main__":
  test.main()
#include "parts.h"
static PyObject*
test_gc_control(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    /* Exercise the PyGC_Enable/PyGC_Disable/PyGC_IsEnabled C API:
     * every toggle must return the previous collector state, and
     * PyGC_IsEnabled must reflect the most recent toggle.  The original
     * enabled state is restored before returning. */
    int orig_enabled = PyGC_IsEnabled();
    const char* msg = "ok";   /* label of the step reported on failure */
    int old_state;
    old_state = PyGC_Enable();
    msg = "Enable(1)";
    if (old_state != orig_enabled) {
        goto failed;
    }
    msg = "IsEnabled(1)";
    if (!PyGC_IsEnabled()) {
        goto failed;
    }
    old_state = PyGC_Disable();
    msg = "disable(2)";
    if (!old_state) {
        goto failed;
    }
    msg = "IsEnabled(2)";
    if (PyGC_IsEnabled()) {
        goto failed;
    }
    old_state = PyGC_Enable();
    msg = "enable(3)";
    if (old_state) {
        goto failed;
    }
    msg = "IsEnabled(3)";
    if (!PyGC_IsEnabled()) {
        goto failed;
    }
    /* restore the caller's original state when it was "disabled" */
    if (!orig_enabled) {
        old_state = PyGC_Disable();
        msg = "disable(4)";
        if (old_state) {
            goto failed;
        }
        msg = "IsEnabled(4)";
        if (PyGC_IsEnabled()) {
            goto failed;
        }
    }
    Py_RETURN_NONE;
failed:
    /* Try to clean up if we can. */
    if (orig_enabled) {
        PyGC_Enable();
    } else {
        PyGC_Disable();
    }
    PyErr_Format(PyExc_ValueError, "GC control failed in %s", msg);
    return NULL;
}
/* Strip GC support from a heap type (for testing only).
 *
 * Clears Py_TPFLAGS_HAVE_GC and the GC-related slots so instances are
 * allocated/freed as plain (untracked) objects.  Returns a new reference
 * to the type, or raises TypeError if `obj` is not a heap type.
 */
static PyObject *
without_gc(PyObject *Py_UNUSED(self), PyObject *obj)
{
    PyTypeObject *tp = (PyTypeObject*)obj;
    if (!PyType_Check(obj) || !PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE)) {
        return PyErr_Format(PyExc_TypeError, "heap type expected, got %R", obj);
    }
    if (PyType_IS_GC(tp)) {
        // Don't try this at home, kids:
        // Bitwise clear instead of subtraction: `-=` only works when the
        // flag is known set; `&= ~flag` is unconditionally safe.
        tp->tp_flags &= ~Py_TPFLAGS_HAVE_GC;
        tp->tp_free = PyObject_Free;
        tp->tp_traverse = NULL;
        tp->tp_clear = NULL;
    }
    assert(!PyType_IS_GC(tp));
    return Py_NewRef(obj);
}
/* tp_del replacement installed by with_tp_del(): temporarily resurrects
 * the object, calls its `__tp_del__` attribute (if any), then undoes the
 * resurrection, mirroring the legacy tp_del finalization protocol. */
static void
slot_tp_del(PyObject *self)
{
    PyObject *del, *res;

    /* Temporarily resurrect the object. */
    assert(Py_REFCNT(self) == 0);
    Py_SET_REFCNT(self, 1);

    /* Save the current exception, if any. */
    PyObject *exc = PyErr_GetRaisedException();

    PyObject *tp_del = PyUnicode_InternFromString("__tp_del__");
    if (tp_del == NULL) {
        PyErr_FormatUnraisable("Exception ignored while deallocating");
        PyErr_SetRaisedException(exc);
        return;
    }
    /* Execute __del__ method, if any. */
    del = _PyType_LookupRef(Py_TYPE(self), tp_del);
    Py_DECREF(tp_del);
    if (del != NULL) {
        res = PyObject_CallOneArg(del, self);
        if (res == NULL) {
            PyErr_FormatUnraisable("Exception ignored while calling "
                                   "deallocator %R", del);
        }
        else {
            Py_DECREF(res);
        }
        /* Bug fix: release `del` only after it has been formatted into the
         * unraisable message above; decrefing first could drop the last
         * reference and make the %R format a use-after-free. */
        Py_DECREF(del);
    }

    /* Restore the saved exception. */
    PyErr_SetRaisedException(exc);

    /* Undo the temporary resurrection; can't use DECREF here, it would
     * cause a recursive call.
     */
    assert(Py_REFCNT(self) > 0);
    Py_SET_REFCNT(self, Py_REFCNT(self) - 1);
    if (Py_REFCNT(self) == 0) {
        /* this is the normal path out */
        return;
    }

    /* __del__ resurrected it!  Make it look like the original Py_DECREF
     * never happened.
     */
    {
        _Py_ResurrectReference(self);
    }

    assert(!PyType_IS_GC(Py_TYPE(self)) || PyObject_GC_IsTracked(self));
}
/* Install slot_tp_del as the tp_del slot of a heap type and return a new
 * reference to it; raises TypeError for anything but a heap type. */
static PyObject *
with_tp_del(PyObject *self, PyObject *args)
{
    PyObject *obj;
    if (!PyArg_ParseTuple(args, "O:with_tp_del", &obj)) {
        return NULL;
    }
    PyTypeObject *tp = (PyTypeObject *)obj;
    if (!PyType_Check(obj) || !PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE)) {
        PyErr_Format(PyExc_TypeError,
                     "heap type expected, got %R", obj);
        return NULL;
    }
    tp->tp_del = slot_tp_del;
    return Py_NewRef(obj);
}
/* Callback state for gc_visit_callback_basic: records whether `target`
 * was seen during a PyUnstable_GC_VisitObjects() walk. */
struct gc_visit_state_basic {
    PyObject *target;   /* object to look for (borrowed reference) */
    int found;          /* set to 1 once `target` is visited */
};
/* Visitor: keep walking (return 1) until the target object is seen,
 * then record the hit and stop the walk (return 0). */
static int
gc_visit_callback_basic(PyObject *obj, void *arg)
{
    struct gc_visit_state_basic *state = (struct gc_visit_state_basic *)arg;
    if (obj != state->target) {
        return 1;
    }
    state->found = 1;
    return 0;
}
/* A freshly created (GC-tracked) list must show up in a full
 * PyUnstable_GC_VisitObjects() walk. */
static PyObject *
test_gc_visit_objects_basic(PyObject *Py_UNUSED(self),
                            PyObject *Py_UNUSED(ignored))
{
    struct gc_visit_state_basic state;
    PyObject *probe = PyList_New(0);
    if (probe == NULL) {
        return NULL;
    }
    state.target = probe;
    state.found = 0;

    PyUnstable_GC_VisitObjects(gc_visit_callback_basic, &state);
    Py_DECREF(probe);

    if (state.found) {
        Py_RETURN_NONE;
    }
    PyErr_SetString(
        PyExc_AssertionError,
        "test_gc_visit_objects_basic: Didn't find live list");
    return NULL;
}
/* Visitor: count visited objects via *arg and request an early stop
 * (return 0) on exactly the second call. */
static int
gc_visit_callback_exit_early(PyObject *obj, void *arg)
{
    int *count = (int *)arg;
    ++*count;
    return (*count == 2) ? 0 : 1;
}
/* The visitor must stop as soon as a callback returns 0: the callback
 * returns 0 on its second call, so exactly two objects get visited. */
static PyObject *
test_gc_visit_objects_exit_early(PyObject *Py_UNUSED(self),
                                 PyObject *Py_UNUSED(ignored))
{
    int visited_i = 0;
    PyUnstable_GC_VisitObjects(gc_visit_callback_exit_early, &visited_i);
    if (visited_i != 2) {
        PyErr_SetString(
            PyExc_AssertionError,
            "test_gc_visit_objects_exit_early: did not exit when expected");
        /* Bug fix: must return NULL once an exception is set; returning
         * None with a live exception makes the interpreter raise
         * SystemError ("returned a result with an error set"). */
        return NULL;
    }
    Py_RETURN_NONE;
}
/* Test object with no Python-visible fields; one PyObject* of "extra"
 * out-of-band storage is allocated past tp_basicsize by
 * obj_extra_data_new() via PyUnstable_Object_GC_NewWithExtraData(). */
typedef struct {
    PyObject_HEAD
} ObjExtraData;
/* tp_new: allocate the instance with room for one extra PyObject* past
 * the struct, then start GC tracking it.
 * NOTE(review): assumes the extra storage starts out zeroed — confirm
 * against the PyUnstable_Object_GC_NewWithExtraData contract. */
static PyObject *
obj_extra_data_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    PyObject *op = PyUnstable_Object_GC_NewWithExtraData(
        type, sizeof(PyObject *));
    if (op == NULL) {
        return PyErr_NoMemory();
    }
    PyObject_GC_Track(op);
    return op;
}
/* Return the address of the extra PyObject* slot that obj_extra_data_new()
 * allocated immediately past the instance struct (tp_basicsize bytes in). */
static PyObject **
obj_extra_data_get_extra_storage(PyObject *self)
{
    return (PyObject **)((char *)self + Py_TYPE(self)->tp_basicsize);
}
/* Getter for the `extra` attribute: None when the slot is empty,
 * otherwise a new reference to the stored object. */
static PyObject *
obj_extra_data_get(PyObject *self, void *Py_UNUSED(ignored))
{
    PyObject *stored = *obj_extra_data_get_extra_storage(self);
    if (stored == NULL) {
        Py_RETURN_NONE;
    }
    return Py_NewRef(stored);
}
/* Setter for the `extra` attribute stored past the object struct;
 * `newval == NULL` (i.e. `del obj.extra`) clears the slot.
 *
 * Robustness: store the new value before releasing the old one, so the
 * slot never holds a stale pointer if deallocating the old value runs
 * arbitrary code (e.g. a __del__) that re-reads the attribute. */
static int
obj_extra_data_set(PyObject *self, PyObject *newval, void *Py_UNUSED(ignored))
{
    PyObject **extra_storage = obj_extra_data_get_extra_storage(self);
    PyObject *old = *extra_storage;
    *extra_storage = (newval != NULL) ? Py_NewRef(newval) : NULL;
    Py_XDECREF(old);
    return 0;
}
/* Expose the out-of-band PyObject* as a readable/writable/deletable
 * attribute named "extra". */
static PyGetSetDef obj_extra_data_getset[] = {
    {"extra", obj_extra_data_get, obj_extra_data_set, NULL},
    {NULL}
};
/* tp_traverse: report the out-of-band reference to the GC so reference
 * cycles running through `extra` remain collectable. */
static int
obj_extra_data_traverse(PyObject *self, visitproc visit, void *arg)
{
    PyObject **extra_storage = obj_extra_data_get_extra_storage(self);
    PyObject *value = *extra_storage;
    Py_VISIT(value);  /* no-op when the slot is NULL */
    return 0;
}
/* tp_clear: drop the out-of-band reference (also reused by dealloc). */
static int
obj_extra_data_clear(PyObject *self)
{
    PyObject **extra_storage = obj_extra_data_get_extra_storage(self);
    Py_CLEAR(*extra_storage);
    return 0;
}
/* tp_dealloc: order matters — untrack first, release the extra
 * reference, free the memory, and finally drop the strong reference
 * that heap-type instances hold on their type. */
static void
obj_extra_data_dealloc(PyObject *self)
{
    PyTypeObject *tp = Py_TYPE(self);
    PyObject_GC_UnTrack(self);
    obj_extra_data_clear(self);
    tp->tp_free(self);
    Py_DECREF(tp);
}
/* Slot table wiring the ObjExtraData callbacks into the heap type. */
static PyType_Slot ObjExtraData_Slots[] = {
    {Py_tp_getset, obj_extra_data_getset},
    {Py_tp_dealloc, obj_extra_data_dealloc},
    {Py_tp_traverse, obj_extra_data_traverse},
    {Py_tp_clear, obj_extra_data_clear},
    {Py_tp_new, obj_extra_data_new},
    {Py_tp_free, PyObject_GC_Del},
    {0, NULL},
};
/* Spec for the GC-enabled, subclassable _testcapi.ObjExtraData heap type;
 * note basicsize excludes the extra PyObject* slot allocated at runtime. */
static PyType_Spec ObjExtraData_TypeSpec = {
    .name = "_testcapi.ObjExtraData",
    .basicsize = sizeof(ObjExtraData),
    .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
    .slots = ObjExtraData_Slots,
};
/* Module-level test functions registered by _PyTestCapi_Init_GC().
 * Consistency fix: the first entry now spells out the NULL ml_doc member
 * like its siblings instead of relying on implicit zero-initialization. */
static PyMethodDef test_methods[] = {
    {"test_gc_control", test_gc_control, METH_NOARGS, NULL},
    {"test_gc_visit_objects_basic", test_gc_visit_objects_basic, METH_NOARGS, NULL},
    {"test_gc_visit_objects_exit_early", test_gc_visit_objects_exit_early, METH_NOARGS, NULL},
    {"without_gc", without_gc, METH_O, NULL},
    {"with_tp_del", with_tp_del, METH_VARARGS, NULL},
    {NULL}
};
/* Module init hook: register the test functions and the ObjExtraData
 * type on `mod`.  Returns 0 on success, -1 with an exception set on
 * failure. */
int _PyTestCapi_Init_GC(PyObject *mod)
{
    /* Bug fix: PyModule_AddFunctions was previously called twice,
     * registering every method a second time. */
    if (PyModule_AddFunctions(mod, test_methods) < 0) {
        return -1;
    }

    PyObject *ObjExtraData_Type = PyType_FromModuleAndSpec(
        mod, &ObjExtraData_TypeSpec, NULL);
    if (ObjExtraData_Type == NULL) {  /* idiomatic NULL check, was `== 0` */
        return -1;
    }
    int ret = PyModule_AddType(mod, (PyTypeObject*)ObjExtraData_Type);
    Py_DECREF(ObjExtraData_Type);
    if (ret < 0) {
        return ret;
    }
    return 0;
} | c | github | https://github.com/python/cpython | Modules/_testcapi/gc.c
import vtk
from vtk.test import Testing
# Data from our friends at Sandia
# Build the input point cloud: two skewed boxes plus connecting faces.
points = vtk.vtkPoints()
for x, y, z in [
    (0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0),
    (0, 0, 5), (1, 0, 4), (1, 1, 4), (0, 1, 5),
    (5, 0, 7), (5, 0, 6), (5, 1, 6), (5, 1, 7),
    (11, 1, 5), (10, 1, 4), (10, 0, 4), (11, 0, 5),
    (10, 0, 0), (11, 0, 0), (11, 1, 0), (10, 1, 0),
]:
    points.InsertNextPoint(x, y, z)

profile = vtk.vtkPolyData()
profile.SetPoints(points)

# Triangulate the cloud with an alpha-shape Delaunay filter: keep alpha
# tetrahedra, triangles and vertices, but discard alpha lines.
del1 = vtk.vtkDelaunay3D()
del1.SetInputData(profile)
del1.SetTolerance(0.01)
del1.SetAlpha(2.8)
del1.AlphaTetsOn()
del1.AlphaTrisOn()
del1.AlphaLinesOff()
del1.AlphaVertsOn()

# Idiom fix: the original named this variable `map`, shadowing the
# builtin `map`.
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(del1.GetOutputPort())

triangulation = vtk.vtkActor()
triangulation.SetMapper(mapper)
triangulation.GetProperty().SetColor(1, 0, 0)

# Create graphics stuff: renderer, window, and interactor.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actors to the renderer, set the background and size.
ren1.AddActor(triangulation)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(250, 250)

cam1 = ren1.GetActiveCamera()
cam1.SetFocalPoint(0, 0, 0)
cam1.SetPosition(1, 1, 1)
ren1.ResetCamera()

# render the image
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script -- | unknown | codeparrot/codeparrot-clean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.