id
stringlengths 23
25
| content
stringlengths 1.16k
88k
| max_stars_repo_path
stringlengths 12
48
|
|---|---|---|
codereval_python_data_1
|
Hydrator for `Time` and `LocalTime` values.
:param nanoseconds:
:param tz:
:return: Time
def hydrate_time(nanoseconds, tz=None):
    """ Hydrator for `Time` and `LocalTime` values.

    Splits a nanosecond count since midnight into clock components and,
    when an offset is given, attaches a fixed-offset timezone.

    :param nanoseconds: nanoseconds since midnight
    :param tz: UTC offset in seconds, or None for a local (naive) time
    :return: Time
    """
    from pytz import FixedOffset
    total_seconds, ns = map(int, divmod(nanoseconds, 1000000000))
    total_minutes, sec = map(int, divmod(total_seconds, 60))
    hour, minute = map(int, divmod(total_minutes, 60))
    result = Time(hour, minute, sec, ns)
    if tz is None:
        return result
    # pytz FixedOffset is minute-granular; any sub-minute part of `tz`
    # is discarded here.
    offset = FixedOffset(tz // 60)
    return offset.localize(result)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import (
datetime,
time,
timedelta,
)
from ....time import (
Date,
DateTime,
Duration,
Time,
)
from ...packstream import Structure
def get_date_unix_epoch():
    """Return the Unix epoch (1970-01-01) as a ``Date``."""
    return Date(1970, 1, 1)
def get_date_unix_epoch_ordinal():
    """Return the ordinal of the Unix epoch date (used as day-count base)."""
    return get_date_unix_epoch().to_ordinal()
def get_datetime_unix_epoch_utc():
    """Return the Unix epoch (1970-01-01T00:00:00) as a UTC ``DateTime``."""
    from pytz import utc
    return DateTime(1970, 1, 1, 0, 0, 0, utc)
def hydrate_date(days):
    """ Hydrator for `Date` values.

    :param days: days since the Unix epoch (1970-01-01)
    :return: Date
    """
    epoch_ordinal = get_date_unix_epoch_ordinal()
    return Date.from_ordinal(epoch_ordinal + days)
def dehydrate_date(value):
    """ Dehydrator for `date` values.

    :param value:
    :type value: Date
    :return: Structure encoding days since the Unix epoch
    """
    epoch_ordinal = get_date_unix_epoch().toordinal()
    return Structure(b"D", value.toordinal() - epoch_ordinal)
def hydrate_time(nanoseconds, tz=None):
    """ Hydrator for `Time` and `LocalTime` values.

    Splits a nanosecond count since midnight into clock components and,
    when an offset is given, attaches a fixed-offset timezone.

    :param nanoseconds: nanoseconds since midnight
    :param tz: UTC offset in seconds, or None for a local (naive) time
    :return: Time
    """
    from pytz import FixedOffset
    total_seconds, ns = map(int, divmod(nanoseconds, 1000000000))
    total_minutes, sec = map(int, divmod(total_seconds, 60))
    hour, minute = map(int, divmod(total_minutes, 60))
    result = Time(hour, minute, sec, ns)
    if tz is None:
        return result
    # pytz FixedOffset is minute-granular; any sub-minute part of `tz`
    # is discarded here.
    offset = FixedOffset(tz // 60)
    return offset.localize(result)
def dehydrate_time(value):
    """ Dehydrator for `time` values.

    Accepts either a ``neo4j.time.Time`` or a native ``datetime.time`` and
    encodes it as nanoseconds since midnight, adding the UTC offset in
    seconds when the value is timezone-aware.

    :param value:
    :type value: Time
    :return:
    """
    if isinstance(value, Time):
        nanoseconds = value.ticks
    elif isinstance(value, time):
        nanoseconds = (
            1000 * value.microsecond
            + 1000000000 * (value.second + 60 * value.minute
                            + 3600 * value.hour)
        )
    else:
        raise TypeError("Value must be a neo4j.time.Time or a datetime.time")
    if value.tzinfo:
        offset_seconds = int(value.tzinfo.utcoffset(value).total_seconds())
        return Structure(b"T", nanoseconds, offset_seconds)
    return Structure(b"t", nanoseconds)
def hydrate_datetime(seconds, nanoseconds, tz=None):
    """ Hydrator for `DateTime` and `LocalDateTime` values.

    :param seconds: seconds since the Unix epoch
    :param nanoseconds: sub-second component in nanoseconds
    :param tz: None for a local datetime, an int UTC offset in seconds,
        or a timezone name understood by pytz
    :return: datetime
    """
    from pytz import (
        FixedOffset,
        timezone,
    )
    minutes, seconds = (int(v) for v in divmod(seconds, 60))
    hours, minutes = (int(v) for v in divmod(minutes, 60))
    days, hours = (int(v) for v in divmod(hours, 24))
    date_part = Date.from_ordinal(get_date_unix_epoch_ordinal() + days)
    time_part = Time(hours, minutes, seconds, nanoseconds)
    combined = DateTime.combine(date_part, time_part)
    if tz is None:
        return combined
    if isinstance(tz, int):
        # Offset timezones: pytz FixedOffset takes whole minutes.
        zone = FixedOffset(tz // 60)
    else:
        zone = timezone(tz)
    return zone.localize(combined)
def dehydrate_datetime(value):
    """ Dehydrator for `datetime` values.

    Emits one of three structures depending on the timezone info:
    ``d`` (naive/local), ``f`` (named timezone) or ``F`` (fixed offset).

    :param value:
    :type value: datetime or DateTime
    :return:
    """
    def seconds_and_nanoseconds(dt):
        # Normalise natives to the driver's DateTime, then measure the
        # distance from an epoch carrying the value's own tzinfo.
        if isinstance(dt, datetime):
            dt = DateTime.from_native(dt)
        zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo)
        dt_clock_time = dt.to_clock_time()
        zone_epoch_clock_time = zone_epoch.to_clock_time()
        t = dt_clock_time - zone_epoch_clock_time
        return t.seconds, t.nanoseconds
    tz = value.tzinfo
    if tz is None:
        # without time zone
        from pytz import utc
        value = utc.localize(value)
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"d", seconds, nanoseconds)
    elif hasattr(tz, "zone") and tz.zone and isinstance(tz.zone, str):
        # with named pytz time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.zone)
    elif hasattr(tz, "key") and tz.key and isinstance(tz.key, str):
        # with named zoneinfo (Python 3.9+) time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.key)
    else:
        # with time offset
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"F", seconds, nanoseconds,
                         int(tz.utcoffset(value).total_seconds()))
def hydrate_duration(months, days, seconds, nanoseconds):
    """ Hydrator for `Duration` values.

    :param months:
    :param days:
    :param seconds:
    :param nanoseconds:
    :return: `duration` namedtuple
    """
    return Duration(
        months=months,
        days=days,
        seconds=seconds,
        nanoseconds=nanoseconds,
    )
def dehydrate_duration(value):
    """ Dehydrator for `duration` values.

    :param value:
    :type value: Duration
    :return:
    """
    components = (value.months, value.days, value.seconds, value.nanoseconds)
    return Structure(b"E", *components)
def dehydrate_timedelta(value):
    """ Dehydrator for `timedelta` values.

    Encodes a native ``datetime.timedelta`` as a packstream Duration
    structure; a timedelta has no month component, so months is zero.

    :param value:
    :type value: timedelta
    :return:
    """
    return Structure(
        b"E",
        0,                           # months: always zero for a timedelta
        value.days,
        value.seconds,
        value.microseconds * 1000,   # microseconds -> nanoseconds
    )
|
neo4j/_codec/hydration/v1/temporal.py
|
codereval_python_data_2
|
Dehydrator for `timedelta` values.
:param value:
:type value: timedelta
:return:
def dehydrate_timedelta(value):
    """ Dehydrator for `timedelta` values.

    Encodes a native ``datetime.timedelta`` as a packstream Duration
    structure; a timedelta has no month component, so months is zero.

    :param value:
    :type value: timedelta
    :return:
    """
    return Structure(
        b"E",
        0,                           # months: always zero for a timedelta
        value.days,
        value.seconds,
        value.microseconds * 1000,   # microseconds -> nanoseconds
    )
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import (
datetime,
time,
timedelta,
)
from ....time import (
Date,
DateTime,
Duration,
Time,
)
from ...packstream import Structure
def get_date_unix_epoch():
    """Return the Unix epoch (1970-01-01) as a ``Date``."""
    return Date(1970, 1, 1)
def get_date_unix_epoch_ordinal():
    """Return the ordinal of the Unix epoch date (used as day-count base)."""
    return get_date_unix_epoch().to_ordinal()
def get_datetime_unix_epoch_utc():
    """Return the Unix epoch (1970-01-01T00:00:00) as a UTC ``DateTime``."""
    from pytz import utc
    return DateTime(1970, 1, 1, 0, 0, 0, utc)
def hydrate_date(days):
    """ Hydrator for `Date` values.

    :param days: days since the Unix epoch (1970-01-01)
    :return: Date
    """
    epoch_ordinal = get_date_unix_epoch_ordinal()
    return Date.from_ordinal(epoch_ordinal + days)
def dehydrate_date(value):
    """ Dehydrator for `date` values.

    :param value:
    :type value: Date
    :return: Structure encoding days since the Unix epoch
    """
    epoch_ordinal = get_date_unix_epoch().toordinal()
    return Structure(b"D", value.toordinal() - epoch_ordinal)
def hydrate_time(nanoseconds, tz=None):
    """ Hydrator for `Time` and `LocalTime` values.

    Splits a nanosecond count since midnight into clock components and,
    when an offset is given, attaches a fixed-offset timezone.

    :param nanoseconds: nanoseconds since midnight
    :param tz: UTC offset in seconds, or None for a local (naive) time
    :return: Time
    """
    from pytz import FixedOffset
    total_seconds, ns = map(int, divmod(nanoseconds, 1000000000))
    total_minutes, sec = map(int, divmod(total_seconds, 60))
    hour, minute = map(int, divmod(total_minutes, 60))
    result = Time(hour, minute, sec, ns)
    if tz is None:
        return result
    # pytz FixedOffset is minute-granular; any sub-minute part of `tz`
    # is discarded here.
    offset = FixedOffset(tz // 60)
    return offset.localize(result)
def dehydrate_time(value):
    """ Dehydrator for `time` values.

    Accepts either a ``neo4j.time.Time`` or a native ``datetime.time`` and
    encodes it as nanoseconds since midnight, adding the UTC offset in
    seconds when the value is timezone-aware.

    :param value:
    :type value: Time
    :return:
    """
    if isinstance(value, Time):
        nanoseconds = value.ticks
    elif isinstance(value, time):
        nanoseconds = (
            1000 * value.microsecond
            + 1000000000 * (value.second + 60 * value.minute
                            + 3600 * value.hour)
        )
    else:
        raise TypeError("Value must be a neo4j.time.Time or a datetime.time")
    if value.tzinfo:
        offset_seconds = int(value.tzinfo.utcoffset(value).total_seconds())
        return Structure(b"T", nanoseconds, offset_seconds)
    return Structure(b"t", nanoseconds)
def hydrate_datetime(seconds, nanoseconds, tz=None):
    """ Hydrator for `DateTime` and `LocalDateTime` values.

    :param seconds: seconds since the Unix epoch
    :param nanoseconds: sub-second component in nanoseconds
    :param tz: None for a local datetime, an int UTC offset in seconds,
        or a timezone name understood by pytz
    :return: datetime
    """
    from pytz import (
        FixedOffset,
        timezone,
    )
    minutes, seconds = (int(v) for v in divmod(seconds, 60))
    hours, minutes = (int(v) for v in divmod(minutes, 60))
    days, hours = (int(v) for v in divmod(hours, 24))
    date_part = Date.from_ordinal(get_date_unix_epoch_ordinal() + days)
    time_part = Time(hours, minutes, seconds, nanoseconds)
    combined = DateTime.combine(date_part, time_part)
    if tz is None:
        return combined
    if isinstance(tz, int):
        # Offset timezones: pytz FixedOffset takes whole minutes.
        zone = FixedOffset(tz // 60)
    else:
        zone = timezone(tz)
    return zone.localize(combined)
def dehydrate_datetime(value):
    """ Dehydrator for `datetime` values.

    Emits one of three structures depending on the timezone info:
    ``d`` (naive/local), ``f`` (named timezone) or ``F`` (fixed offset).

    :param value:
    :type value: datetime or DateTime
    :return:
    """
    def seconds_and_nanoseconds(dt):
        # Normalise natives to the driver's DateTime, then measure the
        # distance from an epoch carrying the value's own tzinfo.
        if isinstance(dt, datetime):
            dt = DateTime.from_native(dt)
        zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo)
        dt_clock_time = dt.to_clock_time()
        zone_epoch_clock_time = zone_epoch.to_clock_time()
        t = dt_clock_time - zone_epoch_clock_time
        return t.seconds, t.nanoseconds
    tz = value.tzinfo
    if tz is None:
        # without time zone
        from pytz import utc
        value = utc.localize(value)
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"d", seconds, nanoseconds)
    elif hasattr(tz, "zone") and tz.zone and isinstance(tz.zone, str):
        # with named pytz time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.zone)
    elif hasattr(tz, "key") and tz.key and isinstance(tz.key, str):
        # with named zoneinfo (Python 3.9+) time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.key)
    else:
        # with time offset
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"F", seconds, nanoseconds,
                         int(tz.utcoffset(value).total_seconds()))
def hydrate_duration(months, days, seconds, nanoseconds):
    """ Hydrator for `Duration` values.

    :param months:
    :param days:
    :param seconds:
    :param nanoseconds:
    :return: `duration` namedtuple
    """
    return Duration(
        months=months,
        days=days,
        seconds=seconds,
        nanoseconds=nanoseconds,
    )
def dehydrate_duration(value):
    """ Dehydrator for `duration` values.

    :param value:
    :type value: Duration
    :return:
    """
    components = (value.months, value.days, value.seconds, value.nanoseconds)
    return Structure(b"E", *components)
def dehydrate_timedelta(value):
    """ Dehydrator for `timedelta` values.

    Encodes a native ``datetime.timedelta`` as a packstream Duration
    structure; a timedelta has no month component, so months is zero.

    :param value:
    :type value: timedelta
    :return:
    """
    return Structure(
        b"E",
        0,                           # months: always zero for a timedelta
        value.days,
        value.seconds,
        value.microseconds * 1000,   # microseconds -> nanoseconds
    )
|
neo4j/_codec/hydration/v1/temporal.py
|
codereval_python_data_3
|
Dehydrator for `time` values.
:param value:
:type value: Time
:return:
def dehydrate_time(value):
    """ Dehydrator for `time` values.

    Accepts either a ``neo4j.time.Time`` or a native ``datetime.time`` and
    encodes it as nanoseconds since midnight, adding the UTC offset in
    seconds when the value is timezone-aware.

    :param value:
    :type value: Time
    :return:
    """
    if isinstance(value, Time):
        nanoseconds = value.ticks
    elif isinstance(value, time):
        nanoseconds = (
            1000 * value.microsecond
            + 1000000000 * (value.second + 60 * value.minute
                            + 3600 * value.hour)
        )
    else:
        raise TypeError("Value must be a neo4j.time.Time or a datetime.time")
    if value.tzinfo:
        offset_seconds = int(value.tzinfo.utcoffset(value).total_seconds())
        return Structure(b"T", nanoseconds, offset_seconds)
    return Structure(b"t", nanoseconds)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import (
datetime,
time,
timedelta,
)
from ....time import (
Date,
DateTime,
Duration,
Time,
)
from ...packstream import Structure
def get_date_unix_epoch():
    """Return the Unix epoch (1970-01-01) as a ``Date``."""
    return Date(1970, 1, 1)
def get_date_unix_epoch_ordinal():
    """Return the ordinal of the Unix epoch date (used as day-count base)."""
    return get_date_unix_epoch().to_ordinal()
def get_datetime_unix_epoch_utc():
    """Return the Unix epoch (1970-01-01T00:00:00) as a UTC ``DateTime``."""
    from pytz import utc
    return DateTime(1970, 1, 1, 0, 0, 0, utc)
def hydrate_date(days):
    """ Hydrator for `Date` values.

    :param days: days since the Unix epoch (1970-01-01)
    :return: Date
    """
    epoch_ordinal = get_date_unix_epoch_ordinal()
    return Date.from_ordinal(epoch_ordinal + days)
def dehydrate_date(value):
    """ Dehydrator for `date` values.

    :param value:
    :type value: Date
    :return: Structure encoding days since the Unix epoch
    """
    epoch_ordinal = get_date_unix_epoch().toordinal()
    return Structure(b"D", value.toordinal() - epoch_ordinal)
def hydrate_time(nanoseconds, tz=None):
    """ Hydrator for `Time` and `LocalTime` values.

    Splits a nanosecond count since midnight into clock components and,
    when an offset is given, attaches a fixed-offset timezone.

    :param nanoseconds: nanoseconds since midnight
    :param tz: UTC offset in seconds, or None for a local (naive) time
    :return: Time
    """
    from pytz import FixedOffset
    total_seconds, ns = map(int, divmod(nanoseconds, 1000000000))
    total_minutes, sec = map(int, divmod(total_seconds, 60))
    hour, minute = map(int, divmod(total_minutes, 60))
    result = Time(hour, minute, sec, ns)
    if tz is None:
        return result
    # pytz FixedOffset is minute-granular; any sub-minute part of `tz`
    # is discarded here.
    offset = FixedOffset(tz // 60)
    return offset.localize(result)
def dehydrate_time(value):
    """ Dehydrator for `time` values.

    Accepts either a ``neo4j.time.Time`` or a native ``datetime.time`` and
    encodes it as nanoseconds since midnight, adding the UTC offset in
    seconds when the value is timezone-aware.

    :param value:
    :type value: Time
    :return:
    """
    if isinstance(value, Time):
        nanoseconds = value.ticks
    elif isinstance(value, time):
        nanoseconds = (
            1000 * value.microsecond
            + 1000000000 * (value.second + 60 * value.minute
                            + 3600 * value.hour)
        )
    else:
        raise TypeError("Value must be a neo4j.time.Time or a datetime.time")
    if value.tzinfo:
        offset_seconds = int(value.tzinfo.utcoffset(value).total_seconds())
        return Structure(b"T", nanoseconds, offset_seconds)
    return Structure(b"t", nanoseconds)
def hydrate_datetime(seconds, nanoseconds, tz=None):
    """ Hydrator for `DateTime` and `LocalDateTime` values.

    :param seconds: seconds since the Unix epoch
    :param nanoseconds: sub-second component in nanoseconds
    :param tz: None for a local datetime, an int UTC offset in seconds,
        or a timezone name understood by pytz
    :return: datetime
    """
    from pytz import (
        FixedOffset,
        timezone,
    )
    minutes, seconds = (int(v) for v in divmod(seconds, 60))
    hours, minutes = (int(v) for v in divmod(minutes, 60))
    days, hours = (int(v) for v in divmod(hours, 24))
    date_part = Date.from_ordinal(get_date_unix_epoch_ordinal() + days)
    time_part = Time(hours, minutes, seconds, nanoseconds)
    combined = DateTime.combine(date_part, time_part)
    if tz is None:
        return combined
    if isinstance(tz, int):
        # Offset timezones: pytz FixedOffset takes whole minutes.
        zone = FixedOffset(tz // 60)
    else:
        zone = timezone(tz)
    return zone.localize(combined)
def dehydrate_datetime(value):
    """ Dehydrator for `datetime` values.

    Emits one of three structures depending on the timezone info:
    ``d`` (naive/local), ``f`` (named timezone) or ``F`` (fixed offset).

    :param value:
    :type value: datetime or DateTime
    :return:
    """
    def seconds_and_nanoseconds(dt):
        # Normalise natives to the driver's DateTime, then measure the
        # distance from an epoch carrying the value's own tzinfo.
        if isinstance(dt, datetime):
            dt = DateTime.from_native(dt)
        zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo)
        dt_clock_time = dt.to_clock_time()
        zone_epoch_clock_time = zone_epoch.to_clock_time()
        t = dt_clock_time - zone_epoch_clock_time
        return t.seconds, t.nanoseconds
    tz = value.tzinfo
    if tz is None:
        # without time zone
        from pytz import utc
        value = utc.localize(value)
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"d", seconds, nanoseconds)
    elif hasattr(tz, "zone") and tz.zone and isinstance(tz.zone, str):
        # with named pytz time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.zone)
    elif hasattr(tz, "key") and tz.key and isinstance(tz.key, str):
        # with named zoneinfo (Python 3.9+) time zone
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"f", seconds, nanoseconds, tz.key)
    else:
        # with time offset
        seconds, nanoseconds = seconds_and_nanoseconds(value)
        return Structure(b"F", seconds, nanoseconds,
                         int(tz.utcoffset(value).total_seconds()))
def hydrate_duration(months, days, seconds, nanoseconds):
    """ Hydrator for `Duration` values.

    :param months:
    :param days:
    :param seconds:
    :param nanoseconds:
    :return: `duration` namedtuple
    """
    return Duration(
        months=months,
        days=days,
        seconds=seconds,
        nanoseconds=nanoseconds,
    )
def dehydrate_duration(value):
    """ Dehydrator for `duration` values.

    :param value:
    :type value: Duration
    :return:
    """
    components = (value.months, value.days, value.seconds, value.nanoseconds)
    return Structure(b"E", *components)
def dehydrate_timedelta(value):
    """ Dehydrator for `timedelta` values.

    Encodes a native ``datetime.timedelta`` as a packstream Duration
    structure; a timedelta has no month component, so months is zero.

    :param value:
    :type value: timedelta
    :return:
    """
    return Structure(
        b"E",
        0,                           # months: always zero for a timedelta
        value.days,
        value.seconds,
        value.microseconds * 1000,   # microseconds -> nanoseconds
    )
|
neo4j/_codec/hydration/v1/temporal.py
|
codereval_python_data_4
|
Dehydrator for Point data.
:param value:
:type value: Point
:return:
def dehydrate_point(value):
    """ Dehydrator for Point data.

    :param value:
    :type value: Point
    :return:
    """
    dimensions = len(value)
    if dimensions == 2:
        tag = b"X"
    elif dimensions == 3:
        tag = b"Y"
    else:
        raise ValueError("Cannot dehydrate Point with %d dimensions" % dimensions)
    return Structure(tag, value.srid, *value)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...._spatial import (
Point,
srid_table,
)
from ...packstream import Structure
def hydrate_point(srid, *coordinates):
    """ Create a new instance of a Point subclass from a raw
    set of fields. The subclass chosen is determined by the
    given SRID; a ValueError will be raised if no such
    subclass can be found.
    """
    if srid not in srid_table:
        # Unknown SRID: fall back to a generic Point tagged with the SRID.
        point = Point(coordinates)
        point.srid = srid
        return point
    point_class, dim = srid_table[srid]
    if len(coordinates) != dim:
        raise ValueError("SRID %d requires %d coordinates (%d provided)" % (srid, dim, len(coordinates)))
    return point_class(coordinates)
def dehydrate_point(value):
    """ Dehydrator for Point data.

    :param value:
    :type value: Point
    :return:
    """
    dimensions = len(value)
    if dimensions == 2:
        tag = b"X"
    elif dimensions == 3:
        tag = b"Y"
    else:
        raise ValueError("Cannot dehydrate Point with %d dimensions" % dimensions)
    return Structure(tag, value.srid, *value)
__all__ = [
"hydrate_point",
"dehydrate_point",
]
|
neo4j/_codec/hydration/v1/spatial.py
|
codereval_python_data_5
|
Return the keys of the record.
:return: list of key names
def keys(self):
    """ Return the keys of the record.

    :return: list of key names
    """
    return [*self.__keys]
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from collections.abc import (
Mapping,
Sequence,
Set,
)
from functools import reduce
from operator import xor as xor_operator
from ._codec.hydration import BrokenHydrationObject
from ._conf import iter_items
from ._meta import deprecated
from .exceptions import BrokenRecordError
from .graph import (
Node,
Path,
Relationship,
)
class Record(tuple, Mapping):
    """ A :class:`.Record` is an immutable ordered collection of key-value
    pairs. It is generally closer to a :py:class:`namedtuple` than to a
    :py:class:`OrderedDict` in as much as iteration of the collection will
    yield values rather than keys.
    """

    # Tuple of field names, parallel to the tuple's own stored values.
    # (Name-mangled to _Record__keys on instances.)
    __keys = None

    def __new__(cls, iterable=()):
        # Values are stored in the tuple itself; keys in a parallel tuple.
        keys = []
        values = []
        for key, value in iter_items(iterable):
            keys.append(key)
            values.append(value)
        inst = tuple.__new__(cls, values)
        inst.__keys = tuple(keys)
        return inst

    def _broken_record_error(self, index):
        # Build (but do not raise) the error for a field whose hydration failed.
        return BrokenRecordError(
            f"Record contains broken data at {index} ('{self.__keys[index]}')"
        )

    def _super_getitem_single(self, index):
        # Raw tuple access that surfaces hydration failures as
        # BrokenRecordError instead of returning the placeholder object.
        value = super().__getitem__(index)
        if isinstance(value, BrokenHydrationObject):
            raise self._broken_record_error(index) from value.error
        return value

    def __repr__(self):
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join("%s=%r" % (field, value)
                     for field, value in zip(self.__keys, super().__iter__()))
        )

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        """ In order to be flexible regarding comparison, the equality rules
        for a record permit comparison with any other Sequence or Mapping.

        :param other:
        :return:
        """
        compare_as_sequence = isinstance(other, Sequence)
        compare_as_mapping = isinstance(other, Mapping)
        if compare_as_sequence and compare_as_mapping:
            return list(self) == list(other) and dict(self) == dict(other)
        elif compare_as_sequence:
            return list(self) == list(other)
        elif compare_as_mapping:
            return dict(self) == dict(other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # XOR of (key, value) pair hashes; order-insensitive by construction.
        return reduce(xor_operator, map(hash, self.items()))

    def __iter__(self):
        # Yields values (not keys), raising on broken fields as they
        # are encountered.
        for i, v in enumerate(super().__iter__()):
            if isinstance(v, BrokenHydrationObject):
                raise self._broken_record_error(i) from v.error
            yield v

    def __getitem__(self, key):
        if isinstance(key, slice):
            keys = self.__keys[key]
            values = super().__getitem__(key)
            return self.__class__(zip(keys, values))
        # NOTE(review): only IndexError is caught, so an unknown string
        # key raises KeyError (from index()) rather than returning None,
        # while an out-of-range int returns None — confirm this asymmetry
        # is intended.
        try:
            index = self.index(key)
        except IndexError:
            return None
        else:
            return self._super_getitem_single(index)

    # TODO: 6.0 - remove
    @deprecated("This method is deprecated and will be removed in the future.")
    def __getslice__(self, start, stop):
        key = slice(start, stop)
        keys = self.__keys[key]
        values = tuple(self)[key]
        return self.__class__(zip(keys, values))

    def get(self, key, default=None):
        """ Obtain a value from the record by key, returning a default
        value if the key does not exist.

        :param key: a key
        :param default: default value
        :return: a value
        """
        # Keys are matched by their string form.
        try:
            index = self.__keys.index(str(key))
        except ValueError:
            return default
        if 0 <= index < len(self):
            return self._super_getitem_single(index)
        else:
            return default

    def index(self, key):
        """ Return the index of the given item.

        :param key: a key (str) or an index (int)
        :return: index
        :rtype: int
        :raises IndexError: for an out-of-range int key
        :raises KeyError: for an unknown string key
        :raises TypeError: for any other key type
        """
        if isinstance(key, int):
            if 0 <= key < len(self.__keys):
                return key
            raise IndexError(key)
        elif isinstance(key, str):
            try:
                return self.__keys.index(key)
            except ValueError:
                raise KeyError(key)
        else:
            raise TypeError(key)

    def value(self, key=0, default=None):
        """ Obtain a single value from the record by index or key. If no
        index or key is specified, the first value is returned. If the
        specified item does not exist, the default value is returned.

        :param key: an index or key
        :param default: default value
        :return: a single value
        """
        try:
            index = self.index(key)
        except (IndexError, KeyError):
            return default
        else:
            return self[index]

    def keys(self):
        """ Return the keys of the record.

        :return: list of key names
        """
        return list(self.__keys)

    def values(self, *keys):
        """ Return the values of the record, optionally filtering to
        include only certain values by index or key.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: list of values
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    # Unknown keys contribute None rather than raising.
                    d.append(None)
                else:
                    d.append(self[i])
            return d
        return list(self)

    def items(self, *keys):
        """ Return the fields of the record as a list of key and value tuples

        :return: a list of value tuples
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    # Unknown keys are echoed back paired with None.
                    d.append((key, None))
                else:
                    d.append((self.__keys[i], self[i]))
            return d
        return list((self.__keys[i], self._super_getitem_single(i))
                    for i in range(len(self)))

    def data(self, *keys):
        """ Return the keys and values of this record as a dictionary,
        optionally including only certain values by index or key. Keys
        provided in the items that are not in the record will be
        inserted with a value of :const:`None`; indexes provided
        that are out of bounds will trigger an :exc:`IndexError`.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: dictionary of values, keyed by field name
        :raises: :exc:`IndexError` if an out-of-bounds index is specified
        """
        return RecordExporter().transform(dict(self.items(*keys)))
class DataTransformer(metaclass=ABCMeta):
    """ Abstract base class for transforming data from one form into
    another.
    """

    @abstractmethod
    def transform(self, x):
        """ Transform a value, or collection of values.

        Subclasses decide how each input type is converted; the method
        is typically recursive over containers.

        :param x: input value
        :return: output value
        """
class RecordExporter(DataTransformer):
    """ Transformer class used by the :meth:`.Record.data` method.

    Recursively converts graph entities (nodes, relationships, paths)
    into plain Python containers while preserving container types.
    """

    def transform(self, x):
        if isinstance(x, Node):
            return self.transform(dict(x))
        if isinstance(x, Relationship):
            return (self.transform(dict(x.start_node)),
                    x.__class__.__name__,
                    self.transform(dict(x.end_node)))
        if isinstance(x, Path):
            # Flatten to alternating [node, relationship-type, node, ...].
            path = [self.transform(x.start_node)]
            for i, relationship in enumerate(x.relationships):
                path.append(self.transform(relationship.__class__.__name__))
                path.append(self.transform(x.nodes[i + 1]))
            return path
        if isinstance(x, str):
            # str is itself a Sequence, so it must be handled before the
            # generic container cases below.
            return x
        if isinstance(x, (Sequence, Set)):
            return type(x)(map(self.transform, x))
        if isinstance(x, Mapping):
            return type(x)((k, self.transform(v)) for k, v in x.items())
        return x
class RecordTableRowExporter(DataTransformer):
    """Transformer class used by the :meth:`.Result.to_df` method."""

    def transform(self, x):
        # Top-level record: flatten every field into one flat mapping of
        # column name -> value. Literal "\" and "." in key names are
        # escaped so "." can serve as the column-name separator.
        assert isinstance(x, Mapping)
        t = type(x)
        return t(item
                 for k, v in x.items()
                 for item in self._transform(
                     v, prefix=k.replace("\\", "\\\\").replace(".", "\\.")
                 ).items())

    def _transform(self, x, prefix):
        # Recursively flatten a single value into {column_name: value}
        # entries rooted at `prefix`. Markers: "()" node, "->" relationship,
        # "[]" sequence element, "{}" mapping entry.
        if isinstance(x, Node):
            res = {
                "%s().element_id" % prefix: x.element_id,
                "%s().labels" % prefix: x.labels,
            }
            res.update(("%s().prop.%s" % (prefix, k), v) for k, v in x.items())
            return res
        elif isinstance(x, Relationship):
            res = {
                "%s->.element_id" % prefix: x.element_id,
                "%s->.start.element_id" % prefix: x.start_node.element_id,
                "%s->.end.element_id" % prefix: x.end_node.element_id,
                "%s->.type" % prefix: x.__class__.__name__,
            }
            res.update(("%s->.prop.%s" % (prefix, k), v) for k, v in x.items())
            return res
        elif isinstance(x, Path) or isinstance(x, str):
            # Paths and strings are kept whole (str is a Sequence, so this
            # check must precede the Sequence branch).
            return {prefix: x}
        elif isinstance(x, Sequence):
            return dict(
                item
                for i, v in enumerate(x)
                for item in self._transform(
                    v, prefix="%s[].%i" % (prefix, i)
                ).items()
            )
        elif isinstance(x, Mapping):
            t = type(x)
            return t(
                item
                for k, v in x.items()
                for item in self._transform(
                    v, prefix="%s{}.%s" % (prefix, k.replace("\\", "\\\\")
                                           .replace(".", "\\."))
                ).items()
            )
        else:
            # Scalar leaf value.
            return {prefix: x}
|
neo4j/_data.py
|
codereval_python_data_6
|
Return a dictionary of available Bolt protocol handlers,
keyed by version tuple. If an explicit protocol version is
provided, the dictionary will contain either zero or one items,
depending on whether that version is supported. If no protocol
version is provided, all available versions will be returned.
:param protocol_version: tuple identifying a specific protocol
version (e.g. (3, 5)) or None
:return: dictionary of version tuple to handler class for all
relevant and supported protocol versions
:raise TypeError: if protocol version is not passed in a tuple
@classmethod
def protocol_handlers(cls, protocol_version=None):
    """ Return a dictionary of available Bolt protocol handlers,
    keyed by version tuple. If an explicit protocol version is
    provided, the dictionary will contain either zero or one items,
    depending on whether that version is supported. If no protocol
    version is provided, all available versions will be returned.

    :param protocol_version: tuple identifying a specific protocol
        version (e.g. (3, 5)) or None
    :return: dictionary of version tuple to handler class for all
        relevant and supported protocol versions
    :raise TypeError: if protocol version is not passed in a tuple
    """
    # Carry out Bolt subclass imports locally to avoid circular dependency issues.
    from ._bolt3 import Bolt3
    from ._bolt4 import (
        Bolt4x1,
        Bolt4x2,
        Bolt4x3,
        Bolt4x4,
    )
    from ._bolt5 import Bolt5x0
    # 4.0 unsupported because no space left in the handshake
    available = {
        bolt_cls.PROTOCOL_VERSION: bolt_cls
        for bolt_cls in (Bolt3, Bolt4x1, Bolt4x2, Bolt4x3, Bolt4x4, Bolt5x0)
    }
    if protocol_version is None:
        return available
    if not isinstance(protocol_version, tuple):
        raise TypeError("Protocol version must be specified as a tuple")
    selected = available.get(protocol_version)
    return {} if selected is None else {protocol_version: selected}
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import asyncio
from collections import deque
from logging import getLogger
from time import perf_counter
from ..._async_compat.network import BoltSocket
from ..._async_compat.util import Util
from ..._codec.hydration import v1 as hydration_v1
from ..._codec.packstream import v1 as packstream_v1
from ..._conf import PoolConfig
from ..._exceptions import (
BoltError,
BoltHandshakeError,
SocketDeadlineExceeded,
)
from ..._meta import get_user_agent
from ...addressing import Address
from ...api import (
ServerInfo,
Version,
)
from ...exceptions import (
AuthError,
DriverError,
IncompleteCommit,
ServiceUnavailable,
SessionExpired,
)
from ._common import (
CommitResponse,
Inbox,
Outbox,
)
# Set up logger
log = getLogger("neo4j")
class Bolt:
    """ Server connection for Bolt protocol.

    A :class:`.Bolt` should be constructed following a
    successful .open()

    Bolt handshake and takes the socket over which
    the handshake was carried out.
    """

    # TODO: let packer/unpacker know of hydration (give them hooks?)
    # TODO: make sure query parameter dehydration gets clear error message.

    # Codec classes used to (de)serialize PackStream messages; protocol
    # version subclasses may override these.
    PACKER_CLS = packstream_v1.Packer
    UNPACKER_CLS = packstream_v1.Unpacker
    HYDRATION_HANDLER_CLS = hydration_v1.HydrationHandler

    # First four bytes a client sends to identify the Bolt protocol.
    MAGIC_PREAMBLE = b"\x60\x60\xB0\x17"

    # Overridden by version-specific subclasses (e.g. (5, 0) for Bolt 5.0).
    PROTOCOL_VERSION = None

    # flag if connection needs RESET to go back to READY state
    is_reset = False

    # flag if the connection is currently checked out of the pool and in use
    in_use = False

    # When the connection was last put back into the pool
    idle_since = float("-inf")

    # close-handshake state: _closing is set as soon as close() has begun,
    # _closed once the underlying socket has actually been closed
    _closing = False
    _closed = False

    # flag if the connection is broken and must not be reused
    _defunct = False

    #: The pool of which this connection is a member
    pool = None

    # Store the id of the most recent ran query to be able to reduce sent bits by
    # using the default (-1) to refer to the most recent query when pulling
    # results for it.
    most_recent_qid = None

    def __init__(self, unresolved_address, sock, max_connection_lifetime, *,
                 auth=None, user_agent=None, routing_context=None):
        """Wrap an already-connected, handshook socket as a Bolt connection.

        :param unresolved_address: the address this connection was opened for,
            before any custom resolution
        :param sock: the connected socket the Bolt handshake was done on
        :param max_connection_lifetime: seconds after creation at which this
            connection is considered stale (negative disables the check)
        :param auth: None, a (user, password[, realm]) tuple, or an object
            whose attributes describe the auth token
        :param user_agent: custom user agent string, or None for the default
        :param routing_context: dict containing routing context, or None
        :raise AuthError: if auth details cannot be determined or the
            password is None
        """
        self.unresolved_address = unresolved_address
        self.socket = sock
        self.local_port = self.socket.getsockname()[1]
        self.server_info = ServerInfo(Address(sock.getpeername()),
                                      self.PROTOCOL_VERSION)
        # so far `connection.recv_timeout_seconds` is the only available
        # configuration hint that exists. Therefore, all hints can be stored at
        # connection level. This might change in the future.
        self.configuration_hints = {}
        self.patch = {}
        self.outbox = Outbox(
            self.socket, on_error=self._set_defunct_write,
            packer_cls=self.PACKER_CLS
        )
        self.inbox = Inbox(
            self.socket, on_error=self._set_defunct_read,
            unpacker_cls=self.UNPACKER_CLS
        )
        self.hydration_handler = self.HYDRATION_HANDLER_CLS()
        # Response objects for messages that have been appended but not yet
        # fully answered by the server (FIFO order).
        self.responses = deque()
        self._max_connection_lifetime = max_connection_lifetime
        self._creation_timestamp = perf_counter()
        self.routing_context = routing_context
        self.idle_since = perf_counter()
        # Determine the user agent
        if user_agent:
            self.user_agent = user_agent
        else:
            self.user_agent = get_user_agent()
        # Determine auth details
        if not auth:
            self.auth_dict = {}
        elif isinstance(auth, tuple) and 2 <= len(auth) <= 3:
            from neo4j import Auth
            self.auth_dict = vars(Auth("basic", *auth))
        else:
            try:
                self.auth_dict = vars(auth)
            except (KeyError, TypeError):
                raise AuthError("Cannot determine auth details from %r" % auth)
        # Check for missing password
        try:
            credentials = self.auth_dict["credentials"]
        except KeyError:
            pass
        else:
            if credentials is None:
                raise AuthError("Password cannot be None")

    def __del__(self):
        # Best-effort cleanup on garbage collection. The coroutine check
        # presumably guards against the async variant of this class, where
        # close() could not be awaited from __del__ — TODO confirm.
        if not asyncio.iscoroutinefunction(self.close):
            self.close()

    @property
    @abc.abstractmethod
    def supports_multiple_results(self):
        """ Boolean flag to indicate if the connection version supports multiple
        queries to be buffered on the server side (True) or if all results need
        to be eagerly pulled before sending the next RUN (False).
        """
        pass

    @property
    @abc.abstractmethod
    def supports_multiple_databases(self):
        """ Boolean flag to indicate if the connection version supports multiple
        databases.
        """
        pass

    @classmethod
    def protocol_handlers(cls, protocol_version=None):
        """ Return a dictionary of available Bolt protocol handlers,
        keyed by version tuple. If an explicit protocol version is
        provided, the dictionary will contain either zero or one items,
        depending on whether that version is supported. If no protocol
        version is provided, all available versions will be returned.

        :param protocol_version: tuple identifying a specific protocol
            version (e.g. (3, 5)) or None
        :return: dictionary of version tuple to handler class for all
            relevant and supported protocol versions
        :raise TypeError: if protocol version is not passed in a tuple
        """
        # Carry out Bolt subclass imports locally to avoid circular dependency issues.
        from ._bolt3 import Bolt3
        from ._bolt4 import (
            Bolt4x1,
            Bolt4x2,
            Bolt4x3,
            Bolt4x4,
        )
        from ._bolt5 import Bolt5x0
        handlers = {
            Bolt3.PROTOCOL_VERSION: Bolt3,
            # 4.0 unsupported because no space left in the handshake
            Bolt4x1.PROTOCOL_VERSION: Bolt4x1,
            Bolt4x2.PROTOCOL_VERSION: Bolt4x2,
            Bolt4x3.PROTOCOL_VERSION: Bolt4x3,
            Bolt4x4.PROTOCOL_VERSION: Bolt4x4,
            Bolt5x0.PROTOCOL_VERSION: Bolt5x0,
        }
        if protocol_version is None:
            return handlers
        if not isinstance(protocol_version, tuple):
            raise TypeError("Protocol version must be specified as a tuple")
        if protocol_version in handlers:
            return {protocol_version: handlers[protocol_version]}
        return {}

    @classmethod
    def version_list(cls, versions, limit=4):
        """ Return a list of supported protocol versions in order of
        preference. The number of protocol versions (or ranges)
        returned is limited to four.
        """
        # NOTE(review): the `limit` parameter is unused; the cap is
        # hard-coded to 4 in the loop below.
        # In fact, 4.3 is the first version to support ranges. However, the
        # range support got backported to 4.2. But even if the server is too
        # old to have the backport, negotiating BOLT 4.1 is no problem as it's
        # equivalent to 4.2
        first_with_range_support = Version(4, 2)
        result = []
        for version in versions:
            # `versions` is expected in descending order; each result entry is
            # Version(major, [high_minor, low_minor]) describing a range.
            if (result
                    and version >= first_with_range_support
                    and result[-1][0] == version[0]
                    and result[-1][1][1] == version[1] + 1):
                # can use range to encompass this version
                result[-1][1][1] = version[1]
                continue
            result.append(Version(version[0], [version[1], version[1]]))
            if len(result) == 4:
                break
        return result

    @classmethod
    def get_handshake(cls):
        """ Return the supported Bolt versions as bytes.
        The length is 16 bytes as specified in the Bolt version negotiation.

        :return: bytes
        """
        supported_versions = sorted(cls.protocol_handlers().keys(), reverse=True)
        offered_versions = cls.version_list(supported_versions)
        # Pad with zero bytes up to the fixed 16-byte handshake size.
        return b"".join(version.to_bytes() for version in offered_versions).ljust(16, b"\x00")

    @classmethod
    def ping(cls, address, *, timeout=None, **config):
        """ Attempt to establish a Bolt connection, returning the
        agreed Bolt protocol version if successful.
        """
        config = PoolConfig.consume(config)
        try:
            s, protocol_version, handshake, data = \
                BoltSocket.connect(
                    address,
                    timeout=timeout,
                    custom_resolver=config.resolver,
                    ssl_context=config.get_ssl_context(),
                    keep_alive=config.keep_alive,
                )
        except (ServiceUnavailable, SessionExpired, BoltHandshakeError):
            return None
        else:
            # Only the negotiated version is of interest; close right away.
            BoltSocket.close_socket(s)
            return protocol_version

    @classmethod
    def open(
        cls, address, *, auth=None, timeout=None, routing_context=None,
        **pool_config
    ):
        """Open a new Bolt connection to a given server address.

        :param address:
        :param auth:
        :param timeout: the connection timeout in seconds
        :param routing_context: dict containing routing context
        :param pool_config:
        :return: connected Bolt instance
        :raise BoltHandshakeError:
            raised if the Bolt Protocol can not negotiate a protocol version.
        :raise ServiceUnavailable: raised if there was a connection issue.
        """
        def time_remaining():
            # Remaining budget of the overall `timeout`; clamped at 0,
            # None when no overall timeout was given.
            if timeout is None:
                return None
            t = timeout - (perf_counter() - t0)
            return t if t > 0 else 0
        t0 = perf_counter()
        pool_config = PoolConfig.consume(pool_config)
        # The TCP connect may take at most the configured connection timeout,
        # further capped by whatever is left of the overall timeout.
        socket_connection_timeout = pool_config.connection_timeout
        if socket_connection_timeout is None:
            socket_connection_timeout = time_remaining()
        elif timeout is not None:
            socket_connection_timeout = min(pool_config.connection_timeout,
                                            time_remaining())
        s, pool_config.protocol_version, handshake, data = \
            BoltSocket.connect(
                address,
                timeout=socket_connection_timeout,
                custom_resolver=pool_config.resolver,
                ssl_context=pool_config.get_ssl_context(),
                keep_alive=pool_config.keep_alive,
            )
        # Carry out Bolt subclass imports locally to avoid circular dependency
        # issues.
        if pool_config.protocol_version == (3, 0):
            from ._bolt3 import Bolt3
            bolt_cls = Bolt3
        # Implementation for 4.0 exists, but there was no space left in the
        # handshake to offer this version to the server. Hence, the server
        # should never request us to speak bolt 4.0.
        # elif pool_config.protocol_version == (4, 0):
        #     from ._bolt4 import AsyncBolt4x0
        #     bolt_cls = AsyncBolt4x0
        elif pool_config.protocol_version == (4, 1):
            from ._bolt4 import Bolt4x1
            bolt_cls = Bolt4x1
        elif pool_config.protocol_version == (4, 2):
            from ._bolt4 import Bolt4x2
            bolt_cls = Bolt4x2
        elif pool_config.protocol_version == (4, 3):
            from ._bolt4 import Bolt4x3
            bolt_cls = Bolt4x3
        elif pool_config.protocol_version == (4, 4):
            from ._bolt4 import Bolt4x4
            bolt_cls = Bolt4x4
        elif pool_config.protocol_version == (5, 0):
            from ._bolt5 import Bolt5x0
            bolt_cls = Bolt5x0
        else:
            # No handler for the version the server agreed to: close the
            # socket and report what this driver could have spoken.
            log.debug("[#%04X] S: <CLOSE>", s.getsockname()[1])
            BoltSocket.close_socket(s)
            supported_versions = cls.protocol_handlers().keys()
            raise BoltHandshakeError(
                "The Neo4J server does not support communication with this "
                "driver. This driver has support for Bolt protocols "
                "{}".format(tuple(map(str, supported_versions))),
                address=address, request_data=handshake, response_data=data
            )
        connection = bolt_cls(
            address, s, pool_config.max_connection_lifetime, auth=auth,
            user_agent=pool_config.user_agent, routing_context=routing_context
        )
        try:
            # The remaining overall timeout budget also bounds the HELLO
            # exchange; the deadline is always cleared again afterwards.
            connection.socket.set_deadline(time_remaining())
            try:
                connection.hello()
            finally:
                connection.socket.set_deadline(None)
        except Exception:
            # Do not leak the socket if HELLO (or deadline handling) failed.
            connection.close_non_blocking()
            raise
        return connection

    @property
    @abc.abstractmethod
    def encrypted(self):
        pass

    @property
    @abc.abstractmethod
    def der_encoded_server_certificate(self):
        pass

    @abc.abstractmethod
    def hello(self, dehydration_hooks=None, hydration_hooks=None):
        """ Appends a HELLO message to the outgoing queue, sends it and consumes
        all remaining messages.
        """
        pass

    @abc.abstractmethod
    def route(
        self, database=None, imp_user=None, bookmarks=None,
        dehydration_hooks=None, hydration_hooks=None
    ):
        """ Fetch a routing table from the server for the given
        `database`. For Bolt 4.3 and above, this appends a ROUTE
        message; for earlier versions, a procedure call is made via
        the regular Cypher execution mechanism. In all cases, this is
        sent to the network, and a response is fetched.

        :param database: database for which to fetch a routing table
            Requires Bolt 4.0+.
        :param imp_user: the user to impersonate
            Requires Bolt 4.4+.
        :param bookmarks: iterable of bookmark values after which this
            transaction should begin
        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        :param hydration_hooks:
            Hooks to hydrate types (mapping from type (class) to
            dehydration function). Dehydration functions receive the value of
            type understood by packstream and are free to return anything.
        """
        pass

    @abc.abstractmethod
    def run(self, query, parameters=None, mode=None, bookmarks=None,
            metadata=None, timeout=None, db=None, imp_user=None,
            dehydration_hooks=None, hydration_hooks=None,
            **handlers):
        """ Appends a RUN message to the output queue.

        :param query: Cypher query string
        :param parameters: dictionary of Cypher parameters
        :param mode: access mode for routing - "READ" or "WRITE" (default)
        :param bookmarks: iterable of bookmark values after which this transaction should begin
        :param metadata: custom metadata dictionary to attach to the transaction
        :param timeout: timeout for transaction execution (seconds)
        :param db: name of the database against which to begin the transaction
            Requires Bolt 4.0+.
        :param imp_user: the user to impersonate
            Requires Bolt 4.4+.
        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        :param hydration_hooks:
            Hooks to hydrate types (mapping from type (class) to
            dehydration function). Dehydration functions receive the value of
            type understood by packstream and are free to return anything.
        :param handlers: handler functions passed into the returned Response object
        """
        pass

    @abc.abstractmethod
    def discard(self, n=-1, qid=-1, dehydration_hooks=None,
                hydration_hooks=None, **handlers):
        """ Appends a DISCARD message to the output queue.

        :param n: number of records to discard, default = -1 (ALL)
        :param qid: query ID to discard for, default = -1 (last query)
        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        :param hydration_hooks:
            Hooks to hydrate types (mapping from type (class) to
            dehydration function). Dehydration functions receive the value of
            type understood by packstream and are free to return anything.
        :param handlers: handler functions passed into the returned Response object
        """
        pass

    @abc.abstractmethod
    def pull(self, n=-1, qid=-1, dehydration_hooks=None, hydration_hooks=None,
             **handlers):
        """ Appends a PULL message to the output queue.

        :param n: number of records to pull, default = -1 (ALL)
        :param qid: query ID to pull for, default = -1 (last query)
        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        :param hydration_hooks:
            Hooks to hydrate types (mapping from type (class) to
            dehydration function). Dehydration functions receive the value of
            type understood by packstream and are free to return anything.
        :param handlers: handler functions passed into the returned Response object
        """
        pass

    @abc.abstractmethod
    def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
              db=None, imp_user=None, dehydration_hooks=None,
              hydration_hooks=None, **handlers):
        """ Appends a BEGIN message to the output queue.

        :param mode: access mode for routing - "READ" or "WRITE" (default)
        :param bookmarks: iterable of bookmark values after which this transaction should begin
        :param metadata: custom metadata dictionary to attach to the transaction
        :param timeout: timeout for transaction execution (seconds)
        :param db: name of the database against which to begin the transaction
            Requires Bolt 4.0+.
        :param imp_user: the user to impersonate
            Requires Bolt 4.4+
        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        :param hydration_hooks:
            Hooks to hydrate types (mapping from type (class) to
            dehydration function). Dehydration functions receive the value of
            type understood by packstream and are free to return anything.
        :param handlers: handler functions passed into the returned Response object
        :return: Response object
        """
        pass

    @abc.abstractmethod
    def commit(self, dehydration_hooks=None, hydration_hooks=None, **handlers):
        """ Appends a COMMIT message to the output queue.

        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        :param hydration_hooks:
            Hooks to hydrate types (mapping from type (class) to
            dehydration function). Dehydration functions receive the value of
            type understood by packstream and are free to return anything.
        """
        pass

    @abc.abstractmethod
    def rollback(self, dehydration_hooks=None, hydration_hooks=None, **handlers):
        """ Appends a ROLLBACK message to the output queue.

        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        :param hydration_hooks:
            Hooks to hydrate types (mapping from type (class) to
            dehydration function). Dehydration functions receive the value of
            type understood by packstream and are free to return anything."""
        pass

    @abc.abstractmethod
    def reset(self, dehydration_hooks=None, hydration_hooks=None):
        """ Appends a RESET message to the outgoing queue, sends it and consumes
        all remaining messages.

        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        :param hydration_hooks:
            Hooks to hydrate types (mapping from type (class) to
            dehydration function). Dehydration functions receive the value of
            type understood by packstream and are free to return anything.
        """
        pass

    @abc.abstractmethod
    def goodbye(self, dehydration_hooks=None, hydration_hooks=None):
        """Append a GOODBYE message to the outgoing queue.

        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        :param hydration_hooks:
            Hooks to hydrate types (mapping from type (class) to
            dehydration function). Dehydration functions receive the value of
            type understood by packstream and are free to return anything.
        """
        pass

    def new_hydration_scope(self):
        # Delegate to the per-connection hydration handler.
        return self.hydration_handler.new_hydration_scope()

    def _append(self, signature, fields=(), response=None,
                dehydration_hooks=None):
        """ Appends a message to the outgoing queue.

        :param signature: the signature of the message
        :param fields: the fields of the message as a tuple
        :param response: a response object to handle callbacks
        :param dehydration_hooks:
            Hooks to dehydrate types (dict from type (class) to dehydration
            function). Dehydration functions receive the value and returns an
            object of type understood by packstream.
        """
        self.outbox.append_message(signature, fields, dehydration_hooks)
        self.responses.append(response)

    def _send_all(self):
        # Flush the outbox; a successful flush counts as activity.
        if self.outbox.flush():
            self.idle_since = perf_counter()

    def send_all(self):
        """ Send all queued messages to the server.
        """
        # Guard against writing to a connection that is no longer usable.
        if self.closed():
            raise ServiceUnavailable(
                "Failed to write to closed connection {!r} ({!r})".format(
                    self.unresolved_address, self.server_info.address
                )
            )
        if self.defunct():
            raise ServiceUnavailable(
                "Failed to write to defunct connection {!r} ({!r})".format(
                    self.unresolved_address, self.server_info.address
                )
            )
        self._send_all()

    @abc.abstractmethod
    def _process_message(self, tag, fields):
        """ Receive at most one message from the server, if available.

        :return: 2-tuple of number of detail messages and number of summary
            messages fetched
        """
        pass

    def fetch_message(self):
        # Guard against reading from a connection that is no longer usable.
        if self._closed:
            raise ServiceUnavailable(
                "Failed to read from closed connection {!r} ({!r})".format(
                    self.unresolved_address, self.server_info.address
                )
            )
        if self._defunct:
            raise ServiceUnavailable(
                "Failed to read from defunct connection {!r} ({!r})".format(
                    self.unresolved_address, self.server_info.address
                )
            )
        # Nothing outstanding: nothing to fetch.
        if not self.responses:
            return 0, 0
        # Receive exactly one message
        tag, fields = self.inbox.pop(
            hydration_hooks=self.responses[0].hydration_hooks
        )
        res = self._process_message(tag, fields)
        self.idle_since = perf_counter()
        return res

    def fetch_all(self):
        """ Fetch all outstanding messages.

        :return: 2-tuple of number of detail messages and number of summary
            messages fetched
        """
        detail_count = summary_count = 0
        while self.responses:
            response = self.responses[0]
            while not response.complete:
                detail_delta, summary_delta = self.fetch_message()
                detail_count += detail_delta
                summary_count += summary_delta
        return detail_count, summary_count

    def _set_defunct_read(self, error=None, silent=False):
        # Mark the connection broken after a failed read.
        message = "Failed to read from defunct connection {!r} ({!r})".format(
            self.unresolved_address, self.server_info.address
        )
        self._set_defunct(message, error=error, silent=silent)

    def _set_defunct_write(self, error=None, silent=False):
        # Mark the connection broken after a failed write.
        message = "Failed to write data to connection {!r} ({!r})".format(
            self.unresolved_address, self.server_info.address
        )
        self._set_defunct(message, error=error, silent=silent)

    def _set_defunct(self, message, error=None, silent=False):
        from ._pool import BoltPool
        direct_driver = isinstance(self.pool, BoltPool)
        if error:
            log.debug("[#%04X] %r", self.socket.getsockname()[1], error)
        log.error(message)
        # We were attempting to receive data but the connection
        # has unexpectedly terminated. So, we need to close the
        # connection from the client side, and remove the address
        # from the connection pool.
        self._defunct = True
        if not self._closing:
            # If we fail while closing the connection, there is no need to
            # remove the connection from the pool, nor to try to close the
            # connection again.
            self.close()
            if self.pool:
                self.pool.deactivate(address=self.unresolved_address)
        # Iterate through the outstanding responses, and if any correspond
        # to COMMIT requests then raise an error to signal that we are
        # unable to confirm that the COMMIT completed successfully.
        if silent:
            return
        for response in self.responses:
            if isinstance(response, CommitResponse):
                if error:
                    raise IncompleteCommit(message) from error
                else:
                    raise IncompleteCommit(message)
        # Direct drivers surface the failure as ServiceUnavailable; routed
        # drivers use SessionExpired so the session can be retried elsewhere.
        if direct_driver:
            if error:
                raise ServiceUnavailable(message) from error
            else:
                raise ServiceUnavailable(message)
        else:
            if error:
                raise SessionExpired(message) from error
            else:
                raise SessionExpired(message)

    def stale(self):
        # Stale when explicitly marked, or when the connection has outlived
        # its configured maximum lifetime (negative lifetime disables this).
        return (self._stale
                or (0 <= self._max_connection_lifetime
                    <= perf_counter() - self._creation_timestamp))

    _stale = False

    def set_stale(self):
        self._stale = True

    def close(self):
        """Close the connection."""
        if self._closed or self._closing:
            return
        self._closing = True
        if not self._defunct:
            # Try a polite shutdown; ignore transport errors at this point.
            self.goodbye()
            try:
                self._send_all()
            except (OSError, BoltError, DriverError):
                pass
        log.debug("[#%04X] C: <CLOSE>", self.local_port)
        try:
            self.socket.close()
        except OSError:
            pass
        finally:
            self._closed = True

    def close_non_blocking(self):
        """Set the socket to non-blocking and close it.

        This will try to send the `GOODBYE` message (given the socket is not
        marked as defunct). However, should the write operation require
        blocking (e.g., a full network buffer), then the socket will be closed
        immediately (without `GOODBYE` message).
        """
        if self._closed or self._closing:
            return
        # Zero timeout makes any blocking write fail fast inside close().
        self.socket.settimeout(0)
        self.close()

    def closed(self):
        return self._closed

    def defunct(self):
        return self._defunct

    def is_idle_for(self, timeout):
        """Check if connection has been idle for at least the given timeout.

        :param timeout: timeout in seconds
        :type timeout: float

        :rtype: bool
        """
        return perf_counter() - self.idle_since > timeout
# NOTE(review): presumably gives BoltSocket access to the Bolt class (e.g.
# MAGIC_PREAMBLE / handshake helpers) without a circular import — confirm.
BoltSocket.Bolt = Bolt
|
neo4j/_sync/io/_bolt.py
|
codereval_python_data_7
|
This function is a decorator for transaction functions that allows extra control over how the transaction is carried out.
For example, a timeout may be applied::
from neo4j import unit_of_work
@unit_of_work(timeout=100)
def count_people_tx(tx):
result = tx.run("MATCH (a:Person) RETURN count(a) AS persons")
record = result.single()
return record["persons"]
:param metadata:
a dictionary with metadata.
Specified metadata will be attached to the executing transaction and visible in the output of ``dbms.listQueries`` and ``dbms.listTransactions`` procedures.
It will also get logged to the ``query.log``.
This functionality makes it easier to tag transactions and is equivalent to ``dbms.setTXMetaData`` procedure, see https://neo4j.com/docs/operations-manual/current/reference/procedures/ for procedure reference.
:type metadata: dict
:param timeout:
the transaction timeout in seconds.
Transactions that execute longer than the configured timeout will be terminated by the database.
This functionality allows limiting query/transaction execution time.
Specified timeout overrides the default timeout configured in the database using ``dbms.transaction.timeout`` setting.
Value should not represent a negative duration.
A zero duration will make the transaction execute indefinitely.
None will use the default timeout configured in the database.
:type timeout: float or :const:`None`
def unit_of_work(metadata=None, timeout=None):
    """This function is a decorator for transaction functions that allows extra control over how the transaction is carried out.

    For example, a timeout may be applied::

        from neo4j import unit_of_work

        @unit_of_work(timeout=100)
        def count_people_tx(tx):
            result = tx.run("MATCH (a:Person) RETURN count(a) AS persons")
            record = result.single()
            return record["persons"]

    :param metadata:
        a dictionary with metadata.
        Specified metadata will be attached to the executing transaction and visible in the output of ``dbms.listQueries`` and ``dbms.listTransactions`` procedures.
        It will also get logged to the ``query.log``.
        This functionality makes it easier to tag transactions and is equivalent to ``dbms.setTXMetaData`` procedure, see https://neo4j.com/docs/operations-manual/current/reference/procedures/ for procedure reference.
    :type metadata: dict
    :param timeout:
        the transaction timeout in seconds.
        Transactions that execute longer than the configured timeout will be terminated by the database.
        This functionality allows limiting query/transaction execution time.
        Specified timeout overrides the default timeout configured in the database using ``dbms.transaction.timeout`` setting.
        Value should not represent a negative duration.
        A zero duration will make the transaction execute indefinitely.
        None will use the default timeout configured in the database.
    :type timeout: float or :const:`None`
    """
    from functools import wraps

    def wrapper(f):
        # functools.wraps preserves the decorated function's identity
        # (__name__, __doc__, __module__, ...) so introspection and error
        # messages refer to the user's function, not this shim.
        @wraps(f)
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        # Attach the transaction configuration where the driver looks it up.
        wrapped.metadata = metadata
        wrapped.timeout = timeout
        return wrapped
    return wrapper
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Query:
    """A Cypher query bundled with per-transaction configuration.

    :param text: The query text.
    :type text: str
    :param metadata: metadata attached to the query.
    :type metadata: dict
    :param timeout: seconds.
    :type timeout: float or :const:`None`
    """

    def __init__(self, text, metadata=None, timeout=None):
        # Keep the raw values as plain attributes; consumers read them
        # directly.
        self.text, self.metadata, self.timeout = text, metadata, timeout

    def __str__(self):
        # A Query prints as its (stringified) query text.
        return str(self.text)
def unit_of_work(metadata=None, timeout=None):
    """This function is a decorator for transaction functions that allows extra control over how the transaction is carried out.

    For example, a timeout may be applied::

        from neo4j import unit_of_work

        @unit_of_work(timeout=100)
        def count_people_tx(tx):
            result = tx.run("MATCH (a:Person) RETURN count(a) AS persons")
            record = result.single()
            return record["persons"]

    :param metadata:
        a dictionary with metadata.
        Specified metadata will be attached to the executing transaction and visible in the output of ``dbms.listQueries`` and ``dbms.listTransactions`` procedures.
        It will also get logged to the ``query.log``.
        This functionality makes it easier to tag transactions and is equivalent to ``dbms.setTXMetaData`` procedure, see https://neo4j.com/docs/operations-manual/current/reference/procedures/ for procedure reference.
    :type metadata: dict
    :param timeout:
        the transaction timeout in seconds.
        Transactions that execute longer than the configured timeout will be terminated by the database.
        This functionality allows limiting query/transaction execution time.
        Specified timeout overrides the default timeout configured in the database using ``dbms.transaction.timeout`` setting.
        Value should not represent a negative duration.
        A zero duration will make the transaction execute indefinitely.
        None will use the default timeout configured in the database.
    :type timeout: float or :const:`None`
    """
    from functools import wraps

    def wrapper(f):
        # Preserve the wrapped function's metadata (__name__, __doc__, ...)
        # instead of exposing every transaction function as "wrapped".
        @wraps(f)
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        # Set after wraps() so these always reflect this decoration.
        wrapped.metadata = metadata
        wrapped.timeout = timeout
        return wrapped
    return wrapper
|
neo4j/work/query.py
|
codereval_python_data_8
|
Return the index of the given item.
:param key: a key
:return: index
:rtype: int
def index(self, key):
    """ Return the index of the given item.

    :param key: a key (field name) or an integer index
    :return: index
    :rtype: int
    :raise IndexError: for an out-of-range integer index
    :raise KeyError: for an unknown string key
    :raise TypeError: for any other key type
    """
    keys = self.__keys
    if isinstance(key, str):
        # Unknown names become KeyError rather than tuple's ValueError.
        if key not in keys:
            raise KeyError(key)
        return keys.index(key)
    if isinstance(key, int):
        # Integers are validated against the field count and passed through.
        if not 0 <= key < len(keys):
            raise IndexError(key)
        return key
    raise TypeError(key)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from collections.abc import (
Mapping,
Sequence,
Set,
)
from functools import reduce
from operator import xor as xor_operator
from ._codec.hydration import BrokenHydrationObject
from ._conf import iter_items
from ._meta import deprecated
from .exceptions import BrokenRecordError
from .graph import (
Node,
Path,
Relationship,
)
class Record(tuple, Mapping):
    """ A :class:`.Record` is an immutable ordered collection of key-value
    pairs. It is generally closer to a :py:class:`namedtuple` than to a
    :py:class:`OrderedDict` in as much as iteration of the collection will
    yield values rather than keys.
    """

    # Tuple of field names, parallel to the tuple's stored values.
    # (Name-mangled to _Record__keys on instances.)
    __keys = None

    def __new__(cls, iterable=()):
        # Split the (key, value) pairs: values become the tuple contents,
        # keys are kept alongside on the instance.
        keys = []
        values = []
        for key, value in iter_items(iterable):
            keys.append(key)
            values.append(value)
        inst = tuple.__new__(cls, values)
        inst.__keys = tuple(keys)
        return inst

    def _broken_record_error(self, index):
        # Error used when a stored value failed hydration on receipt.
        return BrokenRecordError(
            f"Record contains broken data at {index} ('{self.__keys[index]}')"
        )

    def _super_getitem_single(self, index):
        # Positional access that surfaces hydration failures as
        # BrokenRecordError (chained to the original hydration error).
        value = super().__getitem__(index)
        if isinstance(value, BrokenHydrationObject):
            raise self._broken_record_error(index) from value.error
        return value

    def __repr__(self):
        # Uses the raw tuple iterator, so broken values are shown rather
        # than raising here.
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join("%s=%r" % (field, value)
                     for field, value in zip(self.__keys, super().__iter__()))
        )

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        """ In order to be flexible regarding comparison, the equality rules
        for a record permit comparison with any other Sequence or Mapping.

        :param other:
        :return:
        """
        compare_as_sequence = isinstance(other, Sequence)
        compare_as_mapping = isinstance(other, Mapping)
        if compare_as_sequence and compare_as_mapping:
            return list(self) == list(other) and dict(self) == dict(other)
        elif compare_as_sequence:
            return list(self) == list(other)
        elif compare_as_mapping:
            return dict(self) == dict(other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # XOR-fold the hashes of all (key, value) pairs.
        return reduce(xor_operator, map(hash, self.items()))

    def __iter__(self):
        # Yields values (not keys); raises on the first broken value.
        for i, v in enumerate(super().__iter__()):
            if isinstance(v, BrokenHydrationObject):
                raise self._broken_record_error(i) from v.error
            yield v

    def __getitem__(self, key):
        if isinstance(key, slice):
            # Slicing yields a new Record with the matching keys and values.
            keys = self.__keys[key]
            values = super().__getitem__(key)
            return self.__class__(zip(keys, values))
        try:
            index = self.index(key)
        except IndexError:
            # Out-of-range integer indexes yield None here; unknown string
            # keys still raise KeyError (propagated from self.index).
            return None
        else:
            return self._super_getitem_single(index)

    # TODO: 6.0 - remove
    @deprecated("This method is deprecated and will be removed in the future.")
    def __getslice__(self, start, stop):
        key = slice(start, stop)
        keys = self.__keys[key]
        values = tuple(self)[key]
        return self.__class__(zip(keys, values))

    def get(self, key, default=None):
        """ Obtain a value from the record by key, returning a default
        value if the key does not exist.

        :param key: a key
        :param default: default value
        :return: a value
        """
        # Keys are matched by their string form here.
        try:
            index = self.__keys.index(str(key))
        except ValueError:
            return default
        if 0 <= index < len(self):
            return self._super_getitem_single(index)
        else:
            return default

    def index(self, key):
        """ Return the index of the given item.

        :param key: a key
        :return: index
        :rtype: int
        """
        if isinstance(key, int):
            if 0 <= key < len(self.__keys):
                return key
            raise IndexError(key)
        elif isinstance(key, str):
            try:
                return self.__keys.index(key)
            except ValueError:
                raise KeyError(key)
        else:
            raise TypeError(key)

    def value(self, key=0, default=None):
        """ Obtain a single value from the record by index or key. If no
        index or key is specified, the first value is returned. If the
        specified item does not exist, the default value is returned.

        :param key: an index or key
        :param default: default value
        :return: a single value
        """
        try:
            index = self.index(key)
        except (IndexError, KeyError):
            return default
        else:
            return self[index]

    def keys(self):
        """ Return the keys of the record.

        :return: list of key names
        """
        return list(self.__keys)

    def values(self, *keys):
        """ Return the values of the record, optionally filtering to
        include only certain values by index or key.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: list of values
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    # Unknown keys are represented as None.
                    d.append(None)
                else:
                    d.append(self[i])
            return d
        return list(self)

    def items(self, *keys):
        """ Return the fields of the record as a list of key and value tuples

        :return: a list of value tuples
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    # Unknown keys are included with a None value.
                    d.append((key, None))
                else:
                    d.append((self.__keys[i], self[i]))
            return d
        return list((self.__keys[i], self._super_getitem_single(i))
                    for i in range(len(self)))

    def data(self, *keys):
        """ Return the keys and values of this record as a dictionary,
        optionally including only certain values by index or key. Keys
        provided in the items that are not in the record will be
        inserted with a value of :const:`None`; indexes provided
        that are out of bounds will trigger an :exc:`IndexError`.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: dictionary of values, keyed by field name
        :raises: :exc:`IndexError` if an out-of-bounds index is specified
        """
        return RecordExporter().transform(dict(self.items(*keys)))
class DataTransformer(metaclass=ABCMeta):
    """ Abstract base class for transforming data from one form into
    another.
    """
    # Subclasses implement a single recursive entry point.
    @abstractmethod
    def transform(self, x):
        """ Transform a value, or collection of values.

        :param x: input value
        :return: output value
        """
class RecordExporter(DataTransformer):
    """ Transformer class used by the :meth:`.Record.data` method.
    """

    def transform(self, x):
        # Graph entities are recursively flattened into plain containers.
        if isinstance(x, Node):
            return self.transform(dict(x))
        if isinstance(x, Relationship):
            return (self.transform(dict(x.start_node)),
                    x.__class__.__name__,
                    self.transform(dict(x.end_node)))
        if isinstance(x, Path):
            flattened = [self.transform(x.start_node)]
            for hop, rel in enumerate(x.relationships):
                flattened.append(self.transform(rel.__class__.__name__))
                flattened.append(self.transform(x.nodes[hop + 1]))
            return flattened
        # NOTE: str is itself a Sequence, so it must be checked first.
        if isinstance(x, str):
            return x
        if isinstance(x, (Sequence, Set)):
            # Rebuild the same container type with transformed elements.
            return type(x)(map(self.transform, x))
        if isinstance(x, Mapping):
            return type(x)((k, self.transform(v)) for k, v in x.items())
        return x
class RecordTableRowExporter(DataTransformer):
    """Transformer class used by the :meth:`.Result.to_df` method."""

    def transform(self, x):
        # The top level must be a mapping (one table row).
        assert isinstance(x, Mapping)
        return type(x)(
            pair
            for key, item in x.items()
            for pair in self._transform(
                item, prefix=key.replace("\\", "\\\\").replace(".", "\\.")
            ).items()
        )

    def _transform(self, x, prefix):
        # Flatten nested graph/collection values into dotted column names.
        if isinstance(x, Node):
            row = {
                "%s().element_id" % prefix: x.element_id,
                "%s().labels" % prefix: x.labels,
            }
            for key, value in x.items():
                row["%s().prop.%s" % (prefix, key)] = value
            return row
        if isinstance(x, Relationship):
            row = {
                "%s->.element_id" % prefix: x.element_id,
                "%s->.start.element_id" % prefix: x.start_node.element_id,
                "%s->.end.element_id" % prefix: x.end_node.element_id,
                "%s->.type" % prefix: x.__class__.__name__,
            }
            for key, value in x.items():
                row["%s->.prop.%s" % (prefix, key)] = value
            return row
        if isinstance(x, (Path, str)):
            return {prefix: x}
        if isinstance(x, Sequence):
            row = {}
            for position, element in enumerate(x):
                row.update(self._transform(
                    element, prefix="%s[].%i" % (prefix, position)
                ))
            return row
        if isinstance(x, Mapping):
            return type(x)(
                pair
                for key, element in x.items()
                for pair in self._transform(
                    element,
                    prefix="%s{}.%s" % (prefix, key.replace("\\", "\\\\")
                                        .replace(".", "\\."))
                ).items()
            )
        return {prefix: x}
|
neo4j/_data.py
|
codereval_python_data_9
|
Return the values of the record, optionally filtering to
include only certain values by index or key.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: list of values
:rtype: list
def values(self, *keys):
    """ Return the values of the record, optionally filtering to
    include only certain values by index or key.

    :param keys: indexes or keys of the items to include; if none
        are provided, all values will be included
    :return: list of values
    :rtype: list
    """
    if not keys:
        return list(self)
    selected = []
    for lookup in keys:
        try:
            position = self.index(lookup)
        except KeyError:
            # Unknown keys contribute None to keep positions aligned.
            selected.append(None)
        else:
            selected.append(self[position])
    return selected
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from collections.abc import (
Mapping,
Sequence,
Set,
)
from functools import reduce
from operator import xor as xor_operator
from ._codec.hydration import BrokenHydrationObject
from ._conf import iter_items
from ._meta import deprecated
from .exceptions import BrokenRecordError
from .graph import (
Node,
Path,
Relationship,
)
class Record(tuple, Mapping):
    """ A :class:`.Record` is an immutable ordered collection of key-value
    pairs. It is generally closer to a :py:class:`namedtuple` than to a
    :py:class:`OrderedDict` in as much as iteration of the collection will
    yield values rather than keys.
    """

    # Tuple of field names, parallel to the positional values stored in
    # the tuple itself.
    __keys = None

    def __new__(cls, iterable=()):
        # Split the (key, value) pairs: values become the tuple content,
        # keys are kept in a parallel tuple.
        keys = []
        values = []
        for key, value in iter_items(iterable):
            keys.append(key)
            values.append(value)
        inst = tuple.__new__(cls, values)
        inst.__keys = tuple(keys)
        return inst

    def _broken_record_error(self, index):
        # Build (but do not raise) the error reported when the field at
        # ``index`` failed hydration.
        return BrokenRecordError(
            f"Record contains broken data at {index} ('{self.__keys[index]}')"
        )

    def _super_getitem_single(self, index):
        # Positional access that surfaces hydration failures as
        # BrokenRecordError instead of handing back the broken placeholder.
        value = super().__getitem__(index)
        if isinstance(value, BrokenHydrationObject):
            raise self._broken_record_error(index) from value.error
        return value

    def __repr__(self):
        # Render as <Record key1=value1 key2=value2 ...> without triggering
        # broken-value checks (uses the raw tuple iterator).
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join("%s=%r" % (field, value)
                     for field, value in zip(self.__keys, super().__iter__()))
        )

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        """ In order to be flexible regarding comparison, the equality rules
        for a record permit comparison with any other Sequence or Mapping.

        :param other:
        :return:
        """
        compare_as_sequence = isinstance(other, Sequence)
        compare_as_mapping = isinstance(other, Mapping)
        if compare_as_sequence and compare_as_mapping:
            return list(self) == list(other) and dict(self) == dict(other)
        elif compare_as_sequence:
            return list(self) == list(other)
        elif compare_as_mapping:
            return dict(self) == dict(other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # XOR of per-item hashes; hashable only if all keys and values are.
        return reduce(xor_operator, map(hash, self.items()))

    def __iter__(self):
        # Iteration yields values (like a namedtuple), raising if any value
        # failed hydration.
        for i, v in enumerate(super().__iter__()):
            if isinstance(v, BrokenHydrationObject):
                raise self._broken_record_error(i) from v.error
            yield v

    def __getitem__(self, key):
        # Supports access by slice, integer index, or string key.
        if isinstance(key, slice):
            keys = self.__keys[key]
            values = super().__getitem__(key)
            return self.__class__(zip(keys, values))
        try:
            index = self.index(key)
        except IndexError:
            # Out-of-range integer indexes yield None rather than raising.
            return None
        else:
            return self._super_getitem_single(index)

    # TODO: 6.0 - remove
    @deprecated("This method is deprecated and will be removed in the future.")
    def __getslice__(self, start, stop):
        key = slice(start, stop)
        keys = self.__keys[key]
        values = tuple(self)[key]
        return self.__class__(zip(keys, values))

    def get(self, key, default=None):
        """ Obtain a value from the record by key, returning a default
        value if the key does not exist.

        :param key: a key
        :param default: default value
        :return: a value
        """
        try:
            index = self.__keys.index(str(key))
        except ValueError:
            return default
        if 0 <= index < len(self):
            return self._super_getitem_single(index)
        else:
            return default

    def index(self, key):
        """ Return the index of the given item.

        :param key: a key
        :return: index
        :rtype: int
        """
        if isinstance(key, int):
            if 0 <= key < len(self.__keys):
                return key
            raise IndexError(key)
        elif isinstance(key, str):
            try:
                return self.__keys.index(key)
            except ValueError:
                raise KeyError(key)
        else:
            raise TypeError(key)

    def value(self, key=0, default=None):
        """ Obtain a single value from the record by index or key. If no
        index or key is specified, the first value is returned. If the
        specified item does not exist, the default value is returned.

        :param key: an index or key
        :param default: default value
        :return: a single value
        """
        try:
            index = self.index(key)
        except (IndexError, KeyError):
            return default
        else:
            return self[index]

    def keys(self):
        """ Return the keys of the record.

        :return: list of key names
        """
        return list(self.__keys)

    def values(self, *keys):
        """ Return the values of the record, optionally filtering to
        include only certain values by index or key.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: list of values
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    # Unknown keys contribute None.
                    d.append(None)
                else:
                    d.append(self[i])
            return d
        return list(self)

    def items(self, *keys):
        """ Return the fields of the record as a list of key and value tuples

        :return: a list of value tuples
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    d.append((key, None))
                else:
                    d.append((self.__keys[i], self[i]))
            return d
        return list((self.__keys[i], self._super_getitem_single(i))
                    for i in range(len(self)))

    def data(self, *keys):
        """ Return the keys and values of this record as a dictionary,
        optionally including only certain values by index or key. Keys
        provided in the items that are not in the record will be
        inserted with a value of :const:`None`; indexes provided
        that are out of bounds will trigger an :exc:`IndexError`.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: dictionary of values, keyed by field name
        :raises: :exc:`IndexError` if an out-of-bounds index is specified
        """
        return RecordExporter().transform(dict(self.items(*keys)))
class DataTransformer(metaclass=ABCMeta):
    """ Abstract base class for transforming data from one form into
    another.
    """
    # Subclasses implement a single recursive entry point.
    @abstractmethod
    def transform(self, x):
        """ Transform a value, or collection of values.

        :param x: input value
        :return: output value
        """
class RecordExporter(DataTransformer):
    """ Transformer class used by the :meth:`.Record.data` method.
    """

    def transform(self, x):
        # Graph entities are recursively flattened into plain containers.
        if isinstance(x, Node):
            return self.transform(dict(x))
        if isinstance(x, Relationship):
            return (self.transform(dict(x.start_node)),
                    x.__class__.__name__,
                    self.transform(dict(x.end_node)))
        if isinstance(x, Path):
            flattened = [self.transform(x.start_node)]
            for hop, rel in enumerate(x.relationships):
                flattened.append(self.transform(rel.__class__.__name__))
                flattened.append(self.transform(x.nodes[hop + 1]))
            return flattened
        # NOTE: str is itself a Sequence, so it must be checked first.
        if isinstance(x, str):
            return x
        if isinstance(x, (Sequence, Set)):
            # Rebuild the same container type with transformed elements.
            return type(x)(map(self.transform, x))
        if isinstance(x, Mapping):
            return type(x)((k, self.transform(v)) for k, v in x.items())
        return x
class RecordTableRowExporter(DataTransformer):
    """Transformer class used by the :meth:`.Result.to_df` method."""

    def transform(self, x):
        # The top level must be a mapping (one table row).
        assert isinstance(x, Mapping)
        return type(x)(
            pair
            for key, item in x.items()
            for pair in self._transform(
                item, prefix=key.replace("\\", "\\\\").replace(".", "\\.")
            ).items()
        )

    def _transform(self, x, prefix):
        # Flatten nested graph/collection values into dotted column names.
        if isinstance(x, Node):
            row = {
                "%s().element_id" % prefix: x.element_id,
                "%s().labels" % prefix: x.labels,
            }
            for key, value in x.items():
                row["%s().prop.%s" % (prefix, key)] = value
            return row
        if isinstance(x, Relationship):
            row = {
                "%s->.element_id" % prefix: x.element_id,
                "%s->.start.element_id" % prefix: x.start_node.element_id,
                "%s->.end.element_id" % prefix: x.end_node.element_id,
                "%s->.type" % prefix: x.__class__.__name__,
            }
            for key, value in x.items():
                row["%s->.prop.%s" % (prefix, key)] = value
            return row
        if isinstance(x, (Path, str)):
            return {prefix: x}
        if isinstance(x, Sequence):
            row = {}
            for position, element in enumerate(x):
                row.update(self._transform(
                    element, prefix="%s[].%i" % (prefix, position)
                ))
            return row
        if isinstance(x, Mapping):
            return type(x)(
                pair
                for key, element in x.items()
                for pair in self._transform(
                    element,
                    prefix="%s{}.%s" % (prefix, key.replace("\\", "\\\\")
                                        .replace(".", "\\."))
                ).items()
            )
        return {prefix: x}
|
neo4j/_data.py
|
codereval_python_data_10
|
Return the keys and values of this record as a dictionary,
optionally including only certain values by index or key. Keys
provided in the items that are not in the record will be
inserted with a value of :const:`None`; indexes provided
that are out of bounds will trigger an :exc:`IndexError`.
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:return: dictionary of values, keyed by field name
:raises: :exc:`IndexError` if an out-of-bounds index is specified
def data(self, *keys):
    """ Return the keys and values of this record as a dictionary,
    optionally including only certain values by index or key. Keys
    provided in the items that are not in the record will be
    inserted with a value of :const:`None`; indexes provided
    that are out of bounds will trigger an :exc:`IndexError`.

    :param keys: indexes or keys of the items to include; if none
        are provided, all values will be included
    :return: dictionary of values, keyed by field name
    :raises: :exc:`IndexError` if an out-of-bounds index is specified
    """
    selection = dict(self.items(*keys))
    return RecordExporter().transform(selection)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from collections.abc import (
Mapping,
Sequence,
Set,
)
from functools import reduce
from operator import xor as xor_operator
from ._codec.hydration import BrokenHydrationObject
from ._conf import iter_items
from ._meta import deprecated
from .exceptions import BrokenRecordError
from .graph import (
Node,
Path,
Relationship,
)
class Record(tuple, Mapping):
    """ A :class:`.Record` is an immutable ordered collection of key-value
    pairs. It is generally closer to a :py:class:`namedtuple` than to a
    :py:class:`OrderedDict` in as much as iteration of the collection will
    yield values rather than keys.
    """

    # Tuple of field names, parallel to the positional values stored in
    # the tuple itself.
    __keys = None

    def __new__(cls, iterable=()):
        # Split the (key, value) pairs: values become the tuple content,
        # keys are kept in a parallel tuple.
        keys = []
        values = []
        for key, value in iter_items(iterable):
            keys.append(key)
            values.append(value)
        inst = tuple.__new__(cls, values)
        inst.__keys = tuple(keys)
        return inst

    def _broken_record_error(self, index):
        # Build (but do not raise) the error reported when the field at
        # ``index`` failed hydration.
        return BrokenRecordError(
            f"Record contains broken data at {index} ('{self.__keys[index]}')"
        )

    def _super_getitem_single(self, index):
        # Positional access that surfaces hydration failures as
        # BrokenRecordError instead of handing back the broken placeholder.
        value = super().__getitem__(index)
        if isinstance(value, BrokenHydrationObject):
            raise self._broken_record_error(index) from value.error
        return value

    def __repr__(self):
        # Render as <Record key1=value1 key2=value2 ...> without triggering
        # broken-value checks (uses the raw tuple iterator).
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join("%s=%r" % (field, value)
                     for field, value in zip(self.__keys, super().__iter__()))
        )

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        """ In order to be flexible regarding comparison, the equality rules
        for a record permit comparison with any other Sequence or Mapping.

        :param other:
        :return:
        """
        compare_as_sequence = isinstance(other, Sequence)
        compare_as_mapping = isinstance(other, Mapping)
        if compare_as_sequence and compare_as_mapping:
            return list(self) == list(other) and dict(self) == dict(other)
        elif compare_as_sequence:
            return list(self) == list(other)
        elif compare_as_mapping:
            return dict(self) == dict(other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # XOR of per-item hashes; hashable only if all keys and values are.
        return reduce(xor_operator, map(hash, self.items()))

    def __iter__(self):
        # Iteration yields values (like a namedtuple), raising if any value
        # failed hydration.
        for i, v in enumerate(super().__iter__()):
            if isinstance(v, BrokenHydrationObject):
                raise self._broken_record_error(i) from v.error
            yield v

    def __getitem__(self, key):
        # Supports access by slice, integer index, or string key.
        if isinstance(key, slice):
            keys = self.__keys[key]
            values = super().__getitem__(key)
            return self.__class__(zip(keys, values))
        try:
            index = self.index(key)
        except IndexError:
            # Out-of-range integer indexes yield None rather than raising.
            return None
        else:
            return self._super_getitem_single(index)

    # TODO: 6.0 - remove
    @deprecated("This method is deprecated and will be removed in the future.")
    def __getslice__(self, start, stop):
        key = slice(start, stop)
        keys = self.__keys[key]
        values = tuple(self)[key]
        return self.__class__(zip(keys, values))

    def get(self, key, default=None):
        """ Obtain a value from the record by key, returning a default
        value if the key does not exist.

        :param key: a key
        :param default: default value
        :return: a value
        """
        try:
            index = self.__keys.index(str(key))
        except ValueError:
            return default
        if 0 <= index < len(self):
            return self._super_getitem_single(index)
        else:
            return default

    def index(self, key):
        """ Return the index of the given item.

        :param key: a key
        :return: index
        :rtype: int
        """
        if isinstance(key, int):
            if 0 <= key < len(self.__keys):
                return key
            raise IndexError(key)
        elif isinstance(key, str):
            try:
                return self.__keys.index(key)
            except ValueError:
                raise KeyError(key)
        else:
            raise TypeError(key)

    def value(self, key=0, default=None):
        """ Obtain a single value from the record by index or key. If no
        index or key is specified, the first value is returned. If the
        specified item does not exist, the default value is returned.

        :param key: an index or key
        :param default: default value
        :return: a single value
        """
        try:
            index = self.index(key)
        except (IndexError, KeyError):
            return default
        else:
            return self[index]

    def keys(self):
        """ Return the keys of the record.

        :return: list of key names
        """
        return list(self.__keys)

    def values(self, *keys):
        """ Return the values of the record, optionally filtering to
        include only certain values by index or key.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: list of values
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    # Unknown keys contribute None.
                    d.append(None)
                else:
                    d.append(self[i])
            return d
        return list(self)

    def items(self, *keys):
        """ Return the fields of the record as a list of key and value tuples

        :return: a list of value tuples
        :rtype: list
        """
        if keys:
            d = []
            for key in keys:
                try:
                    i = self.index(key)
                except KeyError:
                    d.append((key, None))
                else:
                    d.append((self.__keys[i], self[i]))
            return d
        return list((self.__keys[i], self._super_getitem_single(i))
                    for i in range(len(self)))

    def data(self, *keys):
        """ Return the keys and values of this record as a dictionary,
        optionally including only certain values by index or key. Keys
        provided in the items that are not in the record will be
        inserted with a value of :const:`None`; indexes provided
        that are out of bounds will trigger an :exc:`IndexError`.

        :param keys: indexes or keys of the items to include; if none
            are provided, all values will be included
        :return: dictionary of values, keyed by field name
        :raises: :exc:`IndexError` if an out-of-bounds index is specified
        """
        return RecordExporter().transform(dict(self.items(*keys)))
class DataTransformer(metaclass=ABCMeta):
    """ Abstract base class for transforming data from one form into
    another.
    """
    # Subclasses implement a single recursive entry point.
    @abstractmethod
    def transform(self, x):
        """ Transform a value, or collection of values.

        :param x: input value
        :return: output value
        """
class RecordExporter(DataTransformer):
    """ Transformer class used by the :meth:`.Record.data` method.
    """

    def transform(self, x):
        # Graph entities are recursively flattened into plain containers.
        if isinstance(x, Node):
            return self.transform(dict(x))
        if isinstance(x, Relationship):
            return (self.transform(dict(x.start_node)),
                    x.__class__.__name__,
                    self.transform(dict(x.end_node)))
        if isinstance(x, Path):
            flattened = [self.transform(x.start_node)]
            for hop, rel in enumerate(x.relationships):
                flattened.append(self.transform(rel.__class__.__name__))
                flattened.append(self.transform(x.nodes[hop + 1]))
            return flattened
        # NOTE: str is itself a Sequence, so it must be checked first.
        if isinstance(x, str):
            return x
        if isinstance(x, (Sequence, Set)):
            # Rebuild the same container type with transformed elements.
            return type(x)(map(self.transform, x))
        if isinstance(x, Mapping):
            return type(x)((k, self.transform(v)) for k, v in x.items())
        return x
class RecordTableRowExporter(DataTransformer):
    """Transformer class used by the :meth:`.Result.to_df` method."""

    def transform(self, x):
        # The top level must be a mapping (one table row).
        assert isinstance(x, Mapping)
        return type(x)(
            pair
            for key, item in x.items()
            for pair in self._transform(
                item, prefix=key.replace("\\", "\\\\").replace(".", "\\.")
            ).items()
        )

    def _transform(self, x, prefix):
        # Flatten nested graph/collection values into dotted column names.
        if isinstance(x, Node):
            row = {
                "%s().element_id" % prefix: x.element_id,
                "%s().labels" % prefix: x.labels,
            }
            for key, value in x.items():
                row["%s().prop.%s" % (prefix, key)] = value
            return row
        if isinstance(x, Relationship):
            row = {
                "%s->.element_id" % prefix: x.element_id,
                "%s->.start.element_id" % prefix: x.start_node.element_id,
                "%s->.end.element_id" % prefix: x.end_node.element_id,
                "%s->.type" % prefix: x.__class__.__name__,
            }
            for key, value in x.items():
                row["%s->.prop.%s" % (prefix, key)] = value
            return row
        if isinstance(x, (Path, str)):
            return {prefix: x}
        if isinstance(x, Sequence):
            row = {}
            for position, element in enumerate(x):
                row.update(self._transform(
                    element, prefix="%s[].%i" % (prefix, position)
                ))
            return row
        if isinstance(x, Mapping):
            return type(x)(
                pair
                for key, element in x.items()
                for pair in self._transform(
                    element,
                    prefix="%s{}.%s" % (prefix, key.replace("\\", "\\\\")
                                        .replace(".", "\\."))
                ).items()
            )
        return {prefix: x}
|
neo4j/_data.py
|
codereval_python_data_11
|
Remove the last two bytes of data, returning them as a big-endian
16-bit unsigned integer.
def pop_u16(self):
    """ Remove the last two bytes of data, returning them as a big-endian
    16-bit unsigned integer.
    """
    if self.used < 2:
        # Not enough data buffered.
        return -1
    high = self.data[self.used - 2]
    low = self.data[self.used - 1]
    self.used -= 2
    return (high << 8) | low
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from codecs import decode
from contextlib import contextmanager
from struct import (
pack as struct_pack,
unpack as struct_unpack,
)
from .._common import Structure
# Pre-computed lookup tables for the common 1- and 2-byte unsigned integer
# encodings, so the hot packing path avoids per-value struct calls.
PACKED_UINT_8 = [struct_pack(">B", value) for value in range(0x100)]
PACKED_UINT_16 = [struct_pack(">H", value) for value in range(0x10000)]
# Reverse tables for unpacking.
UNPACKED_UINT_8 = {bytes(bytearray([x])): x for x in range(0x100)}
UNPACKED_UINT_16 = {struct_pack(">H", x): x for x in range(0x10000)}
# Single-byte markers that decode directly to a Python value:
# NULL, FALSE, TRUE, then TINY_INT positive (0x00-0x7F) and negative
# (0xF0-0xFF) ranges.
UNPACKED_MARKERS = {b"\xC0": None, b"\xC2": False, b"\xC3": True}
UNPACKED_MARKERS.update({bytes(bytearray([z])): z for z in range(0x00, 0x80)})
UNPACKED_MARKERS.update({bytes(bytearray([z + 256])): z for z in range(-0x10, 0x00)})
# Bounds of the 64-bit signed integer range supported by PackStream.
INT64_MIN = -(2 ** 63)
INT64_MAX = 2 ** 63
class Packer:
    """ Serializer for PackStream v1 values.

    Packed bytes are written to *stream* (any object exposing ``write``).
    """

    def __init__(self, stream):
        self.stream = stream
        # Bind once; pack() calls it in tight loops.
        self._write = self.stream.write

    def pack_raw(self, data):
        """ Write raw bytes to the stream without any PackStream framing. """
        self._write(data)

    def pack(self, value, dehydration_hooks=None):
        """ Pack a single value onto the stream.

        :param value: the value to serialize
        :param dehydration_hooks: optional mapping from type to a function
            that converts a value of that type into a packable value
        :raises OverflowError: if an integer is out of the 64-bit range
        :raises TypeError: if a map key is not a string
        :raises ValueError: if the value's type is not supported
        """
        write = self._write
        # None
        if value is None:
            write(b"\xC0")  # NULL
        # Boolean
        elif value is True:
            write(b"\xC3")
        elif value is False:
            write(b"\xC2")
        # Float (only double precision is supported)
        elif isinstance(value, float):
            write(b"\xC1")
            write(struct_pack(">d", value))
        # Integer (smallest encoding that fits)
        elif isinstance(value, int):
            if -0x10 <= value < 0x80:
                write(PACKED_UINT_8[value % 0x100])  # TINY_INT
            elif -0x80 <= value < -0x10:
                write(b"\xC8")  # INT_8
                write(PACKED_UINT_8[value % 0x100])
            elif -0x8000 <= value < 0x8000:
                write(b"\xC9")  # INT_16
                write(PACKED_UINT_16[value % 0x10000])
            elif -0x80000000 <= value < 0x80000000:
                write(b"\xCA")  # INT_32
                write(struct_pack(">i", value))
            elif INT64_MIN <= value < INT64_MAX:
                write(b"\xCB")  # INT_64
                write(struct_pack(">q", value))
            else:
                raise OverflowError("Integer %s out of range" % value)
        # String
        elif isinstance(value, str):
            encoded = value.encode("utf-8")
            self.pack_string_header(len(encoded))
            self.pack_raw(encoded)
        # Bytes
        elif isinstance(value, (bytes, bytearray)):
            self.pack_bytes_header(len(value))
            self.pack_raw(value)
        # List
        elif isinstance(value, list):
            self.pack_list_header(len(value))
            for item in value:
                self.pack(item, dehydration_hooks=dehydration_hooks)
        # Map
        elif isinstance(value, dict):
            self.pack_map_header(len(value))
            for key, item in value.items():
                if not isinstance(key, str):
                    raise TypeError(
                        "Map keys must be strings, not {}".format(type(key))
                    )
                self.pack(key, dehydration_hooks=dehydration_hooks)
                self.pack(item, dehydration_hooks=dehydration_hooks)
        # Structure
        elif isinstance(value, Structure):
            # BUG FIX: propagate dehydration_hooks so that values nested
            # inside structure fields are dehydrated as well.
            self.pack_struct(value.tag, value.fields,
                             dehydration_hooks=dehydration_hooks)
        # Other
        elif dehydration_hooks and type(value) in dehydration_hooks:
            # BUG FIX: propagate dehydration_hooks so that values nested
            # inside the dehydrated representation are also dehydrated.
            self.pack(dehydration_hooks[type(value)](value),
                      dehydration_hooks=dehydration_hooks)
        else:
            raise ValueError("Values of type %s are not supported" % type(value))

    def pack_bytes_header(self, size):
        """ Write the marker and length prefix for a byte array of *size*
        bytes.

        :raises OverflowError: if *size* does not fit in 32 bits
        """
        write = self._write
        if size < 0x100:
            write(b"\xCC")  # BYTES_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xCD")  # BYTES_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xCE")  # BYTES_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("Bytes header size out of range")

    def pack_string_header(self, size):
        """ Write the marker and length prefix for a UTF-8 string of *size*
        encoded bytes.

        :raises OverflowError: if *size* does not fit in 32 bits
        """
        write = self._write
        if size <= 0x0F:
            write(bytes((0x80 | size,)))  # TINY_STRING
        elif size < 0x100:
            write(b"\xD0")  # STRING_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xD1")  # STRING_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xD2")  # STRING_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("String header size out of range")

    def pack_list_header(self, size):
        """ Write the marker and length prefix for a list of *size* items.

        :raises OverflowError: if *size* does not fit in 32 bits
        """
        write = self._write
        if size <= 0x0F:
            write(bytes((0x90 | size,)))  # TINY_LIST
        elif size < 0x100:
            write(b"\xD4")  # LIST_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xD5")  # LIST_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xD6")  # LIST_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("List header size out of range")

    def pack_map_header(self, size):
        """ Write the marker and length prefix for a map of *size* entries.

        :raises OverflowError: if *size* does not fit in 32 bits
        """
        write = self._write
        if size <= 0x0F:
            write(bytes((0xA0 | size,)))  # TINY_MAP
        elif size < 0x100:
            write(b"\xD8")  # MAP_8
            write(PACKED_UINT_8[size])
        elif size < 0x10000:
            write(b"\xD9")  # MAP_16
            write(PACKED_UINT_16[size])
        elif size < 0x100000000:
            write(b"\xDA")  # MAP_32
            write(struct_pack(">I", size))
        else:
            raise OverflowError("Map header size out of range")

    def pack_struct(self, signature, fields, dehydration_hooks=None):
        """ Write a PackStream structure with the given one-byte *signature*
        and *fields*.

        :raises ValueError: if the signature is not a single byte
        :raises OverflowError: if there are more than 15 fields
        """
        if len(signature) != 1 or not isinstance(signature, bytes):
            raise ValueError("Structure signature must be a single byte value")
        write = self._write
        size = len(fields)
        if size <= 0x0F:
            write(bytes((0xB0 | size,)))  # TINY_STRUCT
        else:
            raise OverflowError("Structure size out of range")
        write(signature)
        for field in fields:
            self.pack(field, dehydration_hooks=dehydration_hooks)

    @staticmethod
    def new_packable_buffer():
        """ Return a fresh :class:`PackableBuffer` for in-memory packing. """
        return PackableBuffer()
class PackableBuffer:
    """ Growable byte buffer that a :class:`Packer` can write into. """

    def __init__(self):
        self.data = bytearray()
        # Expose the bytearray methods directly so the packer avoids an
        # extra method-call indirection on every write.
        self.write = self.data.extend
        self.clear = self.data.clear
        self._tmp_buffering = 0

    @contextmanager
    def tmp_buffer(self):
        """ Context manager that discards everything written inside it if
        an exception escapes, then re-raises that exception.
        """
        self._tmp_buffering += 1
        rollback_point = len(self.data)
        try:
            yield
        except Exception:
            # Roll back to the state at entry.
            del self.data[rollback_point:]
            raise
        finally:
            self._tmp_buffering -= 1

    def is_tmp_buffering(self):
        """ Return True while at least one tmp_buffer context is active. """
        return self._tmp_buffering != 0
class Unpacker:
def __init__(self, unpackable):
self.unpackable = unpackable
def reset(self):
self.unpackable.reset()
def read(self, n=1):
return self.unpackable.read(n)
def read_u8(self):
return self.unpackable.read_u8()
def unpack(self, hydration_hooks=None):
value = self._unpack(hydration_hooks=hydration_hooks)
if hydration_hooks and type(value) in hydration_hooks:
return hydration_hooks[type(value)](value)
return value
def _unpack(self, hydration_hooks=None):
marker = self.read_u8()
if marker == -1:
raise ValueError("Nothing to unpack")
# Tiny Integer
if 0x00 <= marker <= 0x7F:
return marker
elif 0xF0 <= marker <= 0xFF:
return marker - 0x100
# Null
elif marker == 0xC0:
return None
# Float
elif marker == 0xC1:
value, = struct_unpack(">d", self.read(8))
return value
# Boolean
elif marker == 0xC2:
return False
elif marker == 0xC3:
return True
# Integer
elif marker == 0xC8:
return struct_unpack(">b", self.read(1))[0]
elif marker == 0xC9:
return struct_unpack(">h", self.read(2))[0]
elif marker == 0xCA:
return struct_unpack(">i", self.read(4))[0]
elif marker == 0xCB:
return struct_unpack(">q", self.read(8))[0]
# Bytes
elif marker == 0xCC:
size, = struct_unpack(">B", self.read(1))
return self.read(size).tobytes()
elif marker == 0xCD:
size, = struct_unpack(">H", self.read(2))
return self.read(size).tobytes()
elif marker == 0xCE:
size, = struct_unpack(">I", self.read(4))
return self.read(size).tobytes()
else:
marker_high = marker & 0xF0
# String
if marker_high == 0x80: # TINY_STRING
return decode(self.read(marker & 0x0F), "utf-8")
elif marker == 0xD0: # STRING_8:
size, = struct_unpack(">B", self.read(1))
return decode(self.read(size), "utf-8")
elif marker == 0xD1: # STRING_16:
size, = struct_unpack(">H", self.read(2))
return decode(self.read(size), "utf-8")
elif marker == 0xD2: # STRING_32:
size, = struct_unpack(">I", self.read(4))
return decode(self.read(size), "utf-8")
# List
elif 0x90 <= marker <= 0x9F or 0xD4 <= marker <= 0xD6:
return list(self._unpack_list_items(
marker, hydration_hooks=hydration_hooks)
)
# Map
elif 0xA0 <= marker <= 0xAF or 0xD8 <= marker <= 0xDA:
return self._unpack_map(
marker, hydration_hooks=hydration_hooks
)
# Structure
elif 0xB0 <= marker <= 0xBF:
size, tag = self._unpack_structure_header(marker)
value = Structure(tag, *([None] * size))
for i in range(len(value)):
value[i] = self.unpack(hydration_hooks=hydration_hooks)
return value
else:
raise ValueError("Unknown PackStream marker %02X" % marker)
def _unpack_list_items(self, marker, hydration_hooks=None):
marker_high = marker & 0xF0
if marker_high == 0x90:
size = marker & 0x0F
if size == 0:
return
elif size == 1:
yield self.unpack(hydration_hooks=hydration_hooks)
else:
for _ in range(size):
yield self.unpack(hydration_hooks=hydration_hooks)
elif marker == 0xD4: # LIST_8:
size, = struct_unpack(">B", self.read(1))
for _ in range(size):
yield self.unpack(hydration_hooks=hydration_hooks)
elif marker == 0xD5: # LIST_16:
size, = struct_unpack(">H", self.read(2))
for _ in range(size):
yield self.unpack(hydration_hooks=hydration_hooks)
elif marker == 0xD6: # LIST_32:
size, = struct_unpack(">I", self.read(4))
for _ in range(size):
yield self.unpack(hydration_hooks=hydration_hooks)
else:
return
def unpack_map(self, hydration_hooks=None):
marker = self.read_u8()
return self._unpack_map(marker, hydration_hooks=hydration_hooks)
def _unpack_map(self, marker, hydration_hooks=None):
marker_high = marker & 0xF0
if marker_high == 0xA0:
size = marker & 0x0F
value = {}
for _ in range(size):
key = self.unpack(hydration_hooks=hydration_hooks)
value[key] = self.unpack(hydration_hooks=hydration_hooks)
return value
elif marker == 0xD8: # MAP_8:
size, = struct_unpack(">B", self.read(1))
value = {}
for _ in range(size):
key = self.unpack(hydration_hooks=hydration_hooks)
value[key] = self.unpack(hydration_hooks=hydration_hooks)
return value
elif marker == 0xD9: # MAP_16:
size, = struct_unpack(">H", self.read(2))
value = {}
for _ in range(size):
key = self.unpack(hydration_hooks=hydration_hooks)
value[key] = self.unpack(hydration_hooks=hydration_hooks)
return value
elif marker == 0xDA: # MAP_32:
size, = struct_unpack(">I", self.read(4))
value = {}
for _ in range(size):
key = self.unpack(hydration_hooks=hydration_hooks)
value[key] = self.unpack(hydration_hooks=hydration_hooks)
return value
else:
return None
def unpack_structure_header(self):
    """ Read one marker byte and decode the structure header it starts.

    :return: 2-tuple of (field count, signature bytes), or (None, None)
        when the buffer is exhausted (read_u8 returned -1)
    :raise ValueError: if the marker does not denote a structure
    """
    marker = self.read_u8()
    if marker == -1:
        return None, None
    else:
        return self._unpack_structure_header(marker)
def _unpack_structure_header(self, marker):
    """ Decode a structure header from an already-read *marker* byte.

    :param marker: the structure's marker byte
    :return: 2-tuple of (field count, 1-byte signature)
    :raise ValueError: if the marker is not a TINY_STRUCT marker
    """
    marker_high = marker & 0xF0
    if marker_high == 0xB0:  # TINY_STRUCT
        # Field count is the low nibble; the signature byte follows.
        signature = self.read(1).tobytes()
        return marker & 0x0F, signature
    else:
        raise ValueError("Expected structure, found marker %02X" % marker)
@staticmethod
def new_unpackable_buffer():
    """ Create a fresh, empty buffer suitable for this unpacker. """
    return UnpackableBuffer()
class UnpackableBuffer:
    """ Byte buffer with a forward read pointer, consumed by the unpacker.

    ``used`` counts the valid bytes held in ``data``; ``p`` is the index
    of the next byte to be read.
    """

    # Default size of the backing bytearray for an empty buffer.
    initial_capacity = 8192

    def __init__(self, data=None):
        if data is None:
            # Pre-size an empty backing store; nothing is valid yet.
            self.data = bytearray(self.initial_capacity)
            self.used = 0
        else:
            self.data = bytearray(data)
            self.used = len(self.data)
        self.p = 0

    def reset(self):
        """ Discard all content and rewind the read pointer. """
        self.used = 0
        self.p = 0

    def read(self, n=1):
        """ Return the next *n* bytes as a zero-copy memoryview slice and
        advance the read pointer past them.
        """
        end = self.p + n
        chunk = memoryview(self.data)[self.p:end]
        self.p = end
        return chunk

    def read_u8(self):
        """ Return the next byte as an unsigned int, or -1 when no
        unread data remains.
        """
        if self.used - self.p < 1:
            return -1
        byte = self.data[self.p]
        self.p += 1
        return byte

    def pop_u16(self):
        """ Remove the last two bytes of data, returning them as a big-endian
        16-bit unsigned integer.
        """
        if self.used < 2:
            return -1
        high = self.data[self.used - 2]
        low = self.data[self.used - 1]
        self.used -= 2
        return (high << 8) + low
|
neo4j/_codec/packstream/v1/__init__.py
|
codereval_python_data_12
|
Appends a DISCARD message to the output queue.
:param n: number of records to discard, default = -1 (ALL)
:param qid: query ID to discard for, default = -1 (last query)
:param dehydration_hooks:
    Hooks to dehydrate types (dict from type (class) to dehydration
    function). Dehydration functions receive the value and return an
    object of a type understood by packstream.
:param hydration_hooks:
    Hooks to hydrate types (mapping from type (class) to hydration
    function). Hydration functions receive a value of a type understood
    by packstream and are free to return anything.
:param handlers: handler functions passed into the returned Response object
def discard(self, n=-1, qid=-1, dehydration_hooks=None,
            hydration_hooks=None, **handlers):
    """ Queue a DISCARD_ALL message.

    ``n`` and ``qid`` are accepted only for interface compatibility with
    later protocol versions; Bolt 3 can only discard the entire result
    of the single open query, so both are ignored.
    """
    log.debug("[#%04X] C: DISCARD_ALL", self.local_port)
    response = Response(self, "discard", hydration_hooks, **handlers)
    self._append(b"\x2F", (), response,
                 dehydration_hooks=dehydration_hooks)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from logging import getLogger
from ssl import SSLSocket
from ..._async_compat.util import AsyncUtil
from ..._exceptions import (
BoltError,
BoltProtocolError,
)
from ...api import (
READ_ACCESS,
Version,
)
from ...exceptions import (
ConfigurationError,
DatabaseUnavailable,
DriverError,
ForbiddenOnReadOnlyDatabase,
Neo4jError,
NotALeader,
ServiceUnavailable,
)
from ._bolt import AsyncBolt
from ._common import (
check_supported_server_product,
CommitResponse,
InitResponse,
Response,
)
log = getLogger("neo4j")
class ServerStates(Enum):
    """ Server-side connection states tracked by the driver, mirroring the
    Bolt protocol's server state machine.
    """
    CONNECTED = "CONNECTED"
    READY = "READY"
    STREAMING = "STREAMING"
    # Bolt 3 does not distinguish between a transaction that is ready and
    # one that is streaming, hence the combined state.
    TX_READY_OR_TX_STREAMING = "TX_READY||TX_STREAMING"
    FAILED = "FAILED"
class ServerStateManager:
    """ Tracks the expected server-side state of a Bolt connection and
    fires an optional callback whenever that state changes.
    """

    # Per current state: message name -> state the server moves to on
    # a successful summary for that message.
    _STATE_TRANSITIONS = {
        ServerStates.CONNECTED: {
            "hello": ServerStates.READY,
        },
        ServerStates.READY: {
            "run": ServerStates.STREAMING,
            "begin": ServerStates.TX_READY_OR_TX_STREAMING,
        },
        ServerStates.STREAMING: {
            "pull": ServerStates.READY,
            "discard": ServerStates.READY,
            "reset": ServerStates.READY,
        },
        ServerStates.TX_READY_OR_TX_STREAMING: {
            "commit": ServerStates.READY,
            "rollback": ServerStates.READY,
            "reset": ServerStates.READY,
        },
        ServerStates.FAILED: {
            "reset": ServerStates.READY,
        }
    }

    def __init__(self, init_state, on_change=None):
        self.state = init_state
        self._on_change = on_change

    def transition(self, message, metadata):
        """ Advance the state machine after *message* succeeded with
        *metadata*; no-op while the result is still streaming.
        """
        if metadata.get("has_more"):
            # Partial summary: the query keeps streaming, state unchanged.
            return
        possible = self._STATE_TRANSITIONS.get(self.state, {})
        new_state = possible.get(message, self.state)
        old_state, self.state = self.state, new_state
        if old_state != new_state and callable(self._on_change):
            self._on_change(old_state, new_state)
class AsyncBolt3(AsyncBolt):
    """ Protocol handler for Bolt 3.

    This is supported by Neo4j versions 3.5, 4.0, 4.1, 4.2, 4.3, and 4.4.
    """

    PROTOCOL_VERSION = Version(3, 0)

    # Bolt 3 has no explicit query IDs (single open result at a time) and
    # predates multi-database support.
    supports_multiple_results = False

    supports_multiple_databases = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Client-side mirror of the server's connection state machine.
        self._server_state_manager = ServerStateManager(
            ServerStates.CONNECTED, on_change=self._on_server_state_change
        )

    def _on_server_state_change(self, old_state, new_state):
        log.debug("[#%04X] State: %s > %s", self.local_port,
                  old_state.name, new_state.name)

    @property
    def is_reset(self):
        # We can't be sure of the server's state if there are still pending
        # responses. Unless the last message we sent was RESET. In that case
        # the server state will always be READY when we're done.
        if (self.responses and self.responses[-1]
                and self.responses[-1].message == "reset"):
            return True
        return self._server_state_manager.state == ServerStates.READY

    @property
    def encrypted(self):
        """ Whether the underlying socket is TLS-wrapped. """
        return isinstance(self.socket, SSLSocket)

    @property
    def der_encoded_server_certificate(self):
        """ The server's certificate in binary (DER) form. """
        return self.socket.getpeercert(binary_form=True)

    def get_base_headers(self):
        """ Headers common to every HELLO message for this protocol version. """
        return {
            "user_agent": self.user_agent,
        }

    async def hello(self, dehydration_hooks=None, hydration_hooks=None):
        """ Send a HELLO message (including auth), await the response and
        validate that the server is a supported product.
        """
        headers = self.get_base_headers()
        headers.update(self.auth_dict)
        logged_headers = dict(headers)
        if "credentials" in logged_headers:
            # Never write secrets to the log.
            logged_headers["credentials"] = "*******"
        log.debug("[#%04X] C: HELLO %r", self.local_port, logged_headers)
        self._append(b"\x01", (headers,),
                     response=InitResponse(self, "hello", hydration_hooks,
                                           on_success=self.server_info.update),
                     dehydration_hooks=dehydration_hooks)
        await self.send_all()
        await self.fetch_all()
        check_supported_server_product(self.server_info.agent)

    async def route(
        self, database=None, imp_user=None, bookmarks=None,
        dehydration_hooks=None, hydration_hooks=None
    ):
        """ Fetch a routing table by calling the routing procedure.

        Bolt 3 has no ROUTE message and supports neither database
        selection nor impersonation, so ``database`` and ``imp_user``
        must be None.

        :return: list of routing table entries, one dict per record
        :raise ConfigurationError: if database or imp_user is given
        """
        if database is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}. "
                "Server Agent {!r}".format(
                    self.PROTOCOL_VERSION, database, self.server_info.agent
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        metadata = {}
        records = []
        # Ignoring database and bookmarks because there is no multi-db support.
        # The bookmarks are only relevant for making sure a previously created
        # db exists before querying a routing table for it.
        self.run(
            "CALL dbms.cluster.routing.getRoutingTable($context)",  # This is an internal procedure call. Only available if the Neo4j 3.5 is setup with clustering.
            {"context": self.routing_context},
            mode="r",  # Bolt Protocol Version(3, 0) supports mode="r"
            dehydration_hooks=dehydration_hooks,
            hydration_hooks=hydration_hooks,
            on_success=metadata.update
        )
        # Fix: forward the caller's hooks (previously hard-coded to None)
        # so custom hydration also applies to the routing records.
        self.pull(dehydration_hooks=dehydration_hooks,
                  hydration_hooks=hydration_hooks,
                  on_success=metadata.update, on_records=records.extend)
        await self.send_all()
        await self.fetch_all()
        routing_info = [
            dict(zip(metadata.get("fields", ()), values))
            for values in records
        ]
        return routing_info

    def run(self, query, parameters=None, mode=None, bookmarks=None,
            metadata=None, timeout=None, db=None, imp_user=None,
            dehydration_hooks=None, hydration_hooks=None, **handlers):
        """ Append a RUN message to the output queue.

        :raise ConfigurationError: if db (needs Bolt 4.0+) or imp_user
            (needs Bolt 4.4+) is given
        :raise TypeError: for malformed bookmarks/metadata/timeout
        :raise ValueError: for a negative timeout
        """
        if db is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                    self.PROTOCOL_VERSION, db
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        if not parameters:
            parameters = {}
        extra = {}
        if mode in (READ_ACCESS, "r"):
            extra["mode"] = "r"  # It will default to mode "w" if nothing is specified
        if bookmarks:
            try:
                extra["bookmarks"] = list(bookmarks)
            except TypeError:
                raise TypeError("Bookmarks must be provided within an iterable")
        if metadata:
            try:
                extra["tx_metadata"] = dict(metadata)
            except TypeError:
                raise TypeError("Metadata must be coercible to a dict")
        if timeout is not None:
            try:
                # The server expects the transaction timeout in milliseconds.
                extra["tx_timeout"] = int(1000 * float(timeout))
            except TypeError:
                raise TypeError("Timeout must be specified as a number of seconds")
            if extra["tx_timeout"] < 0:
                raise ValueError("Timeout must be a positive number or 0.")
        fields = (query, parameters, extra)
        log.debug("[#%04X] C: RUN %s", self.local_port, " ".join(map(repr, fields)))
        self._append(b"\x10", fields,
                     Response(self, "run", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def discard(self, n=-1, qid=-1, dehydration_hooks=None,
                hydration_hooks=None, **handlers):
        """ Append a DISCARD_ALL message to the output queue.

        ``n`` and ``qid`` exist only for interface compatibility with
        later protocol versions.
        """
        # Just ignore n and qid, it is not supported in the Bolt 3 Protocol.
        log.debug("[#%04X] C: DISCARD_ALL", self.local_port)
        self._append(b"\x2F", (),
                     Response(self, "discard", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def pull(self, n=-1, qid=-1, dehydration_hooks=None, hydration_hooks=None,
             **handlers):
        """ Append a PULL_ALL message to the output queue.

        ``n`` and ``qid`` exist only for interface compatibility with
        later protocol versions (no partial pull before Bolt 4.0).
        """
        # Just ignore n and qid, it is not supported in the Bolt 3 Protocol.
        log.debug("[#%04X] C: PULL_ALL", self.local_port)
        self._append(b"\x3F", (),
                     Response(self, "pull", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
              db=None, imp_user=None, dehydration_hooks=None,
              hydration_hooks=None, **handlers):
        """ Append a BEGIN message to the output queue, opening an explicit
        transaction.

        :raise ConfigurationError: if db or imp_user is given
        :raise TypeError: for malformed bookmarks/metadata/timeout
        :raise ValueError: for a negative timeout
        """
        if db is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                    self.PROTOCOL_VERSION, db
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        extra = {}
        if mode in (READ_ACCESS, "r"):
            extra["mode"] = "r"  # It will default to mode "w" if nothing is specified
        if bookmarks:
            try:
                extra["bookmarks"] = list(bookmarks)
            except TypeError:
                raise TypeError("Bookmarks must be provided within an iterable")
        if metadata:
            try:
                extra["tx_metadata"] = dict(metadata)
            except TypeError:
                raise TypeError("Metadata must be coercible to a dict")
        if timeout is not None:
            try:
                # The server expects the transaction timeout in milliseconds.
                extra["tx_timeout"] = int(1000 * float(timeout))
            except TypeError:
                raise TypeError("Timeout must be specified as a number of seconds")
            if extra["tx_timeout"] < 0:
                raise ValueError("Timeout must be a positive number or 0.")
        log.debug("[#%04X] C: BEGIN %r", self.local_port, extra)
        self._append(b"\x11", (extra,),
                     Response(self, "begin", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def commit(self, dehydration_hooks=None, hydration_hooks=None, **handlers):
        """ Append a COMMIT message to the output queue. """
        log.debug("[#%04X] C: COMMIT", self.local_port)
        self._append(b"\x12", (),
                     CommitResponse(self, "commit", hydration_hooks,
                                    **handlers),
                     dehydration_hooks=dehydration_hooks)

    def rollback(self, dehydration_hooks=None, hydration_hooks=None,
                 **handlers):
        """ Append a ROLLBACK message to the output queue. """
        log.debug("[#%04X] C: ROLLBACK", self.local_port)
        self._append(b"\x13", (),
                     Response(self, "rollback", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    async def reset(self, dehydration_hooks=None, hydration_hooks=None):
        """ Add a RESET message to the outgoing queue, send
        it and consume all remaining messages.
        """

        def fail(metadata):
            raise BoltProtocolError("RESET failed %r" % metadata, address=self.unresolved_address)

        log.debug("[#%04X] C: RESET", self.local_port)
        self._append(b"\x0F",
                     response=Response(self, "reset", hydration_hooks,
                                       on_failure=fail),
                     dehydration_hooks=dehydration_hooks)
        await self.send_all()
        await self.fetch_all()

    def goodbye(self, dehydration_hooks=None, hydration_hooks=None):
        """ Append a GOODBYE message, announcing imminent disconnection. """
        log.debug("[#%04X] C: GOODBYE", self.local_port)
        self._append(b"\x02", (), dehydration_hooks=dehydration_hooks)

    async def _process_message(self, tag, fields):
        """ Process at most one message from the server, if available.

        :return: 2-tuple of number of detail messages and number of summary
            messages fetched
        """
        details = []
        summary_signature = summary_metadata = None
        if tag == b"\x71":  # RECORD
            details = fields
        elif fields:
            summary_signature = tag
            summary_metadata = fields[0]
        else:
            summary_signature = tag
        if details:
            log.debug("[#%04X] S: RECORD * %d", self.local_port, len(details))  # Do not log any data
            await self.responses[0].on_records(details)
        if summary_signature is None:
            return len(details), 0
        response = self.responses.popleft()
        response.complete = True
        if summary_signature == b"\x70":  # SUCCESS
            log.debug("[#%04X] S: SUCCESS %r", self.local_port, summary_metadata)
            self._server_state_manager.transition(response.message,
                                                  summary_metadata)
            await response.on_success(summary_metadata or {})
        elif summary_signature == b"\x7E":  # IGNORED
            log.debug("[#%04X] S: IGNORED", self.local_port)
            await response.on_ignored(summary_metadata or {})
        elif summary_signature == b"\x7F":  # FAILURE
            log.debug("[#%04X] S: FAILURE %r", self.local_port, summary_metadata)
            self._server_state_manager.state = ServerStates.FAILED
            try:
                await response.on_failure(summary_metadata or {})
            except (ServiceUnavailable, DatabaseUnavailable):
                if self.pool:
                    await self.pool.deactivate(address=self.unresolved_address)
                raise
            except (NotALeader, ForbiddenOnReadOnlyDatabase):
                if self.pool:
                    self.pool.on_write_failure(address=self.unresolved_address)
                raise
            except Neo4jError as e:
                if self.pool and e.invalidates_all_connections():
                    await self.pool.mark_all_stale()
                raise
        else:
            # Fix: %02X requires an int; summary_signature is a 1-byte bytes
            # object, so format ord() of it instead of raising TypeError here.
            raise BoltProtocolError(
                "Unexpected response message with signature %02X" % ord(
                    summary_signature
                ),
                address=self.unresolved_address
            )
        return len(details), 1
|
neo4j/_async/io/_bolt3.py
|
codereval_python_data_13
|
Appends a BEGIN message to the output queue.
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+
:param dehydration_hooks:
    Hooks to dehydrate types (dict from type (class) to dehydration
    function). Dehydration functions receive the value and return an
    object of a type understood by packstream.
:param hydration_hooks:
    Hooks to hydrate types (mapping from type (class) to hydration
    function). Hydration functions receive a value of a type understood
    by packstream and are free to return anything.
:param handlers: handler functions passed into the returned Response object
:return: Response object
def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
          db=None, imp_user=None, dehydration_hooks=None,
          hydration_hooks=None, **handlers):
    """ Queue a BEGIN message, opening an explicit transaction.

    ``db`` and ``imp_user`` are rejected: Bolt 3 predates database
    selection (Bolt 4.0+) and impersonation (Bolt 4.4+).
    """
    if db is not None:
        raise ConfigurationError(
            "Database name parameter for selecting database is not "
            "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                self.PROTOCOL_VERSION, db
            )
        )
    if imp_user is not None:
        raise ConfigurationError(
            "Impersonation is not supported in Bolt Protocol {!r}. "
            "Trying to impersonate {!r}.".format(
                self.PROTOCOL_VERSION, imp_user
            )
        )
    tx_extra = {}
    if mode in (READ_ACCESS, "r"):
        # Omitting the key makes the server default to write mode.
        tx_extra["mode"] = "r"
    if bookmarks:
        try:
            tx_extra["bookmarks"] = list(bookmarks)
        except TypeError:
            raise TypeError("Bookmarks must be provided within an iterable")
    if metadata:
        try:
            tx_extra["tx_metadata"] = dict(metadata)
        except TypeError:
            raise TypeError("Metadata must be coercible to a dict")
    if timeout is not None:
        try:
            tx_extra["tx_timeout"] = int(1000 * float(timeout))
        except TypeError:
            raise TypeError("Timeout must be specified as a number of seconds")
        if tx_extra["tx_timeout"] < 0:
            raise ValueError("Timeout must be a positive number or 0.")
    log.debug("[#%04X] C: BEGIN %r", self.local_port, tx_extra)
    self._append(b"\x11", (tx_extra,),
                 Response(self, "begin", hydration_hooks, **handlers),
                 dehydration_hooks=dehydration_hooks)
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from logging import getLogger
from ssl import SSLSocket
from ..._async_compat.util import AsyncUtil
from ..._exceptions import (
BoltError,
BoltProtocolError,
)
from ...api import (
READ_ACCESS,
Version,
)
from ...exceptions import (
ConfigurationError,
DatabaseUnavailable,
DriverError,
ForbiddenOnReadOnlyDatabase,
Neo4jError,
NotALeader,
ServiceUnavailable,
)
from ._bolt import AsyncBolt
from ._common import (
check_supported_server_product,
CommitResponse,
InitResponse,
Response,
)
log = getLogger("neo4j")
class ServerStates(Enum):
    """ Server-side connection states tracked by the driver, mirroring the
    Bolt protocol's server state machine.
    """
    CONNECTED = "CONNECTED"
    READY = "READY"
    STREAMING = "STREAMING"
    # Bolt 3 does not distinguish between a transaction that is ready and
    # one that is streaming, hence the combined state.
    TX_READY_OR_TX_STREAMING = "TX_READY||TX_STREAMING"
    FAILED = "FAILED"
class ServerStateManager:
    """ Tracks the expected server-side state of a Bolt connection and
    fires an optional callback whenever that state changes.
    """

    # Per current state: message name -> state the server moves to on
    # a successful summary for that message.
    _STATE_TRANSITIONS = {
        ServerStates.CONNECTED: {
            "hello": ServerStates.READY,
        },
        ServerStates.READY: {
            "run": ServerStates.STREAMING,
            "begin": ServerStates.TX_READY_OR_TX_STREAMING,
        },
        ServerStates.STREAMING: {
            "pull": ServerStates.READY,
            "discard": ServerStates.READY,
            "reset": ServerStates.READY,
        },
        ServerStates.TX_READY_OR_TX_STREAMING: {
            "commit": ServerStates.READY,
            "rollback": ServerStates.READY,
            "reset": ServerStates.READY,
        },
        ServerStates.FAILED: {
            "reset": ServerStates.READY,
        }
    }

    def __init__(self, init_state, on_change=None):
        self.state = init_state
        self._on_change = on_change

    def transition(self, message, metadata):
        """ Advance the state machine after *message* succeeded with
        *metadata*; no-op while the result is still streaming.
        """
        if metadata.get("has_more"):
            # Partial summary: the query keeps streaming, state unchanged.
            return
        possible = self._STATE_TRANSITIONS.get(self.state, {})
        new_state = possible.get(message, self.state)
        old_state, self.state = self.state, new_state
        if old_state != new_state and callable(self._on_change):
            self._on_change(old_state, new_state)
class AsyncBolt3(AsyncBolt):
    """ Protocol handler for Bolt 3.

    This is supported by Neo4j versions 3.5, 4.0, 4.1, 4.2, 4.3, and 4.4.
    """

    PROTOCOL_VERSION = Version(3, 0)

    # Bolt 3 has no explicit query IDs (single open result at a time) and
    # predates multi-database support.
    supports_multiple_results = False

    supports_multiple_databases = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Client-side mirror of the server's connection state machine.
        self._server_state_manager = ServerStateManager(
            ServerStates.CONNECTED, on_change=self._on_server_state_change
        )

    def _on_server_state_change(self, old_state, new_state):
        log.debug("[#%04X] State: %s > %s", self.local_port,
                  old_state.name, new_state.name)

    @property
    def is_reset(self):
        # We can't be sure of the server's state if there are still pending
        # responses. Unless the last message we sent was RESET. In that case
        # the server state will always be READY when we're done.
        if (self.responses and self.responses[-1]
                and self.responses[-1].message == "reset"):
            return True
        return self._server_state_manager.state == ServerStates.READY

    @property
    def encrypted(self):
        """ Whether the underlying socket is TLS-wrapped. """
        return isinstance(self.socket, SSLSocket)

    @property
    def der_encoded_server_certificate(self):
        """ The server's certificate in binary (DER) form. """
        return self.socket.getpeercert(binary_form=True)

    def get_base_headers(self):
        """ Headers common to every HELLO message for this protocol version. """
        return {
            "user_agent": self.user_agent,
        }

    async def hello(self, dehydration_hooks=None, hydration_hooks=None):
        """ Send a HELLO message (including auth), await the response and
        validate that the server is a supported product.
        """
        headers = self.get_base_headers()
        headers.update(self.auth_dict)
        logged_headers = dict(headers)
        if "credentials" in logged_headers:
            # Never write secrets to the log.
            logged_headers["credentials"] = "*******"
        log.debug("[#%04X] C: HELLO %r", self.local_port, logged_headers)
        self._append(b"\x01", (headers,),
                     response=InitResponse(self, "hello", hydration_hooks,
                                           on_success=self.server_info.update),
                     dehydration_hooks=dehydration_hooks)
        await self.send_all()
        await self.fetch_all()
        check_supported_server_product(self.server_info.agent)

    async def route(
        self, database=None, imp_user=None, bookmarks=None,
        dehydration_hooks=None, hydration_hooks=None
    ):
        """ Fetch a routing table by calling the routing procedure.

        Bolt 3 has no ROUTE message and supports neither database
        selection nor impersonation, so ``database`` and ``imp_user``
        must be None.

        :return: list of routing table entries, one dict per record
        :raise ConfigurationError: if database or imp_user is given
        """
        if database is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}. "
                "Server Agent {!r}".format(
                    self.PROTOCOL_VERSION, database, self.server_info.agent
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        metadata = {}
        records = []
        # Ignoring database and bookmarks because there is no multi-db support.
        # The bookmarks are only relevant for making sure a previously created
        # db exists before querying a routing table for it.
        self.run(
            "CALL dbms.cluster.routing.getRoutingTable($context)",  # This is an internal procedure call. Only available if the Neo4j 3.5 is setup with clustering.
            {"context": self.routing_context},
            mode="r",  # Bolt Protocol Version(3, 0) supports mode="r"
            dehydration_hooks=dehydration_hooks,
            hydration_hooks=hydration_hooks,
            on_success=metadata.update
        )
        # Fix: forward the caller's hooks (previously hard-coded to None)
        # so custom hydration also applies to the routing records.
        self.pull(dehydration_hooks=dehydration_hooks,
                  hydration_hooks=hydration_hooks,
                  on_success=metadata.update, on_records=records.extend)
        await self.send_all()
        await self.fetch_all()
        routing_info = [
            dict(zip(metadata.get("fields", ()), values))
            for values in records
        ]
        return routing_info

    def run(self, query, parameters=None, mode=None, bookmarks=None,
            metadata=None, timeout=None, db=None, imp_user=None,
            dehydration_hooks=None, hydration_hooks=None, **handlers):
        """ Append a RUN message to the output queue.

        :raise ConfigurationError: if db (needs Bolt 4.0+) or imp_user
            (needs Bolt 4.4+) is given
        :raise TypeError: for malformed bookmarks/metadata/timeout
        :raise ValueError: for a negative timeout
        """
        if db is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                    self.PROTOCOL_VERSION, db
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        if not parameters:
            parameters = {}
        extra = {}
        if mode in (READ_ACCESS, "r"):
            extra["mode"] = "r"  # It will default to mode "w" if nothing is specified
        if bookmarks:
            try:
                extra["bookmarks"] = list(bookmarks)
            except TypeError:
                raise TypeError("Bookmarks must be provided within an iterable")
        if metadata:
            try:
                extra["tx_metadata"] = dict(metadata)
            except TypeError:
                raise TypeError("Metadata must be coercible to a dict")
        if timeout is not None:
            try:
                # The server expects the transaction timeout in milliseconds.
                extra["tx_timeout"] = int(1000 * float(timeout))
            except TypeError:
                raise TypeError("Timeout must be specified as a number of seconds")
            if extra["tx_timeout"] < 0:
                raise ValueError("Timeout must be a positive number or 0.")
        fields = (query, parameters, extra)
        log.debug("[#%04X] C: RUN %s", self.local_port, " ".join(map(repr, fields)))
        self._append(b"\x10", fields,
                     Response(self, "run", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def discard(self, n=-1, qid=-1, dehydration_hooks=None,
                hydration_hooks=None, **handlers):
        """ Append a DISCARD_ALL message to the output queue.

        ``n`` and ``qid`` exist only for interface compatibility with
        later protocol versions.
        """
        # Just ignore n and qid, it is not supported in the Bolt 3 Protocol.
        log.debug("[#%04X] C: DISCARD_ALL", self.local_port)
        self._append(b"\x2F", (),
                     Response(self, "discard", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def pull(self, n=-1, qid=-1, dehydration_hooks=None, hydration_hooks=None,
             **handlers):
        """ Append a PULL_ALL message to the output queue.

        ``n`` and ``qid`` exist only for interface compatibility with
        later protocol versions (no partial pull before Bolt 4.0).
        """
        # Just ignore n and qid, it is not supported in the Bolt 3 Protocol.
        log.debug("[#%04X] C: PULL_ALL", self.local_port)
        self._append(b"\x3F", (),
                     Response(self, "pull", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
              db=None, imp_user=None, dehydration_hooks=None,
              hydration_hooks=None, **handlers):
        """ Append a BEGIN message to the output queue, opening an explicit
        transaction.

        :raise ConfigurationError: if db or imp_user is given
        :raise TypeError: for malformed bookmarks/metadata/timeout
        :raise ValueError: for a negative timeout
        """
        if db is not None:
            raise ConfigurationError(
                "Database name parameter for selecting database is not "
                "supported in Bolt Protocol {!r}. Database name {!r}.".format(
                    self.PROTOCOL_VERSION, db
                )
            )
        if imp_user is not None:
            raise ConfigurationError(
                "Impersonation is not supported in Bolt Protocol {!r}. "
                "Trying to impersonate {!r}.".format(
                    self.PROTOCOL_VERSION, imp_user
                )
            )
        extra = {}
        if mode in (READ_ACCESS, "r"):
            extra["mode"] = "r"  # It will default to mode "w" if nothing is specified
        if bookmarks:
            try:
                extra["bookmarks"] = list(bookmarks)
            except TypeError:
                raise TypeError("Bookmarks must be provided within an iterable")
        if metadata:
            try:
                extra["tx_metadata"] = dict(metadata)
            except TypeError:
                raise TypeError("Metadata must be coercible to a dict")
        if timeout is not None:
            try:
                # The server expects the transaction timeout in milliseconds.
                extra["tx_timeout"] = int(1000 * float(timeout))
            except TypeError:
                raise TypeError("Timeout must be specified as a number of seconds")
            if extra["tx_timeout"] < 0:
                raise ValueError("Timeout must be a positive number or 0.")
        log.debug("[#%04X] C: BEGIN %r", self.local_port, extra)
        self._append(b"\x11", (extra,),
                     Response(self, "begin", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    def commit(self, dehydration_hooks=None, hydration_hooks=None, **handlers):
        """ Append a COMMIT message to the output queue. """
        log.debug("[#%04X] C: COMMIT", self.local_port)
        self._append(b"\x12", (),
                     CommitResponse(self, "commit", hydration_hooks,
                                    **handlers),
                     dehydration_hooks=dehydration_hooks)

    def rollback(self, dehydration_hooks=None, hydration_hooks=None,
                 **handlers):
        """ Append a ROLLBACK message to the output queue. """
        log.debug("[#%04X] C: ROLLBACK", self.local_port)
        self._append(b"\x13", (),
                     Response(self, "rollback", hydration_hooks, **handlers),
                     dehydration_hooks=dehydration_hooks)

    async def reset(self, dehydration_hooks=None, hydration_hooks=None):
        """ Add a RESET message to the outgoing queue, send
        it and consume all remaining messages.
        """

        def fail(metadata):
            raise BoltProtocolError("RESET failed %r" % metadata, address=self.unresolved_address)

        log.debug("[#%04X] C: RESET", self.local_port)
        self._append(b"\x0F",
                     response=Response(self, "reset", hydration_hooks,
                                       on_failure=fail),
                     dehydration_hooks=dehydration_hooks)
        await self.send_all()
        await self.fetch_all()

    def goodbye(self, dehydration_hooks=None, hydration_hooks=None):
        """ Append a GOODBYE message, announcing imminent disconnection. """
        log.debug("[#%04X] C: GOODBYE", self.local_port)
        self._append(b"\x02", (), dehydration_hooks=dehydration_hooks)

    async def _process_message(self, tag, fields):
        """ Process at most one message from the server, if available.

        :return: 2-tuple of number of detail messages and number of summary
            messages fetched
        """
        details = []
        summary_signature = summary_metadata = None
        if tag == b"\x71":  # RECORD
            details = fields
        elif fields:
            summary_signature = tag
            summary_metadata = fields[0]
        else:
            summary_signature = tag
        if details:
            log.debug("[#%04X] S: RECORD * %d", self.local_port, len(details))  # Do not log any data
            await self.responses[0].on_records(details)
        if summary_signature is None:
            return len(details), 0
        response = self.responses.popleft()
        response.complete = True
        if summary_signature == b"\x70":  # SUCCESS
            log.debug("[#%04X] S: SUCCESS %r", self.local_port, summary_metadata)
            self._server_state_manager.transition(response.message,
                                                  summary_metadata)
            await response.on_success(summary_metadata or {})
        elif summary_signature == b"\x7E":  # IGNORED
            log.debug("[#%04X] S: IGNORED", self.local_port)
            await response.on_ignored(summary_metadata or {})
        elif summary_signature == b"\x7F":  # FAILURE
            log.debug("[#%04X] S: FAILURE %r", self.local_port, summary_metadata)
            self._server_state_manager.state = ServerStates.FAILED
            try:
                await response.on_failure(summary_metadata or {})
            except (ServiceUnavailable, DatabaseUnavailable):
                if self.pool:
                    await self.pool.deactivate(address=self.unresolved_address)
                raise
            except (NotALeader, ForbiddenOnReadOnlyDatabase):
                if self.pool:
                    self.pool.on_write_failure(address=self.unresolved_address)
                raise
            except Neo4jError as e:
                if self.pool and e.invalidates_all_connections():
                    await self.pool.mark_all_stale()
                raise
        else:
            # Fix: %02X requires an int; summary_signature is a 1-byte bytes
            # object, so format ord() of it instead of raising TypeError here.
            raise BoltProtocolError(
                "Unexpected response message with signature %02X" % ord(
                    summary_signature
                ),
                address=self.unresolved_address
            )
        return len(details), 1
|
neo4j/_async/io/_bolt3.py
|
codereval_python_data_14
|
>>> round_half_to_even(3)
3
>>> round_half_to_even(3.2)
3
>>> round_half_to_even(3.5)
4
>>> round_half_to_even(3.7)
4
>>> round_half_to_even(4)
4
>>> round_half_to_even(4.2)
4
>>> round_half_to_even(4.5)
4
>>> round_half_to_even(4.7)
5
:param n:
:return:
def round_half_to_even(n):
    """ Round *n* to the nearest integer, resolving exact .5 ties toward
    the even neighbour (banker's rounding).

    >>> round_half_to_even(3.2)
    3
    >>> round_half_to_even(3.5)
    4
    >>> round_half_to_even(4.5)
    4
    >>> round_half_to_even(4.7)
    5

    :param n: number to round
    :return: nearest int
    """
    scaled = 10 * n
    if scaled != int(scaled) or scaled % 10 != 5:
        # Not an exact .5 tie; the built-in round() already suffices.
        return int(round(n))
    lower = int(n - 0.5)
    upper = int(n + 0.5)
    return upper if upper % 2 == 0 else lower
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"nano_add",
"nano_div",
"nano_divmod",
"symmetric_divmod",
"round_half_to_even",
]
def nano_add(x, y):
    """ Add two numbers exactly at nanosecond resolution, avoiding binary
    floating-point drift by scaling both operands to integer nanoseconds.

    >>> 0.7 + 0.2
    0.8999999999999999
    >>> nano_add(0.7, 0.2)
    0.9

    :param x: first addend
    :param y: second addend
    :return: x + y, exact to nine decimal places
    """
    total_nanos = int(1000000000 * x) + int(1000000000 * y)
    return total_nanos / 1000000000
def nano_div(x, y):
    """ Divide two numbers at nanosecond resolution, avoiding binary
    floating-point drift by scaling both operands to nanoseconds first.

    >>> 0.7 / 0.2
    3.4999999999999996
    >>> nano_div(0.7, 0.2)
    3.5

    :param x: dividend
    :param y: divisor
    :return: x / y as a float
    """
    numerator = float(1000000000 * x)
    denominator = int(1000000000 * y)
    return numerator / denominator
def nano_divmod(x, y):
    """ divmod() at nanosecond resolution; the remainder is converted back
    to the type of the dividend.

    >>> divmod(0.7, 0.2)
    (3.0, 0.09999999999999992)
    >>> nano_divmod(0.7, 0.2)
    (3, 0.1)

    :param x: dividend
    :param y: divisor
    :return: 2-tuple of (int quotient, remainder of type(x))
    """
    remainder_type = type(x)
    quotient, remainder = divmod(int(1000000000 * x), int(1000000000 * y))
    return int(quotient), remainder_type(remainder / 1000000000)
def symmetric_divmod(dividend, divisor):
    """ divmod() variant that is symmetric about zero: the quotient is
    truncated toward zero and the remainder carries the dividend's sign.

    :param dividend:
    :param divisor:
    :return: 2-tuple of (int quotient, remainder of type(dividend))
    """
    number = type(dividend)
    if dividend < 0:
        # Mirror the non-negative result for negative dividends.
        quotient, remainder = divmod(-dividend, divisor)
        return -int(quotient), -number(remainder)
    quotient, remainder = divmod(dividend, divisor)
    return int(quotient), number(remainder)
def round_half_to_even(n):
    """ Round *n* to the nearest integer, resolving exact .5 ties toward
    the even neighbour (banker's rounding).

    >>> round_half_to_even(3.2)
    3
    >>> round_half_to_even(3.5)
    4
    >>> round_half_to_even(4.5)
    4
    >>> round_half_to_even(4.7)
    5

    :param n: number to round
    :return: nearest int
    """
    scaled = 10 * n
    if scaled != int(scaled) or scaled % 10 != 5:
        # Not an exact .5 tie; the built-in round() already suffices.
        return int(round(n))
    lower = int(n - 0.5)
    upper = int(n + 0.5)
    return upper if upper % 2 == 0 else lower
|
neo4j/time/_arithmetic.py
|
codereval_python_data_15
|
Dynamically create a Point subclass.
def point_type(name, fields, srid_map):
    """ Dynamically create a Point subclass.

    :param name: class name for the new subclass
    :param fields: coordinate names, in order (aliased to x/y/z as well)
    :param srid_map: mapping from dimension count to SRID
    :return: the new Point subclass, registered in the SRID table
    """
    def srid_accessor(self):
        try:
            return srid_map[len(self)]
        except KeyError:
            return None

    attributes = {"srid": property(srid_accessor)}
    for index, field_name in enumerate(fields):
        # Default args pin the current index/name for this coordinate.
        def coordinate(self, i=index, f=field_name):
            try:
                return self[i]
            except IndexError:
                raise AttributeError(f)
        # Expose each coordinate under its domain name and its x/y/z alias.
        for alias in {field_name, "xyz"[index]}:
            attributes[alias] = property(coordinate)

    subclass = type(name, (Point,), attributes)
    with srid_table_lock:
        for dimensions, srid in srid_map.items():
            srid_table[srid] = (subclass, dimensions)
    return subclass
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines _spatial data types.
"""
from threading import Lock
# SRID to subclass mappings
# Maps SRID -> (Point subclass, dimension count); populated by point_type().
srid_table = {}
srid_table_lock = Lock()
class Point(tuple):
    """Base-class for _spatial data.

    A point within a geometric space. This type is generally used via its
    subclasses and should not be instantiated directly unless there is no
    subclass defined for the required SRID.

    :param iterable:
        An iterable of coordinates.
        All items will be converted to :class:`float`.
    """

    #: The SRID (_spatial reference identifier) of the _spatial data.
    #: A number that identifies the coordinate system the _spatial type is to be
    #: interpreted in.
    #:
    #: :type: int
    srid = None

    def __new__(cls, iterable):
        coordinates = (float(value) for value in iterable)
        return tuple.__new__(cls, coordinates)

    def __repr__(self):
        return "POINT(%s)" % " ".join(map(str, self))

    def __eq__(self, other):
        # Equal only to another point of the *same* subclass with the
        # same coordinates; anything non-comparable is simply unequal.
        try:
            if type(self) is not type(other):
                return False
            return tuple(self) == tuple(other)
        except (AttributeError, TypeError):
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(type(self)) ^ hash(tuple(self))
def point_type(name, fields, srid_map):
    """ Dynamically create a Point subclass.
    """
    def srid(self):
        # The SRID is selected by the point's dimensionality.
        try:
            return srid_map[len(self)]
        except KeyError:
            return None

    attributes = {"srid": property(srid)}

    for position, field_name in enumerate(fields):
        # Bind position/name as defaults so each property keeps its own.
        def getter(self, i=position, f=field_name):
            try:
                return self[i]
            except IndexError:
                raise AttributeError(f)

        # Expose each coordinate under its descriptive name and its
        # x/y/z alias (a set, since the two may coincide).
        for alias in {field_name, "xyz"[position]}:
            attributes[alias] = property(getter)

    subclass = type(name, (Point,), attributes)
    with srid_table_lock:
        for dimension, srid_code in srid_map.items():
            srid_table[srid_code] = (subclass, dimension)
    return subclass
# Point subclass definitions
# Cartesian coordinates: SRID 7203 for 2D, 9157 for 3D.
CartesianPoint = point_type("CartesianPoint", ["x", "y", "z"],
                            {2: 7203, 3: 9157})
# WGS84 geographic coordinates: SRID 4326 for 2D, 4979 for 3D.
WGS84Point = point_type("WGS84Point", ["longitude", "latitude", "height"],
                        {2: 4326, 3: 4979})
|
neo4j/_spatial/__init__.py
|
codereval_python_data_16
|
Decorator for deprecating functions and methods.
::
@deprecated("'foo' has been deprecated in favour of 'bar'")
def foo(x):
pass
def deprecated(message):
    """ Decorator for deprecating functions and methods.
    ::
        @deprecated("'foo' has been deprecated in favour of 'bar'")
        def foo(x):
            pass
    """
    def decorator(f):
        # Coroutine functions need an async wrapper so the warning is
        # emitted when the coroutine is awaited via the wrapper.
        if asyncio.iscoroutinefunction(f):
            @wraps(f)
            async def async_inner(*args, **kwargs):
                deprecation_warn(message, stack_level=2)
                return await f(*args, **kwargs)

            return async_inner

        @wraps(f)
        def sync_inner(*args, **kwargs):
            deprecation_warn(message, stack_level=2)
            return f(*args, **kwargs)

        return sync_inner
    return decorator
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from functools import wraps
from warnings import warn
# Can be automatically overridden in builds
package = "neo4j"  # distribution name
version = "5.0.dev0"  # driver version, reported in the user agent
def get_user_agent():
    """ Obtain the default user agent string sent to the server after
    a successful handshake.
    """
    from sys import (
        platform,
        version_info,
    )
    # e.g. "neo4j-python/5.0.dev0 Python/3.10.2-final-0 (linux)"
    fields = (version, *version_info, platform)
    return "neo4j-python/{} Python/{}.{}.{}-{}-{} ({})".format(*fields)
def deprecation_warn(message, stack_level=1):
    """Emit a ``DeprecationWarning`` attributed ``stack_level`` frames up."""
    # +1 accounts for this helper's own frame.
    warn(message, category=DeprecationWarning,
         stacklevel=stack_level + 1)
def deprecated(message):
    """ Decorator for deprecating functions and methods.
    ::
        @deprecated("'foo' has been deprecated in favour of 'bar'")
        def foo(x):
            pass
    """
    def decorator(f):
        # Coroutine functions need an async wrapper so the warning is
        # emitted when the coroutine is awaited via the wrapper.
        if asyncio.iscoroutinefunction(f):
            @wraps(f)
            async def async_inner(*args, **kwargs):
                deprecation_warn(message, stack_level=2)
                return await f(*args, **kwargs)

            return async_inner

        @wraps(f)
        def sync_inner(*args, **kwargs):
            deprecation_warn(message, stack_level=2)
            return f(*args, **kwargs)

        return sync_inner
    return decorator
class ExperimentalWarning(Warning):
    """Warning category emitted when experimental driver features are used."""
def experimental_warn(message, stack_level=1):
    """Emit an ``ExperimentalWarning`` attributed ``stack_level`` frames up."""
    # +1 accounts for this helper's own frame.
    warn(message, category=ExperimentalWarning,
         stacklevel=stack_level + 1)
def experimental(message):
    """ Decorator for tagging experimental functions and methods.
    ::
        @experimental("'foo' is an experimental function and may be "
                      "removed in a future release")
        def foo(x):
            pass
    """
    def decorator(f):
        # Mirror the shape of `deprecated`: async targets get an async
        # wrapper so the warning fires when the call is awaited.
        if asyncio.iscoroutinefunction(f):
            @wraps(f)
            async def async_inner(*args, **kwargs):
                experimental_warn(message, stack_level=2)
                return await f(*args, **kwargs)

            return async_inner

        @wraps(f)
        def sync_inner(*args, **kwargs):
            experimental_warn(message, stack_level=2)
            return f(*args, **kwargs)

        return sync_inner
    return decorator
def unclosed_resource_warn(obj):
    """Emit a ``ResourceWarning`` reporting that ``obj`` was never closed."""
    import tracemalloc
    from warnings import warn

    parts = [f"Unclosed {obj!r}."]
    # The allocation traceback is only available while tracemalloc traces.
    trace = tracemalloc.get_object_traceback(obj)
    if trace:
        parts.append("\nObject allocated at (most recent call last):\n")
        parts.append("\n".join(trace.format()))
    else:
        parts.append("\nEnable tracemalloc to get the object allocation traceback.")
    warn("".join(parts), ResourceWarning, stacklevel=2, source=obj)
|
neo4j/_meta.py
|
codereval_python_data_17
|
Some behaviour of R cannot be configured via env variables, but can
only be configured via R options once R has started. These are set here.
def _inline_r_setup(code: str) -> str:
"""
Some behaviour of R cannot be configured via env variables, but can
only be configured via R options once R has started. These are set here.
"""
with_option = f"""\
options(install.packages.compile.from.source = "never")
{code}
"""
return with_option
from __future__ import annotations
import contextlib
import os
import shlex
import shutil
from typing import Generator
from typing import Sequence
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import PatchesT
from pre_commit.envcontext import UNSET
from pre_commit.hook import Hook
from pre_commit.languages import helpers
from pre_commit.prefix import Prefix
from pre_commit.util import clean_path_on_failure
from pre_commit.util import cmd_output_b
# Name stem of the per-version environment directory for R hooks.
ENVIRONMENT_DIR = 'renv'
# Options that keep Rscript invocations quiet and reproducible.
RSCRIPT_OPTS = ('--no-save', '--no-restore', '--no-site-file', '--no-environ')
get_default_version = helpers.basic_get_default_version
health_check = helpers.basic_health_check
def get_env_patch(venv: str) -> PatchesT:
    """Environment variable patches that activate the renv at ``venv``."""
    profile = os.path.join(venv, 'activate.R')
    # NOTE(review): clearing RENV_PROJECT presumably prevents an outer
    # renv project from shadowing the hook's environment.
    return (
        ('R_PROFILE_USER', profile),
        ('RENV_PROJECT', UNSET),
    )
@contextlib.contextmanager
def in_env(
        prefix: Prefix,
        language_version: str,
) -> Generator[None, None, None]:
    """Run the body with the hook's renv environment variables applied."""
    env_dir = _get_env_dir(prefix, language_version)
    with envcontext(get_env_patch(env_dir)):
        yield
def _get_env_dir(prefix: Prefix, version: str) -> str:
    # Absolute path of the renv environment directory for this version.
    return prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))
def _prefix_if_non_local_file_entry(
entry: Sequence[str],
prefix: Prefix,
src: str,
) -> Sequence[str]:
if entry[1] == '-e':
return entry[1:]
else:
if src == 'local':
path = entry[1]
else:
path = prefix.path(entry[1])
return (path,)
def _rscript_exec() -> str:
r_home = os.environ.get('R_HOME')
if r_home is None:
return 'Rscript'
else:
return os.path.join(r_home, 'bin', 'Rscript')
def _entry_validate(entry: Sequence[str]) -> None:
"""
Allowed entries:
# Rscript -e expr
# Rscript path/to/file
"""
if entry[0] != 'Rscript':
raise ValueError('entry must start with `Rscript`.')
if entry[1] == '-e':
if len(entry) > 3:
raise ValueError('You can supply at most one expression.')
elif len(entry) > 2:
raise ValueError(
'The only valid syntax is `Rscript -e {expr}`',
'or `Rscript path/to/hook/script`',
)
def _cmd_from_hook(hook: Hook) -> tuple[str, ...]:
    """Build the full Rscript command line for ``hook``."""
    entry = shlex.split(hook.entry)
    _entry_validate(entry)

    target = _prefix_if_non_local_file_entry(entry, hook.prefix, hook.src)
    # `Rscript`, its standard options, the expression/script, hook args.
    return (*entry[:1], *RSCRIPT_OPTS, *target, *hook.args)
def install_environment(
        prefix: Prefix,
        version: str,
        additional_dependencies: Sequence[str],
) -> None:
    """Create the renv environment for an R hook repo.

    Restores packages from the repo's ``renv.lock``, writes an
    ``activate.R`` shim for later activation, installs the repo itself if
    it is an R package, then installs any ``additional_dependencies``.
    """
    env_dir = _get_env_dir(prefix, version)
    with clean_path_on_failure(env_dir):
        os.makedirs(env_dir, exist_ok=True)
        # Seed the environment with the lockfile and renv bootstrap code.
        shutil.copy(prefix.path('renv.lock'), env_dir)
        shutil.copytree(prefix.path('renv'), os.path.join(env_dir, 'renv'))

        # R code run inside env_dir: restore locked packages, write the
        # activation statement, and install the hook repo itself when it
        # has a DESCRIPTION declaring a Package.
        r_code_inst_environment = f"""\
            prefix_dir <- {prefix.prefix_dir!r}
            options(
                repos = c(CRAN = "https://cran.rstudio.com"),
                renv.consent = TRUE
            )
            source("renv/activate.R")
            renv::restore()
            activate_statement <- paste0(
                'suppressWarnings({{',
                'old <- setwd("', getwd(), '"); ',
                'source("renv/activate.R"); ',
                'setwd(old); ',
                'renv::load("', getwd(), '");}})'
            )
            writeLines(activate_statement, 'activate.R')
            is_package <- tryCatch(
                {{
                    path_desc <- file.path(prefix_dir, 'DESCRIPTION')
                    suppressWarnings(desc <- read.dcf(path_desc))
                    "Package" %in% colnames(desc)
                }},
                error = function(...) FALSE
            )
            if (is_package) {{
                renv::install(prefix_dir)
            }}
            """

        cmd_output_b(
            _rscript_exec(), '--vanilla', '-e',
            _inline_r_setup(r_code_inst_environment),
            cwd=env_dir,
        )
        if additional_dependencies:
            # Extra packages are passed as trailing command-line args.
            r_code_inst_add = 'renv::install(commandArgs(trailingOnly = TRUE))'
            with in_env(prefix, version):
                cmd_output_b(
                    _rscript_exec(), *RSCRIPT_OPTS, '-e',
                    _inline_r_setup(r_code_inst_add),
                    *additional_dependencies,
                    cwd=env_dir,
                )
def _inline_r_setup(code: str) -> str:
"""
Some behaviour of R cannot be configured via env variables, but can
only be configured via R options once R has started. These are set here.
"""
with_option = f"""\
options(install.packages.compile.from.source = "never")
{code}
"""
return with_option
def run_hook(
        hook: Hook,
        file_args: Sequence[str],
        color: bool,
) -> tuple[int, bytes]:
    # Activate the hook's renv before executing so Rscript sees the
    # environment's library paths.
    with in_env(hook.prefix, hook.language_version):
        return helpers.run_xargs(
            hook, _cmd_from_hook(hook), file_args, color=color,
        )
|
pre_commit/languages/r.py
|
codereval_python_data_18
|
A simplified implementation of xargs.
color: Make a pty if on a platform that supports it
target_concurrency: Target number of partitions to run concurrently
def xargs(
        cmd: tuple[str, ...],
        varargs: Sequence[str],
        *,
        color: bool = False,
        target_concurrency: int = 1,
        _max_length: int | None = None,
        **kwargs: Any,
) -> tuple[int, bytes]:
    """A simplified implementation of xargs.

    color: Make a pty if on a platform that supports it
    target_concurrency: Target number of partitions to run concurrently
    """
    # Fix: the platform limit was previously a default argument evaluated
    # once at import time, freezing the environment-size measurement.
    # Compute it per call instead (matching `partition` below).
    if _max_length is None:
        _max_length = _get_platform_max_length()

    cmd_fn = cmd_output_p if color else cmd_output_b
    retcode = 0
    stdout = b''

    try:
        cmd = parse_shebang.normalize_cmd(cmd)
    except parse_shebang.ExecutableNotFoundError as e:
        return e.to_output()[:2]

    # on windows, batch files have a separate length limit than windows itself
    if (
            sys.platform == 'win32' and
            cmd[0].lower().endswith(('.bat', '.cmd'))
    ):  # pragma: win32 cover
        # this is implementation details but the command gets translated into
        # full/path/to/cmd.exe /c *cmd
        cmd_exe = parse_shebang.find_executable('cmd.exe')
        # 1024 is additionally subtracted to give headroom for further
        # expansion inside the batch file
        _max_length = 8192 - len(cmd_exe) - len(' /c ') - 1024

    partitions = partition(cmd, varargs, target_concurrency, _max_length)

    def run_cmd_partition(
            run_cmd: tuple[str, ...],
    ) -> tuple[int, bytes, bytes | None]:
        return cmd_fn(
            *run_cmd, retcode=None, stderr=subprocess.STDOUT, **kwargs,
        )

    threads = min(len(partitions), target_concurrency)
    with _thread_mapper(threads) as thread_map:
        results = thread_map(run_cmd_partition, partitions)

    for proc_retcode, proc_out, _ in results:
        # Overall return code is the worst (highest) partition code.
        retcode = max(retcode, proc_retcode)
        stdout += proc_out

    return retcode, stdout
from __future__ import annotations
import concurrent.futures
import contextlib
import math
import os
import subprocess
import sys
from typing import Any
from typing import Callable
from typing import Generator
from typing import Iterable
from typing import MutableMapping
from typing import Sequence
from typing import TypeVar
from pre_commit import parse_shebang
from pre_commit.util import cmd_output_b
from pre_commit.util import cmd_output_p
TArg = TypeVar('TArg')
TRet = TypeVar('TRet')
def _environ_size(_env: MutableMapping[str, str] | None = None) -> int:
environ = _env if _env is not None else getattr(os, 'environb', os.environ)
size = 8 * len(environ) # number of pointers in `envp`
for k, v in environ.items():
size += len(k) + len(v) + 2 # c strings in `envp`
return size
def _get_platform_max_length() -> int:  # pragma: no cover (platform specific)
    """Best-effort maximum command-line length for this platform."""
    if os.name == 'posix':
        # SC_ARG_MAX minus headroom and the environment block, clamped
        # to the range [4 KiB, 128 KiB].
        limit = os.sysconf('SC_ARG_MAX') - 2048 - _environ_size()
        return max(min(limit, 2 ** 17), 2 ** 12)
    if os.name == 'nt':
        return 2 ** 15 - 2048  # UNICODE_STRING max - headroom
    # posix minimum
    return 2 ** 12
def _command_length(*cmd: str) -> int:
full_cmd = ' '.join(cmd)
# win32 uses the amount of characters, more details at:
# https://github.com/pre-commit/pre-commit/pull/839
if sys.platform == 'win32':
return len(full_cmd.encode('utf-16le')) // 2
else:
return len(full_cmd.encode(sys.getfilesystemencoding()))
class ArgumentTooLongError(RuntimeError):
    """A single argument could not fit within the platform length limit."""
    pass
def partition(
        cmd: Sequence[str],
        varargs: Sequence[str],
        target_concurrency: int,
        _max_length: int | None = None,
) -> tuple[tuple[str, ...], ...]:
    """Split ``varargs`` into command invocations that fit ``_max_length``.

    Always returns at least one partition (possibly with no varargs).

    :raises ArgumentTooLongError: if a single argument cannot fit alone.
    """
    _max_length = _max_length or _get_platform_max_length()

    # Generally, we try to partition evenly into at least `target_concurrency`
    # partitions, but we don't want a bunch of tiny partitions.
    max_args = max(4, math.ceil(len(varargs) / target_concurrency))

    cmd = tuple(cmd)
    ret = []

    ret_cmd: list[str] = []
    # Reversed so arguments are in order
    varargs = list(reversed(varargs))

    total_length = _command_length(*cmd) + 1
    while varargs:
        arg = varargs.pop()

        arg_length = _command_length(arg) + 1
        if (
                total_length + arg_length <= _max_length and
                len(ret_cmd) < max_args
        ):
            ret_cmd.append(arg)
            total_length += arg_length
        elif not ret_cmd:
            # The argument does not fit even in an empty partition.
            raise ArgumentTooLongError(arg)
        else:
            # We've exceeded the length, yield a command
            ret.append(cmd + tuple(ret_cmd))
            ret_cmd = []
            total_length = _command_length(*cmd) + 1
            # Push the argument back so the next partition retries it.
            varargs.append(arg)

    ret.append(cmd + tuple(ret_cmd))

    return tuple(ret)
@contextlib.contextmanager
def _thread_mapper(maxsize: int) -> Generator[
Callable[[Callable[[TArg], TRet], Iterable[TArg]], Iterable[TRet]],
None, None,
]:
if maxsize == 1:
yield map
else:
with concurrent.futures.ThreadPoolExecutor(maxsize) as ex:
yield ex.map
def xargs(
        cmd: tuple[str, ...],
        varargs: Sequence[str],
        *,
        color: bool = False,
        target_concurrency: int = 1,
        _max_length: int | None = None,
        **kwargs: Any,
) -> tuple[int, bytes]:
    """A simplified implementation of xargs.

    color: Make a pty if on a platform that supports it
    target_concurrency: Target number of partitions to run concurrently
    """
    # Fix: the platform limit was previously a default argument evaluated
    # once at import time, freezing the environment-size measurement.
    # Compute it per call instead (matching `partition` above).
    if _max_length is None:
        _max_length = _get_platform_max_length()

    cmd_fn = cmd_output_p if color else cmd_output_b
    retcode = 0
    stdout = b''

    try:
        cmd = parse_shebang.normalize_cmd(cmd)
    except parse_shebang.ExecutableNotFoundError as e:
        return e.to_output()[:2]

    # on windows, batch files have a separate length limit than windows itself
    if (
            sys.platform == 'win32' and
            cmd[0].lower().endswith(('.bat', '.cmd'))
    ):  # pragma: win32 cover
        # this is implementation details but the command gets translated into
        # full/path/to/cmd.exe /c *cmd
        cmd_exe = parse_shebang.find_executable('cmd.exe')
        # 1024 is additionally subtracted to give headroom for further
        # expansion inside the batch file
        _max_length = 8192 - len(cmd_exe) - len(' /c ') - 1024

    partitions = partition(cmd, varargs, target_concurrency, _max_length)

    def run_cmd_partition(
            run_cmd: tuple[str, ...],
    ) -> tuple[int, bytes, bytes | None]:
        return cmd_fn(
            *run_cmd, retcode=None, stderr=subprocess.STDOUT, **kwargs,
        )

    threads = min(len(partitions), target_concurrency)
    with _thread_mapper(threads) as thread_map:
        results = thread_map(run_cmd_partition, partitions)

    for proc_retcode, proc_out, _ in results:
        # Overall return code is the worst (highest) partition code.
        retcode = max(retcode, proc_retcode)
        stdout += proc_out

    return retcode, stdout
|
pre_commit/xargs.py
|
codereval_python_data_19
|
Deterministically shuffle
def _shuffled(seq: Sequence[str]) -> list[str]:
    """Deterministically shuffle"""
    # A private RNG with a fixed seed yields the same order every run.
    rng = random.Random()
    rng.seed(FIXED_RANDOM_SEED, version=1)

    shuffled = list(seq)
    rng.shuffle(shuffled)
    return shuffled
from __future__ import annotations
import multiprocessing
import os
import random
import re
from typing import Any
from typing import NoReturn
from typing import overload
from typing import Sequence
import pre_commit.constants as C
from pre_commit import parse_shebang
from pre_commit.hook import Hook
from pre_commit.prefix import Prefix
from pre_commit.util import cmd_output_b
from pre_commit.xargs import xargs
# Seed for _shuffled() so file orderings are stable across runs.
FIXED_RANDOM_SEED = 1542676187
# Matches version-manager shim directories (pyenv, rbenv, ...).
SHIMS_RE = re.compile(r'[/\\]shims[/\\]')
def exe_exists(exe: str) -> bool:
    """Whether ``exe`` resolves to a usable system-wide executable.

    Executables found under a version-manager ``shims`` directory, or
    contained in the user's home directory, are rejected.
    """
    found = parse_shebang.find_executable(exe)
    if found is None:  # nothing on PATH at all
        return False

    homedir = os.path.expanduser('~')
    try:
        common: str | None = os.path.commonpath((found, homedir))
    except ValueError:  # on windows, different drives raises ValueError
        common = None

    # it must not live in a /shims/ directory
    if SHIMS_RE.search(found):
        return False
    # accept when the homedir is / (docker, service user, etc.) or when
    # the exe is not contained in the home directory
    return os.path.dirname(homedir) == homedir or common != homedir
def run_setup_cmd(prefix: Prefix, cmd: tuple[str, ...], **kwargs: Any) -> None:
    # Run an installation/setup command with the hook's prefix as cwd.
    cmd_output_b(*cmd, cwd=prefix.prefix_dir, **kwargs)
@overload
def environment_dir(d: None, language_version: str) -> None: ...
@overload
def environment_dir(d: str, language_version: str) -> str: ...
def environment_dir(d: str | None, language_version: str) -> str | None:
    """Directory name for a language environment; None when unused."""
    return None if d is None else f'{d}-{language_version}'
def assert_version_default(binary: str, version: str) -> None:
    """Raise unless the hook requests the default language version."""
    if version == C.DEFAULT:
        return
    raise AssertionError(
        f'for now, pre-commit requires system-installed {binary} -- '
        f'you selected `language_version: {version}`',
    )
def assert_no_additional_deps(
        lang: str,
        additional_deps: Sequence[str],
) -> None:
    """Raise if a hook declares additional_dependencies for ``lang``."""
    if not additional_deps:
        return
    raise AssertionError(
        f'for now, pre-commit does not support '
        f'additional_dependencies for {lang} -- '
        f'you selected `additional_dependencies: {additional_deps}`',
    )
def basic_get_default_version() -> str:
    # Languages without version discovery always use the 'default' marker.
    return C.DEFAULT
def basic_health_check(prefix: Prefix, language_version: str) -> str | None:
    # None means "healthy"; there is nothing to check for these languages.
    return None
def no_install(
        prefix: Prefix,
        version: str,
        additional_dependencies: Sequence[str],
) -> NoReturn:
    """`install_environment` stub for languages with nothing to install."""
    raise AssertionError('This type is not installable')
def target_concurrency(hook: Hook) -> int:
    """Number of xargs partitions to run concurrently for ``hook``."""
    forced_serial = (
        hook.require_serial or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ
    )
    if forced_serial:
        return 1
    # Travis appears to have a bunch of CPUs, but we can't use them all.
    if 'TRAVIS' in os.environ:
        return 2
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        return 1
def _shuffled(seq: Sequence[str]) -> list[str]:
    """Deterministically shuffle"""
    # A private RNG with a fixed seed yields the same order every run.
    rng = random.Random()
    rng.seed(FIXED_RANDOM_SEED, version=1)

    shuffled = list(seq)
    rng.shuffle(shuffled)
    return shuffled
def run_xargs(
        hook: Hook,
        cmd: tuple[str, ...],
        file_args: Sequence[str],
        **kwargs: Any,
) -> tuple[int, bytes]:
    """Execute ``cmd`` over ``file_args`` with xargs-style partitioning."""
    # Shuffle the files so that they more evenly fill out the xargs partitions,
    # but do it deterministically in case a hook cares about ordering.
    kwargs['target_concurrency'] = target_concurrency(hook)
    return xargs(cmd, _shuffled(file_args), **kwargs)
|
pre_commit/languages/helpers.py
|
codereval_python_data_20
|
poor man's version comparison
def parse_version(s: str) -> tuple[int, ...]:
    """poor man's version comparison"""
    # Tuples of ints compare component-wise, which is exactly right here.
    return tuple(int(part) for part in s.split('.'))
from __future__ import annotations
import contextlib
import errno
import functools
import importlib.resources
import os.path
import shutil
import stat
import subprocess
import sys
import tempfile
from types import TracebackType
from typing import Any
from typing import Callable
from typing import Generator
from typing import IO
import yaml
from pre_commit import parse_shebang
# Prefer the C-accelerated yaml loader/dumper when libyaml is available.
Loader = getattr(yaml, 'CSafeLoader', yaml.SafeLoader)
yaml_load = functools.partial(yaml.load, Loader=Loader)
Dumper = getattr(yaml, 'CSafeDumper', yaml.SafeDumper)
def yaml_dump(o: Any, **kwargs: Any) -> str:
    """Serialize ``o`` to yaml in pre-commit's house style."""
    # when python/mypy#1484 is solved, this can be `functools.partial`
    return yaml.dump(
        o,
        Dumper=Dumper,
        default_flow_style=False,
        indent=4,
        sort_keys=False,
        **kwargs,
    )
def force_bytes(exc: Any) -> bytes:
    """Best-effort conversion of ``exc`` to bytes; never raises."""
    try:
        return bytes(exc)
    except TypeError:
        pass
    try:
        return str(exc).encode()
    except Exception:
        pass
    return f'<unprintable {type(exc).__name__} object>'.encode()
@contextlib.contextmanager
def clean_path_on_failure(path: str) -> Generator[None, None, None]:
    """Cleans up the directory on an exceptional failure."""
    try:
        yield
    except BaseException:
        # Remove whatever partial state was created, then re-raise.
        partial_state_exists = os.path.exists(path)
        if partial_state_exists:
            rmtree(path)
        raise
@contextlib.contextmanager
def tmpdir() -> Generator[str, None, None]:
    """Contextmanager to create a temporary directory. It will be cleaned up
    afterwards.
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        rmtree(path)
def resource_bytesio(filename: str) -> IO[bytes]:
    # Binary handle onto a file bundled in the pre_commit.resources package.
    return importlib.resources.open_binary('pre_commit.resources', filename)
def resource_text(filename: str) -> str:
    # Text contents of a file bundled in the pre_commit.resources package.
    return importlib.resources.read_text('pre_commit.resources', filename)
def make_executable(filename: str) -> None:
    """Add user/group/other execute bits to ``filename``."""
    mode = os.stat(filename).st_mode
    os.chmod(filename, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
class CalledProcessError(RuntimeError):
    """Raised when a subprocess exits with an unexpected return code."""

    def __init__(
            self,
            returncode: int,
            cmd: tuple[str, ...],
            expected_returncode: int,
            stdout: bytes,
            stderr: bytes | None,
    ) -> None:
        super().__init__(returncode, cmd, expected_returncode, stdout, stderr)
        self.returncode = returncode
        self.cmd = cmd
        self.expected_returncode = expected_returncode
        self.stdout = stdout
        self.stderr = stderr

    def __bytes__(self) -> bytes:
        # Indent a stream's contents, or mark it as empty.
        def _fmt(part: bytes | None) -> bytes:
            if not part:
                return b' (none)'
            return b'\n ' + part.replace(b'\n', b'\n ')

        return b''.join((
            f'command: {self.cmd!r}\n'.encode(),
            f'return code: {self.returncode}\n'.encode(),
            f'expected return code: {self.expected_returncode}\n'.encode(),
            b'stdout:', _fmt(self.stdout), b'\n',
            b'stderr:', _fmt(self.stderr),
        ))

    def __str__(self) -> str:
        return self.__bytes__().decode()
def _setdefault_kwargs(kwargs: dict[str, Any]) -> None:
for arg in ('stdin', 'stdout', 'stderr'):
kwargs.setdefault(arg, subprocess.PIPE)
def _oserror_to_output(e: OSError) -> tuple[int, bytes, None]:
    # Present an OS-level launch failure in the (retcode, stdout, stderr)
    # shape returned by cmd_output_b.
    return 1, force_bytes(e).rstrip(b'\n') + b'\n', None
def cmd_output_b(
        *cmd: str,
        retcode: int | None = 0,
        **kwargs: Any,
) -> tuple[int, bytes, bytes | None]:
    """Run ``cmd`` and return ``(returncode, stdout, stderr)`` as bytes.

    :param retcode: the expected return code; pass ``None`` to accept any.
    :raises CalledProcessError: if the process exits with a different code
        than ``retcode``.
    """
    _setdefault_kwargs(kwargs)
    try:
        cmd = parse_shebang.normalize_cmd(cmd)
    except parse_shebang.ExecutableNotFoundError as e:
        # A missing executable is reported as output, not raised here;
        # the retcode check below decides whether it becomes an error.
        returncode, stdout_b, stderr_b = e.to_output()
    else:
        try:
            proc = subprocess.Popen(cmd, **kwargs)
        except OSError as e:
            returncode, stdout_b, stderr_b = _oserror_to_output(e)
        else:
            stdout_b, stderr_b = proc.communicate()
            returncode = proc.returncode
    if retcode is not None and retcode != returncode:
        raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
    return returncode, stdout_b, stderr_b
def cmd_output(*cmd: str, **kwargs: Any) -> tuple[int, str, str | None]:
    """Like ``cmd_output_b`` but with the streams decoded to ``str``."""
    returncode, out_b, err_b = cmd_output_b(*cmd, **kwargs)
    out = None if out_b is None else out_b.decode()
    err = None if err_b is None else err_b.decode()
    return returncode, out, err
if os.name != 'nt':  # pragma: win32 no cover
    from os import openpty
    import termios

    class Pty:
        """A pseudo-terminal pair; ``r``/``w`` are the master/slave fds."""

        def __init__(self) -> None:
            self.r: int | None = None
            self.w: int | None = None

        def __enter__(self) -> Pty:
            self.r, self.w = openpty()

            # tty flags normally change \n to \r\n
            attrs = termios.tcgetattr(self.w)
            assert isinstance(attrs[1], int)
            attrs[1] &= ~(termios.ONLCR | termios.OPOST)
            termios.tcsetattr(self.w, termios.TCSANOW, attrs)

            return self

        def close_w(self) -> None:
            # Idempotent: may be called before __exit__ closes both ends.
            if self.w is not None:
                os.close(self.w)
                self.w = None

        def close_r(self) -> None:
            assert self.r is not None
            os.close(self.r)
            self.r = None

        def __exit__(
                self,
                exc_type: type[BaseException] | None,
                exc_value: BaseException | None,
                traceback: TracebackType | None,
        ) -> None:
            self.close_w()
            self.close_r()

    def cmd_output_p(
            *cmd: str,
            retcode: int | None = 0,
            **kwargs: Any,
    ) -> tuple[int, bytes, bytes | None]:
        """Like ``cmd_output_b`` but attaches the command to a pty.

        Requires ``retcode=None`` and ``stderr=subprocess.STDOUT`` (the
        pty merges both streams into the single returned buffer).
        """
        assert retcode is None
        assert kwargs['stderr'] == subprocess.STDOUT, kwargs['stderr']
        _setdefault_kwargs(kwargs)

        try:
            cmd = parse_shebang.normalize_cmd(cmd)
        except parse_shebang.ExecutableNotFoundError as e:
            return e.to_output()

        with open(os.devnull) as devnull, Pty() as pty:
            assert pty.r is not None
            kwargs.update({'stdin': devnull, 'stdout': pty.w, 'stderr': pty.w})
            try:
                proc = subprocess.Popen(cmd, **kwargs)
            except OSError as e:
                return _oserror_to_output(e)

            # Close our copy of the write end so reads can reach EOF once
            # the child exits.
            pty.close_w()

            buf = b''
            while True:
                try:
                    bts = os.read(pty.r, 4096)
                except OSError as e:
                    if e.errno == errno.EIO:
                        # treated as end-of-file on the pty
                        bts = b''
                    else:
                        raise
                else:
                    buf += bts
                if not bts:
                    break

        return proc.wait(), buf, None
else:  # pragma: no cover
    # Windows has no pty support: fall back to plain piped output.
    cmd_output_p = cmd_output_b
def rmtree(path: str) -> None:
    """On windows, rmtree fails for readonly dirs."""
    def handle_remove_readonly(
            func: Callable[..., Any],
            path: str,
            exc: tuple[type[OSError], OSError, TracebackType],
    ) -> None:
        excvalue = exc[1]
        readonly_failure = (
            func in (os.rmdir, os.remove, os.unlink) and
            excvalue.errno in {errno.EACCES, errno.EPERM}
        )
        if not readonly_failure:
            raise
        # Make the entry and its parent writable, then retry once.
        for p in (path, os.path.dirname(path)):
            os.chmod(p, os.stat(p).st_mode | stat.S_IWUSR)
        func(path)

    shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
def parse_version(s: str) -> tuple[int, ...]:
    """poor man's version comparison"""
    # Tuples of ints compare component-wise, which is exactly right here.
    return tuple(int(part) for part in s.split('.'))
def win_exe(s: str) -> str:
    """Append ``.exe`` on windows; pass ``s`` through elsewhere."""
    if sys.platform == 'win32':
        return f'{s}.exe'
    return s
|
pre_commit/util.py
|
codereval_python_data_21
|
Fixes for the following issues on windows
- https://bugs.python.org/issue8557
- windows does not parse shebangs
This function also makes deep-path shebangs work just fine
def normalize_cmd(cmd: tuple[str, ...]) -> tuple[str, ...]:
    """Fixes for the following issues on windows
    - https://bugs.python.org/issue8557
    - windows does not parse shebangs
    This function also makes deep-path shebangs work just fine
    """
    # Use PATH to determine the executable
    exe = normexe(cmd[0])
    # Figure out the shebang from the resulting command
    with_shebang = parse_filename(exe) + (exe,) + cmd[1:]
    # This could have given us back another bare executable
    return (normexe(with_shebang[0]),) + with_shebang[1:]
from __future__ import annotations
import os.path
from typing import Mapping
from typing import NoReturn
from identify.identify import parse_shebang_from_file
class ExecutableNotFoundError(OSError):
    """Raised when a command's executable cannot be located."""

    def to_output(self) -> tuple[int, bytes, None]:
        # Same (retcode, stdout, stderr) shape as a failed command.
        message = self.args[0]
        return (1, message.encode(), None)
def parse_filename(filename: str) -> tuple[str, ...]:
    """Shebang command of ``filename``, or ``()`` when it does not exist."""
    if os.path.exists(filename):
        return parse_shebang_from_file(filename)
    return ()
def find_executable(
        exe: str, _environ: Mapping[str, str] | None = None,
) -> str | None:
    """Locate ``exe`` on ``PATH`` (honouring ``PATHEXT``); None if absent."""
    exe = os.path.normpath(exe)
    if os.sep in exe:
        # Already path-like: trust the caller.
        return exe

    environ = _environ if _environ is not None else os.environ

    if 'PATHEXT' in environ:
        extensions = environ['PATHEXT'].split(os.pathsep)
        candidates = tuple(f'{exe}{ext}' for ext in extensions) + (exe,)
    else:
        candidates = (exe,)

    for directory in environ.get('PATH', '').split(os.pathsep):
        for candidate in candidates:
            joined = os.path.join(directory, candidate)
            if os.path.isfile(joined) and os.access(joined, os.X_OK):
                return joined
    return None
def normexe(orig: str) -> str:
    """Resolve ``orig`` to an executable path.

    :raises ExecutableNotFoundError: when ``orig`` is missing, a
        directory, or not executable.
    """
    def _error(msg: str) -> NoReturn:
        raise ExecutableNotFoundError(f'Executable `{orig}` {msg}')

    bare_name = (
        os.sep not in orig and (not os.altsep or os.altsep not in orig)
    )
    if bare_name:
        exe = find_executable(orig)
        if exe is None:
            _error('not found')
        return exe
    if os.path.isdir(orig):
        _error('is a directory')
    if not os.path.isfile(orig):
        _error('not found')
    if not os.access(orig, os.X_OK):  # pragma: win32 no cover
        _error('is not executable')
    return orig
def normalize_cmd(cmd: tuple[str, ...]) -> tuple[str, ...]:
"""Fixes for the following issues on windows
- https://bugs.python.org/issue8557
- windows does not parse shebangs
This function also makes deep-path shebangs work just fine
"""
# Use PATH to determine the executable
exe = normexe(cmd[0])
# Figure out the shebang from the resulting command
cmd = parse_filename(exe) + (exe,) + cmd[1:]
# This could have given us back another bare executable
exe = normexe(cmd[0])
return (exe,) + cmd[1:]
|
pre_commit/parse_shebang.py
|
codereval_python_data_22
|
Decorator to wrap a function with a memoizing callable that saves
results in a cache.
def cached(cache, key=hashkey, lock=None):
    """Decorator to wrap a function with a memoizing callable that saves
    results in a cache.

    :param cache: mutable mapping storing the results (``None`` disables
        caching entirely).
    :param key: callable computing the cache key from the call arguments.
    :param lock: optional context manager guarding every cache access.
    """
    def decorator(func):
        if cache is None:
            # No cache: call straight through.
            def wrapper(*args, **kwargs):
                return func(*args, **kwargs)
        elif lock is None:
            def wrapper(*args, **kwargs):
                k = key(*args, **kwargs)
                try:
                    return cache[k]
                except KeyError:
                    pass  # key not found
                v = func(*args, **kwargs)
                try:
                    cache[k] = v
                except ValueError:
                    pass  # value too large
                return v
        else:
            def wrapper(*args, **kwargs):
                k = key(*args, **kwargs)
                try:
                    with lock:
                        return cache[k]
                except KeyError:
                    pass  # key not found
                # The wrapped function runs *outside* the lock.
                v = func(*args, **kwargs)
                # in case of a race, prefer the item already in the cache
                try:
                    with lock:
                        return cache.setdefault(k, v)
                except ValueError:
                    return v  # value too large
        return functools.update_wrapper(wrapper, func)
    return decorator
import functools
from .keys import hashkey
def cached(cache, key=hashkey, lock=None):
    """Decorator to wrap a function with a memoizing callable that saves
    results in a cache.

    :param cache: mutable mapping storing the results (``None`` disables
        caching entirely).
    :param key: callable computing the cache key from the call arguments.
    :param lock: optional context manager guarding every cache access.
    """
    def decorator(func):
        if cache is None:
            # No cache: call straight through.
            def wrapper(*args, **kwargs):
                return func(*args, **kwargs)
        elif lock is None:
            def wrapper(*args, **kwargs):
                k = key(*args, **kwargs)
                try:
                    return cache[k]
                except KeyError:
                    pass  # key not found
                v = func(*args, **kwargs)
                try:
                    cache[k] = v
                except ValueError:
                    pass  # value too large
                return v
        else:
            def wrapper(*args, **kwargs):
                k = key(*args, **kwargs)
                try:
                    with lock:
                        return cache[k]
                except KeyError:
                    pass  # key not found
                # The wrapped function runs *outside* the lock.
                v = func(*args, **kwargs)
                # in case of a race, prefer the item already in the cache
                try:
                    with lock:
                        return cache.setdefault(k, v)
                except ValueError:
                    return v  # value too large
        return functools.update_wrapper(wrapper, func)
    return decorator
def cachedmethod(cache, key=hashkey, lock=None):
    """Decorator to wrap a class or instance method with a memoizing
    callable that saves results in a cache.

    :param cache: callable returning the cache for an instance; may
        return ``None`` to disable caching for that instance.
    :param key: callable computing the cache key from the call arguments.
    :param lock: optional callable returning a per-instance context
        manager guarding cache access.
    """
    def decorator(method):
        if lock is None:
            def wrapper(self, *args, **kwargs):
                c = cache(self)
                if c is None:
                    # No cache for this instance: call straight through.
                    return method(self, *args, **kwargs)
                k = key(*args, **kwargs)
                try:
                    return c[k]
                except KeyError:
                    pass  # key not found
                v = method(self, *args, **kwargs)
                try:
                    c[k] = v
                except ValueError:
                    pass  # value too large
                return v
        else:
            def wrapper(self, *args, **kwargs):
                c = cache(self)
                if c is None:
                    return method(self, *args, **kwargs)
                k = key(*args, **kwargs)
                try:
                    with lock(self):
                        return c[k]
                except KeyError:
                    pass  # key not found
                # The wrapped method runs *outside* the lock.
                v = method(self, *args, **kwargs)
                # in case of a race, prefer the item already in the cache
                try:
                    with lock(self):
                        return c.setdefault(k, v)
                except ValueError:
                    return v  # value too large
        return functools.update_wrapper(wrapper, method)
    return decorator
|
cachetools/decorators.py
|
codereval_python_data_23
|
Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm with a per-item time-to-live (TTL) value.
def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm with a per-item time-to-live (TTL) value.
    """
    if maxsize is None:
        # Unbounded variant: items only expire by time.
        return _cache(_UnboundTTLCache(ttl, timer), typed)
    if callable(maxsize):
        # Bare-decorator usage: @ttl_cache (default maxsize of 128).
        return _cache(TTLCache(128, ttl, timer), typed)(maxsize)
    return _cache(TTLCache(maxsize, ttl, timer), typed)
"""`functools.lru_cache` compatible memoizing function decorators."""
import collections
import functools
import math
import random
import time
try:
from threading import RLock
except ImportError: # pragma: no cover
from dummy_threading import RLock
from . import keys
from .fifo import FIFOCache
from .lfu import LFUCache
from .lru import LRUCache
from .mru import MRUCache
from .rr import RRCache
from .ttl import TTLCache
__all__ = ('lfu_cache', 'lru_cache', 'mru_cache', 'rr_cache', 'ttl_cache')
_CacheInfo = collections.namedtuple('CacheInfo', [
'hits', 'misses', 'maxsize', 'currsize'
])
class _UnboundCache(dict):
@property
def maxsize(self):
return None
@property
def currsize(self):
return len(self)
class _UnboundTTLCache(TTLCache):
    # TTL cache without a size bound: items still expire by time, but
    # are never evicted for space, and maxsize reports None.

    def __init__(self, ttl, timer):
        super().__init__(math.inf, ttl, timer)

    @property
    def maxsize(self):
        return None
def _cache(cache, typed):
    """Wrap *cache* in an ``functools.lru_cache``-compatible decorator factory.

    Returns a decorator that memoizes the decorated function in *cache*
    and exposes ``cache_info()``, ``cache_clear()`` and
    ``cache_parameters()`` on the wrapper.
    """
    # Read maxsize once so cache_parameters() reports the configured bound.
    maxsize = cache.maxsize
    def decorator(func):
        key = keys.typedkey if typed else keys.hashkey
        lock = RLock()
        stats = [0, 0]  # [hits, misses]
        def wrapper(*args, **kwargs):
            k = key(*args, **kwargs)
            with lock:
                try:
                    v = cache[k]
                    stats[0] += 1
                    return v
                except KeyError:
                    stats[1] += 1
            # Call func outside the lock so a slow function does not
            # block other users of the same cache.
            v = func(*args, **kwargs)
            # in case of a race, prefer the item already in the cache
            try:
                with lock:
                    return cache.setdefault(k, v)
            except ValueError:
                return v  # value too large for the cache's size bound
        def cache_info():
            with lock:
                hits, misses = stats
                maxsize = cache.maxsize
                currsize = cache.currsize
            return _CacheInfo(hits, misses, maxsize, currsize)
        def cache_clear():
            with lock:
                try:
                    cache.clear()
                finally:
                    # Reset statistics even if clear() raises.
                    stats[:] = [0, 0]
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
        functools.update_wrapper(wrapper, func)
        return wrapper
    return decorator
def fifo_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a First In First Out (FIFO)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @fifo_cache without arguments.
        return _cache(FIFOCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(FIFOCache(maxsize), typed)
def lfu_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Frequently Used (LFU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lfu_cache without arguments.
        return _cache(LFUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LFUCache(maxsize), typed)
def lru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lru_cache without arguments.
        return _cache(LRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LRUCache(maxsize), typed)
def mru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Most Recently Used (MRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @mru_cache without arguments.
        return _cache(MRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(MRUCache(maxsize), typed)
def rr_cache(maxsize=128, choice=random.choice, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Random Replacement (RR)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @rr_cache without arguments.
        return _cache(RRCache(128, choice), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(RRCache(maxsize, choice), typed)
def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm with a per-item time-to-live (TTL) value.
    """
    if callable(maxsize):
        # Bare decorator usage: @ttl_cache without arguments.
        return _cache(TTLCache(128, ttl, timer), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundTTLCache(ttl, timer), typed)
    return _cache(TTLCache(maxsize, ttl, timer), typed)
|
cachetools/func.py
|
codereval_python_data_24
|
Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Most Recently Used (MRU)
algorithm.
def mru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Most Recently Used (MRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @mru_cache without arguments.
        return _cache(MRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(MRUCache(maxsize), typed)
"""`functools.lru_cache` compatible memoizing function decorators."""
import collections
import functools
import math
import random
import time
try:
from threading import RLock
except ImportError: # pragma: no cover
from dummy_threading import RLock
from . import keys
from .fifo import FIFOCache
from .lfu import LFUCache
from .lru import LRUCache
from .mru import MRUCache
from .rr import RRCache
from .ttl import TTLCache
__all__ = ('lfu_cache', 'lru_cache', 'mru_cache', 'rr_cache', 'ttl_cache')
_CacheInfo = collections.namedtuple('CacheInfo', [
'hits', 'misses', 'maxsize', 'currsize'
])
class _UnboundCache(dict):
@property
def maxsize(self):
return None
@property
def currsize(self):
return len(self)
class _UnboundTTLCache(TTLCache):
    # TTL cache without a size bound: items still expire by time, but
    # are never evicted for space, and maxsize reports None.

    def __init__(self, ttl, timer):
        super().__init__(math.inf, ttl, timer)

    @property
    def maxsize(self):
        return None
def _cache(cache, typed):
    """Wrap *cache* in an ``functools.lru_cache``-compatible decorator factory.

    Returns a decorator that memoizes the decorated function in *cache*
    and exposes ``cache_info()``, ``cache_clear()`` and
    ``cache_parameters()`` on the wrapper.
    """
    # Read maxsize once so cache_parameters() reports the configured bound.
    maxsize = cache.maxsize
    def decorator(func):
        key = keys.typedkey if typed else keys.hashkey
        lock = RLock()
        stats = [0, 0]  # [hits, misses]
        def wrapper(*args, **kwargs):
            k = key(*args, **kwargs)
            with lock:
                try:
                    v = cache[k]
                    stats[0] += 1
                    return v
                except KeyError:
                    stats[1] += 1
            # Call func outside the lock so a slow function does not
            # block other users of the same cache.
            v = func(*args, **kwargs)
            # in case of a race, prefer the item already in the cache
            try:
                with lock:
                    return cache.setdefault(k, v)
            except ValueError:
                return v  # value too large for the cache's size bound
        def cache_info():
            with lock:
                hits, misses = stats
                maxsize = cache.maxsize
                currsize = cache.currsize
            return _CacheInfo(hits, misses, maxsize, currsize)
        def cache_clear():
            with lock:
                try:
                    cache.clear()
                finally:
                    # Reset statistics even if clear() raises.
                    stats[:] = [0, 0]
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
        functools.update_wrapper(wrapper, func)
        return wrapper
    return decorator
def fifo_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a First In First Out (FIFO)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @fifo_cache without arguments.
        return _cache(FIFOCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(FIFOCache(maxsize), typed)
def lfu_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Frequently Used (LFU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lfu_cache without arguments.
        return _cache(LFUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LFUCache(maxsize), typed)
def lru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lru_cache without arguments.
        return _cache(LRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LRUCache(maxsize), typed)
def mru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Most Recently Used (MRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @mru_cache without arguments.
        return _cache(MRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(MRUCache(maxsize), typed)
def rr_cache(maxsize=128, choice=random.choice, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Random Replacement (RR)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @rr_cache without arguments.
        return _cache(RRCache(128, choice), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(RRCache(maxsize, choice), typed)
def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm with a per-item time-to-live (TTL) value.
    """
    if callable(maxsize):
        # Bare decorator usage: @ttl_cache without arguments.
        return _cache(TTLCache(128, ttl, timer), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundTTLCache(ttl, timer), typed)
    return _cache(TTLCache(maxsize, ttl, timer), typed)
|
cachetools/func.py
|
codereval_python_data_25
|
Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm.
def lru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lru_cache without arguments.
        return _cache(LRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LRUCache(maxsize), typed)
"""`functools.lru_cache` compatible memoizing function decorators."""
import collections
import functools
import math
import random
import time
try:
from threading import RLock
except ImportError: # pragma: no cover
from dummy_threading import RLock
from . import keys
from .fifo import FIFOCache
from .lfu import LFUCache
from .lru import LRUCache
from .mru import MRUCache
from .rr import RRCache
from .ttl import TTLCache
__all__ = ('lfu_cache', 'lru_cache', 'mru_cache', 'rr_cache', 'ttl_cache')
_CacheInfo = collections.namedtuple('CacheInfo', [
'hits', 'misses', 'maxsize', 'currsize'
])
class _UnboundCache(dict):
@property
def maxsize(self):
return None
@property
def currsize(self):
return len(self)
class _UnboundTTLCache(TTLCache):
    # TTL cache without a size bound: items still expire by time, but
    # are never evicted for space, and maxsize reports None.

    def __init__(self, ttl, timer):
        super().__init__(math.inf, ttl, timer)

    @property
    def maxsize(self):
        return None
def _cache(cache, typed):
    """Wrap *cache* in an ``functools.lru_cache``-compatible decorator factory.

    Returns a decorator that memoizes the decorated function in *cache*
    and exposes ``cache_info()``, ``cache_clear()`` and
    ``cache_parameters()`` on the wrapper.
    """
    # Read maxsize once so cache_parameters() reports the configured bound.
    maxsize = cache.maxsize
    def decorator(func):
        key = keys.typedkey if typed else keys.hashkey
        lock = RLock()
        stats = [0, 0]  # [hits, misses]
        def wrapper(*args, **kwargs):
            k = key(*args, **kwargs)
            with lock:
                try:
                    v = cache[k]
                    stats[0] += 1
                    return v
                except KeyError:
                    stats[1] += 1
            # Call func outside the lock so a slow function does not
            # block other users of the same cache.
            v = func(*args, **kwargs)
            # in case of a race, prefer the item already in the cache
            try:
                with lock:
                    return cache.setdefault(k, v)
            except ValueError:
                return v  # value too large for the cache's size bound
        def cache_info():
            with lock:
                hits, misses = stats
                maxsize = cache.maxsize
                currsize = cache.currsize
            return _CacheInfo(hits, misses, maxsize, currsize)
        def cache_clear():
            with lock:
                try:
                    cache.clear()
                finally:
                    # Reset statistics even if clear() raises.
                    stats[:] = [0, 0]
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
        functools.update_wrapper(wrapper, func)
        return wrapper
    return decorator
def fifo_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a First In First Out (FIFO)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @fifo_cache without arguments.
        return _cache(FIFOCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(FIFOCache(maxsize), typed)
def lfu_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Frequently Used (LFU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lfu_cache without arguments.
        return _cache(LFUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LFUCache(maxsize), typed)
def lru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lru_cache without arguments.
        return _cache(LRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LRUCache(maxsize), typed)
def mru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Most Recently Used (MRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @mru_cache without arguments.
        return _cache(MRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(MRUCache(maxsize), typed)
def rr_cache(maxsize=128, choice=random.choice, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Random Replacement (RR)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @rr_cache without arguments.
        return _cache(RRCache(128, choice), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(RRCache(maxsize, choice), typed)
def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm with a per-item time-to-live (TTL) value.
    """
    if callable(maxsize):
        # Bare decorator usage: @ttl_cache without arguments.
        return _cache(TTLCache(128, ttl, timer), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundTTLCache(ttl, timer), typed)
    return _cache(TTLCache(maxsize, ttl, timer), typed)
|
cachetools/func.py
|
codereval_python_data_26
|
Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Frequently Used (LFU)
algorithm.
def lfu_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Frequently Used (LFU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lfu_cache without arguments.
        return _cache(LFUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LFUCache(maxsize), typed)
"""`functools.lru_cache` compatible memoizing function decorators."""
import collections
import functools
import math
import random
import time
try:
from threading import RLock
except ImportError: # pragma: no cover
from dummy_threading import RLock
from . import keys
from .fifo import FIFOCache
from .lfu import LFUCache
from .lru import LRUCache
from .mru import MRUCache
from .rr import RRCache
from .ttl import TTLCache
__all__ = ('lfu_cache', 'lru_cache', 'mru_cache', 'rr_cache', 'ttl_cache')
_CacheInfo = collections.namedtuple('CacheInfo', [
'hits', 'misses', 'maxsize', 'currsize'
])
class _UnboundCache(dict):
@property
def maxsize(self):
return None
@property
def currsize(self):
return len(self)
class _UnboundTTLCache(TTLCache):
    # TTL cache without a size bound: items still expire by time, but
    # are never evicted for space, and maxsize reports None.

    def __init__(self, ttl, timer):
        super().__init__(math.inf, ttl, timer)

    @property
    def maxsize(self):
        return None
def _cache(cache, typed):
    """Wrap *cache* in an ``functools.lru_cache``-compatible decorator factory.

    Returns a decorator that memoizes the decorated function in *cache*
    and exposes ``cache_info()``, ``cache_clear()`` and
    ``cache_parameters()`` on the wrapper.
    """
    # Read maxsize once so cache_parameters() reports the configured bound.
    maxsize = cache.maxsize
    def decorator(func):
        key = keys.typedkey if typed else keys.hashkey
        lock = RLock()
        stats = [0, 0]  # [hits, misses]
        def wrapper(*args, **kwargs):
            k = key(*args, **kwargs)
            with lock:
                try:
                    v = cache[k]
                    stats[0] += 1
                    return v
                except KeyError:
                    stats[1] += 1
            # Call func outside the lock so a slow function does not
            # block other users of the same cache.
            v = func(*args, **kwargs)
            # in case of a race, prefer the item already in the cache
            try:
                with lock:
                    return cache.setdefault(k, v)
            except ValueError:
                return v  # value too large for the cache's size bound
        def cache_info():
            with lock:
                hits, misses = stats
                maxsize = cache.maxsize
                currsize = cache.currsize
            return _CacheInfo(hits, misses, maxsize, currsize)
        def cache_clear():
            with lock:
                try:
                    cache.clear()
                finally:
                    # Reset statistics even if clear() raises.
                    stats[:] = [0, 0]
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
        functools.update_wrapper(wrapper, func)
        return wrapper
    return decorator
def fifo_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a First In First Out (FIFO)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @fifo_cache without arguments.
        return _cache(FIFOCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(FIFOCache(maxsize), typed)
def lfu_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Frequently Used (LFU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lfu_cache without arguments.
        return _cache(LFUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LFUCache(maxsize), typed)
def lru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @lru_cache without arguments.
        return _cache(LRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LRUCache(maxsize), typed)
def mru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Most Recently Used (MRU)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @mru_cache without arguments.
        return _cache(MRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(MRUCache(maxsize), typed)
def rr_cache(maxsize=128, choice=random.choice, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Random Replacement (RR)
    algorithm.
    """
    if callable(maxsize):
        # Bare decorator usage: @rr_cache without arguments.
        return _cache(RRCache(128, choice), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(RRCache(maxsize, choice), typed)
def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm with a per-item time-to-live (TTL) value.
    """
    if callable(maxsize):
        # Bare decorator usage: @ttl_cache without arguments.
        return _cache(TTLCache(128, ttl, timer), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundTTLCache(ttl, timer), typed)
    return _cache(TTLCache(maxsize, ttl, timer), typed)
|
cachetools/func.py
|
codereval_python_data_27
|
Remove and return the `(key, value)` pair first inserted.
def popitem(self):
"""Remove and return the `(key, value)` pair first inserted."""
try:
key = next(iter(self.__order))
except StopIteration:
raise KeyError('%s is empty' % type(self).__name__) from None
else:
return (key, self.pop(key))
import collections
from .cache import Cache
class FIFOCache(Cache):
    """First In First Out (FIFO) cache implementation."""

    def __init__(self, maxsize, getsizeof=None):
        Cache.__init__(self, maxsize, getsizeof)
        # Insertion-ordered key registry; values are unused placeholders.
        self.__order = collections.OrderedDict()

    def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
        cache_setitem(self, key, value)
        if key in self.__order:
            # Re-setting an existing key counts as a fresh insertion, so
            # it moves to the back of the eviction queue.
            self.__order.move_to_end(key)
        else:
            self.__order[key] = None

    def __delitem__(self, key, cache_delitem=Cache.__delitem__):
        cache_delitem(self, key)
        del self.__order[key]

    def popitem(self):
        """Remove and return the `(key, value)` pair first inserted."""
        order_iter = iter(self.__order)
        try:
            oldest = next(order_iter)
        except StopIteration:
            raise KeyError('%s is empty' % type(self).__name__) from None
        return (oldest, self.pop(oldest))
|
cachetools/fifo.py
|
codereval_python_data_28
|
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
def setdefault(self, key, default=None):
if key in self:
value = self[key]
else:
self[key] = value = default
return value
from collections.abc import MutableMapping
class _DefaultSize(object):
__slots__ = ()
def __getitem__(self, _):
return 1
def __setitem__(self, _, value):
assert value == 1
def pop(self, _):
return 1
class Cache(MutableMapping):
    """Mutable mapping to serve as a simple cache or cache base class."""
    # Sentinel distinguishing "no default given" from default=None in pop().
    __marker = object()
    # Shared constant-size table (every item counts as 1); replaced by a
    # per-instance dict in __init__ when a custom getsizeof is in effect.
    __size = _DefaultSize()
    def __init__(self, maxsize, getsizeof=None):
        if getsizeof:
            self.getsizeof = getsizeof
        if self.getsizeof is not Cache.getsizeof:
            # Custom sizes can vary per item, so track them individually.
            self.__size = dict()
        self.__data = dict()
        self.__currsize = 0
        self.__maxsize = maxsize
    def __repr__(self):
        return '%s(%r, maxsize=%r, currsize=%r)' % (
            self.__class__.__name__,
            list(self.__data.items()),
            self.__maxsize,
            self.__currsize,
        )
    def __getitem__(self, key):
        try:
            return self.__data[key]
        except KeyError:
            # dict-style hook so subclasses can customize missing-key handling.
            return self.__missing__(key)
    def __setitem__(self, key, value):
        maxsize = self.__maxsize
        size = self.getsizeof(value)
        if size > maxsize:
            raise ValueError('value too large')
        # Evict (via the subclass's popitem policy) only when the item is
        # new or grows; shrinking updates always fit.
        if key not in self.__data or self.__size[key] < size:
            while self.__currsize + size > maxsize:
                self.popitem()
        if key in self.__data:
            diffsize = size - self.__size[key]
        else:
            diffsize = size
        self.__data[key] = value
        self.__size[key] = size
        self.__currsize += diffsize
    def __delitem__(self, key):
        size = self.__size.pop(key)
        del self.__data[key]
        self.__currsize -= size
    def __contains__(self, key):
        return key in self.__data
    def __missing__(self, key):
        raise KeyError(key)
    def __iter__(self):
        return iter(self.__data)
    def __len__(self):
        return len(self.__data)
    def get(self, key, default=None):
        if key in self:
            return self[key]
        else:
            return default
    def pop(self, key, default=__marker):
        if key in self:
            value = self[key]
            del self[key]
        elif default is self.__marker:
            raise KeyError(key)
        else:
            value = default
        return value
    def setdefault(self, key, default=None):
        if key in self:
            value = self[key]
        else:
            self[key] = value = default
        return value
    @property
    def maxsize(self):
        """The maximum size of the cache."""
        return self.__maxsize
    @property
    def currsize(self):
        """The current size of the cache."""
        return self.__currsize
    @staticmethod
    def getsizeof(value):
        """Return the size of a cache element's value."""
        return 1
|
cachetools/cache.py
|
codereval_python_data_29
|
D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
from collections.abc import MutableMapping
class _DefaultSize(object):
__slots__ = ()
def __getitem__(self, _):
return 1
def __setitem__(self, _, value):
assert value == 1
def pop(self, _):
return 1
class Cache(MutableMapping):
    """Mutable mapping to serve as a simple cache or cache base class."""
    # Sentinel distinguishing "no default given" from default=None in pop().
    __marker = object()
    # Shared constant-size table (every item counts as 1); replaced by a
    # per-instance dict in __init__ when a custom getsizeof is in effect.
    __size = _DefaultSize()
    def __init__(self, maxsize, getsizeof=None):
        if getsizeof:
            self.getsizeof = getsizeof
        if self.getsizeof is not Cache.getsizeof:
            # Custom sizes can vary per item, so track them individually.
            self.__size = dict()
        self.__data = dict()
        self.__currsize = 0
        self.__maxsize = maxsize
    def __repr__(self):
        return '%s(%r, maxsize=%r, currsize=%r)' % (
            self.__class__.__name__,
            list(self.__data.items()),
            self.__maxsize,
            self.__currsize,
        )
    def __getitem__(self, key):
        try:
            return self.__data[key]
        except KeyError:
            # dict-style hook so subclasses can customize missing-key handling.
            return self.__missing__(key)
    def __setitem__(self, key, value):
        maxsize = self.__maxsize
        size = self.getsizeof(value)
        if size > maxsize:
            raise ValueError('value too large')
        # Evict (via the subclass's popitem policy) only when the item is
        # new or grows; shrinking updates always fit.
        if key not in self.__data or self.__size[key] < size:
            while self.__currsize + size > maxsize:
                self.popitem()
        if key in self.__data:
            diffsize = size - self.__size[key]
        else:
            diffsize = size
        self.__data[key] = value
        self.__size[key] = size
        self.__currsize += diffsize
    def __delitem__(self, key):
        size = self.__size.pop(key)
        del self.__data[key]
        self.__currsize -= size
    def __contains__(self, key):
        return key in self.__data
    def __missing__(self, key):
        raise KeyError(key)
    def __iter__(self):
        return iter(self.__data)
    def __len__(self):
        return len(self.__data)
    def get(self, key, default=None):
        if key in self:
            return self[key]
        else:
            return default
    def pop(self, key, default=__marker):
        if key in self:
            value = self[key]
            del self[key]
        elif default is self.__marker:
            raise KeyError(key)
        else:
            value = default
        return value
    def setdefault(self, key, default=None):
        if key in self:
            value = self[key]
        else:
            self[key] = value = default
        return value
    @property
    def maxsize(self):
        """The maximum size of the cache."""
        return self.__maxsize
    @property
    def currsize(self):
        """The current size of the cache."""
        return self.__currsize
    @staticmethod
    def getsizeof(value):
        """Return the size of a cache element's value."""
        return 1
|
cachetools/cache.py
|
codereval_python_data_30
|
Decorator to wrap a class or instance method with a memoizing
callable that saves results in a cache.
def cachedmethod(cache, key=hashkey, lock=None):
    """Decorator to wrap a class or instance method with a memoizing
    callable that saves results in a cache.

    `cache(self)` must return the mapping used for this instance, or
    None to bypass caching entirely.  When `lock` is given, `lock(self)`
    must return a context manager guarding that mapping.
    """
    def decorator(method):
        if lock is None:
            def wrapper(self, *args, **kwargs):
                mapping = cache(self)
                if mapping is None:
                    return method(self, *args, **kwargs)
                cache_key = key(*args, **kwargs)
                try:
                    return mapping[cache_key]
                except KeyError:
                    pass  # not cached yet
                result = method(self, *args, **kwargs)
                try:
                    mapping[cache_key] = result
                except ValueError:
                    pass  # result too large for the cache
                return result
        else:
            def wrapper(self, *args, **kwargs):
                mapping = cache(self)
                if mapping is None:
                    return method(self, *args, **kwargs)
                cache_key = key(*args, **kwargs)
                try:
                    with lock(self):
                        return mapping[cache_key]
                except KeyError:
                    pass  # not cached yet
                result = method(self, *args, **kwargs)
                # in case of a race, prefer the item already in the cache
                try:
                    with lock(self):
                        return mapping.setdefault(cache_key, result)
                except ValueError:
                    return result  # result too large for the cache
        return functools.update_wrapper(wrapper, method)
    return decorator
import functools
from .keys import hashkey
def cached(cache, key=hashkey, lock=None):
    """Decorator to wrap a function with a memoizing callable that saves
    results in a cache.
    """
    def decorator(func):
        if cache is None:
            # Caching disabled: decorate with a transparent pass-through.
            def wrapper(*args, **kwargs):
                return func(*args, **kwargs)
        elif lock is None:
            def wrapper(*args, **kwargs):
                cache_key = key(*args, **kwargs)
                try:
                    return cache[cache_key]
                except KeyError:
                    pass  # not cached yet
                result = func(*args, **kwargs)
                try:
                    cache[cache_key] = result
                except ValueError:
                    pass  # result too large for the cache
                return result
        else:
            def wrapper(*args, **kwargs):
                cache_key = key(*args, **kwargs)
                try:
                    with lock:
                        return cache[cache_key]
                except KeyError:
                    pass  # not cached yet
                result = func(*args, **kwargs)
                # in case of a race, prefer the item already in the cache
                try:
                    with lock:
                        return cache.setdefault(cache_key, result)
                except ValueError:
                    return result  # result too large for the cache
        return functools.update_wrapper(wrapper, func)
    return decorator
def cachedmethod(cache, key=hashkey, lock=None):
    """Decorator to wrap a class or instance method with a memoizing
    callable that saves results in a cache.

    `cache(self)` must return the mapping used for this instance, or
    None to bypass caching entirely.  When `lock` is given, `lock(self)`
    must return a context manager guarding that mapping.
    """
    def decorator(method):
        if lock is None:
            def wrapper(self, *args, **kwargs):
                mapping = cache(self)
                if mapping is None:
                    return method(self, *args, **kwargs)
                cache_key = key(*args, **kwargs)
                try:
                    return mapping[cache_key]
                except KeyError:
                    pass  # not cached yet
                result = method(self, *args, **kwargs)
                try:
                    mapping[cache_key] = result
                except ValueError:
                    pass  # result too large for the cache
                return result
        else:
            def wrapper(self, *args, **kwargs):
                mapping = cache(self)
                if mapping is None:
                    return method(self, *args, **kwargs)
                cache_key = key(*args, **kwargs)
                try:
                    with lock(self):
                        return mapping[cache_key]
                except KeyError:
                    pass  # not cached yet
                result = method(self, *args, **kwargs)
                # in case of a race, prefer the item already in the cache
                try:
                    with lock(self):
                        return mapping.setdefault(cache_key, result)
                except ValueError:
                    return result  # result too large for the cache
        return functools.update_wrapper(wrapper, method)
    return decorator
|
cachetools/decorators.py
|
codereval_python_data_31
|
Format an exception.
:param e: Any exception instance.
:type e: Exception
:param max_level: Maximum call stack level (default 30)
:type max_level: int
:param max_path_level: Maximum path level (default 5)
:type max_path_level: int
:return The exception readable string
:rtype str
    @classmethod
    def extostr(cls, e, max_level=30, max_path_level=5):
        """
        Format an exception.
        :param e: Any exception instance.
        :type e: Exception
        :param max_level: Maximum call stack level (default 30)
        :type max_level: int
        :param max_path_level: Maximum path level (default 5)
        :type max_path_level: int
        :return The exception readable string
        :rtype str
        """
        # Tracks frame tuples appended below; released in finally.
        list_frame = None
        try:
            out_buffer = ""
            # Exception class name
            out_buffer += "e.cls:[{0}]".format(e.__class__.__name__)
            # Exception message; fall back to repr if str() cannot encode
            try:
                ex_buf = str(e)
            except UnicodeEncodeError:
                ex_buf = repr(str(e))
            except Exception as e:
                # NOTE(review): 'e' is rebound here and deleted when this
                # handler exits; safe only because we re-raise immediately.
                logger.warning("Exception, e=%s", e)
                raise
            out_buffer += ", e.bytes:[{0}]".format(ex_buf)
            # Traceback of the exception currently being handled
            # (assumes we are called from inside an except block -
            # otherwise sys.exc_info() yields no traceback).
            si = sys.exc_info()
            # Raw frame
            # tuple : (file, lineno, method, code)
            raw_frame = traceback.extract_tb(si[2])
            raw_frame.reverse()
            # Walk to the innermost traceback entry
            last_tb_next = None
            cur_tb = si[2]
            while cur_tb:
                last_tb_next = cur_tb
                cur_tb = cur_tb.tb_next
            # Append caller frames above the traceback, skipping the ones
            # already covered by the extracted traceback entries
            list_frame = list()
            cur_count = -1
            skip_count = len(raw_frame)
            if last_tb_next:
                cur_frame = last_tb_next.tb_frame
            else:
                cur_frame = None
            while cur_frame:
                cur_count += 1
                if cur_count < skip_count:
                    cur_frame = cur_frame.f_back
                else:
                    # Need : tuple : (file, lineno, method, code)
                    raw_frame.append((cur_frame.f_code.co_filename, cur_frame.f_lineno, cur_frame.f_code.co_name, ""))
                    cur_frame = cur_frame.f_back
            # Render the call stack, innermost first
            cur_idx = 0
            out_buffer += ", e.cs=["
            for tu in raw_frame:
                line = tu[1]
                cur_file = tu[0]
                method = tu[2]
                # Truncate long paths to the last max_path_level components
                ar_token = cur_file.rsplit(os.sep, max_path_level)
                if len(ar_token) > max_path_level:
                    # Remove head
                    ar_token.pop(0)
                    # Join
                    cur_file = "..." + os.sep.join(ar_token)
                # Format
                out_buffer += "in:{0}#{1}@{2} ".format(method, cur_file, line)
                # Stop after max_level stack entries
                cur_idx += 1
                if cur_idx >= max_level:
                    out_buffer += "..."
                    break
            # Close
            out_buffer += "]"
            # Ok
            return out_buffer
        finally:
            if list_frame:
                del list_frame
"""
# -*- coding: utf-8 -*-
# ===============================================================================
#
# Copyright (C) 2013/2017 Laurent Labatut / Laurent Champagnac
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# ===============================================================================
"""
import ast
import logging
import os
import platform
import sys
import time
import traceback
from logging.config import dictConfig
from logging.handlers import WatchedFileHandler, TimedRotatingFileHandler, SysLogHandler
from threading import Lock
import gevent
import pytz
from datetime import datetime
from gevent import monkey, config
from yaml import load, SafeLoader
from pysolbase.ContextFilter import ContextFilter
logger = logging.getLogger(__name__)
lifecyclelogger = logging.getLogger("lifecycle")
class SolBase(object):
"""
Base utilities & helpers.
"""
# ===============================
# STATIC STUFF
# ===============================
# Component name (mainly for rsyslog)
_compo_name = "CompoNotSet"
# Global init stuff
_voodoo_initialized = False
_voodoo_lock = Lock()
# Logging stuff
_logging_initialized = False
_logging_lock = Lock()
# Fork stuff
_master_process = True
# ===============================
# DATE & MS
# ===============================
@classmethod
def mscurrent(cls):
"""
Return current millis since epoch
:return float
:rtype float
"""
return time.time() * 1000.0
@classmethod
def securrent(cls):
"""
Return current seconds since epoch
:return float
:rtype float
"""
return time.time()
@classmethod
def msdiff(cls, ms_start, ms_end=None):
"""
Get difference in millis between current millis and provided millis.
:param ms_start: Start millis
:type ms_start: float
:param ms_end: End millis (will use current if not provided)
:type ms_end: float
:return float
:rtype float
"""
if not ms_end:
ms_end = cls.mscurrent()
return ms_end - ms_start
@classmethod
def datecurrent(cls, erase_mode=0):
"""
Return current date (UTC)
:param erase_mode: Erase mode (0=nothing, 1=remove microseconds but keep millis, 2=remove millis completely)
:return datetime.datetime
:rtype datetime.datetime
"""
if erase_mode == 0:
return datetime.utcnow()
elif erase_mode == 1:
# Force precision loss (keep millis, kick micro)
dt = datetime.utcnow()
return dt.replace(microsecond=int((dt.microsecond * 0.001) * 1000))
elif erase_mode == 2:
return datetime.utcnow().replace(microsecond=0)
@classmethod
def datediff(cls, dt_start, dt_end=None):
"""
Get difference in millis between two datetime
:param dt_start: Start datetime
:type dt_start: datetime.datetime
:param dt_end: End datetime (will use current utc if not provided)
:type dt_end: datetime.datetime
:return float
:rtype float
"""
# Fix
if not dt_end:
dt_end = cls.datecurrent()
# Get delta
delta = dt_end - dt_start
return ((delta.days * 86400 + delta.seconds) * 1000) + (delta.microseconds * 0.001)
# ==========================================
# EPOCH / DT
# ==========================================
# Unix epoch (1970-01-01 00:00:00) as a naive UTC datetime; reference point for dt_to_epoch()
DT_EPOCH = datetime.utcfromtimestamp(0)
@classmethod
def dt_to_epoch(cls, dt):
"""
Convert a datetime (UTC required) to a unix time since epoch, as seconds, as integer.
Note that millis precision is lost.
:param dt: datetime
:type dt: datetime
:return int
:rtype int
"""
return int((dt - cls.DT_EPOCH).total_seconds())
@classmethod
def epoch_to_dt(cls, epoch):
"""
Convert an epoch float or int to datetime (UTC)
:param epoch: float,int
:type epoch: float,int
:return datetime
:rtype datetime
"""
return datetime.utcfromtimestamp(epoch)
@classmethod
def dt_is_naive(cls, dt):
"""
Return true if dt is naive
:param dt: datetime.datetime
:type dt: datetime.datetime
:return bool
:rtype bool
"""
# Naive : no tzinfo
if not dt.tzinfo:
return True
# Aware
return False
@classmethod
def dt_ensure_utc_aware(cls, dt):
    """
    Switch dt to the UTC time zone. A naive dt is assumed to already be UTC
    and simply gets the zone attached; an aware dt is converted to UTC.
    Returns an AWARE (utc) datetime.
    :param dt: datetime.datetime
    :type dt: datetime.datetime
    :return: Aware datetime in UTC.
    :rtype: datetime.datetime
    """
    if not cls.dt_is_naive(dt):
        # Aware: convert to UTC, stay aware
        return dt.astimezone(pytz.utc)
    # Naive: assume UTC, just attach the zone
    return dt.replace(tzinfo=pytz.utc)
@classmethod
def dt_ensure_utc_naive(cls, dt):
    """
    Return dt switched to UTC (if applicable) and made naive.
    :param dt: datetime.datetime
    :type dt: datetime.datetime
    :return: Naive datetime expressed in UTC.
    :rtype: datetime.datetime
    """
    # Normalize to UTC (aware), then strip the tzinfo
    aware = cls.dt_ensure_utc_aware(dt)
    return aware.replace(tzinfo=None)
# ===============================
# COMPO NAME (FOR RSYSLOG)
# ===============================
@classmethod
def set_compo_name(cls, compo_name):
    """
    Set the component name (mainly useful for rsyslog).
    :param compo_name: The component name or None. If None/empty, method does nothing.
    :type compo_name: str,None
    """
    if not compo_name:
        return
    cls._compo_name = compo_name
    lifecyclelogger.debug("compo_name now set to=%s", cls._compo_name)
@classmethod
def get_compo_name(cls):
"""
Get current component name.
:return str
:rtype str
"""
return cls._compo_name
@classmethod
def get_machine_name(cls):
"""
Get machine name
:return: Machine name
:rtype: str
"""
return platform.uname()[1]
# ===============================
# MISC
# ===============================
@classmethod
def sleep(cls, sleep_ms):
    """
    Sleep for the specified milliseconds.
    Also acts as a cooperative context switch, since it relies on gevent.sleep.
    :param sleep_ms: Millis to sleep.
    :type sleep_ms: int
    :return Nothing.
    """
    # gevent 1.3 : ms is not fully respected (100 can be 80-100)
    gevent.sleep(sleep_ms * 0.001)
# ===============================
# EXCEPTION HELPER
# ===============================
@classmethod
def extostr(cls, e, max_level=30, max_path_level=5):
    """
    Format an exception into a single readable line: class name, str(e) and
    a compacted call stack (most recent call first).
    Intended to be called from inside an "except" block (relies on sys.exc_info()).
    :param e: Any exception instance.
    :type e: Exception
    :param max_level: Maximum call stack level (default 30)
    :type max_level: int
    :param max_path_level: Maximum path level (default 5)
    :type max_path_level: int
    :return The exception readable string
    :rtype str
    """
    # Go
    list_frame = None
    try:
        out_buffer = ""
        # Class type
        out_buffer += "e.cls:[{0}]".format(e.__class__.__name__)
        # To string
        try:
            ex_buf = str(e)
        except UnicodeEncodeError:
            # Fall back to repr when the message cannot be encoded
            ex_buf = repr(str(e))
        except Exception as e:
            # NOTE(review): this rebinds the parameter "e" before re-raising — confirm intended
            logger.warning("Exception, e=%s", e)
            raise
        out_buffer += ", e.bytes:[{0}]".format(ex_buf)
        # Traceback
        # sys.exc_info() returns the exception currently being handled (assumed to be "e")
        si = sys.exc_info()
        # Raw frame
        # tuple : (file, lineno, method, code)
        raw_frame = traceback.extract_tb(si[2])
        raw_frame.reverse()
        # Go to last tb_next
        last_tb_next = None
        cur_tb = si[2]
        while cur_tb:
            last_tb_next = cur_tb
            cur_tb = cur_tb.tb_next
        # Skip frame up to current raw frame count
        list_frame = list()
        cur_count = -1
        skip_count = len(raw_frame)
        if last_tb_next:
            cur_frame = last_tb_next.tb_frame
        else:
            cur_frame = None
        # Walk the caller frames above the traceback to extend the stack view
        while cur_frame:
            cur_count += 1
            if cur_count < skip_count:
                cur_frame = cur_frame.f_back
            else:
                # Need : tuple : (file, lineno, method, code)
                raw_frame.append((cur_frame.f_code.co_filename, cur_frame.f_lineno, cur_frame.f_code.co_name, ""))
                cur_frame = cur_frame.f_back
        # Build it
        cur_idx = 0
        out_buffer += ", e.cs=["
        for tu in raw_frame:
            line = tu[1]
            cur_file = tu[0]
            method = tu[2]
            # Handle max path level
            # Keep only the last max_path_level path components, prefixed with "..."
            ar_token = cur_file.rsplit(os.sep, max_path_level)
            if len(ar_token) > max_path_level:
                # Remove head
                ar_token.pop(0)
                # Join
                cur_file = "..." + os.sep.join(ar_token)
            # Format
            out_buffer += "in:{0}#{1}@{2} ".format(method, cur_file, line)
            # Loop
            cur_idx += 1
            if cur_idx >= max_level:
                out_buffer += "..."
                break
        # Close
        out_buffer += "]"
        # Ok
        return out_buffer
    finally:
        # NOTE(review): list_frame is never populated above; this del looks vestigial
        if list_frame:
            del list_frame
# ===============================
# VOODOO INIT
# ===============================
@classmethod
def _reset(cls):
"""
For unittest only
"""
cls._logging_initialized = False
cls._voodoo_initialized = False
@classmethod
def voodoo_init(cls, aggressive=True, init_logging=True):
    """
    Global initialization, to call asap.
    Applies gevent monkey patching and (optionally) the default logging configuration.
    One-shot: subsequent calls are no-ops (guarded by _voodoo_initialized under _voodoo_lock).
    :param aggressive: Passed through to gevent monkey.patch_all.
    :type aggressive: bool
    :param init_logging: If True, logging_init is called.
    :type init_logging: bool
    :return Nothing.
    """
    try:
        # Check (fast path, no lock)
        if cls._voodoo_initialized:
            return
        # Lock
        with cls._voodoo_lock:
            # Re-check (double-checked init)
            if cls._voodoo_initialized:
                return
            # Fire the voodoo magic :)
            lifecyclelogger.debug("Voodoo : gevent : entering, aggressive=%s", aggressive)
            monkey.patch_all(aggressive=aggressive)
            lifecyclelogger.debug("Voodoo : gevent : entering")
            # Gevent 1.3 : by default, gevent keep tracks of spawn call stack
            # This may lead to memory leak, if a method spawn itself in loop (timer mode)
            # We disable this
            config.track_greenlet_tree = False
            # Initialize log level to INFO
            if init_logging:
                lifecyclelogger.debug("Voodoo : logging : entering")
                cls.logging_init()
                lifecyclelogger.debug("Voodoo : logging : done")
            # Done
            cls._voodoo_initialized = True
    finally:
        # If whenever init_logging if set AND it is NOT initialized => we must init it
        # => we may have been called previously with init_logging=false, but monkey patch is SET and logging not initialized
        # => so it must be init now
        if init_logging and not cls._logging_initialized:
            lifecyclelogger.debug("Voodoo : logging : not yet init : entering")
            cls.logging_init()
            lifecyclelogger.debug("Voodoo : logging : not yet init : done")
# ===============================
# LOGGING
# ===============================
@classmethod
def logging_init(cls, log_level="INFO", force_reset=False, log_callback=None,
                 log_to_file=None,
                 log_to_syslog=True,
                 log_to_syslog_facility=SysLogHandler.LOG_LOCAL0,
                 log_to_console=True,
                 log_to_file_mode="watched_file",
                 context_filter=None):
    """
    Initialize logging sub system with default settings (console, pre-formatted output).
    One-shot unless force_reset is set (guarded by _logging_initialized under _logging_lock).
    :param log_to_console: if True to console
    :type log_to_console: bool
    :param log_level: The log level to set. Any value in "DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"
    :type log_level: str
    :param force_reset: If true, logging system is reset.
    :type force_reset: bool
    :param log_to_file: If specified, log to file
    :type log_to_file: str,None
    :param log_to_syslog: If specified, log to syslog
    :type log_to_syslog: bool
    :param log_to_syslog_facility: Syslog facility.
    :type log_to_syslog_facility: int
    :param log_to_file_mode: str "watched_file" for WatchedFileHandler, "time_file" for TimedRotatingFileHandler (or time_file_seconds for unittest)
    :type log_to_file_mode: str
    :param log_callback: Callback for unittest
    :param context_filter: Context filter. If None, pysolbase.ContextFilter.ContextFilter is used. If used instance has an attr "filter", it is added to all handlers and "%(kfilter)s" will be populated by all thread context key/values, using filter method call. Refer to our ContextFilter default implementation for details.
    :type context_filter: None,object
    :return Nothing.
    """
    # One-shot guard (fast path, no lock)
    if cls._logging_initialized and not force_reset:
        return
    with cls._logging_lock:
        # Re-check under lock (double-checked init)
        if cls._logging_initialized and not force_reset:
            return
        # Reset
        cls._reset_logging()
        # Default
        logging.basicConfig(level=log_level)
        # Filter
        if context_filter:
            c_filter = context_filter
        else:
            c_filter = ContextFilter()
        # Format begin
        s_f = "%(asctime)s | %(levelname)s | %(module)s@%(funcName)s@%(lineno)d | %(message)s "
        # Browse
        if hasattr(c_filter, "filter"):
            # Push generic field
            # We expect it to be formatted like our pysolbase.ContextFilter.ContextFilter#filter method.
            s_f += "|%(kfilter)s"
        # Format end
        s_f += "| %(thread)d:%(threadName)s | %(process)d:%(processName)s"
        # Formatter
        f = logging.Formatter(s_f)
        # Console handler
        c = None
        if log_to_console:
            # This can be overriden by unittest, we use __stdout__
            c = logging.StreamHandler(sys.__stdout__)
            c.setLevel(logging.getLevelName(log_level))
            c.setFormatter(f)
        # File handler to /tmp
        cf = None
        if log_to_file:
            if log_to_file_mode == "watched_file":
                # Follows external rotation (logrotate-style)
                cf = WatchedFileHandler(log_to_file, encoding="utf-8")
                cf.setLevel(logging.getLevelName(log_level))
                cf.setFormatter(f)
            elif log_to_file_mode == "time_file":
                # Daily rotation, 7 backups, UTC timestamps
                cf = TimedRotatingFileHandler(log_to_file, encoding="utf-8", utc=True, when="D", interval=1, backupCount=7)
                cf.setLevel(logging.getLevelName(log_level))
                cf.setFormatter(f)
            elif log_to_file_mode == "time_file_seconds":
                # For unittest only
                cf = TimedRotatingFileHandler(log_to_file, encoding="utf-8", utc=True, when="S", interval=1, backupCount=7)
                cf.setLevel(logging.getLevelName(log_level))
                cf.setFormatter(f)
            else:
                logger.warning("Invalid log_to_file_mode=%s", log_to_file_mode)
        # Syslog handler
        syslog = None
        if log_to_syslog:
            try:
                from pysolbase.SysLogger import SysLogger
                syslog = SysLogger(log_callback=log_callback, facility=log_to_syslog_facility)
                syslog.setLevel(logging.getLevelName(log_level))
                syslog.setFormatter(f)
            except Exception as e:
                # This will fail on WINDOWS (no attr AF_UNIX)
                logger.debug("Unable to import SysLogger, e=%s", SolBase.extostr(e))
                syslog = False
        # Initialize
        # Replace ALL root handlers with the ones built above
        root = logging.getLogger()
        root.setLevel(logging.getLevelName(log_level))
        root.handlers = []
        if log_to_console:
            c.addFilter(c_filter)
            root.addHandler(c)
        if log_to_file and cf:
            cf.addFilter(c_filter)
            root.addHandler(cf)
        if log_to_syslog and syslog:
            syslog.addFilter(c_filter)
            root.addHandler(syslog)
        # Done
        cls._logging_initialized = True
        if force_reset:
            lifecyclelogger.info("Logging : initialized from memory, log_level=%s, force_reset=%s", log_level, force_reset)
        else:
            lifecyclelogger.debug("Logging : initialized from memory, log_level=%s, force_reset=%s", log_level, force_reset)
@classmethod
def _register_filter(cls, c_filter):
"""
Register filter across the whole logging (root and all loggers)
Notice : addFilter is protected against duplicates add
:param c_filter: pysolbase.ContextFilter.ContextFilter
:type c_filter: pysolbase.ContextFilter.ContextFilter
"""
# Initialize
root = logging.getLogger()
for h in list(root.handlers):
h.addFilter(c_filter)
# Browse all loggers and set
for name in logging.root.manager.loggerDict:
cur_logger = logging.getLogger(name)
for h in list(cur_logger.handlers):
h.addFilter(c_filter)
@classmethod
def _reset_logging(cls):
"""
Reset
"""
# Found no way to fully reset the logging stuff while running
# We reset root and all loggers to INFO, and kick handlers
# Initialize
root = logging.getLogger()
root.setLevel(logging.getLevelName("INFO"))
for h in root.handlers:
# noinspection PyBroadException
try:
h.close()
except:
pass
root.handlers = []
# Browse all loggers and set
for name in logging.root.manager.loggerDict:
cur_logger = logging.getLogger(name)
cur_logger.setLevel(logging.getLevelName("INFO"))
for h in cur_logger.handlers:
# noinspection PyBroadException
try:
h.close()
except:
pass
cur_logger.handlers = []
@classmethod
def logging_initfromfile(cls, config_file_name, force_reset=False, context_filter=None):
    """
    Initialize logging system from a yaml dictConfig configuration file, with optional reset.
    One-shot unless force_reset is set (guarded by _logging_initialized under _logging_lock).
    :param config_file_name: Configuration file name (yaml, dictConfig schema).
    :type config_file_name: str
    :param force_reset: If true, logging system is reset.
    :type force_reset: bool
    :param context_filter: Context filter. If None, pysolbase.ContextFilter.ContextFilter is used. If used instance has an attr "filter", it is added to all handlers and "%(kfilter)s" will be populated by all thread context key/values, using filter method call. Refer to our ContextFilter default implementation for details.
    :type context_filter: None,object
    :return Nothing.
    :raise: Propagates any error from yaml parsing or dictConfig.
    """
    # One-shot guard (fast path, no lock)
    if cls._logging_initialized and not force_reset:
        return
    with cls._logging_lock:
        # Re-check under lock (double-checked init)
        if cls._logging_initialized and not force_reset:
            return
        # Filter
        c_filter = context_filter if context_filter else ContextFilter()
        # Reset current logging state
        cls._reset_logging()
        # Load yaml and hand it to dictConfig
        # (the original no-op "try/except Exception: raise" wrapper was removed)
        logger.debug("Logging : yaml config_file_name=%s", config_file_name)
        with open(config_file_name, 'r') as f:
            d = load(f, Loader=SafeLoader)
        dictConfig(d)
        # Register filter across all handlers
        if c_filter:
            cls._register_filter(c_filter)
        # NOTE(review): unlike logging_init, _logging_initialized is NOT set here —
        # confirm whether this is intended (subsequent calls will re-init).
        if force_reset:
            lifecyclelogger.info("Logging : initialized from yaml file, config_file_name=%s", config_file_name)
        else:
            lifecyclelogger.debug("Logging : initialized from yaml file, config_file_name=%s", config_file_name)
@classmethod
def context_set(cls, k, v):
    """
    Set thread/greenlet context value.
    Wrapper over pysolbase.ContextFilter.ContextFilter#set_value; effective only
    when ContextFilter is in use (which is the default).
    :param k: key name
    :type k: basestring
    :param v: value
    :type v: object
    """
    # Delegate straight to the filter's class-level store
    ContextFilter.set_value(k, v)
# ===============================
# FORK STUFF
# ===============================
@classmethod
def get_master_process(cls):
"""
Return True if we are the master process, False otherwise.
:return bool
:rtype bool
"""
return cls._master_process
@classmethod
def set_master_process(cls, b):
    """
    Record whether we are a fork master or a child process.
    :param b: True if we are master process, False if we are a child process.
    :type b: bool
    :return Nothing
    """
    # Trace first (message kept identical), then switch the flag
    logger.debug("Switching _masterProcess to %s", b)
    cls._master_process = b
# ===============================
# BINARY STUFF
# ===============================
@classmethod
def binary_to_unicode(cls, bin_buf, encoding="utf-8"):
"""
Binary buffer to str, using the specified encoding
:param bin_buf: Binary buffer
:type bin_buf: bytes
:param encoding: Encoding to use
:type encoding: str
:return str
:rtype str
"""
return bin_buf.decode(encoding)
@classmethod
def unicode_to_binary(cls, unicode_buf, encoding="utf-8"):
"""
Unicode to binary buffer, using the specified encoding
:param unicode_buf: String to convert.
:type unicode_buf: str
:param encoding: Encoding to use.
:type encoding: str
:return bytes
:rtype bytes
"""
return unicode_buf.encode(encoding)
@classmethod
def fix_paths_for_popen(cls):
"""
Fix path and env for popen calls toward current project
Mainly used for unittests, which requires current env to be propagated while testing command line invocation within same project
"""
# Merge all
ar_p_path = sys.path
if os.environ.get("PYTHONPATH"):
ar_p_path.extend(os.environ.get("PYTHONPATH").split(":"))
if os.environ.get("PATH"):
ar_p_path.extend(os.environ.get("PATH").split(":"))
# Join
new_path = ":".join(ar_p_path)
# Re-Assign
os.environ["PATH"] = new_path
os.environ["PYTHONPATH"] = new_path
# ===============================
# CONVERSIONS
# ===============================
@classmethod
def to_int(cls, v):
"""
Convert to int
:param v: int,str
:type v: int,str
:return: int
:rtype int
"""
if isinstance(v, int):
return v
else:
return int(v)
@classmethod
def to_bool(cls, v):
"""
Convert to bool
:param v: bool,str
:type v: bool,str
:return: bool
:rtype bool
"""
if isinstance(v, bool):
return v
else:
return ast.literal_eval(v)
@classmethod
def get_classname(cls, my_instance):
"""
Return the class name of my_instance, or "Instance.None".
:param cls: Our class.
:param my_instance: Instance to use.
:return: Return the class name of my_instance, or "Instance.None" in case of error/None value.
"""
if my_instance is None:
return "Instance.None"
else:
return my_instance.__class__.__name__
@classmethod
def get_pathseparator(cls):
"""
Return the path separator.
https://docs.python.org/library/os.html#os.sep
:param cls: Our class
:return: The path separator (string)
"""
return os.sep
@classmethod
def is_bool(cls, my_bool):
"""
Return true if the provided my_bool is a boolean.
:param cls: Our class.
:param my_bool: A boolean..
:return: Return true if the provided my_bool is a boolean. False otherwise.
"""
if my_bool is None:
return False
else:
return isinstance(my_bool, bool)
@classmethod
def is_int(cls, my_int):
"""
Return true if the provided my_int is a integer.
:param cls: Our class.
:param my_int: An integer..
:return: Return true if the provided my_int is a integer. False otherwise.
"""
if my_int is None:
return False
# Caution, boolean is an integer...
elif SolBase.is_bool(my_int):
return False
else:
return isinstance(my_int, int)
@classmethod
def get_current_pid_as_string(cls):
"""
Return the current pids as string.
:param cls: Our class.
:return: A String
"""
try:
return "pid={0}, ppid={1}".format(os.getpid(), os.getppid())
except AttributeError:
return "pid={0}".format(os.getpid())
# =====================================================
# HELPER FOR SOCKET CLOSING
# =====================================================
@classmethod
def safe_close_socket(cls, soc_to_close):
"""
Safe close a socket
:param soc_to_close: socket
:type soc_to_close: socket.socket
"""
if soc_to_close is None:
return
try:
soc_to_close.shutdown(2)
except Exception as e:
logger.debug("Socket shutdown ex=%s", SolBase.extostr(e))
try:
soc_to_close.close()
except Exception as e:
logger.debug("Socket close ex=%s", SolBase.extostr(e))
try:
del soc_to_close
except Exception as e:
logger.debug("Socket del ex=%s", SolBase.extostr(e))
|
pysolbase/SolBase.py
|
codereval_python_data_32
|
Write to the specified filename, the provided binary buffer
Create the file if required.
:param file_name: File name.
:type file_name: str
:param text_buffer: Text buffer to write.
:type text_buffer: str
:param encoding: The encoding to use.
:type encoding: str
:param overwrite: If true, file is overwritten.
:type overwrite: bool
:return: The number of bytes written or lt 0 if error.
:rtype int
@staticmethod
def append_text_to_file(file_name, text_buffer, encoding, overwrite=False):
"""
Write to the specified filename, the provided binary buffer
Create the file if required.
:param file_name: File name.
:type file_name: str
:param text_buffer: Text buffer to write.
:type text_buffer: str
:param encoding: The encoding to use.
:type encoding: str
:param overwrite: If true, file is overwritten.
:type overwrite: bool
:return: The number of bytes written or lt 0 if error.
:rtype int
"""
# Go
rd = None
try:
# Open (text : open return a io.BufferedReader)
if not overwrite:
rd = codecs.open(file_name, "a+", encoding, "strict", -1)
else:
rd = codecs.open(file_name, "w", encoding, "strict", -1)
# Read everything
# CAUTION : 2.7 return None :(
return rd.write(text_buffer)
except IOError as e:
# Exception...
logger.warning("append_text_to_file : IOError, ex=%s", SolBase.extostr(e))
return -1
except Exception as e:
logger.warning("append_text_to_file : Exception, ex=%s", SolBase.extostr(e))
return -1
finally:
# Close if not None...
if rd:
rd.close()
"""
# -*- coding: utf-8 -*-
# ===============================================================================
#
# Copyright (C) 2013/2017 Laurent Labatut / Laurent Champagnac
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# ===============================================================================
"""
# Import
import logging
import os
import codecs
from pysolbase.SolBase import SolBase
logger = logging.getLogger(__name__)
class FileUtility(object):
    """
    File utility: existence checks, size, whole-file read/write helpers.
    All I/O methods are best-effort: failures are logged and reported via
    the return value (None or -1), never raised.
    """

    @staticmethod
    def is_path_exist(path_name):
        """
        Check if a path (file or dir) name exist.
        :param path_name: Path name.
        :type path_name: str
        :return: Return true (exist), false (do not exist, or invalid file name)
        :rtype bool
        """
        # Validate input (log messages fixed: they previously said "file_name")
        if path_name is None:
            logger.warning("is_path_exist : path_name is None")
            return False
        elif not isinstance(path_name, str):
            logger.warning("is_path_exist : path_name not a text_type, className=%s", SolBase.get_classname(path_name))
            return False
        # Go
        return os.path.exists(path_name)

    @staticmethod
    def is_file_exist(file_name):
        """
        Check if file name exist.
        :param file_name: File name.
        :type file_name: str
        :return: Return true (exist), false (do not exist, or invalid file name)
        :rtype bool
        """
        if file_name is None:
            logger.warning("is_file_exist : file_name is None")
            return False
        elif not isinstance(file_name, str):
            logger.warning("is_file_exist : file_name not a text_type, className=%s", SolBase.get_classname(file_name))
            return False
        # Go
        return os.path.isfile(file_name)

    @staticmethod
    def is_dir_exist(dir_name):
        """
        Check if dir name exist.
        :param dir_name: Directory name.
        :type dir_name: str
        :return: Return true (exist), false (do not exist, or invalid file name)
        :rtype bool
        """
        # Validate input (log messages fixed: they previously said "file_name")
        if dir_name is None:
            logger.warning("is_dir_exist : dir_name is None")
            return False
        elif not isinstance(dir_name, str):
            logger.warning("is_dir_exist : dir_name not a text_type, className=%s", SolBase.get_classname(dir_name))
            return False
        # Go
        return os.path.isdir(dir_name)

    @staticmethod
    def get_file_size(file_name):
        """
        Return a file size in bytes.
        :param file_name: File name.
        :type file_name: str
        :return: An integer, gt-eq 0 if file exist, lt 0 if error.
        :rtype int
        """
        if not FileUtility.is_file_exist(file_name):
            return -1
        return os.path.getsize(file_name)

    @classmethod
    def get_current_dir(cls):
        """
        Return the current directory.
        :return: A String
        :rtype str
        """
        return os.getcwd()

    @staticmethod
    def file_to_binary(file_name):
        """
        Load a file toward a binary buffer.
        :param file_name: File name.
        :type file_name: str
        :return: Return the binary buffer or None in case of error.
        :rtype: bytes,None
        """
        # Check
        if not FileUtility.is_file_exist(file_name):
            logger.warning("file_to_binary : file_name not exist, file_name=%s", file_name)
            return None
        try:
            # "with" guarantees the handle is closed (replaces try/finally)
            with open(file_name, "rb") as rd:
                return rd.read()
        except IOError as e:
            # Prefix added for log consistency with the other methods
            logger.warning("file_to_binary : IOError, ex=%s", SolBase.extostr(e))
            return None
        except Exception as e:
            logger.warning("file_to_binary : Exception, ex=%s", SolBase.extostr(e))
            return None

    @staticmethod
    def file_to_textbuffer(file_name, encoding):
        """
        Load a file toward a text buffer, using the specified encoding while reading.
        CAUTION : This will read the whole file IN MEMORY.
        :param file_name: File name.
        :type file_name: str
        :param encoding: Encoding to use.
        :type encoding: str
        :return: A text buffer or None in case of error.
        :rtype str
        """
        # Check
        if not FileUtility.is_file_exist(file_name):
            logger.warning("file_to_textbuffer : file_name not exist, file_name=%s", file_name)
            return None
        try:
            # codecs.open: raw decode, no newline translation (kept for compatibility)
            with codecs.open(file_name, "r", encoding, "strict", -1) as rd:
                return rd.read()
        except IOError as e:
            # Log prefix fixed (previously said "file_to_binary", a copy/paste slip)
            logger.warning("file_to_textbuffer : IOError, ex=%s", SolBase.extostr(e))
            return None
        except Exception as e:
            logger.warning("file_to_textbuffer : Exception, ex=%s", SolBase.extostr(e))
            return None

    @staticmethod
    def append_binary_to_file(file_name, bin_buf):
        """
        Write to the specified filename, the provided binary buffer.
        Create the file if required.
        :param file_name: File name.
        :type file_name: str
        :param bin_buf: Binary buffer to write.
        :type bin_buf: bytes
        :return: The number of bytes written or lt 0 if error.
        :rtype int
        """
        try:
            with open(file_name, "ab+") as wd:
                return wd.write(bin_buf)
        except IOError as e:
            logger.warning("append_binary_to_file : IOError, ex=%s", SolBase.extostr(e))
            return -1
        except Exception as e:
            logger.warning("append_binary_to_file : Exception, ex=%s", SolBase.extostr(e))
            return -1

    @staticmethod
    def append_text_to_file(file_name, text_buffer, encoding, overwrite=False):
        """
        Write to the specified filename, the provided text buffer.
        Create the file if required.
        :param file_name: File name.
        :type file_name: str
        :param text_buffer: Text buffer to write.
        :type text_buffer: str
        :param encoding: The encoding to use.
        :type encoding: str
        :param overwrite: If true, file is overwritten (otherwise appended).
        :type overwrite: bool
        :return: The number of bytes written or lt 0 if error.
        :rtype int
        """
        try:
            mode = "w" if overwrite else "a+"
            # CAUTION : the codecs writer may return None (python 2.7 legacy)
            with codecs.open(file_name, mode, encoding, "strict", -1) as wd:
                return wd.write(text_buffer)
        except IOError as e:
            logger.warning("append_text_to_file : IOError, ex=%s", SolBase.extostr(e))
            return -1
        except Exception as e:
            logger.warning("append_text_to_file : Exception, ex=%s", SolBase.extostr(e))
            return -1
|
pysolbase/FileUtility.py
|
codereval_python_data_33
|
Load a file toward a text buffer (UTF-8), using the specify encoding while reading.
CAUTION : This will read the whole file IN MEMORY.
:param file_name: File name.
:type file_name: str
:param encoding: Encoding to use.
:type encoding: str
:return: A text buffer or None in case of error.
:rtype str
@staticmethod
def file_to_textbuffer(file_name, encoding):
    """
    Load a file toward a text buffer, using the specified encoding while reading.
    CAUTION : This will read the whole file IN MEMORY.
    :param file_name: File name.
    :type file_name: str
    :param encoding: Encoding to use.
    :type encoding: str
    :return: A text buffer or None in case of error.
    :rtype str
    """
    # Guard: missing file
    if not FileUtility.is_file_exist(file_name):
        logger.warning("file_to_textbuffer : file_name not exist, file_name=%s", file_name)
        return None
    try:
        # "with" closes the handle on every path (replaces try/finally)
        with codecs.open(file_name, "r", encoding, "strict", -1) as reader:
            return reader.read()
    except IOError as e:
        # NOTE(review): log prefix says "file_to_binary" — kept byte-identical (style-only rewrite)
        logger.warning("file_to_binary : IOError, ex=%s", SolBase.extostr(e))
        return None
    except Exception as e:
        logger.warning("file_to_binary : Exception, ex=%s", SolBase.extostr(e))
        return None
"""
# -*- coding: utf-8 -*-
# ===============================================================================
#
# Copyright (C) 2013/2017 Laurent Labatut / Laurent Champagnac
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# ===============================================================================
"""
# Import
import logging
import os
import codecs
from pysolbase.SolBase import SolBase
logger = logging.getLogger(__name__)
class FileUtility(object):
    """
    File utility: existence checks, size, whole-file read/write helpers.
    All I/O methods are best-effort: failures are logged and reported via
    the return value (None or -1), never raised.
    """

    @staticmethod
    def is_path_exist(path_name):
        """
        Check if a path (file or dir) name exist.
        :param path_name: Path name.
        :type path_name: str
        :return: Return true (exist), false (do not exist, or invalid file name)
        :rtype bool
        """
        # Validate input (log messages fixed: they previously said "file_name")
        if path_name is None:
            logger.warning("is_path_exist : path_name is None")
            return False
        elif not isinstance(path_name, str):
            logger.warning("is_path_exist : path_name not a text_type, className=%s", SolBase.get_classname(path_name))
            return False
        # Go
        return os.path.exists(path_name)

    @staticmethod
    def is_file_exist(file_name):
        """
        Check if file name exist.
        :param file_name: File name.
        :type file_name: str
        :return: Return true (exist), false (do not exist, or invalid file name)
        :rtype bool
        """
        if file_name is None:
            logger.warning("is_file_exist : file_name is None")
            return False
        elif not isinstance(file_name, str):
            logger.warning("is_file_exist : file_name not a text_type, className=%s", SolBase.get_classname(file_name))
            return False
        # Go
        return os.path.isfile(file_name)

    @staticmethod
    def is_dir_exist(dir_name):
        """
        Check if dir name exist.
        :param dir_name: Directory name.
        :type dir_name: str
        :return: Return true (exist), false (do not exist, or invalid file name)
        :rtype bool
        """
        # Validate input (log messages fixed: they previously said "file_name")
        if dir_name is None:
            logger.warning("is_dir_exist : dir_name is None")
            return False
        elif not isinstance(dir_name, str):
            logger.warning("is_dir_exist : dir_name not a text_type, className=%s", SolBase.get_classname(dir_name))
            return False
        # Go
        return os.path.isdir(dir_name)

    @staticmethod
    def get_file_size(file_name):
        """
        Return a file size in bytes.
        :param file_name: File name.
        :type file_name: str
        :return: An integer, gt-eq 0 if file exist, lt 0 if error.
        :rtype int
        """
        if not FileUtility.is_file_exist(file_name):
            return -1
        return os.path.getsize(file_name)

    @classmethod
    def get_current_dir(cls):
        """
        Return the current directory.
        :return: A String
        :rtype str
        """
        return os.getcwd()

    @staticmethod
    def file_to_binary(file_name):
        """
        Load a file toward a binary buffer.
        :param file_name: File name.
        :type file_name: str
        :return: Return the binary buffer or None in case of error.
        :rtype: bytes,None
        """
        # Check
        if not FileUtility.is_file_exist(file_name):
            logger.warning("file_to_binary : file_name not exist, file_name=%s", file_name)
            return None
        try:
            # "with" guarantees the handle is closed (replaces try/finally)
            with open(file_name, "rb") as rd:
                return rd.read()
        except IOError as e:
            # Prefix added for log consistency with the other methods
            logger.warning("file_to_binary : IOError, ex=%s", SolBase.extostr(e))
            return None
        except Exception as e:
            logger.warning("file_to_binary : Exception, ex=%s", SolBase.extostr(e))
            return None

    @staticmethod
    def file_to_textbuffer(file_name, encoding):
        """
        Load a file toward a text buffer, using the specified encoding while reading.
        CAUTION : This will read the whole file IN MEMORY.
        :param file_name: File name.
        :type file_name: str
        :param encoding: Encoding to use.
        :type encoding: str
        :return: A text buffer or None in case of error.
        :rtype str
        """
        # Check
        if not FileUtility.is_file_exist(file_name):
            logger.warning("file_to_textbuffer : file_name not exist, file_name=%s", file_name)
            return None
        try:
            # codecs.open: raw decode, no newline translation (kept for compatibility)
            with codecs.open(file_name, "r", encoding, "strict", -1) as rd:
                return rd.read()
        except IOError as e:
            # Log prefix fixed (previously said "file_to_binary", a copy/paste slip)
            logger.warning("file_to_textbuffer : IOError, ex=%s", SolBase.extostr(e))
            return None
        except Exception as e:
            logger.warning("file_to_textbuffer : Exception, ex=%s", SolBase.extostr(e))
            return None

    @staticmethod
    def append_binary_to_file(file_name, bin_buf):
        """
        Write to the specified filename, the provided binary buffer.
        Create the file if required.
        :param file_name: File name.
        :type file_name: str
        :param bin_buf: Binary buffer to write.
        :type bin_buf: bytes
        :return: The number of bytes written or lt 0 if error.
        :rtype int
        """
        try:
            with open(file_name, "ab+") as wd:
                return wd.write(bin_buf)
        except IOError as e:
            logger.warning("append_binary_to_file : IOError, ex=%s", SolBase.extostr(e))
            return -1
        except Exception as e:
            logger.warning("append_binary_to_file : Exception, ex=%s", SolBase.extostr(e))
            return -1

    @staticmethod
    def append_text_to_file(file_name, text_buffer, encoding, overwrite=False):
        """
        Write to the specified filename, the provided text buffer.
        Create the file if required.
        :param file_name: File name.
        :type file_name: str
        :param text_buffer: Text buffer to write.
        :type text_buffer: str
        :param encoding: The encoding to use.
        :type encoding: str
        :param overwrite: If true, file is overwritten (otherwise appended).
        :type overwrite: bool
        :return: The number of bytes written or lt 0 if error.
        :rtype int
        """
        try:
            mode = "w" if overwrite else "a+"
            # CAUTION : the codecs writer may return None (python 2.7 legacy)
            with codecs.open(file_name, mode, encoding, "strict", -1) as wd:
                return wd.write(text_buffer)
        except IOError as e:
            logger.warning("append_text_to_file : IOError, ex=%s", SolBase.extostr(e))
            return -1
        except Exception as e:
            logger.warning("append_text_to_file : Exception, ex=%s", SolBase.extostr(e))
            return -1
|
pysolbase/FileUtility.py
|
codereval_python_data_34
|
Check if file name exist.
:param file_name: File name.
:type file_name: str
:return: Return true (exist), false (do not exist, or invalid file name)
:rtype bool
@staticmethod
def is_file_exist(file_name):
"""
Check if file name exist.
:param file_name: File name.
:type file_name: str
:return: Return true (exist), false (do not exist, or invalid file name)
:rtype bool
"""
# Check
if file_name is None:
logger.warning("is_file_exist : file_name is None")
return False
elif not isinstance(file_name, str):
logger.warning("is_file_exist : file_name not a text_type, className=%s", SolBase.get_classname(file_name))
return False
# Go
return os.path.isfile(file_name)
"""
# -*- coding: utf-8 -*-
# ===============================================================================
#
# Copyright (C) 2013/2017 Laurent Labatut / Laurent Champagnac
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# ===============================================================================
"""
# Import
import logging
import os
import codecs
from pysolbase.SolBase import SolBase
logger = logging.getLogger(__name__)
class FileUtility(object):
    """
    File utility : existence checks, size, whole-file read and append helpers.
    All methods log errors and return sentinel values (False / None / -1)
    instead of raising.
    """

    @staticmethod
    def is_path_exist(path_name):
        """
        Check if a path (file or dir) name exist.
        :param path_name: Path name.
        :type path_name: str
        :return: Return true (exist), false (do not exist, or invalid file name)
        :rtype bool
        """
        # Check (log message fixed : previously said "file_name")
        if path_name is None:
            logger.warning("is_path_exist : path_name is None")
            return False
        elif not isinstance(path_name, str):
            logger.warning("is_path_exist : path_name not a text_type, className=%s", SolBase.get_classname(path_name))
            return False
        # Go
        return os.path.exists(path_name)

    @staticmethod
    def is_file_exist(file_name):
        """
        Check if file name exist.
        :param file_name: File name.
        :type file_name: str
        :return: Return true (exist), false (do not exist, or invalid file name)
        :rtype bool
        """
        # Check
        if file_name is None:
            logger.warning("is_file_exist : file_name is None")
            return False
        elif not isinstance(file_name, str):
            logger.warning("is_file_exist : file_name not a text_type, className=%s", SolBase.get_classname(file_name))
            return False
        # Go
        return os.path.isfile(file_name)

    @staticmethod
    def is_dir_exist(dir_name):
        """
        Check if dir name exist.
        :param dir_name: Directory name.
        :type dir_name: str
        :return: Return true (exist), false (do not exist, or invalid file name)
        :rtype bool
        """
        # Check (log messages fixed : previously said "file_name")
        if dir_name is None:
            logger.warning("is_dir_exist : dir_name is None")
            return False
        elif not isinstance(dir_name, str):
            logger.warning("is_dir_exist : dir_name not a text_type, className=%s", SolBase.get_classname(dir_name))
            return False
        # Go
        return os.path.isdir(dir_name)

    @staticmethod
    def get_file_size(file_name):
        """
        Return a file size in bytes.
        :param file_name: File name.
        :type file_name: str
        :return: An integer, gt-eq 0 if file exist, lt 0 if error.
        :rtype int
        """
        if not FileUtility.is_file_exist(file_name):
            return -1
        return os.path.getsize(file_name)

    @classmethod
    def get_current_dir(cls):
        """
        Return the current working directory.
        :return: A String
        :rtype text_type
        """
        return os.getcwd()

    @staticmethod
    def file_to_binary(file_name):
        """
        Load a whole file into a binary buffer.
        :param file_name: File name.
        :type file_name: str
        :return: Return the binary buffer or None in case of error.
        :rtype: bytes,None
        """
        # Check
        if not FileUtility.is_file_exist(file_name):
            logger.warning("file_to_binary : file_name not exist, file_name=%s", file_name)
            return None
        # Go : the context manager guarantees the handle is closed on every path
        try:
            with open(file_name, "rb") as rd:
                return rd.read()
        except IOError as e:
            logger.warning("IOError, ex=%s", SolBase.extostr(e))
            return None
        except Exception as e:
            logger.warning("Exception, ex=%s", SolBase.extostr(e))
            return None

    @staticmethod
    def file_to_textbuffer(file_name, encoding):
        """
        Load a file toward a text buffer, using the specified encoding while reading.
        CAUTION : This will read the whole file IN MEMORY.
        :param file_name: File name.
        :type file_name: str
        :param encoding: Encoding to use.
        :type encoding: str
        :return: A text buffer or None in case of error.
        :rtype str
        """
        # Check
        if not FileUtility.is_file_exist(file_name):
            logger.warning("file_to_textbuffer : file_name not exist, file_name=%s", file_name)
            return None
        # Go : newline="" disables newline translation (same behavior as codecs.open)
        try:
            with open(file_name, "r", encoding=encoding, errors="strict", newline="") as rd:
                return rd.read()
        except IOError as e:
            # Log prefix fixed : previous messages wrongly said "file_to_binary"
            logger.warning("file_to_textbuffer : IOError, ex=%s", SolBase.extostr(e))
            return None
        except Exception as e:
            logger.warning("file_to_textbuffer : Exception, ex=%s", SolBase.extostr(e))
            return None

    @staticmethod
    def append_binary_to_file(file_name, bin_buf):
        """
        Append the provided binary buffer to the specified file.
        Create the file if required.
        :param file_name: File name.
        :type file_name: str
        :param bin_buf: Binary buffer to write.
        :type bin_buf: bytes
        :return: The number of bytes written or lt 0 if error.
        :rtype int
        """
        try:
            with open(file_name, "ab+") as wd:
                return wd.write(bin_buf)
        except IOError as e:
            logger.warning("append_binary_to_file : IOError, ex=%s", SolBase.extostr(e))
            return -1
        except Exception as e:
            logger.warning("append_binary_to_file : Exception, ex=%s", SolBase.extostr(e))
            return -1

    @staticmethod
    def append_text_to_file(file_name, text_buffer, encoding, overwrite=False):
        """
        Append (or overwrite) the provided text buffer to the specified file.
        Create the file if required.
        :param file_name: File name.
        :type file_name: str
        :param text_buffer: Text buffer to write.
        :type text_buffer: str
        :param encoding: The encoding to use.
        :type encoding: str
        :param overwrite: If true, file is overwritten.
        :type overwrite: bool
        :return: The number of characters written or lt 0 if error.
        :rtype int
        """
        try:
            mode = "w" if overwrite else "a+"
            # newline="" : no newline translation (same behavior as codecs.open).
            # Built-in open (unlike codecs writers, which return None) returns the
            # number of characters written, matching the documented contract.
            with open(file_name, mode, encoding=encoding, errors="strict", newline="") as wd:
                return wd.write(text_buffer)
        except IOError as e:
            logger.warning("append_text_to_file : IOError, ex=%s", SolBase.extostr(e))
            return -1
        except Exception as e:
            logger.warning("append_text_to_file : Exception, ex=%s", SolBase.extostr(e))
            return -1
|
pysolbase/FileUtility.py
|
codereval_python_data_35
|
Reset
@classmethod
def _reset_logging(cls):
"""
Reset
"""
# Found no way to fully reset the logging stuff while running
# We reset root and all loggers to INFO, and kick handlers
# Initialize
root = logging.getLogger()
root.setLevel(logging.getLevelName("INFO"))
for h in root.handlers:
# noinspection PyBroadException
try:
h.close()
except:
pass
root.handlers = []
# Browse all loggers and set
for name in logging.root.manager.loggerDict:
cur_logger = logging.getLogger(name)
cur_logger.setLevel(logging.getLevelName("INFO"))
for h in cur_logger.handlers:
# noinspection PyBroadException
try:
h.close()
except:
pass
cur_logger.handlers = []
"""
# -*- coding: utf-8 -*-
# ===============================================================================
#
# Copyright (C) 2013/2017 Laurent Labatut / Laurent Champagnac
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# ===============================================================================
"""
import ast
import logging
import os
import platform
import sys
import time
import traceback
from logging.config import dictConfig
from logging.handlers import WatchedFileHandler, TimedRotatingFileHandler, SysLogHandler
from threading import Lock
import gevent
import pytz
from datetime import datetime
from gevent import monkey, config
from yaml import load, SafeLoader
from pysolbase.ContextFilter import ContextFilter
logger = logging.getLogger(__name__)
lifecyclelogger = logging.getLogger("lifecycle")
class SolBase(object):
"""
Base utilities & helpers.
"""
# ===============================
# STATIC STUFF
# ===============================
# Component name (mainly for rsyslog)
_compo_name = "CompoNotSet"
# Global init stuff
_voodoo_initialized = False
_voodoo_lock = Lock()
# Logging stuff
_logging_initialized = False
_logging_lock = Lock()
# Fork stuff
_master_process = True
# ===============================
# DATE & MS
# ===============================
@classmethod
def mscurrent(cls):
"""
Return current millis since epoch
:return float
:rtype float
"""
return time.time() * 1000.0
@classmethod
def securrent(cls):
"""
Return current seconds since epoch
:return float
:rtype float
"""
return time.time()
@classmethod
def msdiff(cls, ms_start, ms_end=None):
"""
Get difference in millis between current millis and provided millis.
:param ms_start: Start millis
:type ms_start: float
:param ms_end: End millis (will use current if not provided)
:type ms_end: float
:return float
:rtype float
"""
if not ms_end:
ms_end = cls.mscurrent()
return ms_end - ms_start
@classmethod
def datecurrent(cls, erase_mode=0):
"""
Return current date (UTC)
:param erase_mode: Erase mode (0=nothing, 1=remove microseconds but keep millis, 2=remove millis completely)
:return datetime.datetime
:rtype datetime.datetime
"""
if erase_mode == 0:
return datetime.utcnow()
elif erase_mode == 1:
# Force precision loss (keep millis, kick micro)
dt = datetime.utcnow()
return dt.replace(microsecond=int((dt.microsecond * 0.001) * 1000))
elif erase_mode == 2:
return datetime.utcnow().replace(microsecond=0)
@classmethod
def datediff(cls, dt_start, dt_end=None):
"""
Get difference in millis between two datetime
:param dt_start: Start datetime
:type dt_start: datetime.datetime
:param dt_end: End datetime (will use current utc if not provided)
:type dt_end: datetime.datetime
:return float
:rtype float
"""
# Fix
if not dt_end:
dt_end = cls.datecurrent()
# Get delta
delta = dt_end - dt_start
return ((delta.days * 86400 + delta.seconds) * 1000) + (delta.microseconds * 0.001)
# ==========================================
# EPOCH / DT
# ==========================================
DT_EPOCH = datetime.utcfromtimestamp(0)
@classmethod
def dt_to_epoch(cls, dt):
"""
Convert a datetime (UTC required) to a unix time since epoch, as seconds, as integer.
Note that millis precision is lost.
:param dt: datetime
:type dt: datetime
:return int
:rtype int
"""
return int((dt - cls.DT_EPOCH).total_seconds())
@classmethod
def epoch_to_dt(cls, epoch):
"""
Convert an epoch float or int to datetime (UTC)
:param epoch: float,int
:type epoch: float,int
:return datetime
:rtype datetime
"""
return datetime.utcfromtimestamp(epoch)
@classmethod
def dt_is_naive(cls, dt):
"""
Return true if dt is naive
:param dt: datetime.datetime
:type dt: datetime.datetime
:return bool
:rtype bool
"""
# Naive : no tzinfo
if not dt.tzinfo:
return True
# Aware
return False
@classmethod
def dt_ensure_utc_aware(cls, dt):
"""
Switch dt to utc time zone. If dt is naive, assume utc, otherwise, convert it to utc timezone.
Return an AWARE timezone (utc switched) datetime,
:param dt: datetime.datetime
:type dt:datetime.datetime
:return datetime.datetime
:rtype datetime.datetime
"""
# If naive, add utc
if cls.dt_is_naive(dt):
return dt.replace(tzinfo=pytz.utc)
else:
# Not naive, go utc, keep aware
return dt.astimezone(pytz.utc)
@classmethod
def dt_ensure_utc_naive(cls, dt):
"""
Ensure dt is naive. Return dt, switched to UTC (if applicable), and naive.
:param dt: datetime.datetime
:type dt:datetime.datetime
:return datetime.datetime
:rtype datetime.datetime
"""
dt = cls.dt_ensure_utc_aware(dt)
return dt.replace(tzinfo=None)
# ===============================
# COMPO NAME (FOR RSYSLOG)
# ===============================
@classmethod
def set_compo_name(cls, compo_name):
"""
Set the component name. Useful for rsyslog.
:param compo_name: The component name or None. If None, method do nothing.
:type compo_name: str,None
"""
if compo_name:
cls._compo_name = compo_name
lifecyclelogger.debug("compo_name now set to=%s", cls._compo_name)
@classmethod
def get_compo_name(cls):
"""
Get current component name.
:return str
:rtype str
"""
return cls._compo_name
@classmethod
def get_machine_name(cls):
"""
Get machine name
:return: Machine name
:rtype: str
"""
return platform.uname()[1]
# ===============================
# MISC
# ===============================
@classmethod
def sleep(cls, sleep_ms):
"""
Sleep for specified ms.
Also used as gevent context switch in code, since it rely on gevent.sleep.
:param sleep_ms: Millis to sleep.
:type sleep_ms: int
:return Nothing.
"""
ms = sleep_ms * 0.001
# gevent 1.3 : ms is not fully respected (100 can be 80-100)
gevent.sleep(ms)
# ===============================
# EXCEPTION HELPER
# ===============================
    @classmethod
    def extostr(cls, e, max_level=30, max_path_level=5):
        """
        Format an exception into a one-line readable string:
        "e.cls:[Type], e.bytes:[message], e.cs=[in:method#file@line ...]"
        :param e: Any exception instance.
        :type e: Exception
        :param max_level: Maximum call stack level (default 30)
        :type max_level: int
        :param max_path_level: Maximum path level (default 5)
        :type max_path_level: int
        :return The exception readable string
        :rtype str
        """
        # Go
        list_frame = None
        try:
            out_buffer = ""
            # Class type
            out_buffer += "e.cls:[{0}]".format(e.__class__.__name__)
            # To string (fall back to repr when the message is not encodable)
            try:
                ex_buf = str(e)
            except UnicodeEncodeError:
                ex_buf = repr(str(e))
            except Exception as e:
                logger.warning("Exception, e=%s", e)
                raise
            out_buffer += ", e.bytes:[{0}]".format(ex_buf)
            # Traceback : relies on the exception currently being handled
            # (sys.exc_info()), so this is meant to be called from an except block
            si = sys.exc_info()
            # Raw frame
            # tuple : (file, lineno, method, code)
            raw_frame = traceback.extract_tb(si[2])
            raw_frame.reverse()
            # Go to last tb_next (deepest traceback entry)
            last_tb_next = None
            cur_tb = si[2]
            while cur_tb:
                last_tb_next = cur_tb
                cur_tb = cur_tb.tb_next
            # Skip frame up to current raw frame count, then append the caller
            # frames above the raise point so the full call chain is visible
            list_frame = list()
            cur_count = -1
            skip_count = len(raw_frame)
            if last_tb_next:
                cur_frame = last_tb_next.tb_frame
            else:
                cur_frame = None
            while cur_frame:
                cur_count += 1
                if cur_count < skip_count:
                    cur_frame = cur_frame.f_back
                else:
                    # Need : tuple : (file, lineno, method, code)
                    raw_frame.append((cur_frame.f_code.co_filename, cur_frame.f_lineno, cur_frame.f_code.co_name, ""))
                    cur_frame = cur_frame.f_back
            # Build it
            cur_idx = 0
            out_buffer += ", e.cs=["
            for tu in raw_frame:
                line = tu[1]
                cur_file = tu[0]
                method = tu[2]
                # Handle max path level : keep only the last max_path_level path tokens
                ar_token = cur_file.rsplit(os.sep, max_path_level)
                if len(ar_token) > max_path_level:
                    # Remove head
                    ar_token.pop(0)
                    # Join
                    cur_file = "..." + os.sep.join(ar_token)
                # Format
                out_buffer += "in:{0}#{1}@{2} ".format(method, cur_file, line)
                # Loop : cap the number of emitted frames at max_level
                cur_idx += 1
                if cur_idx >= max_level:
                    out_buffer += "..."
                    break
            # Close
            out_buffer += "]"
            # Ok
            return out_buffer
        finally:
            if list_frame:
                del list_frame
# ===============================
# VOODOO INIT
# ===============================
@classmethod
def _reset(cls):
"""
For unittest only
"""
cls._logging_initialized = False
cls._voodoo_initialized = False
    @classmethod
    def voodoo_init(cls, aggressive=True, init_logging=True):
        """
        Global initialization, to call asap.
        Apply gevent stuff (monkey patching) & default logging configuration.
        Idempotent : guarded by a double-checked lock on _voodoo_initialized.
        :param aggressive: bool
        :type aggressive: bool
        :param init_logging: If True, logging_init is called.
        :type init_logging: bool
        :return Nothing.
        """
        try:
            # Check (fast path, no lock)
            if cls._voodoo_initialized:
                return
            # Lock
            with cls._voodoo_lock:
                # Re-check (double-checked locking)
                if cls._voodoo_initialized:
                    return
                # Fire the voodoo magic :)
                lifecyclelogger.debug("Voodoo : gevent : entering, aggressive=%s", aggressive)
                monkey.patch_all(aggressive=aggressive)
                lifecyclelogger.debug("Voodoo : gevent : entering")
                # Gevent 1.3 : by default, gevent keep tracks of spawn call stack
                # This may lead to memory leak, if a method spawn itself in loop (timer mode)
                # We disable this
                config.track_greenlet_tree = False
                # Initialize log level to INFO
                if init_logging:
                    lifecyclelogger.debug("Voodoo : logging : entering")
                    cls.logging_init()
                    lifecyclelogger.debug("Voodoo : logging : done")
                # Done
                cls._voodoo_initialized = True
        finally:
            # If whenever init_logging if set AND it is NOT initialized => we must init it
            # => we may have been called previously with init_logging=false, but monkey patch is SET and logging not initialized
            # => so it must be init now
            if init_logging and not cls._logging_initialized:
                lifecyclelogger.debug("Voodoo : logging : not yet init : entering")
                cls.logging_init()
                lifecyclelogger.debug("Voodoo : logging : not yet init : done")
# ===============================
# LOGGING
# ===============================
    @classmethod
    def logging_init(cls, log_level="INFO", force_reset=False, log_callback=None,
                     log_to_file=None,
                     log_to_syslog=True,
                     log_to_syslog_facility=SysLogHandler.LOG_LOCAL0,
                     log_to_console=True,
                     log_to_file_mode="watched_file",
                     context_filter=None):
        """
        Initialize logging sub system with default settings (console, pre-formatted output).
        Idempotent unless force_reset is set (double-checked lock on _logging_initialized).
        :param log_to_console: if True to console
        :type log_to_console: bool
        :param log_level: The log level to set. Any value in "DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"
        :type log_level: str
        :param force_reset: If true, logging system is reset.
        :type force_reset: bool
        :param log_to_file: If specified, log to file
        :type log_to_file: str,None
        :param log_to_syslog: If specified, log to syslog
        :type log_to_syslog: bool
        :param log_to_syslog_facility: Syslog facility.
        :type log_to_syslog_facility: int
        :param log_to_file_mode: str "watched_file" for WatchedFileHandler, "time_file" for TimedRotatingFileHandler (or time_file_seconds for unittest)
        :type log_to_file_mode: str
        :param log_callback: Callback for unittest
        :param context_filter: Context filter. If None, pysolbase.ContextFilter.ContextFilter is used. If used instance has an attr "filter", it is added to all handlers and "%(kfilter)s" will be populated by all thread context key/values, using filter method call. Refer to our ContextFilter default implementation for details.
        :type context_filter: None,object
        :return Nothing.
        """
        # Fast path : already initialized and no reset requested
        if cls._logging_initialized and not force_reset:
            return
        with cls._logging_lock:
            # Re-check under the lock (double-checked locking)
            if cls._logging_initialized and not force_reset:
                return
            # Reset
            cls._reset_logging()
            # Default
            logging.basicConfig(level=log_level)
            # Filter
            if context_filter:
                c_filter = context_filter
            else:
                c_filter = ContextFilter()
            # Format begin
            s_f = "%(asctime)s | %(levelname)s | %(module)s@%(funcName)s@%(lineno)d | %(message)s "
            # Browse
            if hasattr(c_filter, "filter"):
                # Push generic field
                # We expect it to be formatted like our pysolbase.ContextFilter.ContextFilter#filter method.
                s_f += "|%(kfilter)s"
            # Format end
            s_f += "| %(thread)d:%(threadName)s | %(process)d:%(processName)s"
            # Formatter
            f = logging.Formatter(s_f)
            # Console handler
            c = None
            if log_to_console:
                # This can be overriden by unittest, we use __stdout__
                c = logging.StreamHandler(sys.__stdout__)
                c.setLevel(logging.getLevelName(log_level))
                c.setFormatter(f)
            # File handler to /tmp
            cf = None
            if log_to_file:
                if log_to_file_mode == "watched_file":
                    # Follows external rotation (e.g. logrotate) : re-opens on inode change
                    cf = WatchedFileHandler(log_to_file, encoding="utf-8")
                    cf.setLevel(logging.getLevelName(log_level))
                    cf.setFormatter(f)
                elif log_to_file_mode == "time_file":
                    # Self-rotating : daily, keep 7 backups
                    cf = TimedRotatingFileHandler(log_to_file, encoding="utf-8", utc=True, when="D", interval=1, backupCount=7)
                    cf.setLevel(logging.getLevelName(log_level))
                    cf.setFormatter(f)
                elif log_to_file_mode == "time_file_seconds":
                    # For unittest only
                    cf = TimedRotatingFileHandler(log_to_file, encoding="utf-8", utc=True, when="S", interval=1, backupCount=7)
                    cf.setLevel(logging.getLevelName(log_level))
                    cf.setFormatter(f)
                else:
                    logger.warning("Invalid log_to_file_mode=%s", log_to_file_mode)
            # Syslog handler
            syslog = None
            if log_to_syslog:
                try:
                    from pysolbase.SysLogger import SysLogger
                    syslog = SysLogger(log_callback=log_callback, facility=log_to_syslog_facility)
                    syslog.setLevel(logging.getLevelName(log_level))
                    syslog.setFormatter(f)
                except Exception as e:
                    # This will fail on WINDOWS (no attr AF_UNIX)
                    logger.debug("Unable to import SysLogger, e=%s", SolBase.extostr(e))
                    syslog = False
            # Initialize : wipe root handlers, then attach the enabled ones
            root = logging.getLogger()
            root.setLevel(logging.getLevelName(log_level))
            root.handlers = []
            if log_to_console:
                c.addFilter(c_filter)
                root.addHandler(c)
            if log_to_file and cf:
                cf.addFilter(c_filter)
                root.addHandler(cf)
            if log_to_syslog and syslog:
                syslog.addFilter(c_filter)
                root.addHandler(syslog)
            # Done
            cls._logging_initialized = True
            if force_reset:
                lifecyclelogger.info("Logging : initialized from memory, log_level=%s, force_reset=%s", log_level, force_reset)
            else:
                lifecyclelogger.debug("Logging : initialized from memory, log_level=%s, force_reset=%s", log_level, force_reset)
@classmethod
def _register_filter(cls, c_filter):
"""
Register filter across the whole logging (root and all loggers)
Notice : addFilter is protected against duplicates add
:param c_filter: pysolbase.ContextFilter.ContextFilter
:type c_filter: pysolbase.ContextFilter.ContextFilter
"""
# Initialize
root = logging.getLogger()
for h in list(root.handlers):
h.addFilter(c_filter)
# Browse all loggers and set
for name in logging.root.manager.loggerDict:
cur_logger = logging.getLogger(name)
for h in list(cur_logger.handlers):
h.addFilter(c_filter)
@classmethod
def _reset_logging(cls):
"""
Reset
"""
# Found no way to fully reset the logging stuff while running
# We reset root and all loggers to INFO, and kick handlers
# Initialize
root = logging.getLogger()
root.setLevel(logging.getLevelName("INFO"))
for h in root.handlers:
# noinspection PyBroadException
try:
h.close()
except:
pass
root.handlers = []
# Browse all loggers and set
for name in logging.root.manager.loggerDict:
cur_logger = logging.getLogger(name)
cur_logger.setLevel(logging.getLevelName("INFO"))
for h in cur_logger.handlers:
# noinspection PyBroadException
try:
h.close()
except:
pass
cur_logger.handlers = []
@classmethod
def logging_initfromfile(cls, config_file_name, force_reset=False, context_filter=None):
"""
Initialize logging system from a configuration file, with optional reset.
:param config_file_name: Configuration file name
:type config_file_name: str
:param force_reset: If true, logging system is reset.
:type force_reset: bool
:param context_filter: Context filter. If None, pysolbase.ContextFilter.ContextFilter is used. If used instance has an attr "filter", it is added to all handlers and "%(kfilter)s" will be populated by all thread context key/values, using filter method call. Refer to our ContextFilter default implementation for details.
:type context_filter: None,object
:return Nothing.
"""
if cls._logging_initialized and not force_reset:
return
with cls._logging_lock:
if cls._logging_initialized and not force_reset:
return
try:
# Filter
if context_filter:
c_filter = context_filter
else:
c_filter = ContextFilter()
# Reset
cls._reset_logging()
# Load
logger.debug("Logging : yaml config_file_name=%s", config_file_name)
with open(config_file_name, 'r') as f:
d = load(f, Loader=SafeLoader)
dictConfig(d)
# Register filter
if c_filter:
cls._register_filter(c_filter)
if force_reset:
lifecyclelogger.info("Logging : initialized from yaml file, config_file_name=%s", config_file_name)
else:
lifecyclelogger.debug("Logging : initialized from yaml file, config_file_name=%s", config_file_name)
except Exception:
raise
    @classmethod
    def context_set(cls, k, v):
        """
        Set thread/greenlet context value
        This is a wrapper to pysolbase.ContextFilter.ContextFilter#set_value
        and will work only if ContextFilter is defined (which is by default)
        :param k: key name
        :type k: basestring
        :param v: value
        :type v: object
        """
        # Delegates storage to ContextFilter (per-thread/greenlet key/value map)
        ContextFilter.set_value(k, v)
# ===============================
# FORK STUFF
# ===============================
@classmethod
def get_master_process(cls):
"""
Return True if we are the master process, False otherwise.
:return bool
:rtype bool
"""
return cls._master_process
@classmethod
def set_master_process(cls, b):
"""
Set is we are a fork master or not
:param b: True if we are master process, False if we are a child process.
:type b: bool
:return Nothing
"""
logger.debug("Switching _masterProcess to %s", b)
cls._master_process = b
# ===============================
# BINARY STUFF
# ===============================
@classmethod
def binary_to_unicode(cls, bin_buf, encoding="utf-8"):
"""
Binary buffer to str, using the specified encoding
:param bin_buf: Binary buffer
:type bin_buf: bytes
:param encoding: Encoding to use
:type encoding: str
:return str
:rtype str
"""
return bin_buf.decode(encoding)
@classmethod
def unicode_to_binary(cls, unicode_buf, encoding="utf-8"):
"""
Unicode to binary buffer, using the specified encoding
:param unicode_buf: String to convert.
:type unicode_buf: str
:param encoding: Encoding to use.
:type encoding: str
:return bytes
:rtype bytes
"""
return unicode_buf.encode(encoding)
@classmethod
def fix_paths_for_popen(cls):
"""
Fix path and env for popen calls toward current project
Mainly used for unittests, which requires current env to be propagated while testing command line invocation within same project
"""
# Merge all
ar_p_path = sys.path
if os.environ.get("PYTHONPATH"):
ar_p_path.extend(os.environ.get("PYTHONPATH").split(":"))
if os.environ.get("PATH"):
ar_p_path.extend(os.environ.get("PATH").split(":"))
# Join
new_path = ":".join(ar_p_path)
# Re-Assign
os.environ["PATH"] = new_path
os.environ["PYTHONPATH"] = new_path
# ===============================
# CONVERSIONS
# ===============================
@classmethod
def to_int(cls, v):
"""
Convert to int
:param v: int,str
:type v: int,str
:return: int
:rtype int
"""
if isinstance(v, int):
return v
else:
return int(v)
@classmethod
def to_bool(cls, v):
"""
Convert to bool
:param v: bool,str
:type v: bool,str
:return: bool
:rtype bool
"""
if isinstance(v, bool):
return v
else:
return ast.literal_eval(v)
@classmethod
def get_classname(cls, my_instance):
"""
Return the class name of my_instance, or "Instance.None".
:param cls: Our class.
:param my_instance: Instance to use.
:return: Return the class name of my_instance, or "Instance.None" in case of error/None value.
"""
if my_instance is None:
return "Instance.None"
else:
return my_instance.__class__.__name__
@classmethod
def get_pathseparator(cls):
"""
Return the path separator.
https://docs.python.org/library/os.html#os.sep
:param cls: Our class
:return: The path separator (string)
"""
return os.sep
@classmethod
def is_bool(cls, my_bool):
"""
Return true if the provided my_bool is a boolean.
:param cls: Our class.
:param my_bool: A boolean..
:return: Return true if the provided my_bool is a boolean. False otherwise.
"""
if my_bool is None:
return False
else:
return isinstance(my_bool, bool)
@classmethod
def is_int(cls, my_int):
"""
Return true if the provided my_int is a integer.
:param cls: Our class.
:param my_int: An integer..
:return: Return true if the provided my_int is a integer. False otherwise.
"""
if my_int is None:
return False
# Caution, boolean is an integer...
elif SolBase.is_bool(my_int):
return False
else:
return isinstance(my_int, int)
@classmethod
def get_current_pid_as_string(cls):
"""
Return the current pids as string.
:param cls: Our class.
:return: A String
"""
try:
return "pid={0}, ppid={1}".format(os.getpid(), os.getppid())
except AttributeError:
return "pid={0}".format(os.getpid())
# =====================================================
# HELPER FOR SOCKET CLOSING
# =====================================================
@classmethod
def safe_close_socket(cls, soc_to_close):
"""
Safe close a socket
:param soc_to_close: socket
:type soc_to_close: socket.socket
"""
if soc_to_close is None:
return
try:
soc_to_close.shutdown(2)
except Exception as e:
logger.debug("Socket shutdown ex=%s", SolBase.extostr(e))
try:
soc_to_close.close()
except Exception as e:
logger.debug("Socket close ex=%s", SolBase.extostr(e))
try:
del soc_to_close
except Exception as e:
logger.debug("Socket del ex=%s", SolBase.extostr(e))
|
pysolbase/SolBase.py
|
codereval_python_data_36
|
Define this to return the implementation in use,
without the 'Py' or 'Fallback' suffix.
    def _getTargetClass(self):
        # Resolve lazily so a broken import fails the test, not module collection
        from zope.interface.declarations import getObjectSpecification
        return getObjectSpecification
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the new API for making and checking interface declarations
"""
import unittest
from zope.interface._compat import _skip_under_py3k
from zope.interface._compat import PYTHON3
from zope.interface.tests import OptimizationTestMixin
from zope.interface.tests import MissingSomeAttrs
from zope.interface.tests.test_interface import NameAndModuleComparisonTestsMixin
# pylint:disable=inherit-non-class,too-many-lines,protected-access
# pylint:disable=blacklisted-name,attribute-defined-outside-init
class _Py3ClassAdvice(object):
    """
    Mixin for tests that exec generated source using the legacy
    class-advice style (``implements(...)`` inside a class body), which
    is a TypeError on Python 3 but ran (without warning) on Python 2.
    """

    def _run_generated_code(self, code, globs, locs,
                            fails_under_py3k=True,
                            ):
        """
        Exec *code* and report what happened: True when it ran cleanly
        (Python 2 path), False when it raised the expected TypeError on
        Python 3, and fail the test when *fails_under_py3k* is set but
        no TypeError was raised.
        """
        # pylint:disable=exec-used,no-member
        import warnings
        with warnings.catch_warnings(record=True) as log:
            # Undo any filters so every warning would be recorded in *log*.
            warnings.resetwarnings()
            if not PYTHON3:
                exec(code, globs, locs)
                self.assertEqual(len(log), 0) # no longer warn
                return True

            try:
                exec(code, globs, locs)
            except TypeError:
                return False
            else:
                if fails_under_py3k:
                    self.fail("Didn't raise TypeError")

        return None
class NamedTests(unittest.TestCase):
    """``named(name)`` must stamp ``__component_name__`` on classes,
    functions and instances alike."""

    def test_class(self):
        from zope.interface.declarations import named
        decorate = named(u'foo')

        class Foo(object):
            pass

        Foo = decorate(Foo)
        self.assertEqual(Foo.__component_name__, u'foo') # pylint:disable=no-member

    def test_function(self):
        from zope.interface.declarations import named
        decorate = named(u'foo')

        def doFoo(o):
            raise NotImplementedError()

        doFoo = decorate(doFoo)
        self.assertEqual(doFoo.__component_name__, u'foo')

    def test_instance(self):
        from zope.interface.declarations import named

        class Foo(object):
            pass

        # Applying the decorator to an instance works too.
        foo = Foo()
        named(u'foo')(foo)
        self.assertEqual(foo.__component_name__, u'foo') # pylint:disable=no-member
class EmptyDeclarationTests(unittest.TestCase):
    # Tests that should pass for all objects that are empty
    # declarations. This includes a Declaration explicitly created
    # that way, and the empty ImmutableDeclaration.

    def _getEmpty(self):
        """Hook for subclasses: return the empty declaration under test."""
        from zope.interface.declarations import Declaration
        return Declaration()

    def test___iter___empty(self):
        decl = self._getEmpty()
        self.assertEqual(list(decl), [])

    def test_flattened_empty(self):
        from zope.interface.interface import Interface
        decl = self._getEmpty()
        # Even an empty declaration flattens to the root Interface.
        self.assertEqual(list(decl.flattened()), [Interface])

    def test___contains___empty(self):
        from zope.interface.interface import Interface
        decl = self._getEmpty()
        self.assertNotIn(Interface, decl)

    def test_extends_empty(self):
        from zope.interface.interface import Interface
        decl = self._getEmpty()
        self.assertTrue(decl.extends(Interface))
        self.assertTrue(decl.extends(Interface, strict=True))

    def test_interfaces_empty(self):
        decl = self._getEmpty()
        l = list(decl.interfaces())
        self.assertEqual(l, [])

    def test___sro___(self):
        from zope.interface.interface import Interface
        decl = self._getEmpty()
        # The specification resolution order includes the declaration itself.
        self.assertEqual(decl.__sro__, (decl, Interface,))

    def test___iro___(self):
        from zope.interface.interface import Interface
        decl = self._getEmpty()
        self.assertEqual(decl.__iro__, (Interface,))

    def test_get(self):
        decl = self._getEmpty()
        self.assertIsNone(decl.get('attr'))
        self.assertEqual(decl.get('abc', 'def'), 'def')
        # It's a positive cache only (when it even exists)
        # so this added nothing.
        self.assertFalse(decl._v_attrs)

    def test_changed_w_existing__v_attrs(self):
        decl = self._getEmpty()
        decl._v_attrs = object()
        # changed() must drop any stale attribute cache.
        decl.changed(decl)
        self.assertFalse(decl._v_attrs)
class DeclarationTests(EmptyDeclarationTests):
    """Declaration behaviour beyond the empty case: construction,
    iteration order, flattening, and +/- arithmetic."""

    def _getTargetClass(self):
        from zope.interface.declarations import Declaration
        return Declaration

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_ctor_no_bases(self):
        decl = self._makeOne()
        self.assertEqual(list(decl.__bases__), [])

    def test_ctor_w_interface_in_bases(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne(IFoo)
        self.assertEqual(list(decl.__bases__), [IFoo])

    def test_ctor_w_implements_in_bases(self):
        from zope.interface.declarations import Implements
        impl = Implements()
        decl = self._makeOne(impl)
        self.assertEqual(list(decl.__bases__), [impl])

    def test_changed_wo_existing__v_attrs(self):
        decl = self._makeOne()
        decl.changed(decl) # doesn't raise
        self.assertIsNone(decl._v_attrs)

    def test___contains__w_self(self):
        # A declaration does not contain itself.
        decl = self._makeOne()
        self.assertNotIn(decl, decl)

    def test___contains__w_unrelated_iface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne()
        self.assertNotIn(IFoo, decl)

    def test___contains__w_base_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne(IFoo)
        self.assertIn(IFoo, decl)

    def test___iter___single_base(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne(IFoo)
        self.assertEqual(list(decl), [IFoo])

    def test___iter___multiple_bases(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        decl = self._makeOne(IFoo, IBar)
        self.assertEqual(list(decl), [IFoo, IBar])

    def test___iter___inheritance(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar', (IFoo,))
        decl = self._makeOne(IBar)
        self.assertEqual(list(decl), [IBar]) #IBar.interfaces() omits bases

    def test___iter___w_nested_sequence_overlap(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        # Duplicates are collapsed; first occurrence wins.
        decl = self._makeOne(IBar, (IFoo, IBar))
        self.assertEqual(list(decl), [IBar, IFoo])

    def test_flattened_single_base(self):
        from zope.interface.interface import Interface
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne(IFoo)
        self.assertEqual(list(decl.flattened()), [IFoo, Interface])

    def test_flattened_multiple_bases(self):
        from zope.interface.interface import Interface
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        decl = self._makeOne(IFoo, IBar)
        self.assertEqual(list(decl.flattened()), [IFoo, IBar, Interface])

    def test_flattened_inheritance(self):
        from zope.interface.interface import Interface
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar', (IFoo,))
        decl = self._makeOne(IBar)
        self.assertEqual(list(decl.flattened()), [IBar, IFoo, Interface])

    def test_flattened_w_nested_sequence_overlap(self):
        from zope.interface.interface import Interface
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        # This is the same as calling ``Declaration(IBar, IFoo, IBar)``
        # which doesn't make much sense, but here it is. In older
        # versions of zope.interface, the __iro__ would have been
        # IFoo, IBar, Interface, which especially makes no sense.
        decl = self._makeOne(IBar, (IFoo, IBar))
        # Note that decl.__iro__ has IFoo first.
        self.assertEqual(list(decl.flattened()), [IBar, IFoo, Interface])

    def test___sub___unrelated_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        before = self._makeOne(IFoo)
        after = before - IBar
        self.assertIsInstance(after, self._getTargetClass())
        self.assertEqual(list(after), [IFoo])

    def test___sub___related_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        before = self._makeOne(IFoo)
        after = before - IFoo
        self.assertEqual(list(after), [])

    def test___sub___related_interface_by_inheritance(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar', (IFoo,))
        before = self._makeOne(IBar)
        # Subtracting a derived interface also removes it.
        after = before - IBar
        self.assertEqual(list(after), [])

    def test___add___unrelated_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        before = self._makeOne(IFoo)
        after = before + IBar
        self.assertIsInstance(after, self._getTargetClass())
        self.assertEqual(list(after), [IFoo, IBar])

    def test___add___related_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        IBaz = InterfaceClass('IBaz')
        before = self._makeOne(IFoo, IBar)
        other = self._makeOne(IBar, IBaz)
        # Union keeps order, drops the duplicate IBar.
        after = before + other
        self.assertEqual(list(after), [IFoo, IBar, IBaz])
class TestImmutableDeclaration(EmptyDeclarationTests):
    """The empty ``_ImmutableDeclaration`` is a read-only singleton:
    every mutation is silently ignored or rejected."""

    def _getTargetClass(self):
        from zope.interface.declarations import _ImmutableDeclaration
        return _ImmutableDeclaration

    def _getEmpty(self):
        from zope.interface.declarations import _empty
        return _empty

    def test_pickle(self):
        # Round-tripping through pickle must preserve identity.
        import pickle
        copied = pickle.loads(pickle.dumps(self._getEmpty()))
        self.assertIs(copied, self._getEmpty())

    def test_singleton(self):
        # Calling the class always returns the one shared instance.
        self.assertIs(
            self._getTargetClass()(),
            self._getEmpty()
        )

    def test__bases__(self):
        self.assertEqual(self._getEmpty().__bases__, ())

    def test_change__bases__(self):
        empty = self._getEmpty()
        # Assigning the same (empty) bases is a no-op...
        empty.__bases__ = ()
        self.assertEqual(self._getEmpty().__bases__, ())
        # ...but anything else is rejected.
        with self.assertRaises(TypeError):
            empty.__bases__ = (1,)

    def test_dependents(self):
        empty = self._getEmpty()
        deps = empty.dependents
        self.assertEqual({}, deps)
        # Doesn't change the return.
        deps[1] = 2
        self.assertEqual({}, empty.dependents)

    def test_changed(self):
        # Does nothing, has no visible side-effects
        self._getEmpty().changed(None)

    def test_extends_always_false(self):
        self.assertFalse(self._getEmpty().extends(self))
        self.assertFalse(self._getEmpty().extends(self, strict=True))
        self.assertFalse(self._getEmpty().extends(self, strict=False))

    def test_get_always_default(self):
        self.assertIsNone(self._getEmpty().get('name'))
        self.assertEqual(self._getEmpty().get('name', 42), 42)

    def test_v_attrs(self):
        # The attribute cache is always a fresh empty mapping; writes to it
        # (direct or via assignment) never stick.
        decl = self._getEmpty()
        self.assertEqual(decl._v_attrs, {})
        decl._v_attrs['attr'] = 42
        self.assertEqual(decl._v_attrs, {})
        self.assertIsNone(decl.get('attr'))

        attrs = decl._v_attrs = {}
        attrs['attr'] = 42
        self.assertEqual(decl._v_attrs, {})
        self.assertIsNone(decl.get('attr'))
class TestImplements(NameAndModuleComparisonTestsMixin,
                     unittest.TestCase):
    """Tests for the ``Implements`` specification type itself:
    construction defaults, repr/reduce, ordering, equality through
    proxies, and the ``_super_cache`` invalidation contract."""

    def _getTargetClass(self):
        from zope.interface.declarations import Implements
        return Implements

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def _makeOneToCompare(self):
        from zope.interface.declarations import implementedBy

        class A(object):
            pass

        return implementedBy(A)

    def test_ctor_no_bases(self):
        impl = self._makeOne()
        self.assertEqual(impl.inherit, None)
        self.assertEqual(impl.declared, ())
        self.assertEqual(impl.__name__, '?')
        self.assertEqual(list(impl.__bases__), [])

    def test___repr__(self):
        impl = self._makeOne()
        impl.__name__ = 'Testing'
        self.assertEqual(repr(impl), '<implementedBy Testing>')

    def test___reduce__(self):
        # Pickling reduces to re-calling implementedBy on the inherit slot.
        from zope.interface.declarations import implementedBy
        impl = self._makeOne()
        self.assertEqual(impl.__reduce__(), (implementedBy, (None,)))

    def test_sort(self):
        # Implements objects are totally ordered (None sorts above them).
        from zope.interface.declarations import implementedBy
        class A(object):
            pass
        class B(object):
            pass
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')

        self.assertEqual(implementedBy(A), implementedBy(A))
        self.assertEqual(hash(implementedBy(A)), hash(implementedBy(A)))
        self.assertTrue(implementedBy(A) < None)
        self.assertTrue(None > implementedBy(A)) # pylint:disable=misplaced-comparison-constant
        self.assertTrue(implementedBy(A) < implementedBy(B))
        self.assertTrue(implementedBy(A) > IFoo)
        self.assertTrue(implementedBy(A) <= implementedBy(B))
        self.assertTrue(implementedBy(A) >= IFoo)
        self.assertTrue(implementedBy(A) != IFoo)

    def test_proxy_equality(self):
        # https://github.com/zopefoundation/zope.interface/issues/55
        class Proxy(object):
            def __init__(self, wrapped):
                self._wrapped = wrapped

            def __getattr__(self, name):
                raise NotImplementedError()

            def __eq__(self, other):
                return self._wrapped == other

            def __ne__(self, other):
                return self._wrapped != other

        from zope.interface.declarations import implementedBy
        class A(object):
            pass
        class B(object):
            pass

        implementedByA = implementedBy(A)
        implementedByB = implementedBy(B)
        proxy = Proxy(implementedByA)

        # The order of arguments to the operators matters,
        # test both
        self.assertTrue(implementedByA == implementedByA) # pylint:disable=comparison-with-itself
        self.assertTrue(implementedByA != implementedByB)
        self.assertTrue(implementedByB != implementedByA)

        self.assertTrue(proxy == implementedByA)
        self.assertTrue(implementedByA == proxy)
        self.assertFalse(proxy != implementedByA)
        self.assertFalse(implementedByA != proxy)

        self.assertTrue(proxy != implementedByB)
        self.assertTrue(implementedByB != proxy)

    def test_changed_deletes_super_cache(self):
        # changed() must invalidate (delete) a populated _super_cache.
        impl = self._makeOne()
        self.assertIsNone(impl._super_cache)
        self.assertNotIn('_super_cache', impl.__dict__)

        impl._super_cache = 42
        self.assertIn('_super_cache', impl.__dict__)

        impl.changed(None)
        self.assertIsNone(impl._super_cache)
        self.assertNotIn('_super_cache', impl.__dict__)

    def test_changed_does_not_add_super_cache(self):
        # ...but must not materialize the cache slot when it was absent.
        impl = self._makeOne()
        self.assertIsNone(impl._super_cache)
        self.assertNotIn('_super_cache', impl.__dict__)

        impl.changed(None)
        self.assertIsNone(impl._super_cache)
        self.assertNotIn('_super_cache', impl.__dict__)
class Test_implementedByFallback(unittest.TestCase):
    """Tests for the pure-Python ``implementedBy`` implementation.

    Subclassed by ``Test_implementedBy`` to run the same battery against
    the C-optimized version.
    """

    def _getTargetClass(self):
        # pylint:disable=no-name-in-module
        from zope.interface.declarations import implementedByFallback
        return implementedByFallback

    # Alias so OptimizationTestMixin (in the subclass) can compare the
    # optimized implementation against this one.
    _getFallbackClass = _getTargetClass

    def _callFUT(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_dictless_wo_existing_Implements_wo_registrations(self):
        # __slots__ object: no __dict__, __implemented__ is None.
        class Foo(object):
            __slots__ = ('__implemented__',)
        foo = Foo()
        foo.__implemented__ = None
        self.assertEqual(list(self._callFUT(foo)), [])

    def test_dictless_wo_existing_Implements_cant_assign___implemented__(self):
        class Foo(object):
            def _get_impl(self):
                raise NotImplementedError()
            def _set_impl(self, val):
                raise TypeError
            __implemented__ = property(_get_impl, _set_impl)
            def __call__(self):
                # act like a factory
                raise NotImplementedError()
        foo = Foo()
        self.assertRaises(TypeError, self._callFUT, foo)

    def test_dictless_wo_existing_Implements_w_registrations(self):
        # A registration in BuiltinImplementationSpecifications wins.
        from zope.interface import declarations
        class Foo(object):
            __slots__ = ('__implemented__',)
        foo = Foo()
        foo.__implemented__ = None
        reg = object()
        with _MonkeyDict(declarations,
                         'BuiltinImplementationSpecifications') as specs:
            specs[foo] = reg
            self.assertTrue(self._callFUT(foo) is reg)

    def test_dictless_w_existing_Implements(self):
        from zope.interface.declarations import Implements
        impl = Implements()
        class Foo(object):
            __slots__ = ('__implemented__',)
        foo = Foo()
        foo.__implemented__ = impl
        self.assertTrue(self._callFUT(foo) is impl)

    def test_dictless_w_existing_not_Implements(self):
        # A plain tuple of interfaces is accepted as-is.
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            __slots__ = ('__implemented__',)
        foo = Foo()
        IFoo = InterfaceClass('IFoo')
        foo.__implemented__ = (IFoo,)
        self.assertEqual(list(self._callFUT(foo)), [IFoo])

    def test_w_existing_attr_as_Implements(self):
        from zope.interface.declarations import Implements
        impl = Implements()
        class Foo(object):
            __implemented__ = impl
        self.assertTrue(self._callFUT(Foo) is impl)

    def test_builtins_added_to_cache(self):
        # Asking about builtins lazily populates the specification cache.
        from zope.interface import declarations
        from zope.interface.declarations import Implements
        from zope.interface._compat import _BUILTINS
        with _MonkeyDict(declarations,
                         'BuiltinImplementationSpecifications') as specs:
            self.assertEqual(list(self._callFUT(tuple)), [])
            self.assertEqual(list(self._callFUT(list)), [])
            self.assertEqual(list(self._callFUT(dict)), [])
            for typ in (tuple, list, dict):
                spec = specs[typ]
                self.assertIsInstance(spec, Implements)
                self.assertEqual(repr(spec),
                                 '<implementedBy %s.%s>'
                                 % (_BUILTINS, typ.__name__))

    def test_builtins_w_existing_cache(self):
        # Pre-seeded cache entries are returned untouched.
        from zope.interface import declarations
        t_spec, l_spec, d_spec = object(), object(), object()
        with _MonkeyDict(declarations,
                         'BuiltinImplementationSpecifications') as specs:
            specs[tuple] = t_spec
            specs[list] = l_spec
            specs[dict] = d_spec
            self.assertTrue(self._callFUT(tuple) is t_spec)
            self.assertTrue(self._callFUT(list) is l_spec)
            self.assertTrue(self._callFUT(dict) is d_spec)

    def test_oldstyle_class_no_assertions(self):
        # TODO: Figure out P3 story
        class Foo:
            pass
        self.assertEqual(list(self._callFUT(Foo)), [])

    def test_no_assertions(self):
        # TODO: Figure out P3 story
        class Foo(object):
            pass
        self.assertEqual(list(self._callFUT(Foo)), [])

    def test_w_None_no_bases_not_factory(self):
        class Foo(object):
            __implemented__ = None
        foo = Foo()
        self.assertRaises(TypeError, self._callFUT, foo)

    def test_w_None_no_bases_w_factory(self):
        # A callable (factory) gets a spec derived from its __name__.
        from zope.interface.declarations import objectSpecificationDescriptor
        class Foo(object):
            __implemented__ = None
            def __call__(self):
                raise NotImplementedError()

        foo = Foo()
        foo.__name__ = 'foo'
        spec = self._callFUT(foo)
        self.assertEqual(spec.__name__,
                         'zope.interface.tests.test_declarations.foo')
        self.assertIs(spec.inherit, foo)
        self.assertIs(foo.__implemented__, spec)
        self.assertIs(foo.__providedBy__, objectSpecificationDescriptor) # pylint:disable=no-member
        self.assertNotIn('__provides__', foo.__dict__)

    def test_w_None_no_bases_w_class(self):
        # A class gets full ClassProvides wiring.
        from zope.interface.declarations import ClassProvides
        class Foo(object):
            __implemented__ = None
        spec = self._callFUT(Foo)
        self.assertEqual(spec.__name__,
                         'zope.interface.tests.test_declarations.Foo')
        self.assertIs(spec.inherit, Foo)
        self.assertIs(Foo.__implemented__, spec)
        self.assertIsInstance(Foo.__providedBy__, ClassProvides) # pylint:disable=no-member
        self.assertIsInstance(Foo.__provides__, ClassProvides) # pylint:disable=no-member
        self.assertEqual(Foo.__provides__, Foo.__providedBy__) # pylint:disable=no-member

    def test_w_existing_Implements(self):
        from zope.interface.declarations import Implements
        impl = Implements()
        class Foo(object):
            __implemented__ = impl
        self.assertTrue(self._callFUT(Foo) is impl)

    def test_super_when_base_implements_interface(self):
        from zope.interface import Interface
        from zope.interface.declarations import implementer

        class IBase(Interface):
            pass

        class IDerived(IBase):
            pass

        @implementer(IBase)
        class Base(object):
            pass

        @implementer(IDerived)
        class Derived(Base):
            pass

        self.assertEqual(list(self._callFUT(Derived)), [IDerived, IBase])

        # super() of the class only sees what the bases declare.
        sup = super(Derived, Derived)
        self.assertEqual(list(self._callFUT(sup)), [IBase])

    def test_super_when_base_implements_interface_diamond(self):
        from zope.interface import Interface
        from zope.interface.declarations import implementer

        class IBase(Interface):
            pass

        class IDerived(IBase):
            pass

        @implementer(IBase)
        class Base(object):
            pass

        class Child1(Base):
            pass

        class Child2(Base):
            pass

        @implementer(IDerived)
        class Derived(Child1, Child2):
            pass

        self.assertEqual(list(self._callFUT(Derived)), [IDerived, IBase])

        sup = super(Derived, Derived)
        self.assertEqual(list(self._callFUT(sup)), [IBase])

    def test_super_when_parent_implements_interface_diamond(self):
        from zope.interface import Interface
        from zope.interface.declarations import implementer

        class IBase(Interface):
            pass

        class IDerived(IBase):
            pass

        class Base(object):
            pass

        class Child1(Base):
            pass

        @implementer(IBase)
        class Child2(Base):
            pass

        @implementer(IDerived)
        class Derived(Child1, Child2):
            pass

        self.assertEqual(Derived.__mro__, (Derived, Child1, Child2, Base, object))
        self.assertEqual(list(self._callFUT(Derived)), [IDerived, IBase])

        sup = super(Derived, Derived)
        fut = self._callFUT(sup)
        self.assertEqual(list(fut), [IBase])
        # Specs computed for super() objects are not registered as
        # dependents (they are not cached/subscribed).
        self.assertIsNone(fut._dependents)

    def test_super_when_base_doesnt_implement_interface(self):
        from zope.interface import Interface
        from zope.interface.declarations import implementer

        class IBase(Interface):
            pass

        class IDerived(IBase):
            pass

        class Base(object):
            pass

        @implementer(IDerived)
        class Derived(Base):
            pass

        self.assertEqual(list(self._callFUT(Derived)), [IDerived])

        sup = super(Derived, Derived)
        self.assertEqual(list(self._callFUT(sup)), [])

    def test_super_when_base_is_object(self):
        from zope.interface import Interface
        from zope.interface.declarations import implementer

        class IBase(Interface):
            pass

        class IDerived(IBase):
            pass

        @implementer(IDerived)
        class Derived(object):
            pass

        self.assertEqual(list(self._callFUT(Derived)), [IDerived])

        sup = super(Derived, Derived)
        self.assertEqual(list(self._callFUT(sup)), [])

    def test_super_multi_level_multi_inheritance(self):
        from zope.interface.declarations import implementer
        from zope.interface import Interface

        class IBase(Interface):
            pass

        class IM1(Interface):
            pass

        class IM2(Interface):
            pass

        class IDerived(IBase):
            pass

        class IUnrelated(Interface):
            pass

        @implementer(IBase)
        class Base(object):
            pass

        @implementer(IM1)
        class M1(Base):
            pass

        @implementer(IM2)
        class M2(Base):
            pass

        @implementer(IDerived, IUnrelated)
        class Derived(M1, M2):
            pass

        d = Derived
        sd = super(Derived, Derived)
        sm1 = super(M1, Derived)
        sm2 = super(M2, Derived)

        # Each super() slice sees only declarations from classes after
        # its anchor in Derived's MRO.
        self.assertEqual(list(self._callFUT(d)),
                         [IDerived, IUnrelated, IM1, IBase, IM2])
        self.assertEqual(list(self._callFUT(sd)),
                         [IM1, IBase, IM2])
        self.assertEqual(list(self._callFUT(sm1)),
                         [IM2, IBase])
        self.assertEqual(list(self._callFUT(sm2)),
                         [IBase])
class Test_implementedBy(Test_implementedByFallback,
                         OptimizationTestMixin):
    """Repeat the fallback tests against the (possibly C-optimized)
    public ``implementedBy``."""

    def _getTargetClass(self):
        from zope.interface import declarations
        return declarations.implementedBy
class _ImplementsTestMixin(object):
    """Shared checks for the class-implements family of functions."""

    # Whether the function under test also wires __providedBy__ /
    # __provides__ onto the class (classImplements does;
    # classImplementsOnly does not).
    FUT_SETS_PROVIDED_BY = True

    def _callFUT(self, cls, iface):
        # Declare that *cls* implements *iface*; return *cls*
        raise NotImplementedError

    def _check_implementer(self, Foo,
                           orig_spec=None,
                           spec_name=__name__ + '.Foo',
                           inherit="not given"):
        """Run the FUT on *Foo* and verify the resulting spec wiring.

        :param orig_spec: when given, the spec must be this exact object.
        :param spec_name: expected ``__name__`` of the resulting spec.
        :param inherit: expected ``spec.inherit``; the sentinel string
            "not given" means "expect *Foo* itself".
        :return: ``(Foo, IFoo)`` for further checks by the caller.
        """
        from zope.interface.declarations import ClassProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')

        returned = self._callFUT(Foo, IFoo)

        self.assertIs(returned, Foo)
        spec = Foo.__implemented__
        if orig_spec is not None:
            self.assertIs(spec, orig_spec)

        self.assertEqual(spec.__name__,
                         spec_name)
        inherit = Foo if inherit == "not given" else inherit
        self.assertIs(spec.inherit, inherit)
        self.assertIs(Foo.__implemented__, spec)
        if self.FUT_SETS_PROVIDED_BY:
            self.assertIsInstance(Foo.__providedBy__, ClassProvides)
            self.assertIsInstance(Foo.__provides__, ClassProvides)
            self.assertEqual(Foo.__provides__, Foo.__providedBy__)

        return Foo, IFoo

    def test_oldstyle_class(self):
        # This only matters on Python 2
        class Foo:
            pass
        self._check_implementer(Foo)

    def test_newstyle_class(self):
        class Foo(object):
            pass
        self._check_implementer(Foo)
class Test_classImplementsOnly(_ImplementsTestMixin, unittest.TestCase):
    """``classImplementsOnly`` replaces all prior declarations."""

    # classImplementsOnly does not wire __providedBy__/__provides__.
    FUT_SETS_PROVIDED_BY = False

    def _callFUT(self, cls, iface):
        from zope.interface.declarations import classImplementsOnly
        classImplementsOnly(cls, iface)
        return cls

    def test_w_existing_Implements(self):
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        impl = Implements(IFoo)
        impl.declared = (IFoo,)
        class Foo(object):
            __implemented__ = impl
        impl.inherit = Foo
        self._callFUT(Foo, IBar)
        # Same spec, now different values
        self.assertTrue(Foo.__implemented__ is impl)
        # "Only" semantics: inheritance link dropped, declared replaced.
        self.assertEqual(impl.inherit, None)
        self.assertEqual(impl.declared, (IBar,))

    def test_oldstyle_class(self):
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IBar = InterfaceClass('IBar')
        old_spec = Implements(IBar)

        class Foo:
            __implemented__ = old_spec
        self._check_implementer(Foo, old_spec, '?', inherit=None)

    def test_newstyle_class(self):
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IBar = InterfaceClass('IBar')
        old_spec = Implements(IBar)

        class Foo(object):
            __implemented__ = old_spec
        self._check_implementer(Foo, old_spec, '?', inherit=None)

    def test_redundant_with_super_still_implements(self):
        # Re-declaring an interface the base already implements must not
        # lose it on the child.
        Base, IBase = self._check_implementer(
            type('Foo', (object,), {}),
            inherit=None,
        )

        class Child(Base):
            pass

        self._callFUT(Child, IBase)
        self.assertTrue(IBase.implementedBy(Child))
class Test_classImplements(_ImplementsTestMixin, unittest.TestCase):
    """``classImplements`` adds declarations, preserving inheritance."""

    def _callFUT(self, cls, iface):
        from zope.interface.declarations import classImplements
        result = classImplements(cls, iface) # pylint:disable=assignment-from-no-return
        # classImplements mutates in place and returns None.
        self.assertIsNone(result)
        return cls

    def __check_implementer_redundant(self, Base):
        # If we @implementer exactly what was already present, we write
        # no declared attributes on the parent (we still set everything, though)
        Base, IBase = self._check_implementer(Base)

        class Child(Base):
            pass

        returned = self._callFUT(Child, IBase)
        self.assertIn('__implemented__', returned.__dict__)
        self.assertNotIn('__providedBy__', returned.__dict__)
        self.assertIn('__provides__', returned.__dict__)

        spec = Child.__implemented__
        self.assertEqual(spec.declared, ())
        self.assertEqual(spec.inherit, Child)

        self.assertTrue(IBase.providedBy(Child()))

    def test_redundant_implementer_empty_class_declarations_newstyle(self):
        self.__check_implementer_redundant(type('Foo', (object,), {}))

    def test_redundant_implementer_empty_class_declarations_oldstyle(self):
        # This only matters on Python 2
        class Foo:
            pass
        self.__check_implementer_redundant(Foo)

    def test_redundant_implementer_Interface(self):
        from zope.interface import Interface
        from zope.interface import implementedBy
        from zope.interface import ro
        from zope.interface.tests.test_ro import C3Setting

        class Foo(object):
            pass

        # Declaring the root Interface violates the strict IRO, so relax it.
        with C3Setting(ro.C3.STRICT_IRO, False):
            self._callFUT(Foo, Interface)
            self.assertEqual(list(implementedBy(Foo)), [Interface])

            class Baz(Foo):
                pass
            self._callFUT(Baz, Interface)
            self.assertEqual(list(implementedBy(Baz)), [Interface])

    def _order_for_two(self, applied_first, applied_second):
        # Hook for Test_classImplementsFirst, which reverses the order.
        return (applied_first, applied_second)

    def test_w_existing_Implements(self):
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        impl = Implements(IFoo)
        impl.declared = (IFoo,)
        class Foo(object):
            __implemented__ = impl
        impl.inherit = Foo
        self._callFUT(Foo, IBar)
        # Same spec, now different values
        self.assertIs(Foo.__implemented__, impl)
        self.assertEqual(impl.inherit, Foo)
        self.assertEqual(impl.declared,
                         self._order_for_two(IFoo, IBar))

    def test_w_existing_Implements_w_bases(self):
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IRoot = InterfaceClass('IRoot')
        ISecondRoot = InterfaceClass('ISecondRoot')
        IExtendsRoot = InterfaceClass('IExtendsRoot', (IRoot,))

        impl_root = Implements.named('Root', IRoot)
        impl_root.declared = (IRoot,)

        class Root1(object):
            __implemented__ = impl_root
        class Root2(object):
            __implemented__ = impl_root

        impl_extends_root = Implements.named('ExtendsRoot1', IExtendsRoot)
        impl_extends_root.declared = (IExtendsRoot,)
        class ExtendsRoot(Root1, Root2):
            __implemented__ = impl_extends_root
        impl_extends_root.inherit = ExtendsRoot

        self._callFUT(ExtendsRoot, ISecondRoot)
        # Same spec, now different values
        self.assertIs(ExtendsRoot.__implemented__, impl_extends_root)
        self.assertEqual(impl_extends_root.inherit, ExtendsRoot)
        self.assertEqual(impl_extends_root.declared,
                         self._order_for_two(IExtendsRoot, ISecondRoot,))
        # The inherited base spec is appended after the declared pair.
        self.assertEqual(impl_extends_root.__bases__,
                         self._order_for_two(IExtendsRoot, ISecondRoot) + (impl_root,))
class Test_classImplementsFirst(Test_classImplements):
    """Same battery as ``classImplements``, but with declarations
    prepended rather than appended."""

    def _callFUT(self, cls, iface):
        from zope.interface.declarations import classImplementsFirst
        self.assertIsNone(
            classImplementsFirst(cls, iface) # pylint:disable=assignment-from-no-return
        )
        return cls

    def _order_for_two(self, applied_first, applied_second):
        # The later declaration wins the front position.
        return (applied_second, applied_first)
class Test__implements_advice(unittest.TestCase):
    """``_implements_advice`` consumes ``__implements_advice_data__``
    and applies the stored declaration function."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import _implements_advice
        return _implements_advice(*args, **kw)

    def test_no_existing_implements(self):
        from zope.interface.declarations import Implements
        from zope.interface.declarations import classImplements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')

        class Foo(object):
            __implements_advice_data__ = ((IFoo,), classImplements)

        self._callFUT(Foo)
        # The advice payload is consumed and replaced by a real spec.
        self.assertNotIn('__implements_advice_data__', Foo.__dict__)
        self.assertIsInstance(Foo.__implemented__, Implements) # pylint:disable=no-member
        self.assertEqual(list(Foo.__implemented__), [IFoo]) # pylint:disable=no-member
class Test_implementer(Test_classImplements):
    """The ``@implementer`` decorator: same contract as
    ``classImplements``, plus non-class targets and a leak regression."""

    def _getTargetClass(self):
        from zope.interface.declarations import implementer
        return implementer

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def _callFUT(self, cls, *ifaces):
        decorator = self._makeOne(*ifaces)
        return decorator(cls)

    def test_nonclass_cannot_assign_attr(self):
        # Decorating an object that can't take attributes fails loudly.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decorator = self._makeOne(IFoo)
        self.assertRaises(TypeError, decorator, object())

    def test_nonclass_can_assign_attr(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        class Foo(object):
            pass
        foo = Foo()
        decorator = self._makeOne(IFoo)
        returned = decorator(foo)
        self.assertTrue(returned is foo)
        spec = foo.__implemented__ # pylint:disable=no-member
        # Instances have no usable __name__, hence the '?' placeholder.
        self.assertEqual(spec.__name__, 'zope.interface.tests.test_declarations.?')
        self.assertIsNone(spec.inherit,)
        self.assertIs(foo.__implemented__, spec) # pylint:disable=no-member

    def test_does_not_leak_on_unique_classes(self):
        # Make sure nothing is hanging on to the class or Implements
        # object after they go out of scope. There was briefly a bug
        # in 5.x that caused SpecificationBase._bases (in C) to not be
        # traversed or cleared.
        # https://github.com/zopefoundation/zope.interface/issues/216
        import gc
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        begin_count = len(gc.get_objects())

        for _ in range(1900):
            class TestClass(object):
                pass

            self._callFUT(TestClass, IFoo)

        gc.collect()
        end_count = len(gc.get_objects())
        # How many new objects might still be around? In all currently
        # tested interpreters, there aren't any, so our counts should
        # match exactly. When the bug existed, in a steady state, the loop
        # would grow by two objects each iteration
        fudge_factor = 0
        self.assertLessEqual(end_count, begin_count + fudge_factor)
class Test_implementer_only(Test_classImplementsOnly):
    """The ``@implementer_only`` decorator: same contract as
    ``classImplementsOnly``, and it rejects functions and methods."""

    def _getTargetClass(self):
        from zope.interface.declarations import implementer_only
        return implementer_only

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def _callFUT(self, cls, iface):
        return self._makeOne(iface)(cls)

    def test_function(self):
        # Plain functions cannot carry an "only" declaration.
        from zope.interface.interface import InterfaceClass
        decorator = self._makeOne(InterfaceClass('IFoo'))

        def _function():
            raise NotImplementedError()

        self.assertRaises(ValueError, decorator, _function)

    def test_method(self):
        # Neither can methods.
        from zope.interface.interface import InterfaceClass
        decorator = self._makeOne(InterfaceClass('IFoo'))

        class Bar:
            def _method(self):
                raise NotImplementedError()

        self.assertRaises(ValueError, decorator, Bar._method)
# Test '_implements' by way of 'implements{,Only}', its only callers.
class Test_implementsOnly(unittest.TestCase, _Py3ClassAdvice):
    """Legacy class-advice ``implementsOnly(...)`` inside a class body:
    works on Python 2, raises TypeError on Python 3."""

    def test_simple(self):
        import warnings
        from zope.interface.declarations import implementsOnly
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'implementsOnly': implementsOnly,
                 'IFoo': IFoo,
                }
        locs = {}
        # NOTE(review): no comma after the first string — implicit
        # concatenation yields the single line
        # 'class Foo(object):    implementsOnly(IFoo)', which is still
        # valid Python (simple statement after the colon).
        CODE = "\n".join([
            'class Foo(object):'
            '    implementsOnly(IFoo)',
            ])
        with warnings.catch_warnings(record=True) as log:
            warnings.resetwarnings()
            try:
                exec(CODE, globs, locs) # pylint:disable=exec-used
            except TypeError:
                self.assertTrue(PYTHON3, "Must be Python 3")
            else:
                if PYTHON3:
                    self.fail("Didn't raise TypeError")
                Foo = locs['Foo']
                spec = Foo.__implemented__
                self.assertEqual(list(spec), [IFoo])
                self.assertEqual(len(log), 0) # no longer warn

    def test_called_once_from_class_w_bases(self):
        from zope.interface.declarations import implements
        from zope.interface.declarations import implementsOnly
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        globs = {'implements': implements,
                 'implementsOnly': implementsOnly,
                 'IFoo': IFoo,
                 'IBar': IBar,
                }
        locs = {}
        # NOTE(review): same implicit concatenation for the Bar pair
        # (missing comma), producing a one-line class statement.
        CODE = "\n".join([
            'class Foo(object):',
            '    implements(IFoo)',
            'class Bar(Foo):'
            '    implementsOnly(IBar)',
            ])
        if self._run_generated_code(CODE, globs, locs):
            Bar = locs['Bar']
            spec = Bar.__implemented__
            self.assertEqual(list(spec), [IBar])
class Test_implements(unittest.TestCase, _Py3ClassAdvice):
    """Tests for the legacy ``implements`` class-advice helper.

    Valid only inside a Python 2 class body; raises ``TypeError``
    elsewhere and on Python 3.
    """

    def test_called_from_function(self):
        import warnings
        from zope.interface.declarations import implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'implements': implements, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'def foo():',
            ' implements(IFoo)'
            ])
        # The function *compiles* everywhere; calling it is the error.
        if self._run_generated_code(CODE, globs, locs, False):
            foo = locs['foo']
            with warnings.catch_warnings(record=True) as log:
                warnings.resetwarnings()
                self.assertRaises(TypeError, foo)
                self.assertEqual(len(log), 0)  # no longer warn

    def test_called_twice_from_class(self):
        import warnings
        from zope.interface.declarations import implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        globs = {'implements': implements, 'IFoo': IFoo, 'IBar': IBar}
        locs = {}
        CODE = "\n".join([
            'class Foo(object):',
            ' implements(IFoo)',
            ' implements(IBar)',
            ])
        with warnings.catch_warnings(record=True) as log:
            warnings.resetwarnings()
            try:
                exec(CODE, globs, locs)  # pylint:disable=exec-used
            except TypeError:
                # Raised on Python 3 (advice unsupported) and on
                # Python 2 (advice used twice).
                if not PYTHON3:
                    self.assertEqual(len(log), 0)  # no longer warn
            else:
                self.fail("Didn't raise TypeError")

    def test_called_once_from_class(self):
        from zope.interface.declarations import implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'implements': implements, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'class Foo(object):',
            ' implements(IFoo)',
            ])
        # Only succeeds on Python 2.
        if self._run_generated_code(CODE, globs, locs):
            Foo = locs['Foo']
            spec = Foo.__implemented__
            self.assertEqual(list(spec), [IFoo])
class ProvidesClassTests(unittest.TestCase):
    """Tests for ``zope.interface.declarations.ProvidesClass``."""

    def _getTargetClass(self):
        from zope.interface.declarations import ProvidesClass
        return ProvidesClass

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def _makeClassAndIFoo(self):
        # Common fixture: a fresh class and a fresh interface.
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        return Foo, InterfaceClass("IFoo")

    def test_simple_class_one_interface(self):
        Foo, IFoo = self._makeClassAndIFoo()
        declaration = self._makeOne(Foo, IFoo)
        self.assertEqual(list(declaration), [IFoo])

    def test___reduce__(self):
        from zope.interface.declarations import Provides  # the function
        Foo, IFoo = self._makeClassAndIFoo()
        declaration = self._makeOne(Foo, IFoo)
        factory, args = declaration.__reduce__()
        # Pickling goes through the caching ``Provides`` factory.
        self.assertIs(factory, Provides)
        self.assertEqual(args, (Foo, IFoo))

    def test___get___class(self):
        Foo, IFoo = self._makeClassAndIFoo()
        declaration = Foo.__provides__ = self._makeOne(Foo, IFoo)
        # Access through the class returns the declaration itself.
        self.assertIs(Foo.__provides__, declaration)

    def test___get___instance(self):
        Foo, IFoo = self._makeClassAndIFoo()
        Foo.__provides__ = self._makeOne(Foo, IFoo)
        # Access through an instance is not supported by the descriptor.
        def _test():
            return Foo().__provides__
        self.assertRaises(AttributeError, _test)

    def test__repr__(self):
        inst = self._makeOne(type(self))
        self.assertEqual(
            repr(inst),
            "<zope.interface.Provides for %r>" % type(self)
        )
class Test_Provides(unittest.TestCase):
    """Tests for the caching ``Provides`` factory function."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import Provides
        return Provides(*args, **kw)

    def test_no_cached_spec(self):
        from zope.interface import declarations
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        IFoo = InterfaceClass("IFoo")
        fresh_cache = {}
        with _Monkey(declarations, InstanceDeclarations=fresh_cache):
            spec = self._callFUT(Foo, IFoo)
        # A new declaration is created and stored under (cls, ifaces).
        self.assertEqual(list(spec), [IFoo])
        self.assertIs(fresh_cache[(Foo, IFoo)], spec)

    def test_w_cached_spec(self):
        from zope.interface import declarations
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        IFoo = InterfaceClass("IFoo")
        already_cached = object()
        with _Monkey(declarations,
                     InstanceDeclarations={(Foo, IFoo): already_cached}):
            spec = self._callFUT(Foo, IFoo)
        # The cached object is returned untouched.
        self.assertIs(spec, already_cached)
class Test_directlyProvides(unittest.TestCase):
    """Tests for ``zope.interface.declarations.directlyProvides``."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import directlyProvides
        return directlyProvides(*args, **kw)

    def test_w_normal_object(self):
        # Instances get a ``ProvidesClass`` declaration.
        from zope.interface.declarations import ProvidesClass
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        obj = Foo()
        self._callFUT(obj, IFoo)
        self.assertIsInstance(obj.__provides__, ProvidesClass)  # pylint:disable=no-member
        self.assertEqual(list(obj.__provides__), [IFoo])  # pylint:disable=no-member

    def test_w_class(self):
        # Classes get a ``ClassProvides`` declaration instead.
        from zope.interface.declarations import ClassProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        self._callFUT(Foo, IFoo)
        self.assertIsInstance(Foo.__provides__, ClassProvides)  # pylint:disable=no-member
        self.assertEqual(list(Foo.__provides__), [IFoo])  # pylint:disable=no-member

    @_skip_under_py3k
    def test_w_non_descriptor_aware_metaclass(self):
        # There are no non-descriptor-aware types in Py3k
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class MetaClass(type):
            def __getattribute__(cls, name):
                # Emulate metaclass whose base is not the type object.
                if name == '__class__':
                    return cls
                # Under certain circumstances, the implementedByFallback
                # can get here for __dict__
                return type.__getattribute__(cls, name)  # pragma: no cover
        class Foo(object):
            __metaclass__ = MetaClass
        obj = Foo()
        # Such objects cannot accept declarations.
        self.assertRaises(TypeError, self._callFUT, obj, IFoo)

    def test_w_classless_object(self):
        # Declarations still work when ``__class__`` is unavailable;
        # the assignment is captured via the custom ``__setattr__``.
        from zope.interface.declarations import ProvidesClass
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        the_dict = {}
        class Foo(object):
            def __getattribute__(self, name):
                # Emulate object w/o any class
                if name == '__class__':
                    return None
                raise NotImplementedError(name)
            def __setattr__(self, name, value):
                the_dict[name] = value
        obj = Foo()
        self._callFUT(obj, IFoo)
        self.assertIsInstance(the_dict['__provides__'], ProvidesClass)
        self.assertEqual(list(the_dict['__provides__']), [IFoo])
class Test_alsoProvides(unittest.TestCase):
    """Tests for ``zope.interface.declarations.alsoProvides``."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import alsoProvides
        return alsoProvides(*args, **kw)

    def test_wo_existing_provides(self):
        from zope.interface.declarations import ProvidesClass
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        IFoo = InterfaceClass("IFoo")
        target = Foo()
        self._callFUT(target, IFoo)
        provides = target.__provides__  # pylint:disable=no-member
        self.assertIsInstance(provides, ProvidesClass)
        self.assertEqual(list(provides), [IFoo])

    def test_w_existing_provides(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.declarations import ProvidesClass
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        target = Foo()
        directlyProvides(target, IFoo)
        # alsoProvides extends, rather than replaces, the declaration.
        self._callFUT(target, IBar)
        provides = target.__provides__  # pylint:disable=no-member
        self.assertIsInstance(provides, ProvidesClass)
        self.assertEqual(list(provides), [IFoo, IBar])
class Test_noLongerProvides(unittest.TestCase):
    """Tests for ``zope.interface.declarations.noLongerProvides``."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import noLongerProvides
        return noLongerProvides(*args, **kw)

    def test_wo_existing_provides(self):
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        target = Foo()
        # Removing an interface that was never provided is a no-op.
        self._callFUT(target, InterfaceClass("IFoo"))
        self.assertEqual(list(target.__provides__), [])  # pylint:disable=no-member

    def test_w_existing_provides_hit(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        IFoo = InterfaceClass("IFoo")
        target = Foo()
        directlyProvides(target, IFoo)
        self._callFUT(target, IFoo)
        self.assertEqual(list(target.__provides__), [])  # pylint:disable=no-member

    def test_w_existing_provides_miss(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        target = Foo()
        directlyProvides(target, IFoo)
        # Removing an interface that isn't directly provided leaves the
        # remaining declaration intact.
        self._callFUT(target, IBar)
        self.assertEqual(list(target.__provides__), [IFoo])  # pylint:disable=no-member

    def test_w_iface_implemented_by_class(self):
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        # Interfaces implemented by the class itself cannot be removed
        # from an individual instance.
        with self.assertRaises(ValueError):
            self._callFUT(Foo(), IFoo)
class ClassProvidesBaseFallbackTests(unittest.TestCase):
    """Tests for the pure-Python ``ClassProvidesBase`` descriptor."""

    def _getTargetClass(self):
        # pylint:disable=no-name-in-module
        from zope.interface.declarations import ClassProvidesBaseFallback
        return ClassProvidesBaseFallback

    def _makeOne(self, klass, implements):
        # Don't instantiate directly: the C version can't have attributes
        # assigned.
        class Derived(self._getTargetClass()):
            def __init__(self, k, i):
                self._cls = k
                self._implements = i
        return Derived(klass, implements)

    def test_w_same_class_via_class(self):
        # Accessed through the class it was built for, the descriptor
        # returns itself.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        cpbp = Foo.__provides__ = self._makeOne(Foo, IFoo)
        self.assertTrue(Foo.__provides__ is cpbp)

    def test_w_same_class_via_instance(self):
        # Accessed through an instance, the descriptor returns the
        # ``_implements`` declaration it was built with.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        foo = Foo()
        Foo.__provides__ = self._makeOne(Foo, IFoo)
        self.assertIs(foo.__provides__, IFoo)

    def test_w_different_class(self):
        # The descriptor only answers for the exact class it was built
        # for; subclasses (and their instances) see AttributeError.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        class Bar(Foo):
            pass
        bar = Bar()
        Foo.__provides__ = self._makeOne(Foo, IFoo)
        self.assertRaises(AttributeError, getattr, Bar, '__provides__')
        self.assertRaises(AttributeError, getattr, bar, '__provides__')
class ClassProvidesBaseTests(OptimizationTestMixin,
                             ClassProvidesBaseFallbackTests):
    """Repeat the fallback descriptor tests against the C implementation."""

    def _getTargetClass(self):
        from zope.interface import declarations
        return declarations.ClassProvidesBase

    def _getFallbackClass(self):
        from zope.interface import declarations
        # pylint:disable=no-name-in-module
        return declarations.ClassProvidesBaseFallback
class ClassProvidesTests(unittest.TestCase):
    """Tests for ``zope.interface.declarations.ClassProvides``."""

    def _getTargetClass(self):
        from zope.interface.declarations import ClassProvides
        return ClassProvides

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def _makeImplementedClass(self, iface):
        # Fixture: a fresh class implementing *iface*.
        from zope.interface.declarations import implementer
        @implementer(iface)
        class Foo(object):
            pass
        return Foo

    def test_w_simple_metaclass(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        Foo = self._makeImplementedClass(IFoo)
        cp = Foo.__provides__ = self._makeOne(Foo, type(Foo), IBar)
        # Class access returns the declaration itself; instance access
        # falls back to what the class implements.
        self.assertIs(Foo.__provides__, cp)
        self.assertEqual(list(Foo().__provides__), [IFoo])

    def test___reduce__(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        Foo = self._makeImplementedClass(IFoo)
        cp = Foo.__provides__ = self._makeOne(Foo, type(Foo), IBar)
        self.assertEqual(
            cp.__reduce__(),
            (self._getTargetClass(), (Foo, type(Foo), IBar)))

    def test__repr__(self):
        inst = self._makeOne(type(self), type)
        self.assertEqual(
            repr(inst),
            "<zope.interface.declarations.ClassProvides for %r>" % type(self)
        )
class Test_directlyProvidedBy(unittest.TestCase):
    """Tests for ``zope.interface.declarations.directlyProvidedBy``."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import directlyProvidedBy
        return directlyProvidedBy(*args, **kw)

    def test_wo_declarations_in_class_or_instance(self):
        class Foo(object):
            pass
        self.assertEqual(list(self._callFUT(Foo())), [])

    def test_w_declarations_in_class_but_not_instance(self):
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        # Class-level declarations are *not* "directly provided" by the
        # instance.
        self.assertEqual(list(self._callFUT(Foo())), [])

    def test_w_declarations_in_instance_but_not_class(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        obj = Foo()
        directlyProvides(obj, IFoo)
        self.assertEqual(list(self._callFUT(obj)), [IFoo])

    def test_w_declarations_in_instance_and_class(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        @implementer(IFoo)
        class Foo(object):
            pass
        obj = Foo()
        directlyProvides(obj, IBar)
        # Only the instance-level declaration is reported.
        self.assertEqual(list(self._callFUT(obj)), [IBar])
class Test_classProvides(unittest.TestCase, _Py3ClassAdvice):
    """Tests for the legacy ``classProvides`` class-advice helper.

    Valid only inside a Python 2 class body; raises ``TypeError``
    elsewhere and on Python 3.
    """
    # pylint:disable=exec-used

    def test_called_from_function(self):
        import warnings
        from zope.interface.declarations import classProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'classProvides': classProvides, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'def foo():',
            ' classProvides(IFoo)'
            ])
        # Compiles everywhere; *calling* the function is the error.
        exec(CODE, globs, locs)
        foo = locs['foo']
        with warnings.catch_warnings(record=True) as log:
            warnings.resetwarnings()
            self.assertRaises(TypeError, foo)
            if not PYTHON3:
                self.assertEqual(len(log), 0)  # no longer warn

    def test_called_twice_from_class(self):
        import warnings
        from zope.interface.declarations import classProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        globs = {'classProvides': classProvides, 'IFoo': IFoo, 'IBar': IBar}
        locs = {}
        CODE = "\n".join([
            'class Foo(object):',
            ' classProvides(IFoo)',
            ' classProvides(IBar)',
            ])
        with warnings.catch_warnings(record=True) as log:
            warnings.resetwarnings()
            try:
                exec(CODE, globs, locs)
            except TypeError:
                # Raised on Python 3 (advice unsupported) and on
                # Python 2 (advice used twice).
                if not PYTHON3:
                    self.assertEqual(len(log), 0)  # no longer warn
            else:
                self.fail("Didn't raise TypeError")

    def test_called_once_from_class(self):
        from zope.interface.declarations import classProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'classProvides': classProvides, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'class Foo(object):',
            ' classProvides(IFoo)',
            ])
        # Only succeeds on Python 2.
        if self._run_generated_code(CODE, globs, locs):
            Foo = locs['Foo']
            spec = Foo.__providedBy__
            self.assertEqual(list(spec), [IFoo])
# Test _classProvides_advice through classProvides, its only caller.
class Test_provider(unittest.TestCase):
    """Tests for the ``provider`` class decorator."""

    def _getTargetClass(self):
        from zope.interface.declarations import provider
        return provider

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_w_class(self):
        from zope.interface.declarations import ClassProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @self._makeOne(IFoo)
        class Foo(object):
            pass
        # The decorator installs a ClassProvides declaration on the class.
        provides = Foo.__provides__  # pylint:disable=no-member
        self.assertIsInstance(provides, ClassProvides)
        self.assertEqual(list(provides), [IFoo])
class Test_moduleProvides(unittest.TestCase):
    """Tests for ``moduleProvides``, which is valid only at module scope."""
    # pylint:disable=exec-used

    def test_called_from_function(self):
        from zope.interface.declarations import moduleProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'__name__': 'zope.interface.tests.foo',
                 'moduleProvides': moduleProvides, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'def foo():',
            ' moduleProvides(IFoo)'
            ])
        # Compiles fine; calling from function scope is the error.
        exec(CODE, globs, locs)
        foo = locs['foo']
        self.assertRaises(TypeError, foo)

    def test_called_from_class(self):
        from zope.interface.declarations import moduleProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'__name__': 'zope.interface.tests.foo',
                 'moduleProvides': moduleProvides, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'class Foo(object):',
            ' moduleProvides(IFoo)',
            ])
        # Class scope is rejected immediately, while the body executes.
        with self.assertRaises(TypeError):
            exec(CODE, globs, locs)

    def test_called_once_from_module_scope(self):
        from zope.interface.declarations import moduleProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'__name__': 'zope.interface.tests.foo',
                 'moduleProvides': moduleProvides, 'IFoo': IFoo}
        CODE = "\n".join([
            'moduleProvides(IFoo)',
            ])
        # No ``locs``: module scope means globals *are* the locals.
        exec(CODE, globs)
        spec = globs['__provides__']
        self.assertEqual(list(spec), [IFoo])

    def test_called_twice_from_module_scope(self):
        from zope.interface.declarations import moduleProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'__name__': 'zope.interface.tests.foo',
                 'moduleProvides': moduleProvides, 'IFoo': IFoo}
        CODE = "\n".join([
            'moduleProvides(IFoo)',
            'moduleProvides(IFoo)',
            ])
        # A second call in the same module must fail.
        with self.assertRaises(TypeError):
            exec(CODE, globs)
class Test_getObjectSpecificationFallback(unittest.TestCase):
    """Tests for the pure-Python ``getObjectSpecification`` fallback."""

    def _getFallbackClass(self):
        # pylint:disable=no-name-in-module
        from zope.interface.declarations import getObjectSpecificationFallback
        return getObjectSpecificationFallback

    # The fallback *is* the target here; the C-optimized subclass
    # overrides ``_getTargetClass``.
    _getTargetClass = _getFallbackClass

    def _callFUT(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_wo_existing_provides_classless(self):
        # An object with neither ``__provides__`` nor ``__class__``
        # yields an empty specification.
        the_dict = {}
        class Foo(object):
            def __getattribute__(self, name):
                # Emulate object w/o any class
                if name == '__class__':
                    raise AttributeError(name)
                try:
                    return the_dict[name]
                except KeyError:
                    raise AttributeError(name)
            def __setattr__(self, name, value):
                raise NotImplementedError()
        foo = Foo()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])

    def test_existing_provides_is_spec(self):
        # A valid ``__provides__`` is returned as-is.
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        def foo():
            raise NotImplementedError()
        directlyProvides(foo, IFoo)
        spec = self._callFUT(foo)
        self.assertIs(spec, foo.__provides__)  # pylint:disable=no-member

    def test_existing_provides_is_not_spec(self):
        # An invalid ``__provides__`` is ignored.
        def foo():
            raise NotImplementedError()
        foo.__provides__ = object()  # not a valid spec
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])

    def test_existing_provides(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        foo = Foo()
        directlyProvides(foo, IFoo)
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])

    def test_wo_provides_on_class_w_implements(self):
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        foo = Foo()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])

    def test_wo_provides_on_class_wo_implements(self):
        class Foo(object):
            pass
        foo = Foo()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])

    def test_catches_only_AttributeError_on_provides(self):
        # Only AttributeError from ``__provides__`` may be swallowed.
        MissingSomeAttrs.test_raises(self, self._callFUT,
                                     expected_missing='__provides__')

    def test_catches_only_AttributeError_on_class(self):
        MissingSomeAttrs.test_raises(self, self._callFUT,
                                     expected_missing='__class__',
                                     __provides__=None)

    def test_raises_AttributeError_when_provides_fails_type_check_AttributeError(self):
        # isinstance(ob.__provides__, SpecificationBase) is not
        # protected inside any kind of block.
        class Foo(object):
            __provides__ = MissingSomeAttrs(AttributeError)
        # isinstance() ignores AttributeError on __class__
        self._callFUT(Foo())

    def test_raises_AttributeError_when_provides_fails_type_check_RuntimeError(self):
        # isinstance(ob.__provides__, SpecificationBase) is not
        # protected inside any kind of block.
        class Foo(object):
            __provides__ = MissingSomeAttrs(RuntimeError)
        if PYTHON3:
            with self.assertRaises(RuntimeError) as exc:
                self._callFUT(Foo())
            self.assertEqual('__class__', exc.exception.args[0])
        else:
            # Python 2 catches everything.
            self._callFUT(Foo())
class Test_getObjectSpecification(Test_getObjectSpecificationFallback,
                                  OptimizationTestMixin):
    """Repeat the fallback tests against the C-optimized implementation."""

    def _getTargetClass(self):
        from zope.interface import declarations
        return declarations.getObjectSpecification
class Test_providedByFallback(unittest.TestCase):
    """Tests for the pure-Python ``providedBy`` fallback."""

    def _getFallbackClass(self):
        # pylint:disable=no-name-in-module
        from zope.interface.declarations import providedByFallback
        return providedByFallback

    # The fallback *is* the target here; the C-optimized subclass
    # overrides ``_getTargetClass``.
    _getTargetClass = _getFallbackClass

    def _callFUT(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_wo_providedBy_on_class_wo_implements(self):
        class Foo(object):
            pass
        foo = Foo()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])

    def test_w_providedBy_valid_spec(self):
        # A valid ``__providedBy__`` is used directly.
        from zope.interface.declarations import Provides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = Provides(Foo, IFoo)
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])

    def test_w_providedBy_invalid_spec(self):
        # An invalid ``__providedBy__`` is ignored.
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])

    def test_w_providedBy_invalid_spec_class_w_implements(self):
        # With an invalid ``__providedBy__``, the class's implemented
        # interfaces are used.
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])

    def test_w_providedBy_invalid_spec_w_provides_no_provides_on_class(self):
        # An instance ``__provides__`` (distinct from the class's) wins.
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        expected = foo.__provides__ = object()
        spec = self._callFUT(foo)
        self.assertTrue(spec is expected)

    def test_w_providedBy_invalid_spec_w_provides_diff_provides_on_class(self):
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        expected = foo.__provides__ = object()
        Foo.__provides__ = object()
        spec = self._callFUT(foo)
        self.assertTrue(spec is expected)

    def test_w_providedBy_invalid_spec_w_provides_same_provides_on_class(self):
        # When instance and class share the same (invalid) __provides__,
        # fall back to the implemented interfaces.
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        foo.__provides__ = Foo.__provides__ = object()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])

    def test_super_when_base_implements_interface(self):
        # providedBy(super(...)) reports only the base's interfaces.
        from zope.interface import Interface
        from zope.interface.declarations import implementer
        class IBase(Interface):
            pass
        class IDerived(IBase):
            pass
        @implementer(IBase)
        class Base(object):
            pass
        @implementer(IDerived)
        class Derived(Base):
            pass
        derived = Derived()
        self.assertEqual(list(self._callFUT(derived)), [IDerived, IBase])
        sup = super(Derived, derived)
        fut = self._callFUT(sup)
        # Specs computed for super() objects must not be cached/linked.
        self.assertIsNone(fut._dependents)
        self.assertEqual(list(fut), [IBase])

    def test_super_when_base_doesnt_implement_interface(self):
        from zope.interface import Interface
        from zope.interface.declarations import implementer
        class IBase(Interface):
            pass
        class IDerived(IBase):
            pass
        class Base(object):
            pass
        @implementer(IDerived)
        class Derived(Base):
            pass
        derived = Derived()
        self.assertEqual(list(self._callFUT(derived)), [IDerived])
        sup = super(Derived, derived)
        self.assertEqual(list(self._callFUT(sup)), [])

    def test_super_when_base_is_object(self):
        from zope.interface import Interface
        from zope.interface.declarations import implementer
        class IBase(Interface):
            pass
        class IDerived(IBase):
            pass
        @implementer(IDerived)
        class Derived(object):
            pass
        derived = Derived()
        self.assertEqual(list(self._callFUT(derived)), [IDerived])
        sup = super(Derived, derived)
        fut = self._callFUT(sup)
        self.assertIsNone(fut._dependents)
        self.assertEqual(list(fut), [])

    def test_super_when_object_directly_provides(self):
        # Direct declarations on the instance are invisible via super().
        from zope.interface import Interface
        from zope.interface.declarations import implementer
        from zope.interface.declarations import directlyProvides
        class IBase(Interface):
            pass
        class IDerived(IBase):
            pass
        @implementer(IBase)
        class Base(object):
            pass
        class Derived(Base):
            pass
        derived = Derived()
        self.assertEqual(list(self._callFUT(derived)), [IBase])
        directlyProvides(derived, IDerived)
        self.assertEqual(list(self._callFUT(derived)), [IDerived, IBase])
        sup = super(Derived, derived)
        fut = self._callFUT(sup)
        self.assertIsNone(fut._dependents)
        self.assertEqual(list(fut), [IBase])

    def test_super_multi_level_multi_inheritance(self):
        # super() walks the MRO, so each super object sees only the
        # interfaces contributed by the classes *after* it in the MRO.
        from zope.interface.declarations import implementer
        from zope.interface import Interface
        class IBase(Interface):
            pass
        class IM1(Interface):
            pass
        class IM2(Interface):
            pass
        class IDerived(IBase):
            pass
        class IUnrelated(Interface):
            pass
        @implementer(IBase)
        class Base(object):
            pass
        @implementer(IM1)
        class M1(Base):
            pass
        @implementer(IM2)
        class M2(Base):
            pass
        @implementer(IDerived, IUnrelated)
        class Derived(M1, M2):
            pass
        d = Derived()
        sd = super(Derived, d)
        sm1 = super(M1, d)
        sm2 = super(M2, d)
        self.assertEqual(list(self._callFUT(d)),
                         [IDerived, IUnrelated, IM1, IBase, IM2])
        self.assertEqual(list(self._callFUT(sd)),
                         [IM1, IBase, IM2])
        self.assertEqual(list(self._callFUT(sm1)),
                         [IM2, IBase])
        self.assertEqual(list(self._callFUT(sm2)),
                         [IBase])

    def test_catches_only_AttributeError_on_providedBy(self):
        # Only AttributeError from ``__providedBy__`` may be swallowed.
        MissingSomeAttrs.test_raises(self, self._callFUT,
                                     expected_missing='__providedBy__',
                                     __class__=object)

    def test_catches_only_AttributeError_on_class(self):
        # isinstance() tries to get the __class__, which is non-obvious,
        # so it must be protected too.
        PY3 = str is not bytes
        MissingSomeAttrs.test_raises(self, self._callFUT,
                                     expected_missing='__class__' if PY3 else '__providedBy__')
class Test_providedBy(Test_providedByFallback,
                      OptimizationTestMixin):
    """Repeat the fallback tests against the C-optimized ``providedBy``."""

    def _getTargetClass(self):
        from zope.interface import declarations
        return declarations.providedBy
class ObjectSpecificationDescriptorFallbackTests(unittest.TestCase):
    """Tests for the pure-Python ``ObjectSpecificationDescriptor``."""

    def _getFallbackClass(self):
        # pylint:disable=no-name-in-module
        from zope.interface.declarations import (
            ObjectSpecificationDescriptorFallback)
        return ObjectSpecificationDescriptorFallback

    _getTargetClass = _getFallbackClass

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def _decorate(self, klass, *provided):
        # Attach a ``__provides__`` declaration plus the descriptor
        # under test to *klass*.
        from zope.interface.declarations import Provides
        klass.__provides__ = Provides(klass, *provided)
        klass.__providedBy__ = self._makeOne()

    def test_accessed_via_class(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        self._decorate(Foo, IFoo)
        self.assertEqual(list(Foo.__providedBy__), [IFoo])

    def test_accessed_via_inst_wo_provides(self):
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        @implementer(IFoo)
        class Foo(object):
            pass
        self._decorate(Foo, IBar)
        # Instance access reports what the class implements.
        self.assertEqual(list(Foo().__providedBy__), [IFoo])

    def test_accessed_via_inst_w_provides(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        IBaz = InterfaceClass("IBaz")
        @implementer(IFoo)
        class Foo(object):
            pass
        self._decorate(Foo, IBar)
        instance = Foo()
        directlyProvides(instance, IBaz)
        # Direct declarations come first, then the class's interfaces.
        self.assertEqual(list(instance.__providedBy__), [IBaz, IFoo])
class ObjectSpecificationDescriptorTests(
        ObjectSpecificationDescriptorFallbackTests,
        OptimizationTestMixin):
    """Repeat the fallback tests against the C implementation."""

    def _getTargetClass(self):
        from zope.interface import declarations
        return declarations.ObjectSpecificationDescriptor
# Test _normalizeargs through its callers.
class _Monkey(object):
# context-manager for replacing module names in the scope of a test.
def __init__(self, module, **kw):
self.module = module
self.to_restore = {key: getattr(module, key) for key in kw}
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
class _MonkeyDict(object):
# context-manager for restoring a dict w/in a module in the scope of a test.
def __init__(self, module, attrname, **kw):
self.module = module
self.target = getattr(module, attrname)
self.to_restore = self.target.copy()
self.target.clear()
self.target.update(kw)
def __enter__(self):
return self.target
def __exit__(self, exc_type, exc_val, exc_tb):
self.target.clear()
self.target.update(self.to_restore)
|
src/zope/interface/tests/test_declarations.py
|
codereval_python_data_37
|
Merge multiple orderings so that within-ordering order is preserved
Orderings are constrained in such a way that if an object appears
in two or more orderings, then the suffix that begins with the
object must be in both orderings.
For example:
>>> _mergeOrderings([
... ['x', 'y', 'z'],
... ['q', 'z'],
... [1, 3, 5],
... ['z']
... ])
['x', 'y', 'q', 1, 3, 5, 'z']
def _legacy_mergeOrderings(orderings):
"""Merge multiple orderings so that within-ordering order is preserved
Orderings are constrained in such a way that if an object appears
in two or more orderings, then the suffix that begins with the
object must be in both orderings.
For example:
>>> _mergeOrderings([
... ['x', 'y', 'z'],
... ['q', 'z'],
... [1, 3, 5],
... ['z']
... ])
['x', 'y', 'q', 1, 3, 5, 'z']
"""
seen = set()
result = []
for ordering in reversed(orderings):
for o in reversed(ordering):
if o not in seen:
seen.add(o)
result.insert(0, o)
return result
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Compute a resolution order for an object and its bases.
.. versionchanged:: 5.0
The resolution order is now based on the same C3 order that Python
uses for classes. In complex instances of multiple inheritance, this
may result in a different ordering.
In older versions, the ordering wasn't required to be C3 compliant,
and for backwards compatibility, it still isn't. If the ordering
isn't C3 compliant (if it is *inconsistent*), zope.interface will
make a best guess to try to produce a reasonable resolution order.
Still (just as before), the results in such cases may be
surprising.
.. rubric:: Environment Variables
Due to the change in 5.0, certain environment variables can be used to control errors
and warnings about inconsistent resolution orders. They are listed in priority order, with
variables at the bottom generally overriding variables above them.
ZOPE_INTERFACE_WARN_BAD_IRO
If this is set to "1", then if there is at least one inconsistent resolution
order discovered, a warning (:class:`InconsistentResolutionOrderWarning`) will
be issued. Use the usual warning mechanisms to control this behaviour. The warning
text will contain additional information on debugging.
ZOPE_INTERFACE_TRACK_BAD_IRO
If this is set to "1", then zope.interface will log information about each
inconsistent resolution order discovered, and keep those details in memory in this module
for later inspection.
ZOPE_INTERFACE_STRICT_IRO
If this is set to "1", any attempt to use :func:`ro` that would produce a non-C3
ordering will fail by raising :class:`InconsistentResolutionOrderError`.
There are two environment variables that are independent.
ZOPE_INTERFACE_LOG_CHANGED_IRO
If this is set to "1", then if the C3 resolution order is different from
the legacy resolution order for any given object, a message explaining the differences
will be logged. This is intended to be used for debugging complicated IROs.
ZOPE_INTERFACE_USE_LEGACY_IRO
If this is set to "1", then the C3 resolution order will *not* be used. The
legacy IRO will be used instead. This is a temporary measure and will be removed in the
future. It is intended to help during the transition.
It implies ``ZOPE_INTERFACE_LOG_CHANGED_IRO``.
"""
from __future__ import print_function
__docformat__ = 'restructuredtext'

# The public API of this module; everything else is an implementation detail.
__all__ = [
    'ro',
    'InconsistentResolutionOrderError',
    'InconsistentResolutionOrderWarning',
]
# Lazily created module-level logger; see ``_logger()``.
__logger = None


def _logger():
    """Return the module's logger, creating it on first use.

    The ``logging`` import is deferred so that merely importing this
    module stays cheap.
    """
    global __logger  # pylint:disable=global-statement
    if __logger is not None:
        return __logger
    import logging
    __logger = logging.getLogger(__name__)
    return __logger
def _legacy_mergeOrderings(orderings):
    """Merge multiple orderings so that within-ordering order is preserved

    Orderings are constrained in such a way that if an object appears
    in two or more orderings, then the suffix that begins with the
    object must be in both orderings.

    For example:

    >>> _legacy_mergeOrderings([
    ...     ['x', 'y', 'z'],
    ...     ['q', 'z'],
    ...     [1, 3, 5],
    ...     ['z']
    ... ])
    ['x', 'y', 'q', 1, 3, 5, 'z']
    """
    seen = set()
    result = []
    # Walk everything back-to-front, keeping only the *last* occurrence of
    # each object, then reverse once at the end. Appending plus a single
    # reverse is O(n) overall; the previous ``result.insert(0, o)`` inside
    # the loop made the merge quadratic in the total number of items.
    for ordering in reversed(orderings):
        for o in reversed(ordering):
            if o not in seen:
                seen.add(o)
                result.append(o)
    result.reverse()
    return result
def _legacy_flatten(begin):
    """Return *begin* followed by its ``__bases__``, expanded depth-first.

    Duplicates are *not* removed here; ``_legacy_mergeOrderings`` takes
    care of that afterwards.
    """
    result = [begin]
    # Recursion is avoided by splicing each object's bases into the work
    # list directly after the object itself. The index-based loop keeps
    # working as the list grows underneath it, since it never assumes a
    # fixed length.
    index = 0
    while index < len(result):
        ob = result[index]
        index += 1
        result[index:index] = ob.__bases__
    return result
def _legacy_ro(ob):
    """Return the legacy (pre-C3) resolution order for *ob* as a list."""
    flattened = _legacy_flatten(ob)
    return _legacy_mergeOrderings([flattened])
###
# Compare base objects using identity, not equality. This matches what
# the CPython MRO algorithm does, and is *much* faster to boot: that,
# plus some other small tweaks makes the difference between 25s and 6s
# in loading 446 plone/zope interface.py modules (1925 InterfaceClass,
# 1200 Implements, 1100 ClassProvides objects)
###
class InconsistentResolutionOrderWarning(PendingDeprecationWarning):
    """
    The warning issued when an invalid IRO is requested.

    As a ``PendingDeprecationWarning`` it is hidden by the default warning
    filters; it is only emitted when ``ZOPE_INTERFACE_WARN_BAD_IRO`` is set
    (see ``C3._warn_iro``) or the filters are adjusted.
    """
class InconsistentResolutionOrderError(TypeError):
    """
    The error raised when an invalid IRO is requested in strict mode.
    """

    def __init__(self, c3, base_tree_remaining):
        TypeError.__init__(self)
        self.C = c3.leaf
        # Map each direct base of C to the MRO we computed for it; entry 0
        # of the base tree is ``[C]`` itself, so base i lives at index i+1.
        whole_tree = c3.base_tree
        base_ros = {}
        for position, direct_base in enumerate(self.C.__bases__):
            base_ros[direct_base] = whole_tree[position + 1]
        self.base_ros = base_ros
        # This doesn't necessarily map cleanly back onto ``C.__bases__``:
        # any base whose sequence was fully consumed during the merge has
        # already been removed.
        self.base_tree_remaining = base_tree_remaining

    def __str__(self):
        import pprint
        return "%s: For object %r.\nBase ROs:\n%s\nConflict Location:\n%s" % (
            self.__class__.__name__,
            self.C,
            pprint.pformat(self.base_ros),
            pprint.pformat(self.base_tree_remaining),
        )
class _NamedBool(int):  # cannot actually inherit bool
    """An int used as a boolean that remembers the setting name it came from."""

    def __new__(cls, val, name):
        # BUG FIX: the original called ``super(cls, _NamedBool)``, which has
        # the two arguments reversed. That happens to work when ``cls`` is
        # ``_NamedBool`` itself (class-bound super), but raises TypeError
        # for any subclass. The conventional order is (class, cls).
        inst = super(_NamedBool, cls).__new__(cls, val)
        inst.__name__ = name
        return inst
class _ClassBoolFromEnv(object):
    """
    Non-data descriptor that reads a transformed environment variable
    as a boolean, and caches the result in the class.
    """

    def __get__(self, inst, klass):
        import os
        # Discover which attribute name this descriptor is stored under by
        # scanning the MRO for an entry that *is* this very object.
        my_name = None
        for cls in klass.__mro__:
            for attr in dir(klass):
                if cls.__dict__.get(attr) is self:
                    my_name = attr
                    break
            if my_name is not None:
                break
        else:  # pragma: no cover
            raise RuntimeError("Unable to find self")

        env_name = 'ZOPE_INTERFACE_' + my_name
        val = _NamedBool(os.environ.get(env_name, '') == '1', my_name)
        # Cache the computed value on the class, shadowing this non-data
        # descriptor so the environment is consulted only once. Keep the
        # descriptor reachable under an ORIG_ prefix so tests can restore it.
        setattr(klass, my_name, val)
        setattr(klass, 'ORIG_' + my_name, self)
        return val
class _StaticMRO(object):
    """
    Wraps an MRO that was already resolved and supplied by the caller,
    standing in for a computed resolver where no computation is needed.
    """

    # Whether the supplied ordering was consistent is unknowable here.
    had_inconsistency = None

    def __init__(self, C, mro):
        self.__mro = tuple(mro)
        self.leaf = C

    def mro(self):
        # A fresh list on every call, so callers may mutate it freely.
        return list(self.__mro)
class C3(object):
    # Holds the shared state during computation of an MRO.
    #
    # ``memo`` maps every object already visited to its resolver, so each
    # base is resolved exactly once even in diamond-shaped hierarchies.

    @staticmethod
    def resolver(C, strict, base_mros):
        """
        Return a resolver instance appropriate for *C*.

        :param strict: Tri-state. ``None`` defers to the
            ``ZOPE_INTERFACE_STRICT_IRO`` environment variable; a true value
            selects ``_StrictC3``, which raises instead of guessing.
        :param base_mros: Optional mapping of direct bases of *C* to
            already-computed MROs, which are reused verbatim via
            ``_StaticMRO`` instead of being recomputed.
        """
        strict = strict if strict is not None else C3.STRICT_IRO
        factory = C3
        if strict:
            factory = _StrictC3
        elif C3.TRACK_BAD_IRO:
            factory = _TrackingC3

        memo = {}
        base_mros = base_mros or {}
        for base, mro in base_mros.items():
            assert base in C.__bases__
            memo[base] = _StaticMRO(base, mro)

        return factory(C, memo)

    # Cached computed MRO (as a tuple) and legacy RO; filled in lazily.
    __mro = None
    __legacy_ro = None
    # Replaced with an InconsistentResolutionOrderError if *this* object's
    # own linearization (as opposed to a base's) proved inconsistent.
    direct_inconsistency = False

    def __init__(self, C, memo):
        self.leaf = C
        self.memo = memo
        # Recurse using our own class so strict/tracking variants propagate.
        kind = self.__class__

        base_resolvers = []
        for base in C.__bases__:
            if base not in memo:
                resolver = kind(base, memo)
                memo[base] = resolver
            base_resolvers.append(memo[base])

        # The "base tree" fed to the C3 merge: [C] itself, then the MRO of
        # each direct base, and finally the list of direct bases (which
        # preserves their relative order as a merge constraint).
        self.base_tree = [
            [C]
        ] + [
            memo[base].mro() for base in C.__bases__
        ] + [
            list(C.__bases__)
        ]

        self.bases_had_inconsistency = any(base.had_inconsistency for base in base_resolvers)

        # Single inheritance needs no merge at all.
        if len(C.__bases__) == 1:
            self.__mro = [C] + memo[C.__bases__[0]].mro()

    @property
    def had_inconsistency(self):
        """Whether this object or any of its bases had an inconsistent order."""
        return self.direct_inconsistency or self.bases_had_inconsistency

    @property
    def legacy_ro(self):
        """The pre-C3 resolution order; computed once, returned as a fresh list."""
        if self.__legacy_ro is None:
            self.__legacy_ro = tuple(_legacy_ro(self.leaf))
        return list(self.__legacy_ro)

    # Booleans read from ZOPE_INTERFACE_<NAME> environment variables,
    # computed and cached on the class at first access.
    TRACK_BAD_IRO = _ClassBoolFromEnv()
    STRICT_IRO = _ClassBoolFromEnv()
    WARN_BAD_IRO = _ClassBoolFromEnv()
    LOG_CHANGED_IRO = _ClassBoolFromEnv()
    USE_LEGACY_IRO = _ClassBoolFromEnv()

    # Replaced with a WeakKeyDictionary of {object: (error, stack)} by
    # _TrackingC3 the first time an inconsistency is tracked.
    BAD_IROS = ()

    def _warn_iro(self):
        if not self.WARN_BAD_IRO:
            # For the initial release, one must opt-in to see the warning.
            # In the future (2021?) seeing at least the first warning will
            # be the default
            return
        import warnings
        warnings.warn(
            "An inconsistent resolution order is being requested. "
            "(Interfaces should follow the Python class rules known as C3.) "
            "For backwards compatibility, zope.interface will allow this, "
            "making the best guess it can to produce as meaningful an order as possible. "
            "In the future this might be an error. Set the warning filter to error, or set "
            "the environment variable 'ZOPE_INTERFACE_TRACK_BAD_IRO' to '1' and examine "
            "ro.C3.BAD_IROS to debug, or set 'ZOPE_INTERFACE_STRICT_IRO' to raise exceptions.",
            InconsistentResolutionOrderWarning,
        )

    @staticmethod
    def _can_choose_base(base, base_tree_remaining):
        """Return whether *base* may be emitted next under the C3 rules."""
        # From C3:
        # nothead = [s for s in nonemptyseqs if cand in s[1:]]
        for bases in base_tree_remaining:
            if not bases or bases[0] is base:
                continue

            for b in bases:
                if b is base:
                    # *base* appears in the tail of some sequence, so
                    # choosing it now would violate that ordering.
                    return False
        return True

    @staticmethod
    def _nonempty_bases_ignoring(base_tree, ignoring):
        """Drop *ignoring* from every sequence, then drop emptied sequences."""
        return list(filter(None, [
            [b for b in bases if b is not ignoring]
            for bases
            in base_tree
        ]))

    def _choose_next_base(self, base_tree_remaining):
        """
        Return the next base.

        The return value will either fit the C3 constraints or be our best
        guess about what to do. If we cannot guess, this may raise an exception.
        """
        base = self._find_next_C3_base(base_tree_remaining)
        if base is not None:
            return base
        return self._guess_next_base(base_tree_remaining)

    def _find_next_C3_base(self, base_tree_remaining):
        """
        Return the next base that fits the constraints, or ``None`` if there isn't one.
        """
        for bases in base_tree_remaining:
            base = bases[0]
            if self._can_choose_base(base, base_tree_remaining):
                return base
        return None

    class _UseLegacyRO(Exception):
        # Internal control-flow signal raised by _guess_next_base to make
        # _merge abandon the C3 merge and fall back to the legacy order.
        pass

    def _guess_next_base(self, base_tree_remaining):
        # Narf. We may have an inconsistent order (we won't know for
        # sure until we check all the bases). Python cannot create
        # classes like this:
        #
        #     class B1:
        #         pass
        #     class B2(B1):
        #         pass
        #     class C(B1, B2): # -> TypeError; this is like saying C(B1, B2, B1).
        #         pass
        #
        # However, older versions of zope.interface were fine with this order.
        # A good example is ``providedBy(IOError())``. Because of the way
        # ``classImplements`` works, it winds up with ``__bases__`` ==
        # ``[IEnvironmentError, IIOError, IOSError, <implementedBy Exception>]``
        # (on Python 3). But ``IEnvironmentError`` is a base of both ``IIOError``
        # and ``IOSError``. Previously, we would get a resolution order of
        # ``[IIOError, IOSError, IEnvironmentError, IStandardError, IException, Interface]``
        # but the standard Python algorithm would forbid creating that order entirely.
        #
        # Unlike Python's MRO, we attempt to resolve the issue. A few
        # heuristics have been tried. One was:
        #
        #     Strip off the first (highest priority) base of each direct
        #     base one at a time and seeing if we can come to an agreement
        #     with the other bases. (We're trying for a partial ordering
        #     here.) This often resolves cases (such as the IOSError case
        #     above), and frequently produces the same ordering as the
        #     legacy MRO did. If we looked at all the highest priority
        #     bases and couldn't find any partial ordering, then we strip
        #     them *all* out and begin the C3 step again. We take care not
        #     to promote a common root over all others.
        #
        # If we only did the first part, stripped off the first
        # element of the first item, we could resolve simple cases.
        # But it tended to fail badly. If we did the whole thing, it
        # could be extremely painful from a performance perspective
        # for deep/wide things like Zope's OFS.SimpleItem.Item. Plus,
        # anytime you get ExtensionClass.Base into the mix, you're
        # likely to wind up in trouble, because it messes with the MRO
        # of classes. Sigh.
        #
        # So now, we fall back to the old linearization (fast to compute).
        self._warn_iro()
        self.direct_inconsistency = InconsistentResolutionOrderError(self, base_tree_remaining)
        raise self._UseLegacyRO

    def _merge(self):
        # Returns a merged *list*.
        result = self.__mro = []
        base_tree_remaining = self.base_tree
        base = None
        while 1:
            # Take last picked base out of the base tree wherever it is.
            # This differs slightly from the standard Python MRO and is needed
            # because we have no other step that prevents duplicates
            # from coming in (e.g., in the inconsistent fallback path)
            base_tree_remaining = self._nonempty_bases_ignoring(base_tree_remaining, base)

            if not base_tree_remaining:
                return result
            try:
                base = self._choose_next_base(base_tree_remaining)
            except self._UseLegacyRO:
                self.__mro = self.legacy_ro
                return self.legacy_ro

            result.append(base)

    def mro(self):
        """Return the resolution order for the leaf as a fresh list."""
        if self.__mro is None:
            self.__mro = tuple(self._merge())
        return list(self.__mro)
class _StrictC3(C3):
    """C3 variant with no fallback: an inconsistent order raises immediately."""

    __slots__ = ()

    def _guess_next_base(self, base_tree_remaining):
        # In strict mode there is nothing to guess; surface the conflict.
        raise InconsistentResolutionOrderError(self, base_tree_remaining)
class _TrackingC3(C3):
    """C3 variant that records every inconsistent IRO it encounters.

    Details are kept in ``C3.BAD_IROS`` for later inspection before
    delegating to the normal (guessing) behaviour.
    """

    __slots__ = ()

    def _guess_next_base(self, base_tree_remaining):
        import traceback
        registry = C3.BAD_IROS
        if self.leaf not in registry:
            if registry == ():
                import weakref
                # This is a race condition, but it doesn't matter much.
                registry = C3.BAD_IROS = weakref.WeakKeyDictionary()
            entry = (
                InconsistentResolutionOrderError(self, base_tree_remaining),
                traceback.format_stack(),
            )
            registry[self.leaf] = entry
            _logger().warning("Tracking inconsistent IRO: %s", entry[0])
        return C3._guess_next_base(self, base_tree_remaining)
class _ROComparison(object):
    # Exists to compute and print a pretty string comparison
    # for differing ROs.
    # Since we're used in a logging context, and may actually never be printed,
    # this is a class so we can defer computing the diff until asked.

    # Components we use to build up the comparison report
    class Item(object):
        # One unchanged report entry; subclasses adjust the marker prefix.
        prefix = '  '

        def __init__(self, item):
            self.item = item

        def __str__(self):
            return "%s%s" % (
                self.prefix,
                self.item,
            )

    class Deleted(Item):
        # An entry present only in the legacy RO.
        prefix = '- '

    class Inserted(Item):
        # An entry present only in the C3 RO.
        prefix = '+ '

    # A blank placeholder line (str() == '').
    Empty = str

    class ReplacedBy(object):  # pragma: no cover
        # A chunk of legacy entries replaced by different C3 entries.
        # Iterating yields the marked lines, padded to *total_count* so the
        # two report columns stay the same length.
        prefix = '- '
        suffix = ''

        def __init__(self, chunk, total_count):
            self.chunk = chunk
            self.total_count = total_count

        def __iter__(self):
            lines = [
                self.prefix + str(item) + self.suffix
                for item in self.chunk
            ]
            while len(lines) < self.total_count:
                lines.append('')

            return iter(lines)

    class Replacing(ReplacedBy):
        prefix = "+ "
        suffix = ''

    # Cached report columns, computed on first use by _generate_report().
    _c3_report = None
    _legacy_report = None

    def __init__(self, c3, c3_ro, legacy_ro):
        self.c3 = c3
        self.c3_ro = c3_ro
        self.legacy_ro = legacy_ro

    def __move(self, from_, to_, chunk, operation):
        # Emit *chunk* into the *to_* column wrapped in *operation*, padding
        # the *from_* column with blanks to keep the columns aligned.
        for x in chunk:
            to_.append(operation(x))
            from_.append(self.Empty())

    def _generate_report(self):
        """Compute (and cache) the two equal-length report columns."""
        if self._c3_report is None:
            import difflib
            # The opcodes we get describe how to turn 'a' into 'b'. So
            # the old one (legacy) needs to be first ('a')
            matcher = difflib.SequenceMatcher(None, self.legacy_ro, self.c3_ro)
            # The reports are equal length sequences. We're going for a
            # side-by-side diff.
            self._c3_report = c3_report = []
            self._legacy_report = legacy_report = []
            for opcode, leg1, leg2, c31, c32 in matcher.get_opcodes():
                c3_chunk = self.c3_ro[c31:c32]
                legacy_chunk = self.legacy_ro[leg1:leg2]
                if opcode == 'equal':
                    # Guaranteed same length
                    c3_report.extend((self.Item(x) for x in c3_chunk))
                    legacy_report.extend(self.Item(x) for x in legacy_chunk)
                if opcode == 'delete':
                    # Guaranteed same length
                    assert not c3_chunk
                    self.__move(c3_report, legacy_report, legacy_chunk, self.Deleted)
                if opcode == 'insert':
                    # Guaranteed same length
                    assert not legacy_chunk
                    self.__move(legacy_report, c3_report, c3_chunk, self.Inserted)
                if opcode == 'replace':  # pragma: no cover (How do you make it output this?)
                    # Either side could be longer.
                    chunk_size = max(len(c3_chunk), len(legacy_chunk))
                    c3_report.extend(self.Replacing(c3_chunk, chunk_size))
                    legacy_report.extend(self.ReplacedBy(legacy_chunk, chunk_size))

        return self._c3_report, self._legacy_report

    @property
    def _inconsistent_label(self):
        # 'direct', 'bases', 'direct+bases', or 'no', depending on where
        # the inconsistency (if any) was detected.
        inconsistent = []
        if self.c3.direct_inconsistency:
            inconsistent.append('direct')
        if self.c3.bases_had_inconsistency:
            inconsistent.append('bases')
        return '+'.join(inconsistent) if inconsistent else 'no'

    def __str__(self):
        c3_report, legacy_report = self._generate_report()
        assert len(c3_report) == len(legacy_report)

        left_lines = [str(x) for x in legacy_report]
        right_lines = [str(x) for x in c3_report]

        # We have the same number of lines in the report; this is not
        # necessarily the same as the number of items in either RO.
        assert len(left_lines) == len(right_lines)

        padding = ' ' * 2
        max_left = max(len(x) for x in left_lines)
        max_right = max(len(x) for x in right_lines)

        left_title = 'Legacy RO (len=%s)' % (len(self.legacy_ro),)
        right_title = 'C3 RO (len=%s; inconsistent=%s)' % (
            len(self.c3_ro),
            self._inconsistent_label,
        )
        lines = [
            (padding + left_title.ljust(max_left) + padding + right_title.ljust(max_right)),
            padding + '=' * (max_left + len(padding) + max_right)
        ]
        lines += [
            padding + left.ljust(max_left) + padding + right
            for left, right in zip(left_lines, right_lines)
        ]

        return '\n'.join(lines)
# Set to `Interface` once it is defined. This is used to
# avoid logging false positives about changed ROs: a resolution order
# that differs only in the position of the root is considered unchanged
# by ``ro`` below.
_ROOT = None
def ro(C, strict=None, base_mros=None, log_changed_ro=None, use_legacy_ro=None):
    """
    ro(C) -> list

    Compute the precedence list (mro) according to C3.

    :return: A fresh `list` object.

    .. versionchanged:: 5.0.0
       Add the *strict*, *log_changed_ro* and *use_legacy_ro*
       keyword arguments. These are provisional and likely to be
       removed in the future. They are most useful for testing.
    """
    # ``base_mros`` is an internal optimization and deliberately undocumented.
    resolver = C3.resolver(C, strict, base_mros)
    mro = resolver.mro()

    log_changed = resolver.LOG_CHANGED_IRO if log_changed_ro is None else log_changed_ro
    use_legacy = resolver.USE_LEGACY_IRO if use_legacy_ro is None else use_legacy_ro

    if log_changed or use_legacy:
        legacy_ro = resolver.legacy_ro
        assert isinstance(legacy_ro, list)
        assert isinstance(mro, list)

        if legacy_ro != mro:
            # Did only Interface move? The fix for issue #8 made that
            # somewhat common. It's almost certainly not a problem, though,
            # so allow ignoring it by comparing with the root elided.
            legacy_without_root = [x for x in legacy_ro if x is not _ROOT]
            mro_without_root = [x for x in mro if x is not _ROOT]
            if legacy_without_root != mro_without_root:
                _logger().warning(
                    "Object %r has different legacy and C3 MROs:\n%s",
                    C, _ROComparison(resolver, mro, legacy_ro)
                )

        if resolver.had_inconsistency and legacy_ro == mro:
            _logger().warning(
                "Object %r had inconsistent IRO and used the legacy RO:\n%s"
                "\nInconsistency entered at:\n%s",
                C, _ROComparison(resolver, mro, legacy_ro), resolver.direct_inconsistency
            )

        if use_legacy:
            return legacy_ro

    return mro
def is_consistent(C):
    """
    Check if the resolution order for *C*, as computed by :func:`ro`, is consistent
    according to C3.
    """
    resolver = C3.resolver(C, False, None)
    return not resolver.had_inconsistency
|
src/zope/interface/ro.py
|
codereval_python_data_38
|
Return the interfaces directly provided by the given object
The value returned is an `~zope.interface.interfaces.IDeclaration`.
def directlyProvidedBy(object):  # pylint:disable=redefined-builtin
    """Return the interfaces directly provided by the given object

    The value returned is an `~zope.interface.interfaces.IDeclaration`.
    """
    provides = getattr(object, "__provides__", None)
    # No spec at all, or only the class-level ``Implements`` spec (which we
    # might have been handed as an optimization): nothing is *directly*
    # provided by the object itself.
    if provides is None or isinstance(provides, Implements):
        return _empty
    # The last base of a ``__provides__`` spec is the class-supplied part;
    # strip it to leave only the directly provided interfaces.
    return Declaration(provides.__bases__[:-1])
##############################################################################
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
##############################################################################
"""Implementation of interface declarations
There are three flavors of declarations:
- Declarations are used to simply name declared interfaces.
- ImplementsDeclarations are used to express the interfaces that a
class implements (that instances of the class provides).
Implements specifications support inheriting interfaces.
- ProvidesDeclarations are used to express interfaces directly
provided by objects.
"""
# Docstrings in this module are reStructuredText.
__docformat__ = 'restructuredtext'
import sys
from types import FunctionType
from types import MethodType
from types import ModuleType
import weakref
from zope.interface.advice import addClassAdvisor
from zope.interface.interface import Interface
from zope.interface.interface import InterfaceClass
from zope.interface.interface import SpecificationBase
from zope.interface.interface import Specification
from zope.interface.interface import NameAndModuleComparisonMixin
from zope.interface._compat import CLASS_TYPES as DescriptorAwareMetaClasses
from zope.interface._compat import PYTHON3
from zope.interface._compat import _use_c_impl
__all__ = [
    # None. The public APIs of this module are
    # re-exported from zope.interface directly.
]

# pylint:disable=too-many-lines

# Registry of class-implementation specifications, used for classes (such
# as builtins) whose ``__implemented__`` attribute cannot be set directly;
# see ``implementedBy``.
BuiltinImplementationSpecifications = {}

# Message templates used by APIs that rely on Python 2 class advice.
_ADVICE_ERROR = ('Class advice impossible in Python3. '
                 'Use the @%s class decorator instead.')

_ADVICE_WARNING = ('The %s API is deprecated, and will not work in Python3 '
                   'Use the @%s class decorator instead.')
def _next_super_class(ob):
    """
    Given a ``super`` object *ob*, return the class whose attributes the
    super object would actually consult next.

    This walks the instance's complete MRO rather than just following
    ``__thisclass__.__bases__``: with diamond inheritance, the next class
    in the MRO need not be a direct base of the class that invoked super.
    """
    complete_mro = ob.__self_class__.__mro__
    invoking_class = ob.__thisclass__
    return complete_mro[complete_mro.index(invoking_class) + 1]
class named(object):
    """
    Decorator helper: remembers *name* and, when applied to an object,
    stamps it onto the object as ``__component_name__``.
    """

    def __init__(self, name):
        self.name = name

    def __call__(self, ob):
        ob.__component_name__ = self.name
        return ob
class Declaration(Specification):
    """Interface declarations"""

    __slots__ = ()

    def __init__(self, *bases):
        Specification.__init__(self, _normalizeargs(bases))

    def __contains__(self, interface):
        """Test whether an interface is in the specification"""
        return self.extends(interface) and interface in self.interfaces()

    def __iter__(self):
        """Return an iterator for the interfaces in the specification"""
        return self.interfaces()

    def flattened(self):
        """Return an iterator of all included and extended interfaces"""
        return iter(self.__iro__)

    def __sub__(self, other):
        """Remove interfaces from a specification"""
        # Keep only interfaces that do not (even non-strictly) extend
        # anything in *other*.
        removed = list(other.interfaces())
        kept = [
            iface
            for iface in self.interfaces()
            if not any(iface.extends(candidate, 0) for candidate in removed)
        ]
        return Declaration(*kept)

    def __add__(self, other):
        """Add two specifications or a specification and an interface"""
        # Everything from this declaration, then anything new from *other*,
        # preserving order and eliding duplicates.
        seen = set()
        result = []
        for iface in self.interfaces():
            seen.add(iface)
            result.append(iface)
        for iface in other.interfaces():
            if iface not in seen:
                seen.add(iface)
                result.append(iface)
        return Declaration(*result)

    __radd__ = __add__
class _ImmutableDeclaration(Declaration):
    # A Declaration that is immutable. Used as a singleton to
    # return empty answers for things like ``implementedBy``.
    # We have to define the actual singleton after normalizeargs
    # is defined, and that in turn is defined after InterfaceClass and
    # Implements.

    __slots__ = ()

    # The lazily-created singleton instance; see __new__.
    __instance = None

    def __new__(cls):
        if _ImmutableDeclaration.__instance is None:
            _ImmutableDeclaration.__instance = object.__new__(cls)
        return _ImmutableDeclaration.__instance

    def __reduce__(self):
        # Pickle as a reference to the module-level ``_empty`` singleton.
        return "_empty"

    @property
    def __bases__(self):
        return ()

    @__bases__.setter
    def __bases__(self, new_bases):
        # We expect the superclass constructor to set ``self.__bases__ = ()``.
        # Rather than attempt to special case that in the constructor and allow
        # setting __bases__ only at that time, it's easier to just allow setting
        # the empty tuple at any time. That makes ``x.__bases__ = x.__bases__`` a nice
        # no-op too. (Skipping the superclass constructor altogether is a recipe
        # for maintenance headaches.)
        if new_bases != ():
            raise TypeError("Cannot set non-empty bases on shared empty Declaration.")

    # As the immutable empty declaration, we cannot be changed.
    # This means there's no logical reason for us to have dependents
    # or subscriptions: we'll never notify them. So there's no need for
    # us to keep track of any of that.
    @property
    def dependents(self):
        return {}

    changed = subscribe = unsubscribe = lambda self, _ignored: None

    def interfaces(self):
        # An empty iterator
        return iter(())

    def extends(self, interface, strict=True):
        # The empty declaration only "extends" the root interface.
        return interface is self._ROOT

    def get(self, name, default=None):
        # Nothing is defined on the empty declaration.
        return default

    def weakref(self, callback=None):
        # We're a singleton, we never go away. So there's no need to return
        # distinct weakref objects here; their callbacks will never
        # be called. Instead, we only need to return a callable that
        # returns ourself. The easiest one is to return _ImmutableDeclaration
        # itself; testing on Python 3.8 shows that's faster than a function that
        # returns _empty. (Remember, one goal is to avoid allocating any
        # object, and that includes a method.)
        return _ImmutableDeclaration

    @property
    def _v_attrs(self):
        # _v_attrs is not a public, documented property, but some client
        # code uses it anyway as a convenient place to cache things. To keep
        # the empty declaration truly immutable, we must ignore that. That includes
        # ignoring assignments as well.
        return {}

    @_v_attrs.setter
    def _v_attrs(self, new_attrs):
        pass
##############################################################################
#
# Implementation specifications
#
# These specify interfaces implemented by instances of classes
class Implements(NameAndModuleComparisonMixin,
                 Declaration):
    # Inherit from NameAndModuleComparisonMixin to be
    # mutually comparable with InterfaceClass objects.
    # (The two must be mutually comparable to be able to work in e.g., BTrees.)
    # Instances of this class generally don't have a __module__ other than
    # `zope.interface.declarations`, whereas they *do* have a __name__ that is the
    # fully qualified name of the object they are representing.

    # Note, though, that equality and hashing are still identity based. This
    # accounts for things like nested objects that have the same name (typically
    # only in tests) and is consistent with pickling. As far as comparisons to InterfaceClass
    # goes, we'll never have equal name and module to those, so we're still consistent there.

    # Instances of this class are essentially intended to be unique and are
    # heavily cached (note how our __reduce__ handles this) so having identity
    # based hash and eq should also work.

    # We want equality and hashing to be based on identity. However, we can't actually
    # implement __eq__/__ne__ to do this because sometimes we get wrapped in a proxy.
    # We need to let the proxy types implement these methods so they can handle unwrapping
    # and then rely on: (1) the interpreter automatically changing `implements == proxy` into
    # `proxy == implements` (which will call proxy.__eq__ to do the unwrapping) and then
    # (2) the default equality and hashing semantics being identity based.

    # class whose specification should be used as additional base
    inherit = None

    # interfaces actually declared for a class
    declared = ()

    # Weak cache of {class: <implements>} for super objects.
    # Created on demand. These are rare, as of 5.0 anyway. Using a class
    # level default doesn't take space in instances. Using _v_attrs would be
    # another place to store this without taking space unless needed.
    _super_cache = None

    __name__ = '?'

    @classmethod
    def named(cls, name, *bases):
        # Implementation method: Produce an Implements interface with
        # a fully fleshed out __name__ before calling the constructor, which
        # sets bases to the given interfaces and which may pass this object to
        # other objects (e.g., to adjust dependents). If they're sorting or comparing
        # by name, this needs to be set.
        inst = cls.__new__(cls)
        inst.__name__ = name
        inst.__init__(*bases)
        return inst

    def changed(self, originally_changed):
        # Our bases changed, so any cached "remaining MRO" declarations
        # computed for super() objects are stale; drop the whole cache.
        try:
            del self._super_cache
        except AttributeError:
            pass
        return super(Implements, self).changed(originally_changed)

    def __repr__(self):
        return '<implementedBy %s>' % (self.__name__)

    def __reduce__(self):
        # Pickle by reference: unpickling re-runs implementedBy(inherit),
        # which returns the (cached) canonical instance.
        return implementedBy, (self.inherit, )
def _implements_name(ob):
    """
    Return the ``__name__`` to be used by *ob*'s ``__implemented__`` spec.

    This must be stable for the "same" object across processes because it
    is used for sorting. It needn't be unique (e.g. nested classes named
    ``Foo`` created by different functions), because equality and hashing
    are still identity based. ``__qualname__`` would be nicer on Python 3,
    but would produce different values between Py2 and Py3.
    """
    module = getattr(ob, '__module__', '?') or '?'
    name = getattr(ob, '__name__', '?') or '?'
    return module + '.' + name
def _implementedBy_super(sup):
    """
    Compute (and cache) the interfaces implemented by the portion of the
    MRO that the :class:`super` object *sup* delegates to.
    """
    # TODO: This is now simple enough we could probably implement
    # in C if needed.

    # If the class MRO is strictly linear, we could just
    # follow the normal algorithm for the next class in the
    # search order (e.g., just return
    # ``implemented_by_next``). But when diamond inheritance
    # or mixins + interface declarations are present, we have
    # to consider the whole MRO and compute a new Implements
    # that excludes the classes being skipped over but
    # includes everything else.
    implemented_by_self = implementedBy(sup.__self_class__)
    cache = implemented_by_self._super_cache  # pylint:disable=protected-access
    if cache is None:
        cache = implemented_by_self._super_cache = weakref.WeakKeyDictionary()

    # Cache per invoking class; Implements.changed() discards the whole
    # cache when the underlying declarations change.
    key = sup.__thisclass__
    try:
        return cache[key]
    except KeyError:
        pass

    next_cls = _next_super_class(sup)
    # For ``implementedBy(cls)``:
    # .__bases__ is .declared + [implementedBy(b) for b in cls.__bases__]
    # .inherit is cls

    implemented_by_next = implementedBy(next_cls)
    mro = sup.__self_class__.__mro__
    ix_next_cls = mro.index(next_cls)
    # Everything from next_cls onward contributes; everything before it
    # is being skipped over by super().
    classes_to_keep = mro[ix_next_cls:]
    new_bases = [implementedBy(c) for c in classes_to_keep]

    new = Implements.named(
        implemented_by_self.__name__ + ':' + implemented_by_next.__name__,
        *new_bases
    )
    new.inherit = implemented_by_next.inherit
    new.declared = implemented_by_next.declared
    # I don't *think* that new needs to subscribe to ``implemented_by_self``;
    # it auto-subscribed to its bases, and that should be good enough.
    cache[key] = new

    return new
@_use_c_impl
def implementedBy(cls):  # pylint:disable=too-many-return-statements,too-many-branches
    """Return the interfaces implemented for a class' instances

    The value returned is an `~zope.interface.interfaces.IDeclaration`.
    """
    try:
        if isinstance(cls, super):
            # Yes, this needs to be inside the try: block. Some objects
            # like security proxies even break isinstance.
            return _implementedBy_super(cls)

        spec = cls.__dict__.get('__implemented__')
    except AttributeError:
        # we can't get the class dict. This is probably due to a
        # security proxy. If this is the case, then probably no
        # descriptor was installed for the class.

        # We don't want to depend directly on zope.security in
        # zope.interface, but we'll try to make reasonable
        # accommodations in an indirect way.

        # We'll check to see if there's an implements:
        spec = getattr(cls, '__implemented__', None)
        if spec is None:
            # There's no spec stored in the class. Maybe its a builtin:
            spec = BuiltinImplementationSpecifications.get(cls)
            if spec is not None:
                return spec
            return _empty

        if spec.__class__ == Implements:
            # we defaulted to _empty or there was a spec. Good enough.
            # Return it.
            return spec

        # TODO: need old style __implements__ compatibility?
        # Hm, there's an __implemented__, but it's not a spec. Must be
        # an old-style declaration. Just compute a spec for it
        return Declaration(*_normalizeargs((spec, )))

    if isinstance(spec, Implements):
        return spec

    if spec is None:
        spec = BuiltinImplementationSpecifications.get(cls)
        if spec is not None:
            return spec

    # TODO: need old style __implements__ compatibility?
    spec_name = _implements_name(cls)
    if spec is not None:
        # old-style __implemented__ = foo declaration
        spec = (spec, )  # tuplefy, as it might be just an int
        spec = Implements.named(spec_name, *_normalizeargs(spec))
        spec.inherit = None    # old-style implies no inherit
        del cls.__implemented__  # get rid of the old-style declaration
    else:
        try:
            bases = cls.__bases__
        except AttributeError:
            if not callable(cls):
                raise TypeError("ImplementedBy called for non-factory", cls)
            bases = ()

        spec = Implements.named(spec_name, *[implementedBy(c) for c in bases])
        spec.inherit = cls

    try:
        # Cache the spec on the class itself so subsequent calls are cheap.
        cls.__implemented__ = spec
        if not hasattr(cls, '__providedBy__'):
            cls.__providedBy__ = objectSpecificationDescriptor
        if (isinstance(cls, DescriptorAwareMetaClasses)
                and '__provides__' not in cls.__dict__):
            # Make sure we get a __provides__ descriptor
            cls.__provides__ = ClassProvides(
                cls,
                getattr(cls, '__class__', type(cls)),
            )
    except TypeError:
        # The class's attributes are read-only (e.g. a builtin type);
        # fall back to the module-level registry.
        if not isinstance(cls, type):
            raise TypeError("ImplementedBy called for non-type", cls)
        BuiltinImplementationSpecifications[cls] = spec

    return spec
def classImplementsOnly(cls, *interfaces):
    """
    Declare the only interfaces implemented by instances of a class

    The arguments after the class are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications)
    replace any previous declarations, *including* inherited definitions. If you
    wish to preserve inherited declarations, you can pass ``implementedBy(cls)``
    in *interfaces*. This can be used to alter the interface resolution order.
    """
    spec = implementedBy(cls)
    # Clear out everything inherited. It's important to
    # also clear the bases right now so that we don't improperly discard
    # interfaces that are already implemented by *old* bases that we're
    # about to get rid of.
    # NOTE(review): the order of these three assignments matters —
    # ``spec.__bases__ = ()`` presumably notifies dependents via the
    # Specification machinery and must see the cleared state; verify
    # before reordering.
    spec.declared = ()
    spec.inherit = None
    spec.__bases__ = ()
    _classImplements_ordered(spec, interfaces, ())
def classImplements(cls, *interfaces):
    """
    Declare additional interfaces implemented for instances of a class

    The arguments after the class are one or more interfaces or
    interface specifications (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications)
    are added to any interfaces previously declared. An effort is made to
    keep a consistent C3 resolution order, but this cannot be guaranteed.

    .. versionchanged:: 5.0.0
       Each individual interface in *interfaces* may be added to either the
       beginning or end of the list of interfaces declared for *cls*,
       based on inheritance, in order to try to maintain a consistent
       resolution order. Previously, all interfaces were added to the end.
    .. versionchanged:: 5.1.0
       If *cls* is already declared to implement an interface (or derived interface)
       in *interfaces* through inheritance, the interface is ignored. Previously, it
       would redundantly be made direct base of *cls*, which often produced inconsistent
       interface resolution orders. Now, the order will be consistent, but may change.
       Also, if the ``__bases__`` of the *cls* are later changed, the *cls* will no
       longer be considered to implement such an interface (changing the ``__bases__`` of *cls*
       has never been supported).
    """
    spec = implementedBy(cls)
    interfaces = tuple(_normalizeargs(interfaces))

    # Try to avoid producing an invalid resolution order while remaining
    # backwards compatible (historically everything was appended): an
    # interface that extends something already declared must come first.
    before = []
    after = []
    for iface in interfaces:
        extends_declared = any(iface.extends(b) for b in spec.declared)
        (before if extends_declared else after).append(iface)

    _classImplements_ordered(spec, tuple(before), tuple(after))
def classImplementsFirst(cls, iface):
    """
    Declare that instances of *cls* additionally provide *iface*.

    The second argument is an interface or interface specification.
    It is inserted as the highest-priority (first in the IRO) interface;
    no attempt is made to keep a consistent resolution order.

    .. versionadded:: 5.0.0
    """
    _classImplements_ordered(implementedBy(cls), (iface,), ())
def _classImplements_ordered(spec, before=(), after=()):
    # Rebuild *spec*'s direct declaration: insert *before* at the front
    # and *after* at the back, then recompute ``spec.__bases__``.
    #
    # Elide everything already inherited.
    # Except, if it is the root, and we don't already declare anything else
    # that would imply it, allow the root through. (TODO: When we disallow non-strict
    # IRO, this part of the check can be removed because it's not possible to re-declare
    # like that.)
    before = [
        x
        for x in before
        if not spec.isOrExtends(x) or (x is Interface and not spec.declared)
    ]
    after = [
        x
        for x in after
        if not spec.isOrExtends(x) or (x is Interface and not spec.declared)
    ]
    # eliminate duplicates, preserving first-seen order:
    # new front entries, then existing declarations, then new back entries.
    new_declared = []
    seen = set()
    for l in before, spec.declared, after:
        for b in l:
            if b not in seen:
                new_declared.append(b)
                seen.add(b)
    spec.declared = tuple(new_declared)
    # compute the bases
    # NOTE: *bases* deliberately aliases *new_declared*; the appends below
    # mutate the same list, which is safe because new_declared is not used
    # again after this point.
    bases = new_declared # guaranteed no dupes
    if spec.inherit is not None:
        # Append the implements-specs of the inherited (class) bases that
        # aren't already covered by the direct declaration.
        for c in spec.inherit.__bases__:
            b = implementedBy(c)
            if b not in seen:
                seen.add(b)
                bases.append(b)
    # Assigning __bases__ triggers the Specification machinery to
    # recompute derived state (resolution order, etc.).
    spec.__bases__ = tuple(bases)
def _implements_advice(cls):
interfaces, do_classImplements = cls.__dict__['__implements_advice_data__']
del cls.__implements_advice_data__
do_classImplements(cls, *interfaces)
return cls
class implementer(object):
    """
    Class decorator declaring the interfaces implemented by instances
    of the decorated class.

    The constructor arguments are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications)
    are added to any interfaces previously declared, unless the interface
    is already implemented.  Previous declarations include declarations
    for base classes unless implementsOnly was used.

    This is a convenience wrapper around `classImplements`::

        @implementer(I1)
        class C(object):
            pass

    is equivalent to calling ``classImplements(C, I1)`` after the class
    has been created.

    .. seealso:: `classImplements`
       The change history provided there applies to this function too.
    """
    __slots__ = ('interfaces',)

    def __init__(self, *interfaces):
        self.interfaces = interfaces

    def __call__(self, ob):
        if not isinstance(ob, DescriptorAwareMetaClasses):
            # Not a (new-style or Py2 old-style) class: attach a named
            # Implements spec directly to the object instead.
            spec = Implements.named(_implements_name(ob), *self.interfaces)
            try:
                ob.__implemented__ = spec
            except AttributeError:
                raise TypeError("Can't declare implements", ob)
            return ob

        # The common branch: a real class.
        classImplements(ob, *self.interfaces)
        return ob
class implementer_only(object):
    """Declare the only interfaces implemented by instances of a class.

    This function is called as a class decorator.

    The arguments are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    Previous declarations including declarations for base classes
    are overridden.

    This function is provided for convenience. It provides a more
    convenient way to call `classImplementsOnly`. For example::

        @implementer_only(I1)
        class C(object): pass

    is equivalent to calling::

        classImplementsOnly(C, I1)

    after the class has been created.
    """

    def __init__(self, *interfaces):
        # Stored as given; applied when the decorator is called on a class.
        self.interfaces = interfaces

    def __call__(self, ob):
        if isinstance(ob, (FunctionType, MethodType)):
            # XXX Does this decorator make sense for anything but classes?
            # I don't think so. There can be no inheritance of interfaces
            # on a method or function....
            raise ValueError('The implementer_only decorator is not '
                             'supported for methods or functions.')
        # Assume it's a class:
        classImplementsOnly(ob, *self.interfaces)
        return ob
def _implements(name, interfaces, do_classImplements):
    # Python 2-only helper shared by implements()/implementsOnly().
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    frame = sys._getframe(2)  # pylint:disable=protected-access
    caller_locals = frame.f_locals

    # Try to make sure we were called from a class def. In 2.2.0 we can't
    # check for __module__ since it doesn't seem to be added to the locals
    # until later on.
    called_from_class = (
        caller_locals is not frame.f_globals and '__module__' in caller_locals
    )
    if not called_from_class:
        raise TypeError(name + " can be used only from a class definition.")
    if '__implements_advice_data__' in caller_locals:
        raise TypeError(name + " can be used only once in a class definition.")

    # Stash the declaration; _implements_advice applies it when the class
    # object is actually created.
    caller_locals['__implements_advice_data__'] = interfaces, do_classImplements
    addClassAdvisor(_implements_advice, depth=3)
def implements(*interfaces):
    """
    Declare interfaces implemented by instances of a class.

    .. deprecated:: 5.0
       This only works for Python 2. The `implementer` decorator
       is preferred for all versions.

    This function is called in a class definition.  The arguments are one
    or more interfaces or interface specifications
    (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications)
    are added to any interfaces previously declared.  Previous declarations
    include declarations for base classes unless `implementsOnly` was used.

    It is a convenience wrapper: ``implements(I1)`` inside a class body
    is equivalent to calling ``classImplements(C, I1)`` after class ``C``
    has been created.
    """
    # Class advice only exists on Python 2; bail out early elsewhere.
    # (Coverage of this branch is intentionally not pursued on Py3.)
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'implementer')
    _implements("implements", interfaces, classImplements)
def implementsOnly(*interfaces):
    """Declare the only interfaces implemented by instances of a class.

    This function is called in a class definition.

    The arguments are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    Previous declarations including declarations for base classes
    are overridden.

    This function is provided for convenience. It provides a more
    convenient way to call `classImplementsOnly`. For example::

        implementsOnly(I1)

    is equivalent to calling::

        classImplementsOnly(C, I1)

    after the class ``C`` has been created.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'implementer_only')
    _implements("implementsOnly", interfaces, classImplementsOnly)
##############################################################################
#
# Instance declarations
class Provides(Declaration):  # Really named ProvidesClass
    """Implement ``__provides__``, the instance-specific specification.

    When an object is pickled, we pickle the interfaces that it implements.
    """

    def __init__(self, cls, *interfaces):
        # __args is kept for __reduce__ so unpickling reconstructs the
        # same (cls, *interfaces) declaration.
        self.__args = (cls, ) + interfaces
        self._cls = cls
        # The class's own implements-spec is appended as the lowest-
        # priority base of this declaration.
        Declaration.__init__(self, *(interfaces + (implementedBy(cls), )))

    def __repr__(self):
        return "<%s.%s for %s>" % (
            self.__class__.__module__,
            self.__class__.__name__,
            self._cls,
        )

    def __reduce__(self):
        # ``Provides`` here resolves at unpickling time to the module-level
        # name, which is rebound below to the caching factory function.
        return Provides, self.__args

    # Pickle/repr under the public package name rather than the
    # implementation module.
    __module__ = 'zope.interface'

    def __get__(self, inst, cls):
        """Make sure that a class __provides__ doesn't leak to an instance.
        """
        if inst is None and cls is self._cls:
            # We were accessed through a class, so we are the class'
            # provides spec. Just return this object, but only if we are
            # being called on the same class that we were defined for:
            return self

        raise AttributeError('__provides__')
# Keep a reference to the class itself before the name ``Provides`` is
# rebound to the caching factory function below.
ProvidesClass = Provides

# Registry of instance declarations
# This is a memory optimization to allow objects to share specifications.
# Weak values let unreferenced declarations be collected.
InstanceDeclarations = weakref.WeakValueDictionary()
def Provides(*interfaces):  # pylint:disable=function-redefined
    """Cache instance declarations.

    Instance declarations are shared among instances that have the same
    declaration. The declarations are cached in a weak value dictionary.
    """
    try:
        return InstanceDeclarations[interfaces]
    except KeyError:
        # First time this exact tuple of interfaces is requested:
        # build and cache a shared ProvidesClass instance.
        spec = ProvidesClass(*interfaces)
        InstanceDeclarations[interfaces] = spec
        return spec

Provides.__safe_for_unpickling__ = True
def directlyProvides(object, *interfaces):  # pylint:disable=redefined-builtin
    """Declare interfaces declared directly for an object.

    The arguments after the object are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications)
    replace interfaces previously declared for the object.
    """
    cls = getattr(object, '__class__', None)
    if cls is not None and getattr(cls, '__class__', None) is cls:
        # It's a meta class (well, at least it could be an extension class)
        # Note that we can't get here from Py3k tests: there is no normal
        # class which isn't descriptor aware.
        if not isinstance(object,
                          DescriptorAwareMetaClasses):
            raise TypeError("Attempt to make an interface declaration on a "
                            "non-descriptor-aware class")

    interfaces = _normalizeargs(interfaces)
    if cls is None:
        # No __class__ attribute; fall back to the actual type.
        cls = type(object)

    # Is *object* itself a class/type (its type derives from one of the
    # descriptor-aware metaclasses)?
    issub = False
    for damc in DescriptorAwareMetaClasses:
        if issubclass(cls, damc):
            issub = True
            break
    if issub:
        # we have a class or type.  We'll use a special descriptor
        # that provides some extra caching
        object.__provides__ = ClassProvides(object, cls, *interfaces)
    else:
        # Plain instance: attach a (cached) Provides declaration.
        object.__provides__ = Provides(cls, *interfaces)
def alsoProvides(object, *interfaces):  # pylint:disable=redefined-builtin
    """Declare additional interfaces directly provided by an object.

    The arguments after the object are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications)
    are added to the interfaces previously declared for the object.
    """
    # Re-declare: the existing direct declaration plus the new interfaces.
    current = directlyProvidedBy(object)
    directlyProvides(object, current, *interfaces)
def noLongerProvides(object, interface):  # pylint:disable=redefined-builtin
    """Remove a directly provided interface from an object.
    """
    # Re-declare with *interface* subtracted from the direct declaration.
    remaining = directlyProvidedBy(object) - interface
    directlyProvides(object, remaining)
    if interface.providedBy(object):
        # Still provided (e.g. implemented by the class itself), so the
        # removal cannot take effect.
        raise ValueError("Can only remove directly provided interfaces.")
@_use_c_impl
class ClassProvidesBase(SpecificationBase):
    """Descriptor base for class ``__provides__`` attributes."""

    __slots__ = (
        '_cls',
        '_implements',
    )

    def __get__(self, inst, cls):
        # member slots are set by subclass
        # pylint:disable=no-member
        if cls is not self._cls:
            # Accessed through some other class than the one this
            # descriptor was created for -- behave as if unset.
            raise AttributeError('__provides__')
        # Class access returns this provides spec; instance access
        # returns the cached implementedBy info.
        return self if inst is None else self._implements
class ClassProvides(Declaration, ClassProvidesBase):
    """Special descriptor for class ``__provides__``.

    The descriptor caches the implementedBy info, so that
    we can get declarations for objects without instance-specific
    interfaces a bit quicker.
    """

    __slots__ = (
        '__args',
    )

    def __init__(self, cls, metacls, *interfaces):
        # _cls/_implements are the slots declared on ClassProvidesBase
        # and drive its __get__.
        self._cls = cls
        self._implements = implementedBy(cls)
        # __args is kept for __reduce__ so pickling round-trips.
        self.__args = (cls, metacls, ) + interfaces
        # The metaclass's implements-spec becomes the lowest-priority base.
        Declaration.__init__(self, *(interfaces + (implementedBy(metacls), )))

    def __repr__(self):
        return "<%s.%s for %s>" % (
            self.__class__.__module__,
            self.__class__.__name__,
            self._cls,
        )

    def __reduce__(self):
        return self.__class__, self.__args

    # Copy base-class method for speed
    __get__ = ClassProvidesBase.__get__
def directlyProvidedBy(object):  # pylint:disable=redefined-builtin
    """Return the interfaces directly provided by the given object.

    The value returned is an `~zope.interface.interfaces.IDeclaration`.
    """
    provides = getattr(object, "__provides__", None)

    # No spec at all, or we picked up the class's implements spec (an
    # optimization stores that there); either way nothing is *directly*
    # provided by the object itself.
    if provides is None or isinstance(provides, Implements):
        return _empty

    # Strip off the class part of the spec (it is always the last base).
    return Declaration(provides.__bases__[:-1])
def classProvides(*interfaces):
    """Declare interfaces provided directly by a class.

    This function is called in a class definition.

    The arguments are one or more interfaces or interface specifications
    (`~zope.interface.interfaces.IDeclaration` objects).

    The given interfaces (including the interfaces in the specifications)
    are used to create the class's direct-object interface specification.
    An error will be raised if the class already has a direct interface
    specification. In other words, it is an error to call this function more
    than once in a class definition.

    Note that the given interfaces have nothing to do with the interfaces
    implemented by instances of the class.

    This function is provided for convenience. It provides a more convenient
    way to call `directlyProvides` for a class. For example::

        classProvides(I1)

    is equivalent to calling::

        directlyProvides(theclass, I1)

    after the class has been created.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'provider')

    frame = sys._getframe(1)  # pylint:disable=protected-access
    locals = frame.f_locals  # pylint:disable=redefined-builtin

    # Try to make sure we were called from a class def
    if (locals is frame.f_globals) or ('__module__' not in locals):
        raise TypeError("classProvides can be used only from a "
                        "class definition.")

    if '__provides__' in locals:
        raise TypeError(
            "classProvides can only be used once in a class definition.")

    # Stash the normalized interfaces; _classProvides_advice converts them
    # into a real declaration once the class object exists.
    locals["__provides__"] = _normalizeargs(interfaces)

    addClassAdvisor(_classProvides_advice, depth=2)
def _classProvides_advice(cls):
    # Py2-only: applied when the class body used classProvides().
    # This entire approach is invalid under Py3K; coverage is
    # intentionally not pursued there.
    declared = cls.__dict__['__provides__']
    del cls.__provides__
    directlyProvides(cls, *declared)
    return cls
class provider(object):
    """Class decorator version of classProvides"""

    def __init__(self, *interfaces):
        # Interfaces (or specifications) to declare on the decorated class.
        self.interfaces = interfaces

    def __call__(self, ob):
        # Declare the stored interfaces as directly provided by *ob*
        # (the decorated class) and return it unchanged.
        directlyProvides(ob, *self.interfaces)
        return ob
def moduleProvides(*interfaces):
    """Declare interfaces provided by a module.

    This function is used in a module definition.

    The arguments are one or more interfaces or interface specifications
    (`~zope.interface.interfaces.IDeclaration` objects).

    The given interfaces (including the interfaces in the specifications) are
    used to create the module's direct-object interface specification.  An
    error will be raised if the module already has an interface specification.
    In other words, it is an error to call this function more than once in a
    module definition.

    This function is provided for convenience. It provides a more convenient
    way to call directlyProvides. For example::

        moduleProvides(I1)

    is equivalent to::

        directlyProvides(sys.modules[__name__], I1)
    """
    frame = sys._getframe(1)  # pylint:disable=protected-access
    locals = frame.f_locals  # pylint:disable=redefined-builtin

    # Try to make sure we were called from a module body: there, the
    # frame's locals are its globals and '__name__' is present.
    if (locals is not frame.f_globals) or ('__name__' not in locals):
        raise TypeError(
            "moduleProvides can only be used from a module definition.")

    if '__provides__' in locals:
        raise TypeError(
            "moduleProvides can only be used once in a module definition.")

    locals["__provides__"] = Provides(ModuleType,
                                      *_normalizeargs(interfaces))
##############################################################################
#
# Declaration querying support
# XXX: is this a fossil? Nobody calls it, no unit tests exercise it, no
# doctests import it, and the package __init__ doesn't import it.
# (Answer: Versions of zope.container prior to 4.4.0 called this.)
def ObjectSpecification(direct, cls):
    """Provide object specifications.

    These combine information for the object and for its classes.

    :param direct: the object's direct declaration.
    :param cls: the object's class.
    :return: a combined `Provides` declaration.
    """
    return Provides(cls, direct)  # pragma: no cover fossil
@_use_c_impl
def getObjectSpecification(ob):
    """Return the complete specification for *ob*.

    Prefers an instance-level ``__provides__`` spec; otherwise falls back
    to what the object's class implements.
    """
    provides = getattr(ob, '__provides__', None)
    # None (missing) is never a SpecificationBase, so one check suffices.
    if isinstance(provides, SpecificationBase):
        return provides

    try:
        cls = ob.__class__
    except AttributeError:
        # We can't get the class, so just consider provides
        return _empty
    return implementedBy(cls)
@_use_c_impl
def providedBy(ob):
    """
    Return the interfaces provided by *ob*.

    If *ob* is a :class:`super` object, then only interfaces implemented
    by the remainder of the classes in the method resolution order are
    considered. Interfaces directly provided by the object underlying *ob*
    are not.
    """
    # Here we have either a special object, an old-style declaration
    # or a descriptor

    # Try to get __providedBy__
    try:
        if isinstance(ob, super): # Some objects raise errors on isinstance()
            return implementedBy(ob)

        r = ob.__providedBy__
    except AttributeError:
        # Not set yet. Fall back to lower-level thing that computes it
        return getObjectSpecification(ob)

    try:
        # We might have gotten a descriptor from an instance of a
        # class (like an ExtensionClass) that doesn't support
        # descriptors.  We'll make sure we got one by trying to get
        # the only attribute, which all specs have.
        r.extends
    except AttributeError:
        # The object's class doesn't understand descriptors.
        # Sigh. We need to get an object descriptor, but we have to be
        # careful.  We want to use the instance's __provides__, if
        # there is one, but only if it didn't come from the class.
        try:
            r = ob.__provides__
        except AttributeError:
            # No __provides__, so just fall back to implementedBy
            return implementedBy(ob.__class__)
        # We need to make sure we got the __provides__ from the
        # instance. We'll do this by making sure we don't get the same
        # thing from the class:
        try:
            cp = ob.__class__.__provides__
        except AttributeError:
            # The ob doesn't have a class or the class has no
            # provides, assume we're done:
            return r
        if r is cp:
            # Oops, we got the provides from the class. This means
            # the object doesn't have it's own. We should use implementedBy
            return implementedBy(ob.__class__)

    # r passed the ``extends`` probe (or was replaced by a genuine
    # instance-level __provides__): it's a real spec.
    return r
@_use_c_impl
class ObjectSpecificationDescriptor(object):
    """Implement the ``__providedBy__`` attribute.

    The ``__providedBy__`` attribute computes the interfaces provided by
    an object.
    """

    def __get__(self, inst, cls):
        """Return the specification for *inst*, or for *cls* when
        accessed through the class."""
        if inst is None:
            # Class-level access.
            return getObjectSpecification(cls)

        # Prefer an instance-specific __provides__; otherwise fall back
        # to what the class implements.
        provides = getattr(inst, '__provides__', None)
        return provides if provides is not None else implementedBy(cls)
##############################################################################
def _normalizeargs(sequence, output=None):
    """Normalize declaration arguments.

    Declaration arguments may contain Declarations, tuples, or single
    interfaces.  Anything but individual interfaces or implements specs
    is expanded recursively.
    """
    if output is None:
        output = []

    # Note: .__class__ (not type()) to mirror how declarations/proxies
    # are inspected elsewhere in this module.
    mro = sequence.__class__.__mro__
    if InterfaceClass in mro or Implements in mro:
        output.append(sequence)
    else:
        for item in sequence:
            _normalizeargs(item, output)

    return output
# Shared singleton meaning "no interfaces declared".
_empty = _ImmutableDeclaration()
# Module-level instance of the descriptor defined above; presumably
# installed as ``__providedBy__`` by other modules -- confirm at usage sites.
objectSpecificationDescriptor = ObjectSpecificationDescriptor()
|
src/zope/interface/declarations.py
|
codereval_python_data_39
|
Reduce a list of base classes to its ordered minimum equivalent
def minimalBases(classes):
    """Reduce a list of base classes to its ordered minimum equivalent"""
    if not __python3: # pragma: no cover
        # Python 2: classic-class entries are always redundant here.
        classes = [c for c in classes if c is not ClassType]

    result = []
    for candidate in classes:
        # Drop *candidate* if anything else in *classes* is a proper
        # subclass of it.
        if any(issubclass(other, candidate) and other is not candidate
               for other in classes):
            continue
        if candidate in result:
            # Re-append so the survivor sits later in the list.
            result.remove(candidate)
        result.append(candidate)
    return result
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Class advice.
This module was adapted from 'protocols.advice', part of the Python
Enterprise Application Kit (PEAK). Please notify the PEAK authors
(pje@telecommunity.com and tsarna@sarna.org) if bugs are found or
Zope-specific changes are required, so that the PEAK version of this module
can be kept in sync.
PEAK is a Python application framework that interoperates with (but does
not require) Zope 3 and Twisted. It provides tools for manipulating UML
models, object-relational persistence, aspect-oriented programming, and more.
Visit the PEAK home page at http://peak.telecommunity.com for more information.
"""
from types import FunctionType
try:
from types import ClassType
except ImportError:
__python3 = True
else:
__python3 = False
__all__ = [
'addClassAdvisor',
'determineMetaclass',
'getFrameInfo',
'isClassAdvisor',
'minimalBases',
]
import sys
def getFrameInfo(frame):
    """Return (kind, module, locals, globals) for a frame.

    'kind' is one of "exec", "module", "class", "function call", or "unknown".
    """
    f_locals = frame.f_locals
    f_globals = frame.f_globals

    sameNamespace = f_locals is f_globals
    hasModule = '__module__' in f_locals
    hasName = '__name__' in f_globals

    # True when the frame's locals carry a __module__ matching the
    # globals' __name__ (the signature of a class body).
    sameName = (hasModule and hasName
                and f_globals['__name__'] == f_locals['__module__'])

    module = sys.modules.get(f_globals['__name__']) if hasName else None
    namespaceIsModule = module and module.__dict__ is f_globals

    if not namespaceIsModule:
        # some kind of funky exec
        return "exec", module, f_locals, f_globals
    if sameNamespace and not hasModule:
        return "module", module, f_locals, f_globals
    if sameName and not sameNamespace:
        return "class", module, f_locals, f_globals
    if not sameNamespace:
        return "function call", module, f_locals, f_globals
    # pragma: no cover -- locals is globals yet '__module__' is set:
    # probably module-level code with a stray '__module__' variable.
    return "unknown", module, f_locals, f_globals
def addClassAdvisor(callback, depth=2):
    """Set up 'callback' to be passed the containing class upon creation.

    This function is designed to be called by an "advising" function executed
    in a class suite.  The "advising" function supplies a callback that it
    wishes to have executed when the containing class is created.  The
    callback will be given one argument: the newly created containing class.
    The return value of the callback will be used in place of the class, so
    the callback should return the input if it does not wish to replace the
    class.

    The optional 'depth' argument to this function determines the number of
    frames between this function and the targeted class suite.  'depth'
    defaults to 2, since this skips this function's frame and one calling
    function frame.  If you use this function from a function called directly
    in the class suite, the default will be correct, otherwise you will need
    to determine the correct depth yourself.

    This function works by installing a special class factory function in
    place of the '__metaclass__' of the containing class.  Therefore, only
    callbacks *after* the last '__metaclass__' assignment in the containing
    class will be executed.  Be sure that classes using "advising" functions
    declare any '__metaclass__' *first*, to ensure all callbacks are run."""
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if __python3: # pragma: no cover
        raise TypeError('Class advice impossible in Python3')

    frame = sys._getframe(depth)
    kind, module, caller_locals, caller_globals = getFrameInfo(frame)

    # This causes a problem when zope interfaces are used from doctest.
    # In these cases, kind == "exec".
    #
    #if kind != "class":
    #    raise SyntaxError(
    #        "Advice must be in the body of a class statement"
    #    )

    previousMetaclass = caller_locals.get('__metaclass__')
    if __python3:   # pragma: no cover
        defaultMetaclass = caller_globals.get('__metaclass__', type)
    else:
        defaultMetaclass = caller_globals.get('__metaclass__', ClassType)

    def advise(name, bases, cdict):
        # Stand-in metaclass: build the class with the real metaclass,
        # then hand it to the callback (which may replace it).
        if '__metaclass__' in cdict:
            del cdict['__metaclass__']

        if previousMetaclass is None:
            if bases:
                # find best metaclass or use global __metaclass__ if no bases
                meta = determineMetaclass(bases)
            else:
                meta = defaultMetaclass

        elif isClassAdvisor(previousMetaclass):
            # special case: we can't compute the "true" metaclass here,
            # so we need to invoke the previous metaclass and let it
            # figure it out for us (and apply its own advice in the process)
            meta = previousMetaclass

        else:
            meta = determineMetaclass(bases, previousMetaclass)

        newClass = meta(name, bases, cdict)

        # this lets the callback replace the class completely, if it wants to
        return callback(newClass)

    # introspection data only, not used by inner function
    advise.previousMetaclass = previousMetaclass
    advise.callback = callback

    # install the advisor
    caller_locals['__metaclass__'] = advise
def isClassAdvisor(ob):
    """Return True if *ob* is a class-advisor function."""
    # Advisors are the 'advise' closures installed by addClassAdvisor;
    # they are ordinary functions carrying a 'previousMetaclass' attribute.
    if not isinstance(ob, FunctionType):
        return False
    return hasattr(ob, 'previousMetaclass')
def determineMetaclass(bases, explicit_mc=None):
    """Determine metaclass from 1+ bases and optional explicit __metaclass__"""
    meta = [getattr(base, '__class__', type(base)) for base in bases]

    if explicit_mc is not None:
        # The explicit metaclass must be verified for compatibility as
        # well, and is allowed to resolve otherwise-incompatible bases.
        meta.append(explicit_mc)

    if len(meta) == 1:
        # Single candidate: trivially compatible.
        return meta[0]

    candidates = minimalBases(meta)   # minimal set of metaclasses
    if not candidates:  # pragma: no cover
        # they're all "classic" classes -- Python 2 only.
        assert(not __python3)
        return ClassType

    if len(candidates) > 1:
        # We could auto-combine, but for now we won't...
        raise TypeError("Incompatible metatypes", bases)

    # Just one, return it
    return candidates[0]
def minimalBases(classes):
    """Reduce a list of base classes to its ordered minimum equivalent"""
    if not __python3: # pragma: no cover
        # Python 2 only: classic classes never count as candidates.
        classes = [c for c in classes if c is not ClassType]

    winners = []
    for base in classes:
        has_derived = any(
            other is not base and issubclass(other, base)
            for other in classes
        )
        if has_derived:
            # Some other entry already specializes *base*; drop it.
            continue
        if base in winners:
            winners.remove(base)  # keep only the later occurrence
        winners.append(base)
    return winners
|
src/zope/interface/advice.py
|
codereval_python_data_40
|
Return attribute names and descriptions defined by interface.
    def namesAndDescriptions(self, all=False): # pylint:disable=redefined-builtin
        """Return attribute names and descriptions defined by interface.

        :param all: when true, also include definitions inherited from
            base interfaces; bases are walked in reverse order so that
            later (more-derived) definitions overwrite earlier ones.
        :return: an items view/list of ``(name, attribute)`` pairs.
        """
        if not all:
            return self.__attrs.items()
        r = {}
        # Reverse order: a name defined in a more-derived base (or in this
        # interface itself, updated last) wins.
        for base in self.__bases__[::-1]:
            r.update(dict(base.namesAndDescriptions(all)))
        r.update(self.__attrs)
        return r.items()
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interface object implementation
"""
# pylint:disable=protected-access
import sys
from types import MethodType
from types import FunctionType
import weakref
from zope.interface._compat import _use_c_impl
from zope.interface._compat import PYTHON2 as PY2
from zope.interface.exceptions import Invalid
from zope.interface.ro import ro as calculate_ro
from zope.interface import ro
__all__ = [
# Most of the public API from this module is directly exported
# from zope.interface. The only remaining public API intended to
# be imported from here should be those few things documented as
# such.
'InterfaceClass',
'Specification',
'adapter_hooks',
]
# Code-object flag bits; values match CPython's co_flags constants
# (cf. ``inspect.CO_VARARGS`` / ``inspect.CO_VARKEYWORDS``).
CO_VARARGS = 4
CO_VARKEYWORDS = 8
# Put in the attrs dict of an interface by ``taggedValue`` and ``invariants``
TAGGED_DATA = '__interface_tagged_values__'
# Put in the attrs dict of an interface by ``interfacemethod``
INTERFACE_METHODS = '__interface_methods__'
# Sentinel returned by definition-time helpers (e.g. ``invariant``,
# ``taggedValue``) to signal "this is not an attribute value".
_decorator_non_return = object()
# Generic unique sentinel for "no argument supplied".
_marker = object()
def invariant(call):
    """Register *call* as an invariant of the interface being defined.

    Must be used inside an interface (class) body; the callable is
    recorded in the body's tagged-value data.
    """
    caller_locals = sys._getframe(1).f_locals
    tags = caller_locals.setdefault(TAGGED_DATA, {})
    tags.setdefault('invariants', []).append(call)
    return _decorator_non_return
def taggedValue(key, value):
    """Attaches a tagged value to an interface at definition time."""
    # Record (key, value) in the tagged-value dict of the enclosing
    # interface (class) body.
    caller_locals = sys._getframe(1).f_locals
    caller_locals.setdefault(TAGGED_DATA, {})[key] = value
    return _decorator_non_return
class Element(object):
    """
    Default implementation of `zope.interface.interfaces.IElement`.
    """

    # We can't say this yet because we don't have enough
    # infrastructure in place.
    #
    #implements(IElement)

    def __init__(self, __name__, __doc__=''): # pylint:disable=redefined-builtin
        if not __doc__ and ' ' in __name__:
            # A "name" containing spaces is really an anonymous doc string.
            __doc__ = __name__
            __name__ = None
        self.__name__ = __name__
        self.__doc__ = __doc__
        # Tagged values are rare, especially on methods or attributes;
        # allocating the dict lazily saves substantial memory.
        self.__tagged_values = None

    def getName(self):
        """ Returns the name of the object. """
        return self.__name__

    def getDoc(self):
        """ Returns the documentation for the object. """
        return self.__doc__

    ###
    # Tagged values.
    #
    # Direct tagged values are set only in this instance. Others
    # may be inherited (for those subclasses that have that concept).
    ###

    def getTaggedValue(self, tag):
        """ Returns the value associated with 'tag'. """
        tags = self.__tagged_values
        if not tags:
            raise KeyError(tag)
        return tags[tag]

    def queryTaggedValue(self, tag, default=None):
        """ Returns the value associated with 'tag'. """
        tags = self.__tagged_values
        if not tags:
            return default
        return tags.get(tag, default)

    def getTaggedValueTags(self):
        """ Returns a collection of all tags. """
        tags = self.__tagged_values
        return tags.keys() if tags else ()

    def setTaggedValue(self, tag, value):
        """ Associates 'value' with 'key'. """
        if self.__tagged_values is None:
            self.__tagged_values = {}
        self.__tagged_values[tag] = value

    # An Element has no inheritance of its own, so "direct" tagged
    # values coincide with the plain accessors.
    queryDirectTaggedValue = queryTaggedValue
    getDirectTaggedValue = getTaggedValue
    getDirectTaggedValueTags = getTaggedValueTags
# Placeholder, rebound by the @_use_c_impl decoration below.
SpecificationBasePy = object # filled by _use_c_impl.
@_use_c_impl
class SpecificationBase(object):
    """Lowest-level specification behavior, shared (via ``_use_c_impl``)
    between the C and pure-Python implementations."""

    # This object is the base of the inheritance hierarchy for ClassProvides:
    #
    # ClassProvides < ClassProvidesBase, Declaration
    # Declaration < Specification < SpecificationBase
    # ClassProvidesBase < SpecificationBase
    #
    # In order to have compatible instance layouts, we need to declare
    # the storage used by Specification and Declaration here (and
    # those classes must have ``__slots__ = ()``); fortunately this is
    # not a waste of space because those are the only two inheritance
    # trees. These all translate into tp_members in C.
    __slots__ = (
        # Things used here.
        '_implied',
        # Things used in Specification.
        '_dependents',
        '_bases',
        '_v_attrs',
        '__iro__',
        '__sro__',
        '__weakref__',
    )

    def providedBy(self, ob):
        """Is the interface implemented by an object?
        """
        spec = providedBy(ob)
        # Membership in _implied covers both "is" and "extends".
        return self in spec._implied

    def implementedBy(self, cls):
        """Test whether the specification is implemented by a class or factory.

        Raise TypeError if argument is neither a class nor a callable.
        """
        spec = implementedBy(cls)
        return self in spec._implied

    def isOrExtends(self, interface):
        """Is the interface the same as or extend the given interface?
        """
        return interface in self._implied # pylint:disable=no-member

    # Calling a specification is the same as asking isOrExtends.
    __call__ = isOrExtends
class NameAndModuleComparisonMixin(object):
    # Internal use. Supplies the four ordering operators (but NOT
    # equality/inequality or hashing) based on the ``(__name__,
    # __module__)`` pair. Subclasses must provide those two attributes
    # and are mutually comparable. Because equality and hashing are
    # left to subclasses, take care that ``(__name__, __module__)``
    # pairs stay consistent ACROSS ALL SUBCLASSES, whether you keep the
    # default identity semantics or key equality on these attributes.
    # ``None`` is treated as a pseudo interface implying the loosest
    # possible contract, so every interface sorts before ``None``.
    # pylint:disable=assigning-non-slot
    __slots__ = ()

    def _compare(self, other):
        """
        Three-way compare *self* against *other* using
        ``(__name__, __module__)``: 0 for equal, 1 when *self* is
        greater, -1 when *self* is less.

        Returns ``NotImplemented`` when *other* lacks ``__name__`` or
        ``__module__``.

        .. caution::
           Objects well outside the type hierarchy can compare equal
           here (e.g. a class and an interface with the same name and
           module), possibly asymmetrically.
        """
        if other is self:
            return 0
        if other is None:
            # Everything sorts before None.
            return -1
        mine = (self.__name__, self.__module__)
        try:
            theirs = (other.__name__, other.__module__)
        except AttributeError:
            return NotImplemented
        if mine == theirs:
            return 0
        # cmp()-style result without cmp() (removed in Python 3).
        return 1 if mine > theirs else -1

    def __lt__(self, other):
        outcome = self._compare(other)
        return outcome if outcome is NotImplemented else outcome < 0

    def __le__(self, other):
        outcome = self._compare(other)
        return outcome if outcome is NotImplemented else outcome <= 0

    def __gt__(self, other):
        outcome = self._compare(other)
        return outcome if outcome is NotImplemented else outcome > 0

    def __ge__(self, other):
        outcome = self._compare(other)
        return outcome if outcome is NotImplemented else outcome >= 0
@_use_c_impl
class InterfaceBase(NameAndModuleComparisonMixin, SpecificationBasePy):
    """Base class that wants to be replaced with a C base :)

    Provides adaptation (``__call__``/``__adapt__``), hashing and
    equality for interfaces; the C implementation mirrors this exactly.
    """

    __slots__ = (
        '__name__',
        '__ibmodule__',
        '_v_cached_hash',
    )

    def __init__(self, name=None, module=None):
        self.__name__ = name
        # The module is stored under ``__ibmodule__`` so the metaclass can
        # expose it as ``__module__`` without fighting ``type``.
        self.__ibmodule__ = module

    def _call_conform(self, conform):
        # Subclass responsibility (see InterfaceClass._call_conform).
        raise NotImplementedError

    @property
    def __module_property__(self):
        # This is for _InterfaceMetaClass
        return self.__ibmodule__

    def __call__(self, obj, alternate=_marker):
        """Adapt an object to the interface

        Order: the object's ``__conform__``, then ``__adapt__`` (which
        consults ``adapter_hooks``), then *alternate* if given;
        otherwise raise TypeError.
        """
        try:
            conform = obj.__conform__
        except AttributeError:
            conform = None
        if conform is not None:
            adapter = self._call_conform(conform)
            if adapter is not None:
                return adapter
        adapter = self.__adapt__(obj)
        if adapter is not None:
            return adapter
        if alternate is not _marker:
            return alternate
        raise TypeError("Could not adapt", obj, self)

    def __adapt__(self, obj):
        """Adapt an object to the receiver

        Returns *obj* itself when it already provides the interface,
        otherwise the first non-None result from ``adapter_hooks``.
        """
        if self.providedBy(obj):
            return obj
        for hook in adapter_hooks:
            adapter = hook(self, obj)
            if adapter is not None:
                return adapter
        return None

    def __hash__(self):
        # The hash of (name, module) is computed once and cached in a slot.
        # pylint:disable=assigning-non-slot,attribute-defined-outside-init
        try:
            return self._v_cached_hash
        except AttributeError:
            self._v_cached_hash = hash((self.__name__, self.__module__))
        return self._v_cached_hash

    def __eq__(self, other):
        c = self._compare(other)
        if c is NotImplemented:
            return c
        return c == 0

    def __ne__(self, other):
        if other is self:
            return False
        c = self._compare(other)
        if c is NotImplemented:
            return c
        return c != 0
adapter_hooks = _use_c_impl([], 'adapter_hooks')
class Specification(SpecificationBase):
    """Specifications

    An interface specification is used to track interface declarations
    and component registrations.

    This class is a base class for both interfaces themselves and for
    interface specifications (declarations).

    Specifications are mutable. If you reassign their bases, their
    relations with other specifications are adjusted accordingly.
    """
    __slots__ = ()

    # The root of all Specifications. This will be assigned `Interface`,
    # once it is defined.
    _ROOT = None

    # Copy some base class methods for speed
    isOrExtends = SpecificationBase.isOrExtends
    providedBy = SpecificationBase.providedBy

    def __init__(self, bases=()):
        # The dependents distribution is heavily left-skewed (a survey of
        # Plone/Zope packages found most specifications have zero
        # dependents and very few have many), so creation of the
        # WeakKeyDictionary is deferred until first use.
        self._dependents = None # type: weakref.WeakKeyDictionary
        self._bases = ()
        self._implied = {}
        self._v_attrs = None
        self.__iro__ = ()
        self.__sro__ = ()
        # Assigning ``__bases__`` triggers __setBases -> changed().
        self.__bases__ = tuple(bases)

    @property
    def dependents(self):
        # Lazily materialize the weak registry of dependents.
        if self._dependents is None:
            self._dependents = weakref.WeakKeyDictionary()
        return self._dependents

    def subscribe(self, dependent):
        # NOTE: the RHS reads through the ``dependents`` property, which
        # creates ``self._dependents`` before the subscript store runs.
        self._dependents[dependent] = self.dependents.get(dependent, 0) + 1

    def unsubscribe(self, dependent):
        try:
            n = self._dependents[dependent]
        except TypeError:
            # ``self._dependents`` is still None: never subscribed.
            raise KeyError(dependent)
        n -= 1
        if not n:
            del self.dependents[dependent]
        else:
            assert n > 0
            self.dependents[dependent] = n

    def __setBases(self, bases):
        # Remove ourselves as a dependent of our old bases
        for b in self.__bases__:
            b.unsubscribe(self)
        # Register ourselves as a dependent of our new bases
        self._bases = bases
        for b in bases:
            b.subscribe(self)
        self.changed(self)

    __bases__ = property(
        lambda self: self._bases,
        __setBases,
    )

    def _calculate_sro(self):
        """
        Calculate and return the resolution order for this object, using its ``__bases__``.

        Ensures that ``Interface`` is always the last (lowest priority) element.
        """
        # With mixed class inheritance (some bases implement an
        # interface, some don't), C3 can leave Interface anywhere in the
        # middle of the order. Since Interface is the implicit catch-all
        # that everything implements, it must have the lowest priority,
        # so we force it to the end by mutating the computed order.
        # (Setting ``implementedBy(object).__bases__ = (Interface,)``
        # would be cleaner but fails for old-style classes and would make
        # the relationship look explicit instead of virtual.)
        #
        # We let C3 use the pre-computed __sro__ of our bases; this
        # requires that our bases have settled their SROs before this
        # method runs, i.e. ``changed()`` must update itself before
        # notifying descendents.
        sro = calculate_ro(self, base_mros={
            b: b.__sro__
            for b in self.__bases__
        })
        root = self._ROOT
        if root is not None and sro and sro[-1] is not root:
            # Empirically (1823 interfaces, 1117 ClassProvides), the root
            # is already last ~97% of the time, so the check is worth it.
            sro = [
                x
                for x in sro
                if x is not root
            ]
            sro.append(root)

        return sro

    def changed(self, originally_changed):
        """
        We, or something we depend on, have changed.

        By the time this is called, the things we depend on,
        such as our bases, should themselves be stable.
        """
        self._v_attrs = None

        implied = self._implied
        implied.clear()

        ancestors = self._calculate_sro()
        self.__sro__ = tuple(ancestors)
        self.__iro__ = tuple([ancestor for ancestor in ancestors
                              if isinstance(ancestor, InterfaceClass)
                              ])

        for ancestor in ancestors:
            # We directly imply our ancestors:
            implied[ancestor] = ()

        # Now, advise our dependents of change
        # (being careful not to create the WeakKeyDictionary if not needed):
        for dependent in tuple(self._dependents.keys() if self._dependents else ()):
            dependent.changed(originally_changed)

        # Just in case something called get() at some point
        # during that process and we have a cycle of some sort
        # make sure we didn't cache incomplete results.
        self._v_attrs = None

    def interfaces(self):
        """Return an iterator for the interfaces in the specification.

        Duplicates across bases are yielded only once.
        """
        seen = {}
        for base in self.__bases__:
            for interface in base.interfaces():
                if interface not in seen:
                    seen[interface] = 1
                    yield interface

    def extends(self, interface, strict=True):
        """Does the specification extend the given interface?

        Test whether an interface in the specification extends the
        given interface; with ``strict`` (the default) a specification
        does not extend itself.
        """
        return ((interface in self._implied)
                and
                ((not strict) or (self != interface))
                )

    def weakref(self, callback=None):
        # Convenience: a weak reference to this specification.
        return weakref.ref(self, callback)

    def get(self, name, default=None):
        """Query for an attribute description

        Results are cached in ``_v_attrs``; the cache is invalidated by
        ``changed()``.
        """
        attrs = self._v_attrs
        if attrs is None:
            attrs = self._v_attrs = {}
        attr = attrs.get(name)
        if attr is None:
            # Walk the interface resolution order, most specific first.
            for iface in self.__iro__:
                attr = iface.direct(name)
                if attr is not None:
                    attrs[name] = attr
                    break

        return default if attr is None else attr
class _InterfaceMetaClass(type):
    # Metaclass whose sole job is handling ``__module__`` on
    # ``InterfaceClass``.
    #
    # We want instances to store the module in the C-level slot
    # ``InterfaceBase.__ibmodule__`` (for fast equality/sorting/hashing)
    # while still reading naturally as ``__module__`` on both the class
    # and its instances. A plain property fails because ``type`` always
    # writes the defining module string into every new subclass dict
    # under ``__module__``, shadowing the inherited descriptor, so
    # ``Subclass().__module__`` breaks.
    #
    # Rejected alternatives: (1) ``__getattribute__`` interception slows
    # *all* attribute access ~25%; (2)/(3) injecting a custom data
    # descriptor (a str subclass implementing ``__get__``/``__set__``)
    # into each subclass dict works but slows ``__module__`` access
    # ~200% and is ugly; (4) a non-data descriptor plus an instance-dict
    # copy costs storage and makes ``__module__`` effectively read-only
    # anyway (which it was always meant to be -- writing it would break
    # BTrees, dicts and repr).
    #
    # A metaclass gives us both: no extra storage and C-speed access to
    # the underlying slot, at the cost of some head-scratching.
    __slots__ = ()

    def __new__(cls, name, bases, attrs):
        # Figure out what module defined the interface.
        # This is copied from ``InterfaceClass.__init__``;
        # reviewers aren't sure how AttributeError or KeyError
        # could be raised.
        __module__ = sys._getframe(1).f_globals['__name__']
        # Get the C optimized __module__ accessor and give it
        # to the new class.
        moduledescr = InterfaceBase.__dict__['__module__']
        if isinstance(moduledescr, str):
            # We're working with the Python implementation,
            # not the C version
            moduledescr = InterfaceBase.__dict__['__module_property__']
        attrs['__module__'] = moduledescr
        kind = type.__new__(cls, name, bases, attrs)
        # Private (name-mangled) storage for the class-level module name.
        kind.__module = __module__
        return kind

    @property
    def __module__(cls):
        # Class-level ``__module__`` comes from the metaclass property;
        # instance-level access goes through ``__ibmodule__``.
        return cls.__module

    def __repr__(cls):
        return "<class '%s.%s'>" % (
            cls.__module,
            cls.__name__,
        )
# The base for ``InterfaceClass``, built through the metaclass so that
# instances get the fast ``__module__`` handling described above.
_InterfaceClassBase = _InterfaceMetaClass(
    'InterfaceClass',
    # From least specific to most specific.
    (InterfaceBase, Specification, Element),
    {'__slots__': ()}
)
def interfacemethod(func):
    """
    Convert a method specification to an actual method of the interface.

    This is a decorator that functions like `staticmethod` et al.

    The primary use is to let interface definitions override methods
    such as ``__adapt__``; other interface methods can be overridden
    the same way.

    .. seealso:: `zope.interface.interfaces.IInterfaceDeclaration.interfacemethod`
    """
    # Invoked from inside a ``class`` body, so the caller's frame locals
    # are the interface's attrs dict under construction.
    class_locals = sys._getframe(1).f_locals
    registry = class_locals.setdefault(INTERFACE_METHODS, {})
    registry[func.__name__] = func
    return _decorator_non_return
class InterfaceClass(_InterfaceClassBase):
    """
    Prototype (scarecrow) Interfaces Implementation.

    Note that it is not possible to change the ``__name__`` or ``__module__``
    after an instance of this object has been constructed.
    """

    # We can't say this yet because we don't have enough
    # infrastructure in place.
    #
    #implements(IInterface)

    def __new__(cls, name=None, bases=(), attrs=None, __doc__=None, # pylint:disable=redefined-builtin
                __module__=None):
        # When the definition carries ``interfacemethod`` overrides, a
        # dynamic subclass mixing in _InterfaceClassWithCustomMethods is
        # synthesized so the overrides become real methods.
        assert isinstance(bases, tuple)
        attrs = attrs or {}
        needs_custom_class = attrs.pop(INTERFACE_METHODS, None)
        if needs_custom_class:
            # Preserve the class cell (needed for zero-arg super()).
            needs_custom_class.update(
                {'__classcell__': attrs.pop('__classcell__')}
                if '__classcell__' in attrs
                else {}
            )
            if '__adapt__' in needs_custom_class:
                # We need to tell the C code to call this.
                needs_custom_class['_CALL_CUSTOM_ADAPT'] = 1

            if issubclass(cls, _InterfaceClassWithCustomMethods):
                cls_bases = (cls,)
            elif cls is InterfaceClass:
                cls_bases = (_InterfaceClassWithCustomMethods,)
            else:
                cls_bases = (cls, _InterfaceClassWithCustomMethods)

            cls = type(cls)( # pylint:disable=self-cls-assignment
                name + "<WithCustomMethods>",
                cls_bases,
                needs_custom_class
            )
        elif PY2 and bases and len(bases) > 1:
            bases_with_custom_methods = tuple(
                type(b)
                for b in bases
                if issubclass(type(b), _InterfaceClassWithCustomMethods)
            )
            # Python 3 picks the most derived metatype as *cls*
            # automatically; Python 2 just passes the first base's type.
            # With multiple inheritance where a base already defines a
            # custom method (e.g. ``__adapt__``), we must run the MRO
            # algorithm ourselves to find the most derived base. This
            # only works for consistent resolution orders.
            if bases_with_custom_methods:
                cls = type( # pylint:disable=self-cls-assignment
                    name + "<WithCustomMethods>",
                    bases_with_custom_methods,
                    {}
                ).__mro__[1] # Not the class we created, the most derived.

        return _InterfaceClassBase.__new__(cls)

    def __init__(self, name, bases=(), attrs=None, __doc__=None, # pylint:disable=redefined-builtin
                 __module__=None):
        # We don't call our metaclass parent directly
        # pylint:disable=non-parent-init-called
        # pylint:disable=super-init-not-called
        if not all(isinstance(base, InterfaceClass) for base in bases):
            raise TypeError('Expected base interfaces')

        if attrs is None:
            attrs = {}

        if __module__ is None:
            __module__ = attrs.get('__module__')
            if isinstance(__module__, str):
                del attrs['__module__']
            else:
                try:
                    # Figure out what module defined the interface.
                    # This is how cPython figures out the module of
                    # a class, but of course it does it in C. :-/
                    __module__ = sys._getframe(1).f_globals['__name__']
                except (AttributeError, KeyError): # pragma: no cover
                    pass

        InterfaceBase.__init__(self, name, __module__)
        # These asserts assisted debugging the metaclass
        # assert '__module__' not in self.__dict__
        # assert self.__ibmodule__ is self.__module__ is __module__

        # A plain-string ``__doc__`` in attrs is the interface's
        # docstring, not an attribute description; pull it out.
        d = attrs.get('__doc__')
        if d is not None:
            if not isinstance(d, Attribute):
                if __doc__ is None:
                    __doc__ = d
                del attrs['__doc__']

        if __doc__ is None:
            __doc__ = ''

        Element.__init__(self, name, __doc__)

        tagged_data = attrs.pop(TAGGED_DATA, None)
        if tagged_data is not None:
            for key, val in tagged_data.items():
                self.setTaggedValue(key, val)

        Specification.__init__(self, bases)
        self.__attrs = self.__compute_attrs(attrs)

        self.__identifier__ = "%s.%s" % (__module__, name)

    def __compute_attrs(self, attrs):
        # Make sure that all recorded attributes (and methods) are of type
        # `Attribute` and `Method`
        def update_value(aname, aval):
            if isinstance(aval, Attribute):
                aval.interface = self
                if not aval.__name__:
                    aval.__name__ = aname
            elif isinstance(aval, FunctionType):
                aval = fromFunction(aval, self, name=aname)
            else:
                raise InvalidInterface("Concrete attribute, " + aname)
            return aval

        return {
            aname: update_value(aname, aval)
            for aname, aval in attrs.items()
            if aname not in (
                # __locals__: Python 3 sometimes adds this.
                '__locals__',
                # __qualname__: PEP 3155 (Python 3.3+)
                '__qualname__',
                # __annotations__: PEP 3107 (Python 3.0+)
                '__annotations__',
            )
            and aval is not _decorator_non_return
        }

    def interfaces(self):
        """Return an iterator for the interfaces in the specification.

        For an interface, that is just the interface itself.
        """
        yield self

    def getBases(self):
        # The directly-extended interfaces.
        return self.__bases__

    def isEqualOrExtendedBy(self, other):
        """Same interface or extends?"""
        return self == other or other.extends(self)

    def names(self, all=False): # pylint:disable=redefined-builtin
        """Return the attribute names defined by the interface.

        With ``all``, names inherited from base interfaces are included.
        """
        if not all:
            return self.__attrs.keys()
        r = self.__attrs.copy()
        for base in self.__bases__:
            r.update(dict.fromkeys(base.names(all)))
        return r.keys()

    def __iter__(self):
        return iter(self.names(all=True))

    def namesAndDescriptions(self, all=False): # pylint:disable=redefined-builtin
        """Return attribute names and descriptions defined by interface.

        Bases are merged least-specific first so more specific
        definitions win.
        """
        if not all:
            return self.__attrs.items()
        r = {}
        for base in self.__bases__[::-1]:
            r.update(dict(base.namesAndDescriptions(all)))
        r.update(self.__attrs)
        return r.items()

    def getDescriptionFor(self, name):
        """Return the attribute description for the given name."""
        r = self.get(name)
        if r is not None:
            return r
        raise KeyError(name)

    __getitem__ = getDescriptionFor

    def __contains__(self, name):
        return self.get(name) is not None

    def direct(self, name):
        # Only attributes defined directly on this interface (no bases).
        return self.__attrs.get(name)

    def queryDescriptionFor(self, name, default=None):
        return self.get(name, default)

    def validateInvariants(self, obj, errors=None):
        """validate object to defined invariants.

        When *errors* is a list, all `Invalid` errors are collected into
        it and raised together; otherwise the first one propagates.
        """
        for iface in self.__iro__:
            for invariant in iface.queryDirectTaggedValue('invariants', ()):
                try:
                    invariant(obj)
                except Invalid as error:
                    if errors is not None:
                        errors.append(error)
                    else:
                        raise
        if errors:
            raise Invalid(errors)

    def queryTaggedValue(self, tag, default=None):
        """
        Queries for the value associated with *tag*, returning it from the nearest
        interface in the ``__iro__``.

        If not found, returns *default*.
        """
        for iface in self.__iro__:
            value = iface.queryDirectTaggedValue(tag, _marker)
            if value is not _marker:
                return value
        return default

    def getTaggedValue(self, tag):
        """ Returns the value associated with 'tag'. """
        value = self.queryTaggedValue(tag, default=_marker)
        if value is _marker:
            raise KeyError(tag)
        return value

    def getTaggedValueTags(self):
        """ Returns a list of all tags. """
        keys = set()
        for base in self.__iro__:
            keys.update(base.getDirectTaggedValueTags())
        return keys

    def __repr__(self): # pragma: no cover
        # Computed once and cached; interfaces are effectively immutable.
        try:
            return self._v_repr
        except AttributeError:
            name = self.__name__
            m = self.__ibmodule__
            if m:
                name = '%s.%s' % (m, name)
            r = "<%s %s>" % (self.__class__.__name__, name)
            self._v_repr = r # pylint:disable=attribute-defined-outside-init
            return r

    def _call_conform(self, conform):
        try:
            return conform(self)
        except TypeError: # pragma: no cover
            # We got a TypeError. It might be an error raised by
            # the __conform__ implementation, or *we* may have
            # made the TypeError by calling an unbound method
            # (object is a class). In the later case, we behave
            # as though there is no __conform__ method. We can
            # detect this case by checking whether there is more
            # than one traceback object in the traceback chain:
            if sys.exc_info()[2].tb_next is not None:
                # There is more than one entry in the chain, so
                # reraise the error:
                raise
            # This clever trick is from Phillip Eby
        return None # pragma: no cover

    def __reduce__(self):
        # Pickle interfaces by (global) name.
        return self.__name__
# Bootstrap the root interface. ``Interface`` is its own sole ancestor,
# so its SRO computation is special-cased before triggering ``changed``.
Interface = InterfaceClass("Interface", __module__='zope.interface')
# Interface is the only member of its own SRO.
Interface._calculate_sro = lambda: (Interface,)
Interface.changed(Interface)
assert Interface.__sro__ == (Interface,)
# From here on, every specification's resolution order ends with Interface.
Specification._ROOT = Interface
ro._ROOT = Interface
class _InterfaceClassWithCustomMethods(InterfaceClass):
    """
    Marker class for interfaces with custom methods that override InterfaceClass methods.

    Mixed into dynamically created classes by ``InterfaceClass.__new__``
    when an interface definition uses ``@interfacemethod``.
    """
class Attribute(Element):
    """Attribute descriptions

    Default implementation of `zope.interface.interfaces.IAttribute`.
    """

    # We can't declare ``implements(IAttribute)`` yet; see ``_wire``.

    # Set to the defining interface by ``InterfaceClass.__compute_attrs``.
    interface = None

    def _get_str_info(self):
        """Return extra data to put at the end of __str__."""
        return ""

    def __str__(self):
        prefix = ''
        iface = self.interface
        if iface is not None:
            prefix = iface.__module__ + '.' + iface.__name__ + '.'
        # ``__name__`` may still be None while the element is being built
        # (e.g. when debugging mid-construction).
        return prefix + (self.__name__ or '<unknown>') + self._get_str_info()

    def __repr__(self):
        kind = type(self)
        return "<%s.%s object at 0x%x %s>" % (
            kind.__module__,
            kind.__name__,
            id(self),
            self,
        )
class Method(Attribute):
    """Method interfaces

    Objects describing methods, carrying rich signature meta-data:
    positional/required/optional arguments and the ``*args``/``**kwargs``
    names.
    """

    # We can't declare ``implements(IMethod)`` yet; see ``_wire``.

    positional = required = ()
    _optional = varargs = kwargs = None

    def _get_optional(self):
        # Lazily pretend an empty dict when never set.
        return {} if self._optional is None else self._optional

    def _set_optional(self, opt):
        self._optional = opt

    def _del_optional(self):
        self._optional = None

    optional = property(_get_optional, _set_optional, _del_optional)

    def __call__(self, *args, **kw):
        # A Method description is not callable; signal the broken usage.
        raise BrokenImplementation(self.interface, self.__name__)

    def getSignatureInfo(self):
        """Return a dict describing the method signature."""
        return {
            'positional': self.positional,
            'required': self.required,
            'optional': self.optional,
            'varargs': self.varargs,
            'kwargs': self.kwargs,
        }

    def getSignatureString(self):
        """Render the signature as source text, e.g. ``(a, b=1, *args)``."""
        parts = []
        optional = self.optional
        for arg in self.positional:
            if arg in optional:
                parts.append("%s=%r" % (arg, optional[arg]))
            else:
                parts.append(arg)
        if self.varargs:
            parts.append("*" + self.varargs)
        if self.kwargs:
            parts.append("**" + self.kwargs)
        return "(%s)" % ", ".join(parts)

    _get_str_info = getSignatureString
def fromFunction(func, interface=None, imlevel=0, name=None):
    """Build a ``Method`` description by introspecting *func*.

    :param func: the function to describe.
    :param interface: the interface the method belongs to (may be None).
    :param imlevel: number of leading arguments to skip (1 for ``self``).
    :param name: overrides ``func.__name__`` as the method name.
    """
    method = Method(name or func.__name__, func.__doc__)
    defaults = getattr(func, '__defaults__', None) or ()
    code = func.__code__
    # Positional argument count and names after dropping ``imlevel``.
    positional_count = code.co_argcount - imlevel
    varnames = code.co_varnames[imlevel:]
    defaults_count = len(defaults)
    if not defaults_count:
        # PyPy3 exposes ``__defaults_count__`` for builtin methods like
        # ``dict.pop`` that, surprisingly, have no recorded __defaults__.
        defaults_count = getattr(func, '__defaults_count__', 0)
    required_count = positional_count - defaults_count
    if required_count < 0:
        # More defaults than positionals after skipping; trim the extras.
        defaults = defaults[-required_count:]
        required_count = 0
    method.positional = varnames[:positional_count]
    method.required = varnames[:required_count]
    # Pair each optional argument name with its default value.
    method.optional = dict(zip(varnames[required_count:], defaults))
    cursor = positional_count
    # The *args name, if the function takes one.
    if code.co_flags & CO_VARARGS:
        method.varargs = varnames[cursor]
        cursor += 1
    else:
        method.varargs = None
    # The **kwargs name, if the function takes one.
    if code.co_flags & CO_VARKEYWORDS:
        method.kwargs = varnames[cursor]
    else:
        method.kwargs = None
    method.interface = interface
    # Copy function attributes over as tagged values.
    for key, value in func.__dict__.items():
        method.setTaggedValue(key, value)
    return method
def fromMethod(meth, interface=None, name=None):
    """Build a ``Method`` description from a (possibly bound) method.

    The implicit ``self`` argument is skipped via ``imlevel=1``.
    """
    func = meth.__func__ if isinstance(meth, MethodType) else meth
    return fromFunction(func, interface, imlevel=1, name=name)
# Now we can create the interesting interfaces and wire them up:
def _wire():
    # Declare, after the fact, which interfaces this module's own classes
    # implement. Called once the declaration machinery is importable.
    from zope.interface.declarations import classImplements
    # From least specific to most specific.
    from zope.interface.interfaces import IElement
    classImplements(Element, IElement)

    from zope.interface.interfaces import IAttribute
    classImplements(Attribute, IAttribute)

    from zope.interface.interfaces import IMethod
    classImplements(Method, IMethod)

    from zope.interface.interfaces import ISpecification
    classImplements(Specification, ISpecification)

    from zope.interface.interfaces import IInterface
    classImplements(InterfaceClass, IInterface)

# We import this here to deal with module dependencies.
# pylint:disable=wrong-import-position
from zope.interface.declarations import implementedBy
from zope.interface.declarations import providedBy
from zope.interface.exceptions import InvalidInterface
from zope.interface.exceptions import BrokenImplementation

# This ensures that ``Interface`` winds up in the flattened()
# list of the immutable declaration. It correctly overrides changed()
# as a no-op, so we bypass that.
from zope.interface.declarations import _empty
Specification.changed(_empty, _empty)
|
src/zope/interface/interface.py
|
codereval_python_data_41
|
Return the attribute names defined by the interface.
def names(self, all=False): # pylint:disable=redefined-builtin
    """Return the attribute names defined by the interface.

    :param all: when true, also include names inherited from base
        interfaces (``self.__bases__``).
    :return: a dict-keys view of the attribute names.
    """
    if not all:
        return self.__attrs.keys()
    r = self.__attrs.copy()
    # ``fromkeys`` merges just the names; the mapped values are unused.
    for base in self.__bases__:
        r.update(dict.fromkeys(base.names(all)))
    return r.keys()
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interface object implementation
"""
# pylint:disable=protected-access
import sys
from types import MethodType
from types import FunctionType
import weakref
from zope.interface._compat import _use_c_impl
from zope.interface._compat import PYTHON2 as PY2
from zope.interface.exceptions import Invalid
from zope.interface.ro import ro as calculate_ro
from zope.interface import ro
__all__ = [
    # Most of the public API from this module is directly exported
    # from zope.interface. The only remaining public API intended to
    # be imported from here should be those few things documented as
    # such.
    'InterfaceClass',
    'Specification',
    'adapter_hooks',
]

# Code-object flag bits (CPython ``co_flags``) used by ``fromFunction``
# to detect *args / **kwargs parameters.
CO_VARARGS = 4
CO_VARKEYWORDS = 8

# Put in the attrs dict of an interface by ``taggedValue`` and ``invariants``
TAGGED_DATA = '__interface_tagged_values__'
# Put in the attrs dict of an interface by ``interfacemethod``
INTERFACE_METHODS = '__interface_methods__'

# Sentinel returned by the in-class-body decorators so the name is
# discarded from the interface's attributes.
_decorator_non_return = object()
# Private sentinel distinguishing "not passed" from ``None``.
_marker = object()
def invariant(call):
    """Register *call* as an invariant of the interface being defined."""
    # Invoked from inside a ``class`` body: frame 1's locals are the
    # interface's attrs dict under construction.
    defining_locals = sys._getframe(1).f_locals
    tagged = defining_locals.setdefault(TAGGED_DATA, {})
    tagged.setdefault('invariants', []).append(call)
    return _decorator_non_return
def taggedValue(key, value):
    """Attaches a tagged value to an interface at definition time."""
    # As with ``invariant``, the caller's frame is the class body.
    defining_locals = sys._getframe(1).f_locals
    store = defining_locals.setdefault(TAGGED_DATA, {})
    store[key] = value
    return _decorator_non_return
class Element(object):
    """
    Default implementation of `zope.interface.interfaces.IElement`.

    An element carries a name, a docstring and an optional, lazily
    created mapping of tagged values.
    """

    # We can't declare ``implements(IElement)`` yet; the wiring happens
    # later, in ``_wire``.

    def __init__(self, __name__, __doc__=''): # pylint:disable=redefined-builtin
        # A "name" containing a space is really a docstring in disguise.
        if not __doc__ and __name__.find(' ') >= 0:
            __name__, __doc__ = None, __name__
        self.__name__ = __name__
        self.__doc__ = __doc__
        # Tagged values are rare (especially on methods/attributes), so
        # defer allocating the dict until one is actually set.
        self.__tagged_values = None

    def getName(self):
        """Return the name of the element."""
        return self.__name__

    def getDoc(self):
        """Return the documentation string of the element."""
        return self.__doc__

    ###
    # Tagged values: set directly on this instance only. Subclasses with
    # a notion of inheritance override the non-"direct" accessors.
    ###

    def getTaggedValue(self, tag):
        """Return the value for *tag*; raise ``KeyError`` when unset."""
        tags = self.__tagged_values
        if not tags:
            raise KeyError(tag)
        return tags[tag]

    def queryTaggedValue(self, tag, default=None):
        """Return the value for *tag*, or *default* when unset."""
        tags = self.__tagged_values
        return tags.get(tag, default) if tags else default

    def getTaggedValueTags(self):
        """Return a collection of all tag names."""
        tags = self.__tagged_values
        return tags.keys() if tags else ()

    def setTaggedValue(self, tag, value):
        """Associate *value* with *tag*."""
        if self.__tagged_values is None:
            self.__tagged_values = {}
        self.__tagged_values[tag] = value

    # ``Element`` itself has no inherited tagged values, so the "direct"
    # accessors are plain aliases.
    queryDirectTaggedValue = queryTaggedValue
    getDirectTaggedValue = getTaggedValue
    getDirectTaggedValueTags = getTaggedValueTags
SpecificationBasePy = object # filled by _use_c_impl.
@_use_c_impl
class SpecificationBase(object):
    # This object is the base of the inheritance hierarchy for ClassProvides:
    #
    # ClassProvides < ClassProvidesBase, Declaration
    # Declaration < Specification < SpecificationBase
    # ClassProvidesBase < SpecificationBase
    #
    # In order to have compatible instance layouts, we need to declare
    # the storage used by Specification and Declaration here (and
    # those classes must have ``__slots__ = ()``); fortunately this is
    # not a waste of space because those are the only two inheritance
    # trees. These all translate into tp_members in C.
    __slots__ = (
        # Things used here.
        # ``_implied`` maps every specification this one implies (itself
        # and its transitive ancestors -- see ``Specification.changed``)
        # to ``()``; it is used as a set for O(1) ``in`` tests below.
        '_implied',
        # Things used in Specification.
        '_dependents',
        '_bases',
        '_v_attrs',
        '__iro__',
        '__sro__',
        '__weakref__',
    )
    def providedBy(self, ob):
        """Is the interface implemented by an object

        Delegates to the module-level ``providedBy()`` to obtain the
        object's provided-spec, then tests implication membership.
        """
        spec = providedBy(ob)
        return self in spec._implied
    def implementedBy(self, cls):
        """Test whether the specification is implemented by a class or factory.
        Raise TypeError if argument is neither a class nor a callable.
        """
        # The module-level ``implementedBy`` raises the TypeError noted above.
        spec = implementedBy(cls)
        return self in spec._implied
    def isOrExtends(self, interface):
        """Is the interface the same as or extend the given interface
        """
        return interface in self._implied # pylint:disable=no-member
    # Calling a specification is the same as asking ``isOrExtends``.
    __call__ = isOrExtends
class NameAndModuleComparisonMixin(object):
    # Internal use. Provides the four ordering operators (but neither
    # (in)equality nor hashing). Subclasses must expose ``__name__`` and
    # ``__module__`` attributes, and all subclasses are mutually
    # comparable. Because equality/hashing are not supplied here, be
    # careful how you define them: whether identity based (the default)
    # or based on these two attributes, all (__name__, __module__) pairs
    # must stay consistent ACROSS ALL SUBCLASSES.
    # pylint:disable=assigning-non-slot
    __slots__ = ()

    def _compare(self, other):
        """
        Compare *self* to *other* using ``__name__`` and ``__module__``.

        Returns 0 when equal, 1 when *self* sorts after *other*, and -1
        when *self* sorts before *other*. Returns ``NotImplemented`` when
        *other* lacks ``__name__`` or ``__module__``.

        .. caution::
            This allows comparison to things well outside the type
            hierarchy, perhaps not symmetrically: e.g.
            ``class Foo(object)`` and ``class Foo(Interface)`` in the
            same file compare equal, depending on operand order.

        ``None`` is treated as a pseudo interface implying the loosest
        contract possible; therefore all interfaces sort before ``None``.
        """
        if other is self:
            return 0
        if other is None:
            return -1
        my_key = (self.__name__, self.__module__)
        try:
            other_key = (other.__name__, other.__module__)
        except AttributeError:
            return NotImplemented
        if my_key == other_key:
            return 0
        return 1 if my_key > other_key else -1

    def __lt__(self, other):
        rank = self._compare(other)
        return rank if rank is NotImplemented else rank < 0

    def __le__(self, other):
        rank = self._compare(other)
        return rank if rank is NotImplemented else rank <= 0

    def __gt__(self, other):
        rank = self._compare(other)
        return rank if rank is NotImplemented else rank > 0

    def __ge__(self, other):
        rank = self._compare(other)
        return rank if rank is NotImplemented else rank >= 0
@_use_c_impl
class InterfaceBase(NameAndModuleComparisonMixin, SpecificationBasePy):
    """Base class that wants to be replaced with a C base :)
    """
    __slots__ = (
        '__name__',
        # Spelled ``__ibmodule__`` (not ``__module__``) so both the C and
        # Python implementations can expose ``__module__`` via a
        # property/descriptor; see ``_InterfaceMetaClass``.
        '__ibmodule__',
        # Lazily computed hash cache; see ``__hash__``.
        '_v_cached_hash',
    )
    def __init__(self, name=None, module=None):
        self.__name__ = name
        self.__ibmodule__ = module
    def _call_conform(self, conform):
        # Subclass responsibility; ``InterfaceClass`` implements it.
        raise NotImplementedError
    @property
    def __module_property__(self):
        # This is for _InterfaceMetaClass
        return self.__ibmodule__
    def __call__(self, obj, alternate=_marker):
        """Adapt an object to the interface

        Tries, in order: the object's ``__conform__`` hook, our
        ``__adapt__``, then *alternate* (if given); otherwise raises
        TypeError.
        """
        try:
            conform = obj.__conform__
        except AttributeError:
            conform = None
        if conform is not None:
            adapter = self._call_conform(conform)
            if adapter is not None:
                return adapter
        adapter = self.__adapt__(obj)
        if adapter is not None:
            return adapter
        if alternate is not _marker:
            return alternate
        raise TypeError("Could not adapt", obj, self)
    def __adapt__(self, obj):
        """Adapt an object to the receiver

        Returns *obj* unchanged when it already provides the interface;
        otherwise asks each registered ``adapter_hooks`` entry in order,
        returning the first non-None result, else None.
        """
        if self.providedBy(obj):
            return obj
        for hook in adapter_hooks:
            adapter = hook(self, obj)
            if adapter is not None:
                return adapter
        return None
    def __hash__(self):
        # pylint:disable=assigning-non-slot,attribute-defined-outside-init
        # Hash on (name, module), cached in a slot because interfaces are
        # used heavily as dict keys.
        try:
            return self._v_cached_hash
        except AttributeError:
            self._v_cached_hash = hash((self.__name__, self.__module__))
        return self._v_cached_hash
    def __eq__(self, other):
        # Equality follows the same (name, module) key as the ordering
        # operators inherited from NameAndModuleComparisonMixin.
        c = self._compare(other)
        if c is NotImplemented:
            return c
        return c == 0
    def __ne__(self, other):
        if other is self:
            return False
        c = self._compare(other)
        if c is NotImplemented:
            return c
        return c != 0
# Global list of adaptation hook functions ``hook(interface, obj)``
# consulted by ``InterfaceBase.__adapt__``; ``_use_c_impl`` substitutes
# the C-backed list when the C extension is in use.
adapter_hooks = _use_c_impl([], 'adapter_hooks')
class Specification(SpecificationBase):
    """Specifications
    An interface specification is used to track interface declarations
    and component registrations.
    This class is a base class for both interfaces themselves and for
    interface specifications (declarations).
    Specifications are mutable. If you reassign their bases, their
    relations with other specifications are adjusted accordingly.
    """
    __slots__ = ()
    # The root of all Specifications. This will be assigned `Interface`,
    # once it is defined.
    _ROOT = None
    # Copy some base class methods for speed
    isOrExtends = SpecificationBase.isOrExtends
    providedBy = SpecificationBase.providedBy
    def __init__(self, bases=()):
        # There are many leaf interfaces with no dependents,
        # and a few with very many. It's a heavily left-skewed
        # distribution. In a survey of Plone and Zope related packages
        # that loaded 2245 InterfaceClass objects and 2235 ClassProvides
        # instances, there were a total of 7000 Specification objects created.
        # 4700 had 0 dependents, 1400 had 1, 382 had 2 and so on. Only one
        # for <type> had 1664. So there's savings to be had deferring
        # the creation of dependents.
        self._dependents = None # type: weakref.WeakKeyDictionary
        self._bases = ()
        self._implied = {}
        self._v_attrs = None
        self.__iro__ = ()
        self.__sro__ = ()
        # Runs the ``__bases__`` property setter below, which subscribes
        # us to our bases and computes the initial resolution orders.
        self.__bases__ = tuple(bases)
    @property
    def dependents(self):
        # Lazily materialized mapping dependent -> subscription count.
        # Weak keys, so we do not keep dependents alive.
        if self._dependents is None:
            self._dependents = weakref.WeakKeyDictionary()
        return self._dependents
    def subscribe(self, dependent):
        """Register *dependent* to be notified through ``changed()``."""
        # NB: the right-hand side is evaluated first and goes through the
        # ``dependents`` property, materializing the WeakKeyDictionary
        # before the subscript assignment on the left reads ``_dependents``.
        self._dependents[dependent] = self.dependents.get(dependent, 0) + 1
    def unsubscribe(self, dependent):
        """Drop one subscription of *dependent*; raise KeyError if absent."""
        try:
            n = self._dependents[dependent]
        except TypeError:
            # ``_dependents`` is still None: nothing ever subscribed.
            raise KeyError(dependent)
        n -= 1
        if not n:
            del self.dependents[dependent]
        else:
            assert n > 0
            self.dependents[dependent] = n
    def __setBases(self, bases):
        # Private setter backing the ``__bases__`` property below.
        # Remove ourselves as a dependent of our old bases
        for b in self.__bases__:
            b.unsubscribe(self)
        # Register ourselves as a dependent of our new bases
        self._bases = bases
        for b in bases:
            b.subscribe(self)
        self.changed(self)
    __bases__ = property(
        lambda self: self._bases,
        __setBases,
    )
    def _calculate_sro(self):
        """
        Calculate and return the resolution order for this object, using its ``__bases__``.
        Ensures that ``Interface`` is always the last (lowest priority) element.
        """
        # We'd like to make Interface the lowest priority as a
        # property of the resolution order algorithm. That almost
        # works out naturally, but it fails when class inheritance has
        # some bases that DO implement an interface, and some that DO
        # NOT. In such a mixed scenario, you wind up with a set of
        # bases to consider that look like this: [[..., Interface],
        # [..., object], ...]. Depending on the order if inheritance,
        # Interface can wind up before or after object, and that can
        # happen at any point in the tree, meaning Interface can wind
        # up somewhere in the middle of the order. Since Interface is
        # treated as something that everything winds up implementing
        # anyway (a catch-all for things like adapters), having it high up
        # the order is bad. It's also bad to have it at the end, just before
        # some concrete class: concrete classes should be HIGHER priority than
        # interfaces (because there's only one class, but many implementations).
        #
        # One technically nice way to fix this would be to have
        # ``implementedBy(object).__bases__ = (Interface,)``
        #
        # But: (1) That fails for old-style classes and (2) that causes
        # everything to appear to *explicitly* implement Interface, when up
        # to this point it's been an implicit virtual sort of relationship.
        #
        # So we force the issue by mutating the resolution order.
        # Note that we let C3 use pre-computed __sro__ for our bases.
        # This requires that by the time this method is invoked, our bases
        # have settled their SROs. Thus, ``changed()`` must first
        # update itself before telling its descendents of changes.
        sro = calculate_ro(self, base_mros={
            b: b.__sro__
            for b in self.__bases__
        })
        root = self._ROOT
        if root is not None and sro and sro[-1] is not root:
            # In one dataset of 1823 Interface objects, 1117 ClassProvides objects,
            # sro[-1] was root 4496 times, and only not root 118 times. So it's
            # probably worth checking.
            # Once we don't have to deal with old-style classes,
            # we can add a check and only do this if base_count > 1,
            # if we tweak the bootstrapping for ``<implementedBy object>``
            sro = [
                x
                for x in sro
                if x is not root
            ]
            sro.append(root)
        return sro
    def changed(self, originally_changed):
        """
        We, or something we depend on, have changed.
        By the time this is called, the things we depend on,
        such as our bases, should themselves be stable.

        Recomputes ``__sro__``, ``__iro__`` and ``_implied``, then
        notifies all dependents recursively.
        """
        self._v_attrs = None
        implied = self._implied
        implied.clear()
        ancestors = self._calculate_sro()
        self.__sro__ = tuple(ancestors)
        # The *interface* resolution order keeps only actual interfaces.
        self.__iro__ = tuple([ancestor for ancestor in ancestors
                              if isinstance(ancestor, InterfaceClass)
                              ])
        for ancestor in ancestors:
            # We directly imply our ancestors:
            implied[ancestor] = ()
        # Now, advise our dependents of change
        # (being careful not to create the WeakKeyDictionary if not needed):
        for dependent in tuple(self._dependents.keys() if self._dependents else ()):
            dependent.changed(originally_changed)
        # Just in case something called get() at some point
        # during that process and we have a cycle of some sort
        # make sure we didn't cache incomplete results.
        self._v_attrs = None
    def interfaces(self):
        """Return an iterator for the interfaces in the specification.

        Each interface is yielded at most once, in base order.
        """
        seen = {}
        for base in self.__bases__:
            for interface in base.interfaces():
                if interface not in seen:
                    seen[interface] = 1
                    yield interface
    def extends(self, interface, strict=True):
        """Does the specification extend the given interface?
        Test whether an interface in the specification extends the
        given interface

        With ``strict`` true (the default), a specification does not
        extend itself.
        """
        return ((interface in self._implied)
                and
                ((not strict) or (self != interface))
                )
    def weakref(self, callback=None):
        # Note: inside this method body, ``weakref`` still resolves to
        # the module-level import, not to this method.
        return weakref.ref(self, callback)
    def get(self, name, default=None):
        """Query for an attribute description

        Looks ``name`` up through the interface resolution order and
        caches the first hit in ``_v_attrs``.
        """
        attrs = self._v_attrs
        if attrs is None:
            attrs = self._v_attrs = {}
        attr = attrs.get(name)
        if attr is None:
            for iface in self.__iro__:
                attr = iface.direct(name)
                if attr is not None:
                    attrs[name] = attr
                    break
        return default if attr is None else attr
class _InterfaceMetaClass(type):
    # Handling ``__module__`` on ``InterfaceClass`` is tricky. We need
    # to be able to read it on a type and get the expected string. We
    # also need to be able to set it on an instance and get the value
    # we set. So far so good. But what gets tricky is that we'd like
    # to store the value in the C structure (``InterfaceBase.__ibmodule__``) for
    # direct access during equality, sorting, and hashing. "No
    # problem, you think, I'll just use a property" (well, the C
    # equivalents, ``PyMemberDef`` or ``PyGetSetDef``).
    #
    # Except there is a problem. When a subclass is created, the
    # metaclass (``type``) always automatically puts the expected
    # string in the class's dictionary under ``__module__``, thus
    # overriding the property inherited from the superclass. Writing
    # ``Subclass.__module__`` still works, but
    # ``Subclass().__module__`` fails.
    #
    # There are multiple ways to work around this:
    #
    # (1) Define ``InterfaceBase.__getattribute__`` to watch for
    # ``__module__`` and return the C storage.
    #
    # This works, but slows down *all* attribute access (except,
    # ironically, to ``__module__``) by about 25% (40ns becomes 50ns)
    # (when implemented in C). Since that includes methods like
    # ``providedBy``, that's probably not acceptable.
    #
    # All the other methods involve modifying subclasses. This can be
    # done either on the fly in some cases, as instances are
    # constructed, or by using a metaclass. These next few can be done on the fly.
    #
    # (2) Make ``__module__`` a descriptor in each subclass dictionary.
    # It can't be a straight up ``@property`` descriptor, though, because accessing
    # it on the class returns a ``property`` object, not the desired string.
    #
    # (3) Implement a data descriptor (``__get__`` and ``__set__``)
    # that is both a subclass of string, and also does the redirect of
    # ``__module__`` to ``__ibmodule__`` and does the correct thing
    # with the ``instance`` argument to ``__get__`` is None (returns
    # the class's value.) (Why must it be a subclass of string? Because
    # when it' s in the class's dict, it's defined on an *instance* of the
    # metaclass; descriptors in an instance's dict aren't honored --- their
    # ``__get__`` is never invoked --- so it must also *be* the value we want
    # returned.)
    #
    # This works, preserves the ability to read and write
    # ``__module__``, and eliminates any penalty accessing other
    # attributes. But it slows down accessing ``__module__`` of
    # instances by 200% (40ns to 124ns), requires editing class dicts on the fly
    # (in InterfaceClass.__init__), thus slightly slowing down all interface creation,
    # and is ugly.
    #
    # (4) As in the last step, but make it a non-data descriptor (no ``__set__``).
    #
    # If you then *also* store a copy of ``__ibmodule__`` in
    # ``__module__`` in the instance's dict, reading works for both
    # class and instance and is full speed for instances. But the cost
    # is storage space, and you can't write to it anymore, not without
    # things getting out of sync.
    #
    # (Actually, ``__module__`` was never meant to be writable. Doing
    # so would break BTrees and normal dictionaries, as well as the
    # repr, maybe more.)
    #
    # That leaves us with a metaclass. (Recall that a class is an
    # instance of its metaclass, so properties/descriptors defined in
    # the metaclass are used when accessing attributes on the
    # instance/class. We'll use that to define ``__module__``.) Here
    # we can have our cake and eat it too: no extra storage, and
    # C-speed access to the underlying storage. The only substantial
    # cost is that metaclasses tend to make people's heads hurt. (But
    # still less than the descriptor-is-string, hopefully.)
    __slots__ = ()
    def __new__(cls, name, bases, attrs):
        """Create the class, replacing ``__module__`` in *attrs* with the
        descriptor that redirects reads to ``__ibmodule__``."""
        # Figure out what module defined the interface.
        # This is copied from ``InterfaceClass.__init__``;
        # reviewers aren't sure how AttributeError or KeyError
        # could be raised.
        __module__ = sys._getframe(1).f_globals['__name__']
        # Get the C optimized __module__ accessor and give it
        # to the new class.
        moduledescr = InterfaceBase.__dict__['__module__']
        if isinstance(moduledescr, str):
            # We're working with the Python implementation,
            # not the C version
            moduledescr = InterfaceBase.__dict__['__module_property__']
        attrs['__module__'] = moduledescr
        kind = type.__new__(cls, name, bases, attrs)
        # Name-mangled to ``_InterfaceMetaClass__module``; read back by the
        # ``__module__`` property below.
        kind.__module = __module__
        return kind
    @property
    def __module__(cls):
        # Reading ``__module__`` on a class created by this metaclass
        # returns the defining module captured in ``__new__``.
        return cls.__module
    def __repr__(cls):
        return "<class '%s.%s'>" % (
            cls.__module,
            cls.__name__,
        )
# Build the actual base class of ``InterfaceClass`` by instantiating the
# metaclass directly, so ``__module__`` on interface *classes* resolves
# through the metaclass property while instances use ``__ibmodule__``.
_InterfaceClassBase = _InterfaceMetaClass(
    'InterfaceClass',
    # From least specific to most specific.
    (InterfaceBase, Specification, Element),
    {'__slots__': ()}
)
def interfacemethod(func):
    """
    Convert a method specification to an actual method of the interface.

    This is a decorator that functions like `staticmethod` et al.

    The primary use of this decorator is to allow interface definitions to
    define the ``__adapt__`` method, but other interface methods can be
    overridden this way too.

    .. seealso:: `zope.interface.interfaces.IInterfaceDeclaration.interfacemethod`
    """
    # Stash the function in the interface body one frame up;
    # ``InterfaceClass.__new__`` pops INTERFACE_METHODS out of the attrs.
    class_body = sys._getframe(1).f_locals
    class_body.setdefault(INTERFACE_METHODS, {})[func.__name__] = func
    return _decorator_non_return
class InterfaceClass(_InterfaceClassBase):
    """
    Prototype (scarecrow) Interfaces Implementation.
    Note that it is not possible to change the ``__name__`` or ``__module__``
    after an instance of this object has been constructed.
    """
    # We can't say this yet because we don't have enough
    # infrastructure in place.
    #
    #implements(IInterface)
    def __new__(cls, name=None, bases=(), attrs=None, __doc__=None, # pylint:disable=redefined-builtin
                __module__=None):
        # When the interface body used ``@interfacemethod``, we must
        # synthesize a dedicated subclass carrying those overrides,
        # because methods are looked up on the *type* of the interface
        # object, not on the instance.
        assert isinstance(bases, tuple)
        attrs = attrs or {}
        needs_custom_class = attrs.pop(INTERFACE_METHODS, None)
        if needs_custom_class:
            # Preserve the zero-argument-super() closure cell, if the
            # class body created one.
            needs_custom_class.update(
                {'__classcell__': attrs.pop('__classcell__')}
                if '__classcell__' in attrs
                else {}
            )
            if '__adapt__' in needs_custom_class:
                # We need to tell the C code to call this.
                needs_custom_class['_CALL_CUSTOM_ADAPT'] = 1
            if issubclass(cls, _InterfaceClassWithCustomMethods):
                cls_bases = (cls,)
            elif cls is InterfaceClass:
                cls_bases = (_InterfaceClassWithCustomMethods,)
            else:
                cls_bases = (cls, _InterfaceClassWithCustomMethods)
            cls = type(cls)( # pylint:disable=self-cls-assignment
                name + "<WithCustomMethods>",
                cls_bases,
                needs_custom_class
            )
        elif PY2 and bases and len(bases) > 1:
            bases_with_custom_methods = tuple(
                type(b)
                for b in bases
                if issubclass(type(b), _InterfaceClassWithCustomMethods)
            )
            # If we have a subclass of InterfaceClass in *bases*,
            # Python 3 is smart enough to pass that as *cls*, but Python
            # 2 just passes whatever the first base in *bases* is. This means that if
            # we have multiple inheritance, and one of our bases has already defined
            # a custom method like ``__adapt__``, we do the right thing automatically
            # and extend it on Python 3, but not necessarily on Python 2. To fix this, we need
            # to run the MRO algorithm and get the most derived base manually.
            # Note that this only works for consistent resolution orders
            if bases_with_custom_methods:
                cls = type( # pylint:disable=self-cls-assignment
                    name + "<WithCustomMethods>",
                    bases_with_custom_methods,
                    {}
                ).__mro__[1] # Not the class we created, the most derived.
        return _InterfaceClassBase.__new__(cls)
    def __init__(self, name, bases=(), attrs=None, __doc__=None, # pylint:disable=redefined-builtin
                 __module__=None):
        # We don't call our metaclass parent directly
        # pylint:disable=non-parent-init-called
        # pylint:disable=super-init-not-called
        if not all(isinstance(base, InterfaceClass) for base in bases):
            raise TypeError('Expected base interfaces')
        if attrs is None:
            attrs = {}
        if __module__ is None:
            __module__ = attrs.get('__module__')
            if isinstance(__module__, str):
                del attrs['__module__']
            else:
                try:
                    # Figure out what module defined the interface.
                    # This is how cPython figures out the module of
                    # a class, but of course it does it in C. :-/
                    __module__ = sys._getframe(1).f_globals['__name__']
                except (AttributeError, KeyError): # pragma: no cover
                    pass
        InterfaceBase.__init__(self, name, __module__)
        # These asserts assisted debugging the metaclass
        # assert '__module__' not in self.__dict__
        # assert self.__ibmodule__ is self.__module__ is __module__
        # A plain (non-Attribute) ``__doc__`` in the body is the
        # interface's docstring, not an attribute description.
        d = attrs.get('__doc__')
        if d is not None:
            if not isinstance(d, Attribute):
                if __doc__ is None:
                    __doc__ = d
                del attrs['__doc__']
        if __doc__ is None:
            __doc__ = ''
        Element.__init__(self, name, __doc__)
        # Tagged values collected by the ``taggedValue`` directive.
        tagged_data = attrs.pop(TAGGED_DATA, None)
        if tagged_data is not None:
            for key, val in tagged_data.items():
                self.setTaggedValue(key, val)
        Specification.__init__(self, bases)
        self.__attrs = self.__compute_attrs(attrs)
        self.__identifier__ = "%s.%s" % (__module__, name)
    def __compute_attrs(self, attrs):
        # Make sure that all recorded attributes (and methods) are of type
        # `Attribute` and `Method`
        def update_value(aname, aval):
            # Normalize one body entry; raise for anything concrete.
            if isinstance(aval, Attribute):
                aval.interface = self
                if not aval.__name__:
                    aval.__name__ = aname
            elif isinstance(aval, FunctionType):
                aval = fromFunction(aval, self, name=aname)
            else:
                raise InvalidInterface("Concrete attribute, " + aname)
            return aval
        return {
            aname: update_value(aname, aval)
            for aname, aval in attrs.items()
            if aname not in (
                # __locals__: Python 3 sometimes adds this.
                '__locals__',
                # __qualname__: PEP 3155 (Python 3.3+)
                '__qualname__',
                # __annotations__: PEP 3107 (Python 3.0+)
                '__annotations__',
            )
            and aval is not _decorator_non_return
        }
    def interfaces(self):
        """Return an iterator for the interfaces in the specification.
        For a plain interface, that is just the interface itself.
        """
        yield self
    def getBases(self):
        """Return the base interfaces."""
        return self.__bases__
    def isEqualOrExtendedBy(self, other):
        """Same interface or extends?"""
        return self == other or other.extends(self)
    def names(self, all=False): # pylint:disable=redefined-builtin
        """Return the attribute names defined by the interface.
        With ``all`` true, include names inherited from base interfaces.
        """
        if not all:
            return self.__attrs.keys()
        r = self.__attrs.copy()
        for base in self.__bases__:
            r.update(dict.fromkeys(base.names(all)))
        return r.keys()
    def __iter__(self):
        return iter(self.names(all=True))
    def namesAndDescriptions(self, all=False): # pylint:disable=redefined-builtin
        """Return attribute names and descriptions defined by interface."""
        if not all:
            return self.__attrs.items()
        r = {}
        # Walk bases most-distant-first so nearer declarations override.
        for base in self.__bases__[::-1]:
            r.update(dict(base.namesAndDescriptions(all)))
        r.update(self.__attrs)
        return r.items()
    def getDescriptionFor(self, name):
        """Return the attribute description for the given name.
        Raises KeyError when the name is not defined.
        """
        r = self.get(name)
        if r is not None:
            return r
        raise KeyError(name)
    __getitem__ = getDescriptionFor
    def __contains__(self, name):
        return self.get(name) is not None
    def direct(self, name):
        # Only attributes defined directly on this interface (no bases).
        return self.__attrs.get(name)
    def queryDescriptionFor(self, name, default=None):
        """Like ``getDescriptionFor`` but returns *default* when absent."""
        return self.get(name, default)
    def validateInvariants(self, obj, errors=None):
        """validate object to defined invariants.

        If *errors* is a list, all failures are collected into it and an
        aggregated ``Invalid(errors)`` is raised at the end; otherwise the
        first failure propagates immediately.
        """
        for iface in self.__iro__:
            for invariant in iface.queryDirectTaggedValue('invariants', ()):
                try:
                    invariant(obj)
                except Invalid as error:
                    if errors is not None:
                        errors.append(error)
                    else:
                        raise
        if errors:
            raise Invalid(errors)
    def queryTaggedValue(self, tag, default=None):
        """
        Queries for the value associated with *tag*, returning it from the nearest
        interface in the ``__iro__``.
        If not found, returns *default*.
        """
        for iface in self.__iro__:
            value = iface.queryDirectTaggedValue(tag, _marker)
            if value is not _marker:
                return value
        return default
    def getTaggedValue(self, tag):
        """ Returns the value associated with 'tag'. """
        value = self.queryTaggedValue(tag, default=_marker)
        if value is _marker:
            raise KeyError(tag)
        return value
    def getTaggedValueTags(self):
        """ Returns a list of all tags. """
        keys = set()
        for base in self.__iro__:
            keys.update(base.getDirectTaggedValueTags())
        return keys
    def __repr__(self): # pragma: no cover
        # Cached; interfaces are immutable in name/module after creation.
        try:
            return self._v_repr
        except AttributeError:
            name = self.__name__
            m = self.__ibmodule__
            if m:
                name = '%s.%s' % (m, name)
            r = "<%s %s>" % (self.__class__.__name__, name)
            self._v_repr = r # pylint:disable=attribute-defined-outside-init
            return r
    def _call_conform(self, conform):
        try:
            return conform(self)
        except TypeError: # pragma: no cover
            # We got a TypeError. It might be an error raised by
            # the __conform__ implementation, or *we* may have
            # made the TypeError by calling an unbound method
            # (object is a class). In the later case, we behave
            # as though there is no __conform__ method. We can
            # detect this case by checking whether there is more
            # than one traceback object in the traceback chain:
            if sys.exc_info()[2].tb_next is not None:
                # There is more than one entry in the chain, so
                # reraise the error:
                raise
            # This clever trick is from Phillip Eby
        return None # pragma: no cover
    def __reduce__(self):
        # Returning a string makes pickle treat the interface as a global
        # reference, looked up by name on unpickling.
        return self.__name__
# Bootstrap the root interface.
Interface = InterfaceClass("Interface", __module__='zope.interface')
# Interface is the only member of its own SRO.
# (The lambda takes no arguments because it is stored as an *instance*
# attribute, so it is not bound when looked up.)
Interface._calculate_sro = lambda: (Interface,)
Interface.changed(Interface)
assert Interface.__sro__ == (Interface,)
Specification._ROOT = Interface
ro._ROOT = Interface
class _InterfaceClassWithCustomMethods(InterfaceClass):
    """
    Marker class for interfaces with custom methods that override InterfaceClass methods.
    ``InterfaceClass.__new__`` mixes this into the dynamically created
    type when an interface body uses ``@interfacemethod``.
    """
class Attribute(Element):
    """Attribute descriptions
    """

    # We can't say this yet because we don't have enough
    # infrastructure in place.
    #
    # implements(IAttribute)

    # The interface this attribute belongs to; assigned by
    # ``InterfaceClass`` when the attribute is adopted.
    interface = None

    def _get_str_info(self):
        """Return extra data to put at the end of __str__."""
        return ""

    def __str__(self):
        prefix = ''
        iface = self.interface
        if iface is not None:
            prefix = iface.__module__ + '.' + iface.__name__ + '.'
        # self.__name__ may be None during construction (e.g., debugging)
        label = self.__name__ or '<unknown>'
        return prefix + label + self._get_str_info()

    def __repr__(self):
        klass = type(self)
        return "<%s.%s object at 0x%x %s>" % (
            klass.__module__,
            klass.__name__,
            id(self),
            self
        )
class Method(Attribute):
    """Method interfaces
    The idea here is that you have objects that describe methods.
    This provides an opportunity for rich meta-data.
    """

    # We can't say this yet because we don't have enough
    # infrastructure in place.
    #
    # implements(IMethod)

    positional = required = ()
    _optional = varargs = kwargs = None

    def _get_optional(self):
        # Present a fresh empty mapping while no optionals were recorded.
        opt = self._optional
        return {} if opt is None else opt

    def _set_optional(self, opt):
        self._optional = opt

    def _del_optional(self):
        self._optional = None

    optional = property(_get_optional, _set_optional, _del_optional)

    def __call__(self, *args, **kw):
        # Interface methods are descriptions, never implementations.
        raise BrokenImplementation(self.interface, self.__name__)

    def getSignatureInfo(self):
        """Return a dict describing the recorded signature."""
        return {
            'positional': self.positional,
            'required': self.required,
            'optional': self.optional,
            'varargs': self.varargs,
            'kwargs': self.kwargs,
        }

    def getSignatureString(self):
        """Render the signature as ``(a, b=1, *args, **kw)``."""
        parts = []
        optional = self.optional
        for arg in self.positional:
            if arg in optional:
                parts.append(arg + "=" + repr(optional[arg]))
            else:
                parts.append(arg)
        if self.varargs:
            parts.append("*" + self.varargs)
        if self.kwargs:
            parts.append("**" + self.kwargs)
        return "(%s)" % ", ".join(parts)

    _get_str_info = getSignatureString
def fromFunction(func, interface=None, imlevel=0, name=None):
    """Build a ``Method`` description from the function *func*.

    :param interface: the interface the method belongs to (may be None).
    :param imlevel: number of leading arguments to skip; pass 1 for
        methods so the implicit ``self`` is not recorded.
    :param name: override for the method name; defaults to ``func.__name__``.
    :return: a ``Method`` populated with signature information, with the
        function's attribute dict copied over as tagged values.
    """
    name = name or func.__name__
    method = Method(name, func.__doc__)
    defaults = getattr(func, '__defaults__', None) or ()
    code = func.__code__
    # Number of positional arguments
    na = code.co_argcount - imlevel
    names = code.co_varnames[imlevel:]
    opt = {}
    # Number of required arguments
    defaults_count = len(defaults)
    if not defaults_count:
        # PyPy3 uses ``__defaults_count__`` for builtin methods
        # like ``dict.pop``. Surprisingly, these don't have recorded
        # ``__defaults__``
        defaults_count = getattr(func, '__defaults_count__', 0)
    nr = na - defaults_count
    if nr < 0:
        # More defaults than (remaining) positional args -- e.g. when
        # ``imlevel`` skipped a defaulted argument. Drop the extra leading
        # defaults and treat every remaining argument as optional.
        defaults = defaults[-nr:]
        nr = 0
    # Determine the optional arguments.
    opt.update(dict(zip(names[nr:], defaults)))
    method.positional = names[:na]
    method.required = names[:nr]
    method.optional = opt
    argno = na
    # Determine the function's variable argument's name (i.e. *args)
    if code.co_flags & CO_VARARGS:
        method.varargs = names[argno]
        argno = argno + 1
    else:
        method.varargs = None
    # Determine the function's keyword argument's name (i.e. **kw)
    if code.co_flags & CO_VARKEYWORDS:
        method.kwargs = names[argno]
    else:
        method.kwargs = None
    method.interface = interface
    # Expose any function attributes as tagged values on the description.
    for key, value in func.__dict__.items():
        method.setTaggedValue(key, value)
    return method
def fromMethod(meth, interface=None, name=None):
    """Build a ``Method`` description from a (possibly bound) method.

    Bound methods are unwrapped to their underlying function, and the
    implicit ``self`` argument is skipped via ``imlevel=1``.
    """
    func = meth.__func__ if isinstance(meth, MethodType) else meth
    return fromFunction(func, interface, imlevel=1, name=name)
# Now we can create the interesting interfaces and wire them up:
def _wire():
    """Declare (via ``classImplements``) the interfaces provided by the
    classes defined in this module, once the declaration machinery and
    the ``zope.interface.interfaces`` module exist."""
    from zope.interface.declarations import classImplements
    # From least specific to most specific.
    from zope.interface.interfaces import IElement
    classImplements(Element, IElement)
    from zope.interface.interfaces import IAttribute
    classImplements(Attribute, IAttribute)
    from zope.interface.interfaces import IMethod
    classImplements(Method, IMethod)
    from zope.interface.interfaces import ISpecification
    classImplements(Specification, ISpecification)
    from zope.interface.interfaces import IInterface
    classImplements(InterfaceClass, IInterface)
# We import this here to deal with module dependencies.
# pylint:disable=wrong-import-position
from zope.interface.declarations import implementedBy
from zope.interface.declarations import providedBy
from zope.interface.exceptions import InvalidInterface
from zope.interface.exceptions import BrokenImplementation
# This ensures that ``Interface`` winds up in the flattened()
# list of the immutable declaration. It correctly overrides changed()
# as a no-op, so we bypass that.
# (``Specification.changed`` is called unbound on ``_empty`` so the
# no-op override is not consulted.)
from zope.interface.declarations import _empty
Specification.changed(_empty, _empty)
|
src/zope/interface/interface.py
|
codereval_python_data_42
|
Normalize declaration arguments
Normalization arguments might contain Declarions, tuples, or single
interfaces.
Anything but individial interfaces or implements specs will be expanded.
def _normalizeargs(sequence, output=None):
    """Normalize declaration arguments

    Normalization arguments might contain Declarations, tuples, or single
    interfaces.

    Anything but individual interfaces or implements specs will be expanded.

    :param sequence: a single interface/implements spec, or an arbitrarily
        nested iterable of them.
    :param output: list to append the flattened results to; a new list is
        created when omitted.
    :return: the (possibly newly created) *output* list.
    """
    if output is None:
        output = []
    cls = sequence.__class__
    # NOTE(review): ``__mro__`` is inspected directly rather than using
    # isinstance() -- presumably to sidestep proxies/__instancecheck__;
    # confirm before changing.
    if InterfaceClass in cls.__mro__ or Implements in cls.__mro__:
        output.append(sequence)
    else:
        for v in sequence:
            _normalizeargs(v, output)
    return output
##############################################################################
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
##############################################################################
"""Implementation of interface declarations
There are three flavors of declarations:
- Declarations are used to simply name declared interfaces.
- ImplementsDeclarations are used to express the interfaces that a
class implements (that instances of the class provides).
Implements specifications support inheriting interfaces.
- ProvidesDeclarations are used to express interfaces directly
provided by objects.
"""
__docformat__ = 'restructuredtext'
import sys
from types import FunctionType
from types import MethodType
from types import ModuleType
import weakref
from zope.interface.advice import addClassAdvisor
from zope.interface.interface import Interface
from zope.interface.interface import InterfaceClass
from zope.interface.interface import SpecificationBase
from zope.interface.interface import Specification
from zope.interface.interface import NameAndModuleComparisonMixin
from zope.interface._compat import CLASS_TYPES as DescriptorAwareMetaClasses
from zope.interface._compat import PYTHON3
from zope.interface._compat import _use_c_impl
__all__ = [
# None. The public APIs of this module are
# re-exported from zope.interface directly.
]
# pylint:disable=too-many-lines
# Registry of class-implementation specifications
BuiltinImplementationSpecifications = {}
_ADVICE_ERROR = ('Class advice impossible in Python3. '
'Use the @%s class decorator instead.')
_ADVICE_WARNING = ('The %s API is deprecated, and will not work in Python3 '
'Use the @%s class decorator instead.')
def _next_super_class(ob):
# When ``ob`` is an instance of ``super``, return
# the next class in the MRO that we should actually be
# looking at. Watch out for diamond inheritance!
self_class = ob.__self_class__
class_that_invoked_super = ob.__thisclass__
complete_mro = self_class.__mro__
next_class = complete_mro[complete_mro.index(class_that_invoked_super) + 1]
return next_class
class named(object):
    """Decorator: record *name* as the component name of the decorated object."""

    def __init__(self, name):
        self.name = name

    def __call__(self, ob):
        # Stamp the component name onto the object and hand it back,
        # so this composes with other decorators.
        setattr(ob, '__component_name__', self.name)
        return ob
class Declaration(Specification):
    """A specification naming a set of declared interfaces."""

    __slots__ = ()

    def __init__(self, *bases):
        Specification.__init__(self, _normalizeargs(bases))

    def __contains__(self, interface):
        """True if *interface* is one of the interfaces in this declaration."""
        return self.extends(interface) and interface in self.interfaces()

    def __iter__(self):
        """Iterate the interfaces in this declaration."""
        return self.interfaces()

    def flattened(self):
        """Iterate all included and extended interfaces (the full IRO)."""
        return iter(self.__iro__)

    def __sub__(self, other):
        """Return a new declaration without the interfaces in *other*.

        An interface is dropped when it (non-strictly) extends any
        interface declared by *other*.
        """
        kept = []
        for candidate in self.interfaces():
            if not any(candidate.extends(j, 0)  # non-strict extends
                       for j in other.interfaces()):
                kept.append(candidate)
        return Declaration(*kept)

    def __add__(self, other):
        """Combine two specifications, preserving order and dropping
        duplicates contributed by *other*."""
        seen = set()
        combined = []
        for iface in self.interfaces():
            seen.add(iface)
            combined.append(iface)
        for iface in other.interfaces():
            if iface not in seen:
                seen.add(iface)
                combined.append(iface)
        return Declaration(*combined)

    __radd__ = __add__
class _ImmutableDeclaration(Declaration):
    # A Declaration that is immutable. Used as a singleton to
    # return empty answers for things like ``implementedBy``.
    # We have to define the actual singleton after normalizeargs
    # is defined, and that in turn is defined after InterfaceClass and
    # Implements.

    __slots__ = ()

    # The one shared instance; created lazily in __new__.
    __instance = None

    def __new__(cls):
        # Enforce the singleton: every construction returns the same object.
        if _ImmutableDeclaration.__instance is None:
            _ImmutableDeclaration.__instance = object.__new__(cls)
        return _ImmutableDeclaration.__instance

    def __reduce__(self):
        # Pickle as a reference to the module-level ``_empty`` singleton.
        return "_empty"

    @property
    def __bases__(self):
        # Always empty; this declaration names no interfaces.
        return ()

    @__bases__.setter
    def __bases__(self, new_bases):
        # We expect the superclass constructor to set ``self.__bases__ = ()``.
        # Rather than attempt to special case that in the constructor and allow
        # setting __bases__ only at that time, it's easier to just allow setting
        # the empty tuple at any time. That makes ``x.__bases__ = x.__bases__`` a nice
        # no-op too. (Skipping the superclass constructor altogether is a recipe
        # for maintenance headaches.)
        if new_bases != ():
            raise TypeError("Cannot set non-empty bases on shared empty Declaration.")

    # As the immutable empty declaration, we cannot be changed.
    # This means there's no logical reason for us to have dependents
    # or subscriptions: we'll never notify them. So there's no need for
    # us to keep track of any of that.
    @property
    def dependents(self):
        return {}

    # Change notification and subscription are deliberate no-ops.
    changed = subscribe = unsubscribe = lambda self, _ignored: None

    def interfaces(self):
        # An empty iterator
        return iter(())

    def extends(self, interface, strict=True):
        # The empty declaration extends only the root specification.
        return interface is self._ROOT

    def get(self, name, default=None):
        # There are no attributes to look up.
        return default

    def weakref(self, callback=None):
        # We're a singleton, we never go away. So there's no need to return
        # distinct weakref objects here; their callbacks will never
        # be called. Instead, we only need to return a callable that
        # returns ourself. The easiest one is to return _ImmutableDeclaration
        # itself; testing on Python 3.8 shows that's faster than a function that
        # returns _empty. (Remember, one goal is to avoid allocating any
        # object, and that includes a method.)
        return _ImmutableDeclaration

    @property
    def _v_attrs(self):
        # _v_attrs is not a public, documented property, but some client
        # code uses it anyway as a convenient place to cache things. To keep
        # the empty declaration truly immutable, we must ignore that. That includes
        # ignoring assignments as well.
        return {}

    @_v_attrs.setter
    def _v_attrs(self, new_attrs):
        # Silently discard cache assignments; see the getter above.
        pass
##############################################################################
#
# Implementation specifications
#
# These specify interfaces implemented by instances of classes
class Implements(NameAndModuleComparisonMixin,
                 Declaration):
    """The specification of interfaces implemented by instances of a class."""

    # Inherit from NameAndModuleComparisonMixin to be
    # mutually comparable with InterfaceClass objects.
    # (The two must be mutually comparable to be able to work in e.g., BTrees.)
    # Instances of this class generally don't have a __module__ other than
    # `zope.interface.declarations`, whereas they *do* have a __name__ that is the
    # fully qualified name of the object they are representing.

    # Note, though, that equality and hashing are still identity based. This
    # accounts for things like nested objects that have the same name (typically
    # only in tests) and is consistent with pickling. As far as comparisons to InterfaceClass
    # goes, we'll never have equal name and module to those, so we're still consistent there.
    # Instances of this class are essentially intended to be unique and are
    # heavily cached (note how our __reduce__ handles this) so having identity
    # based hash and eq should also work.

    # We want equality and hashing to be based on identity. However, we can't actually
    # implement __eq__/__ne__ to do this because sometimes we get wrapped in a proxy.
    # We need to let the proxy types implement these methods so they can handle unwrapping
    # and then rely on: (1) the interpreter automatically changing `implements == proxy` into
    # `proxy == implements` (which will call proxy.__eq__ to do the unwrapping) and then
    # (2) the default equality and hashing semantics being identity based.

    # class whose specification should be used as additional base
    inherit = None

    # interfaces actually declared for a class
    declared = ()

    # Weak cache of {class: <implements>} for super objects.
    # Created on demand. These are rare, as of 5.0 anyway. Using a class
    # level default doesn't take space in instances. Using _v_attrs would be
    # another place to store this without taking space unless needed.
    _super_cache = None

    # Placeholder until a real name is assigned (see ``named``).
    __name__ = '?'

    @classmethod
    def named(cls, name, *bases):
        """Alternate constructor: set ``__name__`` before processing bases."""
        # Implementation method: Produce an Implements interface with
        # a fully fleshed out __name__ before calling the constructor, which
        # sets bases to the given interfaces and which may pass this object to
        # other objects (e.g., to adjust dependents). If they're sorting or comparing
        # by name, this needs to be set.
        inst = cls.__new__(cls)
        inst.__name__ = name
        inst.__init__(*bases)
        return inst

    def changed(self, originally_changed):
        # Invalidate the cached super() specs: a change to our bases may
        # make every cached Implements for super objects stale.
        try:
            del self._super_cache
        except AttributeError:
            pass
        return super(Implements, self).changed(originally_changed)

    def __repr__(self):
        return '<implementedBy %s>' % (self.__name__)

    def __reduce__(self):
        # Pickle by recomputing from the class, which preserves the
        # one-instance-per-class caching behavior on unpickle.
        return implementedBy, (self.inherit, )
def _implements_name(ob):
# Return the __name__ attribute to be used by its __implemented__
# property.
# This must be stable for the "same" object across processes
# because it is used for sorting. It needn't be unique, though, in cases
# like nested classes named Foo created by different functions, because
# equality and hashing is still based on identity.
# It might be nice to use __qualname__ on Python 3, but that would produce
# different values between Py2 and Py3.
return (getattr(ob, '__module__', '?') or '?') + \
'.' + (getattr(ob, '__name__', '?') or '?')
def _implementedBy_super(sup):
    """Compute (and cache) the `Implements` for a ``super`` object.

    The result covers only the classes from the next class in the MRO
    onward, so interfaces declared by the skipped classes are excluded.
    """
    # TODO: This is now simple enough we could probably implement
    # in C if needed.

    # If the class MRO is strictly linear, we could just
    # follow the normal algorithm for the next class in the
    # search order (e.g., just return
    # ``implemented_by_next``). But when diamond inheritance
    # or mixins + interface declarations are present, we have
    # to consider the whole MRO and compute a new Implements
    # that excludes the classes being skipped over but
    # includes everything else.
    implemented_by_self = implementedBy(sup.__self_class__)
    cache = implemented_by_self._super_cache # pylint:disable=protected-access
    if cache is None:
        # Created lazily; a WeakKeyDictionary so cached entries don't
        # keep the invoking classes alive.
        cache = implemented_by_self._super_cache = weakref.WeakKeyDictionary()

    key = sup.__thisclass__
    try:
        return cache[key]
    except KeyError:
        pass

    next_cls = _next_super_class(sup)
    # For ``implementedBy(cls)``:
    # .__bases__ is .declared + [implementedBy(b) for b in cls.__bases__]
    # .inherit is cls

    implemented_by_next = implementedBy(next_cls)
    mro = sup.__self_class__.__mro__
    ix_next_cls = mro.index(next_cls)
    # Keep only the part of the MRO from the delegated-to class onward.
    classes_to_keep = mro[ix_next_cls:]
    new_bases = [implementedBy(c) for c in classes_to_keep]

    new = Implements.named(
        implemented_by_self.__name__ + ':' + implemented_by_next.__name__,
        *new_bases
    )
    new.inherit = implemented_by_next.inherit
    new.declared = implemented_by_next.declared
    # I don't *think* that new needs to subscribe to ``implemented_by_self``;
    # it auto-subscribed to its bases, and that should be good enough.
    cache[key] = new

    return new
@_use_c_impl
def implementedBy(cls): # pylint:disable=too-many-return-statements,too-many-branches
    """Return the interfaces implemented for a class' instances

    The value returned is an `~zope.interface.interfaces.IDeclaration`.

    The computed spec is cached on the class as ``__implemented__`` when
    possible; classes that reject attribute writes (builtins) are cached
    in ``BuiltinImplementationSpecifications`` instead.
    """
    try:
        if isinstance(cls, super):
            # Yes, this needs to be inside the try: block. Some objects
            # like security proxies even break isinstance.
            return _implementedBy_super(cls)

        spec = cls.__dict__.get('__implemented__')
    except AttributeError:
        # we can't get the class dict. This is probably due to a
        # security proxy. If this is the case, then probably no
        # descriptor was installed for the class.

        # We don't want to depend directly on zope.security in
        # zope.interface, but we'll try to make reasonable
        # accommodations in an indirect way.

        # We'll check to see if there's an implements:
        spec = getattr(cls, '__implemented__', None)
        if spec is None:
            # There's no spec stored in the class. Maybe its a builtin:
            spec = BuiltinImplementationSpecifications.get(cls)
            if spec is not None:
                return spec
            return _empty

        if spec.__class__ == Implements:
            # we defaulted to _empty or there was a spec. Good enough.
            # Return it.
            return spec

        # TODO: need old style __implements__ compatibility?
        # Hm, there's an __implemented__, but it's not a spec. Must be
        # an old-style declaration. Just compute a spec for it
        return Declaration(*_normalizeargs((spec, )))

    if isinstance(spec, Implements):
        # Found a cached spec in the class's own __dict__.
        return spec

    if spec is None:
        spec = BuiltinImplementationSpecifications.get(cls)
        if spec is not None:
            return spec

    # TODO: need old style __implements__ compatibility?
    spec_name = _implements_name(cls)
    if spec is not None:
        # old-style __implemented__ = foo declaration
        spec = (spec, ) # tuplefy, as it might be just an int
        spec = Implements.named(spec_name, *_normalizeargs(spec))
        spec.inherit = None    # old-style implies no inherit
        del cls.__implemented__ # get rid of the old-style declaration
    else:
        try:
            bases = cls.__bases__
        except AttributeError:
            if not callable(cls):
                raise TypeError("ImplementedBy called for non-factory", cls)
            bases = ()

        # Nothing declared yet: inherit whatever the bases implement.
        spec = Implements.named(spec_name, *[implementedBy(c) for c in bases])
        spec.inherit = cls

    try:
        # Cache the spec on the class and make sure the descriptors
        # needed by providedBy() are in place.
        cls.__implemented__ = spec
        if not hasattr(cls, '__providedBy__'):
            cls.__providedBy__ = objectSpecificationDescriptor
        if (isinstance(cls, DescriptorAwareMetaClasses)
                and '__provides__' not in cls.__dict__):
            # Make sure we get a __provides__ descriptor
            cls.__provides__ = ClassProvides(
                cls,
                getattr(cls, '__class__', type(cls)),
            )
    except TypeError:
        # The class refuses attribute writes (e.g. a builtin); fall back
        # to the side registry.
        if not isinstance(cls, type):
            raise TypeError("ImplementedBy called for non-type", cls)
        BuiltinImplementationSpecifications[cls] = spec

    return spec
def classImplementsOnly(cls, *interfaces):
    """
    Declare the only interfaces implemented by instances of a class

    The arguments after the class are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications)
    replace any previous declarations, *including* inherited definitions. If you
    wish to preserve inherited declarations, you can pass ``implementedBy(cls)``
    in *interfaces*. This can be used to alter the interface resolution order.
    """
    spec = implementedBy(cls)
    # Wipe everything currently declared or inherited. The bases must be
    # cleared right here as well, so that interfaces implemented by *old*
    # bases (which are about to be discarded) cannot be mistaken for
    # already-present declarations when the new ones are installed.
    spec.inherit = None
    spec.declared = ()
    spec.__bases__ = ()
    _classImplements_ordered(spec, interfaces, ())
def classImplements(cls, *interfaces):
    """
    Declare additional interfaces implemented for instances of a class

    The arguments after the class are one or more interfaces or
    interface specifications (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications)
    are added to any interfaces previously declared. An effort is made to
    keep a consistent C3 resolution order, but this cannot be guaranteed.

    .. versionchanged:: 5.0.0
        Each individual interface in *interfaces* may be added to either the
        beginning or end of the list of interfaces declared for *cls*,
        based on inheritance, in order to try to maintain a consistent
        resolution order. Previously, all interfaces were added to the end.
    .. versionchanged:: 5.1.0
        If *cls* is already declared to implement an interface (or derived interface)
        in *interfaces* through inheritance, the interface is ignored. Previously, it
        would redundantly be made direct base of *cls*, which often produced inconsistent
        interface resolution orders. Now, the order will be consistent, but may change.
        Also, if the ``__bases__`` of the *cls* are later changed, the *cls* will no
        longer be considered to implement such an interface (changing the ``__bases__`` of *cls*
        has never been supported).
    """
    spec = implementedBy(cls)
    normalized = tuple(_normalizeargs(interfaces))
    # Partition the new interfaces: one that extends something already
    # declared must come *before* it in the IRO; everything else is
    # appended (the historical behavior).
    head = []
    tail = []
    for iface in normalized:
        if any(iface.extends(declared) for declared in spec.declared):
            head.append(iface)
        else:
            tail.append(iface)
    _classImplements_ordered(spec, tuple(head), tuple(tail))
def classImplementsFirst(cls, iface):
    """
    Declare that instances of *cls* additionally provide *iface*.

    The second argument is an interface or interface specification.
    It is added as the highest priority (first in the IRO) interface;
    no attempt is made to keep a consistent resolution order.

    .. versionadded:: 5.0.0
    """
    _classImplements_ordered(implementedBy(cls), (iface,), ())
def _classImplements_ordered(spec, before=(), after=()):
    """Mutate *spec* so its declaration becomes *before* + existing + *after*.

    :param spec: The `Implements` to update (mutated in place).
    :param before: Interfaces to prepend (highest priority).
    :param after: Interfaces to append (lowest priority).
    """
    # Elide everything already inherited.
    # Except, if it is the root, and we don't already declare anything else
    # that would imply it, allow the root through. (TODO: When we disallow non-strict
    # IRO, this part of the check can be removed because it's not possible to re-declare
    # like that.)
    before = [
        x
        for x in before
        if not spec.isOrExtends(x) or (x is Interface and not spec.declared)
    ]
    after = [
        x
        for x in after
        if not spec.isOrExtends(x) or (x is Interface and not spec.declared)
    ]

    # eliminate duplicates, keeping first occurrence in priority order
    new_declared = []
    seen = set()
    for l in before, spec.declared, after:
        for b in l:
            if b not in seen:
                new_declared.append(b)
                seen.add(b)

    spec.declared = tuple(new_declared)

    # compute the bases
    bases = new_declared # guaranteed no dupes

    if spec.inherit is not None:
        # Also base ourselves on what each inherited base class
        # implements, unless that is already covered above.
        for c in spec.inherit.__bases__:
            b = implementedBy(c)
            if b not in seen:
                seen.add(b)
                bases.append(b)

    # Assigning __bases__ last: this is the step that triggers
    # re-resolution in the Specification machinery.
    spec.__bases__ = tuple(bases)
def _implements_advice(cls):
interfaces, do_classImplements = cls.__dict__['__implements_advice_data__']
del cls.__implements_advice_data__
do_classImplements(cls, *interfaces)
return cls
class implementer(object):
    """
    Declare the interfaces implemented by instances of a class.

    This function is called as a class decorator.

    The arguments are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration`
    objects).

    The interfaces given (including the interfaces in the
    specifications) are added to any interfaces previously declared,
    unless the interface is already implemented.

    Previous declarations include declarations for base classes unless
    implementsOnly was used.

    This function is provided for convenience. It provides a more
    convenient way to call `classImplements`. For example::

        @implementer(I1)
        class C(object):
            pass

    is equivalent to calling::

        classImplements(C, I1)

    after the class has been created.

    .. seealso:: `classImplements`
       The change history provided there applies to this function too.
    """
    __slots__ = ('interfaces',)

    def __init__(self, *interfaces):
        self.interfaces = interfaces

    def __call__(self, ob):
        if isinstance(ob, DescriptorAwareMetaClasses):
            # The common branch: a real class (new-style, or old-style
            # on Python 2). Record the declaration on the class itself.
            classImplements(ob, *self.interfaces)
            return ob

        # Not a class: attach a fresh Implements directly to the object.
        spec = Implements.named(_implements_name(ob), *self.interfaces)
        try:
            ob.__implemented__ = spec
        except AttributeError:
            raise TypeError("Can't declare implements", ob)
        return ob
class implementer_only(object):
    """Declare the only interfaces implemented by instances of a class

    This function is called as a class decorator.

    The arguments are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    Previous declarations including declarations for base classes
    are overridden.

    This function is provided for convenience. It provides a more
    convenient way to call `classImplementsOnly`. For example::

        @implementer_only(I1)
        class C(object): pass

    is equivalent to calling::

        classImplementsOnly(I1)

    after the class has been created.
    """

    def __init__(self, *interfaces):
        self.interfaces = interfaces

    def __call__(self, ob):
        # XXX Does this decorator make sense for anything but classes?
        # I don't think so. There can be no inheritance of interfaces
        # on a method or function....
        if isinstance(ob, (FunctionType, MethodType)):
            raise ValueError('The implementer_only decorator is not '
                             'supported for methods or functions.')
        # Assume it's a class:
        classImplementsOnly(ob, *self.interfaces)
        return ob
def _implements(name, interfaces, do_classImplements):
    """Shared frame-hacking helper for the Python-2-only ``implements``
    and ``implementsOnly`` functions.

    :param name: The public API name; used only in error messages.
    :param interfaces: The interfaces being declared.
    :param do_classImplements: The callback (``classImplements`` or
        ``classImplementsOnly``) the class advisor will invoke once the
        class object actually exists.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    # Depth 2: skip this helper and its public caller to reach the
    # frame of the class body being executed.
    frame = sys._getframe(2) # pylint:disable=protected-access
    locals = frame.f_locals # pylint:disable=redefined-builtin

    # Try to make sure we were called from a class def. In 2.2.0 we can't
    # check for __module__ since it doesn't seem to be added to the locals
    # until later on.
    if locals is frame.f_globals or '__module__' not in locals:
        raise TypeError(name+" can be used only from a class definition.")

    if '__implements_advice_data__' in locals:
        raise TypeError(name+" can be used only once in a class definition.")

    # Stash the request in the class namespace; _implements_advice will
    # pop it and apply the declaration when the advisor fires.
    locals['__implements_advice_data__'] = interfaces, do_classImplements
    addClassAdvisor(_implements_advice, depth=3)
def implements(*interfaces):
    """
    Declare interfaces implemented by instances of a class.

    .. deprecated:: 5.0
       This only works for Python 2. The `implementer` decorator
       is preferred for all versions.

    This function is called in a class definition.

    The arguments are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration`
    objects).

    The interfaces given (including the interfaces in the
    specifications) are added to any interfaces previously declared.

    Previous declarations include declarations for base classes unless
    `implementsOnly` was used.

    This function is provided for convenience. It provides a more
    convenient way to call `classImplements`. For example::

        implements(I1)

    is equivalent to calling::

        classImplements(C, I1)

    after the class has been created.

    :raises TypeError: Always, when called under Python 3, or when not
        called from within a class definition.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'implementer')
    _implements("implements", interfaces, classImplements)
def implementsOnly(*interfaces):
    """Declare the only interfaces implemented by instances of a class

    This function is called in a class definition.

    The arguments are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    Previous declarations including declarations for base classes
    are overridden.

    This function is provided for convenience. It provides a more
    convenient way to call `classImplementsOnly`. For example::

        implementsOnly(I1)

    is equivalent to calling::

        classImplementsOnly(I1)

    after the class has been created.

    :raises TypeError: Always, when called under Python 3, or when not
        called from within a class definition.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'implementer_only')
    _implements("implementsOnly", interfaces, classImplementsOnly)
##############################################################################
#
# Instance declarations
class Provides(Declaration):  # Really named ProvidesClass
    """Implement ``__provides__``, the instance-specific specification

    When an object is pickled, we pickle the interfaces that it implements.
    """

    def __init__(self, cls, *interfaces):
        # Remember the original arguments so __reduce__ can rebuild us.
        self.__args = (cls, ) + interfaces
        self._cls = cls
        # The class's own implements spec is appended as the
        # lowest-priority base, after the directly provided interfaces.
        Declaration.__init__(self, *(interfaces + (implementedBy(cls), )))

    def __repr__(self):
        return "<%s.%s for %s>" % (
            self.__class__.__module__,
            self.__class__.__name__,
            self._cls,
        )

    def __reduce__(self):
        # Unpickle through the caching ``Provides`` factory function
        # (defined later in this module), so equal declarations are shared.
        return Provides, self.__args

    __module__ = 'zope.interface'

    def __get__(self, inst, cls):
        """Make sure that a class __provides__ doesn't leak to an instance
        """
        if inst is None and cls is self._cls:
            # We were accessed through a class, so we are the class'
            # provides spec. Just return this object, but only if we are
            # being called on the same class that we were defined for:
            return self

        raise AttributeError('__provides__')
# Keep a reference to the class under an unambiguous name, because the
# name ``Provides`` is rebound to a caching factory function just below.
ProvidesClass = Provides

# Registry of instance declarations
# This is a memory optimization to allow objects to share specifications.
# Weak values: an unused declaration disappears with its last user.
InstanceDeclarations = weakref.WeakValueDictionary()
def Provides(*interfaces): # pylint:disable=function-redefined
    """Cache instance declarations

    Instance declarations are shared among instances that have the same
    declaration. The declarations are cached in a weak value dictionary.
    """
    try:
        return InstanceDeclarations[interfaces]
    except KeyError:
        spec = ProvidesClass(*interfaces)
        InstanceDeclarations[interfaces] = spec
        return spec

# Unpickling goes through this factory, which is safe by construction.
Provides.__safe_for_unpickling__ = True
def directlyProvides(object, *interfaces): # pylint:disable=redefined-builtin
    """Declare interfaces declared directly for an object

    The arguments after the object are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications)
    replace interfaces previously declared for the object.

    :raises TypeError: If *object* is a non-descriptor-aware class,
        or does not accept the ``__provides__`` attribute.
    """
    cls = getattr(object, '__class__', None)
    if cls is not None and getattr(cls, '__class__', None) is cls:
        # It's a meta class (well, at least it could be an extension class)
        # Note that we can't get here from Py3k tests: there is no normal
        # class which isn't descriptor aware.
        if not isinstance(object,
                          DescriptorAwareMetaClasses):
            raise TypeError("Attempt to make an interface declaration on a "
                            "non-descriptor-aware class")

    interfaces = _normalizeargs(interfaces)
    if cls is None:
        cls = type(object)

    # Decide whether *object* is itself a class/type.
    issub = False
    for damc in DescriptorAwareMetaClasses:
        if issubclass(cls, damc):
            issub = True
            break
    if issub:
        # we have a class or type. We'll use a special descriptor
        # that provides some extra caching
        object.__provides__ = ClassProvides(object, cls, *interfaces)
    else:
        object.__provides__ = Provides(cls, *interfaces)
def alsoProvides(object, *interfaces): # pylint:disable=redefined-builtin
    """Declare interfaces declared directly for an object

    The arguments after the object are one or more interfaces or interface
    specifications (`~zope.interface.interfaces.IDeclaration` objects).

    The interfaces given (including the interfaces in the specifications) are
    added to the interfaces previously declared for the object.
    """
    # Prepend the existing direct declaration so nothing is lost, then
    # re-declare the combined set.
    current = directlyProvidedBy(object)
    directlyProvides(object, current, *interfaces)
def noLongerProvides(object, interface): # pylint:disable=redefined-builtin
    """ Removes a directly provided interface from an object.

    :raises ValueError: If the object still provides *interface*
        afterwards (i.e. it was not a *directly* provided interface).
    """
    remaining = directlyProvidedBy(object) - interface
    directlyProvides(object, remaining)
    if interface.providedBy(object):
        raise ValueError("Can only remove directly provided interfaces.")
@_use_c_impl
class ClassProvidesBase(SpecificationBase):
    # Python implementation of the base for the ``__provides__``
    # descriptor installed on classes; ``_use_c_impl`` may substitute
    # the C version.

    __slots__ = (
        '_cls',         # the class this descriptor was created for
        '_implements',  # cached implementedBy(_cls); set by subclass
    )

    def __get__(self, inst, cls):
        # member slots are set by subclass
        # pylint:disable=no-member
        if cls is self._cls:
            # We only work if called on the class we were defined for
            if inst is None:
                # We were accessed through a class, so we are the class'
                # provides spec. Just return this object as is:
                return self
            # Accessed through an instance: it provides what the class
            # implements.
            return self._implements

        raise AttributeError('__provides__')
class ClassProvides(Declaration, ClassProvidesBase):
    """Special descriptor for class ``__provides__``

    The descriptor caches the implementedBy info, so that
    we can get declarations for objects without instance-specific
    interfaces a bit quicker.
    """

    __slots__ = (
        '__args',  # constructor arguments, kept for pickling
    )

    def __init__(self, cls, metacls, *interfaces):
        self._cls = cls
        # Cache the class's implements spec for fast instance access
        # via ClassProvidesBase.__get__.
        self._implements = implementedBy(cls)
        self.__args = (cls, metacls, ) + interfaces
        # The *metaclass's* implements spec is the lowest-priority base:
        # a class provides what its metaclass implements.
        Declaration.__init__(self, *(interfaces + (implementedBy(metacls), )))

    def __repr__(self):
        return "<%s.%s for %s>" % (
            self.__class__.__module__,
            self.__class__.__name__,
            self._cls,
        )

    def __reduce__(self):
        return self.__class__, self.__args

    # Copy base-class method for speed
    __get__ = ClassProvidesBase.__get__
def directlyProvidedBy(object): # pylint:disable=redefined-builtin
    """Return the interfaces directly provided by the given object

    The value returned is an `~zope.interface.interfaces.IDeclaration`.
    """
    provides = getattr(object, "__provides__", None)
    # No spec at all, or we got the class-level implements spec (which
    # may be handed back as an optimization) — either way nothing is
    # *directly* provided.
    if provides is None or isinstance(provides, Implements):
        return _empty

    # A real Provides spec: its bases are the direct interfaces followed
    # by the class-supplied spec, so drop the trailing class part.
    return Declaration(provides.__bases__[:-1])
def classProvides(*interfaces):
    """Declare interfaces provided directly by a class

    This function is called in a class definition.

    The arguments are one or more interfaces or interface specifications
    (`~zope.interface.interfaces.IDeclaration` objects).

    The given interfaces (including the interfaces in the specifications)
    are used to create the class's direct-object interface specification.
    An error will be raised if the module class has a direct interface
    specification. In other words, it is an error to call this function more
    than once in a class definition.

    Note that the given interfaces have nothing to do with the interfaces
    implemented by instances of the class.

    This function is provided for convenience. It provides a more convenient
    way to call `directlyProvides` for a class. For example::

        classProvides(I1)

    is equivalent to calling::

        directlyProvides(theclass, I1)

    after the class has been created.

    :raises TypeError: Always, when called under Python 3, or when not
        called (exactly once) from within a class definition.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'provider')

    # Depth 1: the frame of the class body being executed.
    frame = sys._getframe(1) # pylint:disable=protected-access
    locals = frame.f_locals # pylint:disable=redefined-builtin

    # Try to make sure we were called from a class def
    if (locals is frame.f_globals) or ('__module__' not in locals):
        raise TypeError("classProvides can be used only from a "
                        "class definition.")

    if '__provides__' in locals:
        raise TypeError(
            "classProvides can only be used once in a class definition.")

    # Stash the request; _classProvides_advice applies it once the
    # class object exists.
    locals["__provides__"] = _normalizeargs(interfaces)

    addClassAdvisor(_classProvides_advice, depth=2)
def _classProvides_advice(cls):
    """Class advisor callback for the Python-2-only `classProvides`.

    Pops the interfaces stashed in the class body and turns them into a
    real direct-provides declaration on the finished class.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    interfaces = cls.__dict__['__provides__']
    delattr(cls, '__provides__')
    directlyProvides(cls, *interfaces)
    return cls
class provider(object):
    """Class decorator version of `classProvides`.

    Declares the given interfaces as directly provided by the decorated
    class itself (not by its instances)."""

    def __init__(self, *interfaces):
        self.interfaces = tuple(interfaces)

    def __call__(self, ob):
        directlyProvides(ob, *self.interfaces)
        return ob
def moduleProvides(*interfaces):
    """Declare interfaces provided by a module

    This function is used in a module definition.

    The arguments are one or more interfaces or interface specifications
    (`~zope.interface.interfaces.IDeclaration` objects).

    The given interfaces (including the interfaces in the specifications) are
    used to create the module's direct-object interface specification. An
    error will be raised if the module already has an interface specification.
    In other words, it is an error to call this function more than once in a
    module definition.

    This function is provided for convenience. It provides a more convenient
    way to call directlyProvides. For example::

        moduleProvides(I1)

    is equivalent to::

        directlyProvides(sys.modules[__name__], I1)

    :raises TypeError: When not called (exactly once) at module top level.
    """
    # Depth 1: the frame of the module body being executed.
    frame = sys._getframe(1) # pylint:disable=protected-access
    locals = frame.f_locals # pylint:disable=redefined-builtin

    # Try to make sure we were called from a module body
    if (locals is not frame.f_globals) or ('__name__' not in locals):
        raise TypeError(
            "moduleProvides can only be used from a module definition.")

    if '__provides__' in locals:
        raise TypeError(
            "moduleProvides can only be used once in a module definition.")

    # Setting __provides__ directly is enough for a module: there is no
    # descriptor protocol involved, so no class advisor is needed.
    locals["__provides__"] = Provides(ModuleType,
                                      *_normalizeargs(interfaces))
##############################################################################
#
# Declaration querying support
# XXX: is this a fossil? Nobody calls it, no unit tests exercise it, no
# doctests import it, and the package __init__ doesn't import it.
# (Answer: Versions of zope.container prior to 4.4.0 called this.)
def ObjectSpecification(direct, cls):
    """Provide object specifications

    These combine information for the object and for its classes.

    :param direct: The directly provided declaration for the object.
    :param cls: The object's class.
    :return: A `Provides` combining both.
    """
    return Provides(cls, direct) # pragma: no cover fossil
@_use_c_impl
def getObjectSpecification(ob):
    """Return the combined specification for *ob*.

    Prefers an explicit ``__provides__`` when it is a real
    specification object; otherwise falls back to what the object's
    class implements, or ``_empty`` if even the class is unavailable.
    """
    try:
        provides = ob.__provides__
    except AttributeError:
        provides = None

    if provides is not None:
        if isinstance(provides, SpecificationBase):
            return provides

    try:
        cls = ob.__class__
    except AttributeError:
        # We can't get the class, so just consider provides
        return _empty
    return implementedBy(cls)
@_use_c_impl
def providedBy(ob):
    """
    Return the interfaces provided by *ob*.

    If *ob* is a :class:`super` object, then only interfaces implemented
    by the remainder of the classes in the method resolution order are
    considered. Interfaces directly provided by the object underlying *ob*
    are not.
    """
    # Here we have either a special object, an old-style declaration
    # or a descriptor

    # Try to get __providedBy__
    try:
        if isinstance(ob, super): # Some objects raise errors on isinstance()
            return implementedBy(ob)

        r = ob.__providedBy__
    except AttributeError:
        # Not set yet. Fall back to lower-level thing that computes it
        return getObjectSpecification(ob)

    try:
        # We might have gotten a descriptor from an instance of a
        # class (like an ExtensionClass) that doesn't support
        # descriptors.  We'll make sure we got one by trying to get
        # the only attribute, which all specs have.
        r.extends
    except AttributeError:

        # The object's class doesn't understand descriptors.
        # Sigh. We need to get an object descriptor, but we have to be
        # careful.  We want to use the instance's __provides__, if
        # there is one, but only if it didn't come from the class.
        try:
            r = ob.__provides__
        except AttributeError:
            # No __provides__, so just fall back to implementedBy
            return implementedBy(ob.__class__)

        # We need to make sure we got the __provides__ from the
        # instance. We'll do this by making sure we don't get the same
        # thing from the class:
        try:
            cp = ob.__class__.__provides__
        except AttributeError:
            # The ob doesn't have a class or the class has no
            # provides, assume we're done:
            return r

        if r is cp:
            # Oops, we got the provides from the class. This means
            # the object doesn't have its own. We should use implementedBy
            return implementedBy(ob.__class__)

    return r
@_use_c_impl
class ObjectSpecificationDescriptor(object):
    """Implement the `__providedBy__` attribute

    The `__providedBy__` attribute computes the interfaces provided by
    an object.
    """

    def __get__(self, inst, cls):
        """Get an object specification for an object
        """
        if inst is None:
            # Accessed on the class itself.
            return getObjectSpecification(cls)

        # An instance-specific __provides__ wins over the class's
        # implements spec.
        provides = getattr(inst, '__provides__', None)
        if provides is not None:
            return provides

        return implementedBy(cls)
##############################################################################
def _normalizeargs(sequence, output=None):
    """Normalize declaration arguments

    Normalization arguments might contain Declarations, tuples, or single
    interfaces.

    Anything but individual interfaces or implements specs will be expanded.
    """
    if output is None:
        output = []

    mro = sequence.__class__.__mro__
    if InterfaceClass in mro or Implements in mro:
        # A single interface or an implements spec: keep it as-is.
        output.append(sequence)
    else:
        # Anything else is treated as an iterable and flattened
        # recursively into *output*.
        for item in sequence:
            _normalizeargs(item, output)
    return output
# The canonical empty declaration, shared by everything that declares
# nothing.
_empty = _ImmutableDeclaration()

# The descriptor instance installed as ``__providedBy__`` on classes.
objectSpecificationDescriptor = ObjectSpecificationDescriptor()
|
src/zope/interface/declarations.py
|
codereval_python_data_43
|
Return the C optimization module, if available, otherwise
a false value.
If the optimizations are required but not available, this
raises the ImportError.
This does not say whether they should be used or not.
def _c_optimizations_available():
    """
    Return the C optimization module, if available, otherwise
    a false value.

    If the optimizations are required but not available, this
    raises the ImportError.

    This does not say whether they should be used or not.
    """
    required = _c_optimizations_required()
    try:
        from zope.interface import _zope_interface_coptimizations as c_opt
    except ImportError: # pragma: no cover (only Jython doesn't build extensions)
        if required:
            # Required but missing: let the ImportError escape.
            raise
        return False
    return c_opt
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Support functions for dealing with differences in platforms, including Python
versions and implementations.
This file should have no imports from the rest of zope.interface because it is
used during early bootstrapping.
"""
import os
import sys
import types
# Python 2 / Python 3 compatibility shims.  The branch taken is fixed at
# import time from the interpreter's major version.
if sys.version_info[0] < 3:
    def _normalize_name(name):
        # On Python 2 both str and unicode are accepted; the result is
        # always unicode.
        if isinstance(name, basestring):
            return unicode(name)
        raise TypeError("name must be a regular or unicode string")

    # Python 2 has both new- and old-style classes.
    CLASS_TYPES = (type, types.ClassType)
    STRING_TYPES = (basestring,)
    _BUILTINS = '__builtin__'
    PYTHON3 = False
    PYTHON2 = True
else:
    def _normalize_name(name):
        # On Python 3, ASCII-only bytes are decoded; str passes through.
        if isinstance(name, bytes):
            name = str(name, 'ascii')
        if isinstance(name, str):
            return name
        raise TypeError("name must be a string or ASCII-only bytes")

    CLASS_TYPES = (type,)
    STRING_TYPES = (str,)
    _BUILTINS = 'builtins'
    PYTHON3 = True
    PYTHON2 = False

# True when running on the PyPy interpreter (any Python version).
PYPY = hasattr(sys, 'pypy_version_info')
PYPY2 = PYTHON2 and PYPY
def _skip_under_py3k(test_method):
    """Decorate *test_method* to be skipped on Python 3 (Python-2 only test)."""
    import unittest
    skip = unittest.skipIf(sys.version_info[0] >= 3, "Only on Python 2")
    return skip(test_method)
def _skip_under_py2(test_method):
    """Decorate *test_method* to be skipped on Python 2 (Python-3 only test)."""
    import unittest
    skip = unittest.skipIf(sys.version_info[0] < 3, "Only on Python 3")
    return skip(test_method)
def _c_optimizations_required():
    """
    Return a true value if the C optimizations are required.

    This uses the ``PURE_PYTHON`` variable as documented in `_use_c_impl`:
    they are required exactly when ``PURE_PYTHON`` is set to ``"0"``.
    """
    return os.environ.get('PURE_PYTHON') == "0"
def _c_optimizations_available():
    """
    Return the C optimization module, if available, otherwise
    a false value.

    If the optimizations are required but not available, this
    raises the ImportError.

    This does not say whether they should be used or not.
    """
    required = _c_optimizations_required()
    try:
        from zope.interface import _zope_interface_coptimizations as c_opt
    except ImportError: # pragma: no cover (only Jython doesn't build extensions)
        if required:
            # Required but missing: let the ImportError escape.
            raise
        return False
    return c_opt
def _c_optimizations_ignored():
    """
    The opposite of `_c_optimizations_required`: true when ``PURE_PYTHON``
    is set to anything except ``"0"``.
    """
    pure_env = os.environ.get('PURE_PYTHON')
    if pure_env is None:
        return False
    return pure_env != "0"
def _should_attempt_c_optimizations():
    """
    Return a true value if we should attempt to use the C optimizations.

    This takes into account whether we're on PyPy and the value of the
    ``PURE_PYTHON`` environment variable, as defined in `_use_c_impl`.
    """
    # An explicit requirement (PURE_PYTHON=0) always wins, even on PyPy.
    if _c_optimizations_required():
        return True
    # By default the C extensions are a pessimization on PyPy.
    if hasattr(sys, 'pypy_version_info'):
        return False
    return not _c_optimizations_ignored()
def _use_c_impl(py_impl, name=None, globs=None):
    """
    Decorator. Given an object implemented in Python, with a name like
    ``Foo``, import the corresponding C implementation from
    ``zope.interface._zope_interface_coptimizations`` with the name
    ``Foo`` and use it instead.

    If the ``PURE_PYTHON`` environment variable is set to any value
    other than ``"0"``, or we're on PyPy, ignore the C implementation
    and return the Python version. If the C implementation cannot be
    imported, return the Python version. If ``PURE_PYTHON`` is set to
    0, *require* the C implementation (let the ImportError propagate);
    note that PyPy can import the C implementation in this case (and all
    tests pass).

    In all cases, the Python version is kept available in the module
    globals with the name ``FooPy`` and the name ``FooFallback`` (both
    conventions have been used; the C implementation of some functions
    looks for the ``Fallback`` version, as do some of the Sphinx
    documents).

    Example::

        @_use_c_impl
        class Foo(object):
            ...
    """
    name = name or py_impl.__name__
    # NB: sys._getframe(1) is the *caller's* frame, so the names are
    # published into the module that applied the decorator.
    globs = globs or sys._getframe(1).f_globals

    def find_impl():
        # Single helper deciding which implementation to use.
        if not _should_attempt_c_optimizations():
            return py_impl
        c_opt = _c_optimizations_available()
        if not c_opt: # pragma: no cover (only Jython doesn't build extensions)
            return py_impl
        # Aid post-mortem debugging if the getattr below fails.
        __traceback_info__ = c_opt
        return getattr(c_opt, name)

    c_impl = find_impl()
    # Always make available by the FooPy name and FooFallback
    # name (for testing and documentation)
    globs[name + 'Py'] = py_impl
    globs[name + 'Fallback'] = py_impl
    return c_impl
|
src/zope/interface/_compat.py
|
codereval_python_data_44
|
Return a true value if we should attempt to use the C optimizations.
This takes into account whether we're on PyPy and the value of the
``PURE_PYTHON`` environment variable, as defined in `_use_c_impl`.
def _should_attempt_c_optimizations():
    """
    Return a true value if we should attempt to use the C optimizations.

    This takes into account whether we're on PyPy and the value of the
    ``PURE_PYTHON`` environment variable, as defined in `_use_c_impl`.
    """
    # An explicit requirement (PURE_PYTHON=0) always wins, even on PyPy.
    if _c_optimizations_required():
        return True
    # By default the C extensions are a pessimization on PyPy.
    if hasattr(sys, 'pypy_version_info'):
        return False
    return not _c_optimizations_ignored()
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Support functions for dealing with differences in platforms, including Python
versions and implementations.
This file should have no imports from the rest of zope.interface because it is
used during early bootstrapping.
"""
import os
import sys
import types
# Python 2 / Python 3 compatibility shims.  The branch taken is fixed at
# import time from the interpreter's major version.
if sys.version_info[0] < 3:
    def _normalize_name(name):
        # On Python 2 both str and unicode are accepted; the result is
        # always unicode.
        if isinstance(name, basestring):
            return unicode(name)
        raise TypeError("name must be a regular or unicode string")

    # Python 2 has both new- and old-style classes.
    CLASS_TYPES = (type, types.ClassType)
    STRING_TYPES = (basestring,)
    _BUILTINS = '__builtin__'
    PYTHON3 = False
    PYTHON2 = True
else:
    def _normalize_name(name):
        # On Python 3, ASCII-only bytes are decoded; str passes through.
        if isinstance(name, bytes):
            name = str(name, 'ascii')
        if isinstance(name, str):
            return name
        raise TypeError("name must be a string or ASCII-only bytes")

    CLASS_TYPES = (type,)
    STRING_TYPES = (str,)
    _BUILTINS = 'builtins'
    PYTHON3 = True
    PYTHON2 = False

# True when running on the PyPy interpreter (any Python version).
PYPY = hasattr(sys, 'pypy_version_info')
PYPY2 = PYTHON2 and PYPY
def _skip_under_py3k(test_method):
    """Decorate *test_method* to be skipped on Python 3 (Python-2 only test)."""
    import unittest
    skip = unittest.skipIf(sys.version_info[0] >= 3, "Only on Python 2")
    return skip(test_method)
def _skip_under_py2(test_method):
    """Decorate *test_method* to be skipped on Python 2 (Python-3 only test)."""
    import unittest
    skip = unittest.skipIf(sys.version_info[0] < 3, "Only on Python 3")
    return skip(test_method)
def _c_optimizations_required():
    """
    Return a true value if the C optimizations are required.

    This uses the ``PURE_PYTHON`` variable as documented in `_use_c_impl`:
    they are required exactly when ``PURE_PYTHON`` is set to ``"0"``.
    """
    return os.environ.get('PURE_PYTHON') == "0"
def _c_optimizations_available():
    """
    Return the C optimization module, if available, otherwise
    a false value.

    If the optimizations are required but not available, this
    raises the ImportError.

    This does not say whether they should be used or not.
    """
    required = _c_optimizations_required()
    try:
        from zope.interface import _zope_interface_coptimizations as c_opt
    except ImportError: # pragma: no cover (only Jython doesn't build extensions)
        if required:
            # Required but missing: let the ImportError escape.
            raise
        return False
    return c_opt
def _c_optimizations_ignored():
    """
    The opposite of `_c_optimizations_required`: true when ``PURE_PYTHON``
    is set to anything except ``"0"``.
    """
    pure_env = os.environ.get('PURE_PYTHON')
    if pure_env is None:
        return False
    return pure_env != "0"
def _should_attempt_c_optimizations():
    """
    Return a true value if we should attempt to use the C optimizations.

    This takes into account whether we're on PyPy and the value of the
    ``PURE_PYTHON`` environment variable, as defined in `_use_c_impl`.
    """
    # An explicit requirement (PURE_PYTHON=0) always wins, even on PyPy.
    if _c_optimizations_required():
        return True
    # By default the C extensions are a pessimization on PyPy.
    if hasattr(sys, 'pypy_version_info'):
        return False
    return not _c_optimizations_ignored()
def _use_c_impl(py_impl, name=None, globs=None):
    """
    Decorator. Given an object implemented in Python, with a name like
    ``Foo``, import the corresponding C implementation from
    ``zope.interface._zope_interface_coptimizations`` with the name
    ``Foo`` and use it instead.

    If the ``PURE_PYTHON`` environment variable is set to any value
    other than ``"0"``, or we're on PyPy, ignore the C implementation
    and return the Python version. If the C implementation cannot be
    imported, return the Python version. If ``PURE_PYTHON`` is set to
    0, *require* the C implementation (let the ImportError propagate);
    note that PyPy can import the C implementation in this case (and all
    tests pass).

    In all cases, the Python version is kept available in the module
    globals with the name ``FooPy`` and the name ``FooFallback`` (both
    conventions have been used; the C implementation of some functions
    looks for the ``Fallback`` version, as do some of the Sphinx
    documents).

    Example::

        @_use_c_impl
        class Foo(object):
            ...
    """
    name = name or py_impl.__name__
    # NB: sys._getframe(1) is the *caller's* frame, so the names are
    # published into the module that applied the decorator.
    globs = globs or sys._getframe(1).f_globals

    def find_impl():
        # Single helper deciding which implementation to use.
        if not _should_attempt_c_optimizations():
            return py_impl
        c_opt = _c_optimizations_available()
        if not c_opt: # pragma: no cover (only Jython doesn't build extensions)
            return py_impl
        # Aid post-mortem debugging if the getattr below fails.
        __traceback_info__ = c_opt
        return getattr(c_opt, name)

    c_impl = find_impl()
    # Always make available by the FooPy name and FooFallback
    # name (for testing and documentation)
    globs[name + 'Py'] = py_impl
    globs[name + 'Fallback'] = py_impl
    return c_impl
|
src/zope/interface/_compat.py
|
codereval_python_data_45
|
The opposite of `_c_optimizations_required`.
def _c_optimizations_ignored():
    """
    The opposite of `_c_optimizations_required`: true when ``PURE_PYTHON``
    is set to anything except ``"0"``.
    """
    pure_env = os.environ.get('PURE_PYTHON')
    if pure_env is None:
        return False
    return pure_env != "0"
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Support functions for dealing with differences in platforms, including Python
versions and implementations.
This file should have no imports from the rest of zope.interface because it is
used during early bootstrapping.
"""
import os
import sys
import types
# Python 2 / Python 3 compatibility shims.  The branch taken is fixed at
# import time from the interpreter's major version.
if sys.version_info[0] < 3:
    def _normalize_name(name):
        # On Python 2 both str and unicode are accepted; the result is
        # always unicode.
        if isinstance(name, basestring):
            return unicode(name)
        raise TypeError("name must be a regular or unicode string")

    # Python 2 has both new- and old-style classes.
    CLASS_TYPES = (type, types.ClassType)
    STRING_TYPES = (basestring,)
    _BUILTINS = '__builtin__'
    PYTHON3 = False
    PYTHON2 = True
else:
    def _normalize_name(name):
        # On Python 3, ASCII-only bytes are decoded; str passes through.
        if isinstance(name, bytes):
            name = str(name, 'ascii')
        if isinstance(name, str):
            return name
        raise TypeError("name must be a string or ASCII-only bytes")

    CLASS_TYPES = (type,)
    STRING_TYPES = (str,)
    _BUILTINS = 'builtins'
    PYTHON3 = True
    PYTHON2 = False

# True when running on the PyPy interpreter (any Python version).
PYPY = hasattr(sys, 'pypy_version_info')
PYPY2 = PYTHON2 and PYPY
def _skip_under_py3k(test_method):
    """Decorate *test_method* to be skipped on Python 3 (Python-2 only test)."""
    import unittest
    skip = unittest.skipIf(sys.version_info[0] >= 3, "Only on Python 2")
    return skip(test_method)
def _skip_under_py2(test_method):
    """Decorate *test_method* to be skipped on Python 2 (Python-3 only test)."""
    import unittest
    skip = unittest.skipIf(sys.version_info[0] < 3, "Only on Python 3")
    return skip(test_method)
def _c_optimizations_required():
    """
    Return a true value if the C optimizations are required.

    This uses the ``PURE_PYTHON`` variable as documented in `_use_c_impl`:
    they are required exactly when ``PURE_PYTHON`` is set to ``"0"``.
    """
    return os.environ.get('PURE_PYTHON') == "0"
def _c_optimizations_available():
    """
    Return the C optimization module, if available, otherwise
    a false value.

    If the optimizations are required but not available, this
    raises the ImportError.

    This does not say whether they should be used or not.
    """
    required = _c_optimizations_required()
    try:
        from zope.interface import _zope_interface_coptimizations as c_opt
    except ImportError: # pragma: no cover (only Jython doesn't build extensions)
        if required:
            # Required but missing: let the ImportError escape.
            raise
        return False
    return c_opt
def _c_optimizations_ignored():
    """
    The opposite of `_c_optimizations_required`: true when ``PURE_PYTHON``
    is set to anything except ``"0"``.
    """
    pure_env = os.environ.get('PURE_PYTHON')
    if pure_env is None:
        return False
    return pure_env != "0"
def _should_attempt_c_optimizations():
    """
    Return a true value if we should attempt to use the C optimizations.

    This takes into account whether we're on PyPy and the value of the
    ``PURE_PYTHON`` environment variable, as defined in `_use_c_impl`.
    """
    # An explicit requirement (PURE_PYTHON=0) always wins, even on PyPy.
    if _c_optimizations_required():
        return True
    # By default the C extensions are a pessimization on PyPy.
    if hasattr(sys, 'pypy_version_info'):
        return False
    return not _c_optimizations_ignored()
def _use_c_impl(py_impl, name=None, globs=None):
    """
    Decorator. Given an object implemented in Python, with a name like
    ``Foo``, import the corresponding C implementation from
    ``zope.interface._zope_interface_coptimizations`` with the name
    ``Foo`` and use it instead.

    If the ``PURE_PYTHON`` environment variable is set to any value
    other than ``"0"``, or we're on PyPy, ignore the C implementation
    and return the Python version. If the C implementation cannot be
    imported, return the Python version. If ``PURE_PYTHON`` is set to
    0, *require* the C implementation (let the ImportError propagate);
    note that PyPy can import the C implementation in this case (and all
    tests pass).

    In all cases, the Python version is kept available in the module
    globals with the name ``FooPy`` and the name ``FooFallback`` (both
    conventions have been used; the C implementation of some functions
    looks for the ``Fallback`` version, as do some of the Sphinx
    documents).

    Example::

        @_use_c_impl
        class Foo(object):
            ...
    """
    name = name or py_impl.__name__
    # NB: sys._getframe(1) is the *caller's* frame, so the names are
    # published into the module that applied the decorator.
    globs = globs or sys._getframe(1).f_globals

    def find_impl():
        # Single helper deciding which implementation to use.
        if not _should_attempt_c_optimizations():
            return py_impl
        c_opt = _c_optimizations_available()
        if not c_opt: # pragma: no cover (only Jython doesn't build extensions)
            return py_impl
        # Aid post-mortem debugging if the getattr below fails.
        __traceback_info__ = c_opt
        return getattr(c_opt, name)

    c_impl = find_impl()
    # Always make available by the FooPy name and FooFallback
    # name (for testing and documentation)
    globs[name + 'Py'] = py_impl
    globs[name + 'Fallback'] = py_impl
    return c_impl
|
src/zope/interface/_compat.py
|
codereval_python_data_46
|
Return a true value if the C optimizations are required.
This uses the ``PURE_PYTHON`` variable as documented in `_use_c_impl`.
def _c_optimizations_required():
    """
    Return a true value if the C optimizations are required.

    This uses the ``PURE_PYTHON`` variable as documented in `_use_c_impl`:
    they are required exactly when ``PURE_PYTHON`` is set to ``"0"``.
    """
    return os.environ.get('PURE_PYTHON') == "0"
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Support functions for dealing with differences in platforms, including Python
versions and implementations.
This file should have no imports from the rest of zope.interface because it is
used during early bootstrapping.
"""
import os
import sys
import types
# Python 2 / Python 3 compatibility shims.  The branch taken is fixed at
# import time from the interpreter's major version.
if sys.version_info[0] < 3:
    def _normalize_name(name):
        # On Python 2 both str and unicode are accepted; the result is
        # always unicode.
        if isinstance(name, basestring):
            return unicode(name)
        raise TypeError("name must be a regular or unicode string")

    # Python 2 has both new- and old-style classes.
    CLASS_TYPES = (type, types.ClassType)
    STRING_TYPES = (basestring,)
    _BUILTINS = '__builtin__'
    PYTHON3 = False
    PYTHON2 = True
else:
    def _normalize_name(name):
        # On Python 3, ASCII-only bytes are decoded; str passes through.
        if isinstance(name, bytes):
            name = str(name, 'ascii')
        if isinstance(name, str):
            return name
        raise TypeError("name must be a string or ASCII-only bytes")

    CLASS_TYPES = (type,)
    STRING_TYPES = (str,)
    _BUILTINS = 'builtins'
    PYTHON3 = True
    PYTHON2 = False

# True when running on the PyPy interpreter (any Python version).
PYPY = hasattr(sys, 'pypy_version_info')
PYPY2 = PYTHON2 and PYPY
def _skip_under_py3k(test_method):
    """Decorate *test_method* to be skipped on Python 3 (Python-2 only test)."""
    import unittest
    skip = unittest.skipIf(sys.version_info[0] >= 3, "Only on Python 2")
    return skip(test_method)
def _skip_under_py2(test_method):
    """Decorate *test_method* to be skipped on Python 2 (Python-3 only test)."""
    import unittest
    skip = unittest.skipIf(sys.version_info[0] < 3, "Only on Python 3")
    return skip(test_method)
def _c_optimizations_required():
    """
    Return a true value if the C optimizations are required.

    This uses the ``PURE_PYTHON`` variable as documented in `_use_c_impl`:
    they are required exactly when ``PURE_PYTHON`` is set to ``"0"``.
    """
    return os.environ.get('PURE_PYTHON') == "0"
def _c_optimizations_available():
    """
    Return the C optimization module, if available, otherwise
    a false value.

    If the optimizations are required but not available, this
    raises the ImportError.

    This does not say whether they should be used or not.
    """
    required = _c_optimizations_required()
    try:
        from zope.interface import _zope_interface_coptimizations as c_opt
    except ImportError: # pragma: no cover (only Jython doesn't build extensions)
        if required:
            # Required but missing: let the ImportError escape.
            raise
        return False
    return c_opt
def _c_optimizations_ignored():
    """
    The opposite of `_c_optimizations_required`: true when ``PURE_PYTHON``
    is set to anything except ``"0"``.
    """
    pure_env = os.environ.get('PURE_PYTHON')
    if pure_env is None:
        return False
    return pure_env != "0"
def _should_attempt_c_optimizations():
    """
    Return a true value if we should attempt to use the C optimizations.

    This takes into account whether we're on PyPy and the value of the
    ``PURE_PYTHON`` environment variable, as defined in `_use_c_impl`.
    """
    # An explicit requirement (PURE_PYTHON=0) always wins, even on PyPy.
    if _c_optimizations_required():
        return True
    # By default the C extensions are a pessimization on PyPy.
    if hasattr(sys, 'pypy_version_info'):
        return False
    return not _c_optimizations_ignored()
def _use_c_impl(py_impl, name=None, globs=None):
    """
    Decorator. Given an object implemented in Python, with a name like
    ``Foo``, import the corresponding C implementation from
    ``zope.interface._zope_interface_coptimizations`` with the name
    ``Foo`` and use it instead.

    If the ``PURE_PYTHON`` environment variable is set to any value
    other than ``"0"``, or we're on PyPy, ignore the C implementation
    and return the Python version. If the C implementation cannot be
    imported, return the Python version. If ``PURE_PYTHON`` is set to
    0, *require* the C implementation (let the ImportError propagate);
    note that PyPy can import the C implementation in this case (and all
    tests pass).

    In all cases, the Python version is kept available in the module
    globals with the name ``FooPy`` and the name ``FooFallback`` (both
    conventions have been used; the C implementation of some functions
    looks for the ``Fallback`` version, as do some of the Sphinx
    documents).

    Example::

        @_use_c_impl
        class Foo(object):
            ...
    """
    name = name or py_impl.__name__
    # NB: sys._getframe(1) is the *caller's* frame, so the names are
    # published into the module that applied the decorator.
    globs = globs or sys._getframe(1).f_globals

    def find_impl():
        # Single helper deciding which implementation to use.
        if not _should_attempt_c_optimizations():
            return py_impl
        c_opt = _c_optimizations_available()
        if not c_opt: # pragma: no cover (only Jython doesn't build extensions)
            return py_impl
        # Aid post-mortem debugging if the getattr below fails.
        __traceback_info__ = c_opt
        return getattr(c_opt, name)

    c_impl = find_impl()
    # Always make available by the FooPy name and FooFallback
    # name (for testing and documentation)
    globs[name + 'Py'] = py_impl
    globs[name + 'Fallback'] = py_impl
    return c_impl
|
src/zope/interface/_compat.py
|
codereval_python_data_47
|
Reset the histogram.
Current context is reset to an empty dict.
Bins are reinitialized with the *initial_value*
or with *make_bins()* (depending on the initialization).
def reset(self):
    """Reset the histogram.

    Current context is reset to an empty dict.
    Bins are reinitialized with the *initial_value*
    or with *make_bins()* (depending on the initialization).
    """
    # NOTE(review): these branches assign *self.bins*, but in the
    # Histogram element the structure (which owns *bins* and *edges*)
    # is stored in *self._hist*, and *self._initial_value* is not set
    # anywhere visible -- the final branch would raise AttributeError.
    # Verify against the enclosing class before relying on this.
    if self._make_bins is not None:
        self.bins = self._make_bins()
    elif self._initial_bins is not None:
        self.bins = copy.deepcopy(self._initial_bins)
    else:
        self.bins = hf.init_bins(self.edges, self._initial_value)
    self._cur_context = {}
"""Histogram structure *histogram* and element *Histogram*."""
import copy
import lena.context
import lena.core
import lena.flow
import lena.math
from . import hist_functions as hf
class histogram():
    """A multidimensional histogram.

    Arbitrary dimension, variable bin size and weights are supported.
    Lower bin edge is included, upper edge is excluded.
    Underflow and overflow values are skipped.
    Bin content can be of arbitrary type,
    which is defined during initialization.

    Examples:

    >>> # a two-dimensional histogram
    >>> hist = histogram([[0, 1, 2], [0, 1, 2]])
    >>> hist.fill([0, 1])
    >>> hist.bins
    [[0, 1], [0, 0]]
    >>> values = [[0, 0], [1, 0], [1, 1]]
    >>> # fill the histogram with values
    >>> for v in values:
    ...     hist.fill(v)
    >>> hist.bins
    [[1, 1], [1, 1]]
    """
    # Note the differences from existing packages.
    # Numpy 1.16 (numpy.histogram): all but the last
    # (righthand-most) bin is half-open.
    # This histogram class has bin limits as in ROOT
    # (but without overflow and underflow).
    # Numpy: the first element of the range must be less than or equal to the second.
    # This histogram requires strictly increasing edges.
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
    # https://root.cern.ch/root/htmldoc/guides/users-guide/Histograms.html#bin-numbering

    def __init__(self, edges, bins=None, initial_value=0):
        """*edges* is a sequence of one-dimensional arrays,
        each containing strictly increasing bin edges.

        Histogram's bins by default
        are initialized with *initial_value*.
        It can be any object that supports addition with *weight*
        during *fill* (but that is not necessary
        if you don't plan to fill the histogram).
        If the *initial_value* is compound and requires special copying,
        create initial bins yourself (see :func:`.init_bins`).

        A histogram can be created from existing *bins* and *edges*.
        In this case a simple check of the shape of *bins* is done
        (raising :exc:`.LenaValueError` if failed).

        **Attributes**

        :attr:`edges` is a list of edges on each dimension.
        Edges mark the borders of the bin.
        Edges along each dimension are one-dimensional lists,
        and the multidimensional bin is the result of all intersections
        of one-dimensional edges.
        For example, a 3-dimensional histogram has edges of the form
        *[x_edges, y_edges, z_edges]*,
        and the 0th bin has borders
        *((x[0], x[1]), (y[0], y[1]), (z[0], z[1]))*.

        Index in the edges is a tuple, where a given position corresponds
        to a dimension, and the content at that position
        to the bin along that dimension.
        For example, index *(0, 1, 3)* corresponds to the bin
        with lower edges *(x[0], y[1], z[3])*.

        :attr:`bins` is a list of nested lists.
        Same index as for edges can be used to get bin content:
        bin at *(0, 1, 3)* can be obtained as *bins[0][1][3]*.
        Most nested arrays correspond to highest
        (further from x) coordinates.
        For example, for a 3-dimensional histogram bins equal to
        *[[[1, 1], [0, 0]], [[0, 0], [0, 0]]]*
        mean that the only filled bins are those
        where x and y indices are 0, and z index is 0 and 1.

        :attr:`dim` is the dimension of a histogram
        (length of its *edges* for a multidimensional histogram).

        If subarrays of *edges* are not increasing
        or if any of them has length less than 2,
        :exc:`.LenaValueError` is raised.

        .. admonition:: Programmer's note

            one- and multidimensional histograms
            have different *bins* and *edges* format.
            To be unified, 1-dimensional edges should be
            nested in a list (like *[[1, 2, 3]]*).
            Instead, they are simply the x-edges list,
            because it is more intuitive and one-dimensional histograms
            are used more often.
            To unify the interface for bins and edges in your code,
            use :func:`.unify_1_md` function.
        """
        # todo: allow creation of *edges* from tuples
        # (without lena.math.mesh). Allow bin_size in this case.
        # Raises LenaValueError for non-increasing or too short edges.
        hf.check_edges_increasing(edges)
        self.edges = edges
        # The scale (integral) is computed lazily in scale().
        self._scale = None
        # A nested (iterable) first edge means multidimensional edges.
        if hasattr(edges[0], "__iter__"):
            self.dim = len(edges)
        else:
            self.dim = 1
        # todo: add a kwarg no_check=False to disable bins testing
        if bins is None:
            self.bins = hf.init_bins(self.edges, initial_value)
        else:
            self.bins = bins
            # We can't make scale for an arbitrary histogram,
            # because it may contain compound values.
            # self._scale = self.make_scale()
            wrong_bins_error = lena.core.LenaValueError(
                "bins of incorrect shape given, {}".format(bins)
            )
            # Only the outermost length is checked, not the deep shape.
            if self.dim == 1:
                if len(bins) != len(edges) - 1:
                    raise wrong_bins_error
            else:
                if len(bins) != len(edges[0]) - 1:
                    raise wrong_bins_error
        if self.dim > 1:
            self.ranges = [(axis[0], axis[-1]) for axis in edges]
            self.nbins = [len(axis) - 1 for axis in edges]
        else:
            # One-dimensional edges are a plain list of numbers.
            self.ranges = [(edges[0], edges[-1])]
            self.nbins = [len(edges)-1]

    def __eq__(self, other):
        """Two histograms are equal, if and only if they have
        equal bins and equal edges.

        If *other* is not a :class:`.histogram`, return ``False``.

        Note that floating numbers should be compared
        approximately (using :func:`math.isclose`).
        """
        if not isinstance(other, histogram):
            # in Python comparison between different types is allowed
            return False
        return self.bins == other.bins and self.edges == other.edges

    def fill(self, coord, weight=1):
        """Fill histogram at *coord* with the given *weight*.

        Coordinates outside the histogram edges are ignored.
        """
        indices = hf.get_bin_on_value(coord, self.edges)
        subarr = self.bins
        # Descend into the nested bins following all indices but the last.
        for ind in indices[:-1]:
            # underflow
            if ind < 0:
                return
            try:
                subarr = subarr[ind]
            # overflow
            except IndexError:
                return
        ind = indices[-1]
        # underflow
        if ind < 0:
            return
        # fill
        try:
            subarr[ind] += weight
        # overflow on the last index: silently ignore, as documented
        except IndexError:
            return

    def __repr__(self):
        return "histogram({}, bins={})".format(self.edges, self.bins)

    def scale(self, other=None, recompute=False):
        """Compute or set scale (integral of the histogram).

        If *other* is ``None``, return scale of this histogram.
        If its scale was not computed before,
        it is computed and stored for subsequent use
        (unless explicitly asked to *recompute*).
        Note that after changing (filling) the histogram
        one must explicitly recompute the scale
        if it was computed before.

        If a float *other* is provided, rescale self to *other*.

        Histograms with scale equal to zero can't be rescaled.
        :exc:`.LenaValueError` is raised if one tries to do that.
        """
        # see graph.scale comments why this is called simply "scale"
        # (not set_scale, get_scale, etc.)
        if other is None:
            # return scale (cached unless *recompute* is requested)
            if self._scale is None or recompute:
                self._scale = hf.integral(
                    *hf.unify_1_md(self.bins, self.edges)
                )
            return self._scale
        else:
            # rescale from other
            scale = self.scale()
            if scale == 0:
                raise lena.core.LenaValueError(
                    "can not rescale histogram with zero scale"
                )
            # multiply every bin by other/scale
            self.bins = lena.math.md_map(lambda binc: binc*float(other) / scale,
                                         self.bins)
            self._scale = other
            return None

    def _update_context(self, context):
        """Update *context* with the properties of this histogram.

        *context.histogram* is updated with "dim", "nbins"
        and "ranges" with values for this histogram.
        If this histogram has a computed scale, it is also added
        to the context.

        Called on "destruction" of the histogram structure (for example,
        in :class:`.ToCSV`). See graph._update_context for more details.
        """
        hist_context = {
            "dim": self.dim,
            "nbins": self.nbins,
            "ranges": self.ranges
        }
        # only a scale that was already computed is reported
        if self._scale is not None:
            hist_context["scale"] = self._scale
        lena.context.update_recursively(context, {"histogram": hist_context})
class Histogram():
    """An element to produce histograms."""

    def __init__(self, edges, bins=None, make_bins=None, initial_value=0):
        """*edges*, *bins* and *initial_value* have the same meaning
        as during creation of a :class:`histogram`.

        *make_bins* is a function without arguments
        that creates new bins
        (it will be called during :meth:`__init__` and :meth:`reset`).
        *initial_value* in this case is ignored, but bin check is made.

        If both *bins* and *make_bins* are provided,
        :exc:`.LenaTypeError` is raised.
        """
        # Check for conflicting arguments before doing any real work
        # (previously the histogram was constructed first).
        if make_bins is not None and bins is not None:
            raise lena.core.LenaTypeError(
                "either initial bins or make_bins must be provided, "
                "not both: {} and {}".format(bins, make_bins)
            )
        # may be None; used by reset() when no bin factory was given
        self._initial_bins = copy.deepcopy(bins)
        # remembered so that reset() can reinitialize default bins
        self._initial_value = initial_value
        self._make_bins = make_bins
        if make_bins is not None:
            # actually use the bins produced by the factory
            # (the previous code discarded them); the shape check
            # is performed in histogram.__init__
            bins = make_bins()
        # forward initial_value so that default bins honour it
        self._hist = histogram(edges, bins, initial_value)
        self._cur_context = {}

    def fill(self, value):
        """Fill the histogram with *value*.

        *value* can be a *(data, context)* pair.
        Values outside the histogram edges are ignored.
        """
        data, self._cur_context = lena.flow.get_data_context(value)
        self._hist.fill(data)
        # filling with weight is only allowed in histogram structure
        # self._hist.fill(data, weight)

    def compute(self):
        """Yield histogram with context."""
        yield (self._hist, self._cur_context)

    def reset(self):
        """Reset the histogram.

        Current context is reset to an empty dict.
        Bins are reinitialized with the *initial_value*
        or with *make_bins()* (depending on the initialization).
        """
        # The bins live on the contained histogram structure, not on
        # this element (previously nonexistent self.bins/self.edges/
        # self._initial_value were used, raising AttributeError).
        if self._make_bins is not None:
            self._hist.bins = self._make_bins()
        elif self._initial_bins is not None:
            self._hist.bins = copy.deepcopy(self._initial_bins)
        else:
            self._hist.bins = hf.init_bins(self._hist.edges,
                                           self._initial_value)
        self._cur_context = {}
|
lena/structures/histogram.py
|
codereval_python_data_48
|
.. deprecated:: 0.5 in Lena 0.5 to_csv is not used.
Iterables are converted to tables.
Convert graph's points to CSV.
*separator* delimits values, the default is comma.
*header*, if not ``None``, is the first string of the output
(new line is added automatically).
Since a graph can be multidimensional,
for each point first its coordinate is converted to string
(separated by *separator*), then each part of its value.
To convert :class:`Graph` to CSV inside a Lena sequence,
use :class:`lena.output.ToCSV`.
def to_csv(self, separator=",", header=None):
    """.. deprecated:: 0.5 in Lena 0.5 to_csv is not used.
        Iterables are converted to tables.

    Convert graph's points to CSV.

    *separator* delimits values, the default is comma.

    *header*, if not ``None``, is the first string of the output
    (new line is added automatically).

    Since a graph can be multidimensional,
    for each point first its coordinate is converted to string
    (separated by *separator*), then each part of its value.

    To convert :class:`Graph` to CSV inside a Lena sequence,
    use :class:`lena.output.ToCSV`.
    """
    # sort the points first if this graph maintains ordering
    if self._sort:
        self._update()

    def _flatten(point):
        # Flatten a (coord, value) pair into a single list of fields;
        # both parts may be scalars or tuples.
        coord, value = point[0], point[1]
        fields = list(coord) if isinstance(coord, tuple) else [coord]
        if isinstance(value, tuple):
            fields.extend(value)
        else:
            fields.append(value)
        return fields

    rows = [
        separator.join(str(field) for field in _flatten(pt))
        for pt in self.points
    ]
    # if one needs an empty header line, they may provide ""
    prefix = "" if header is None else header + "\n"
    return prefix + "\n".join(rows)
"""A graph is a function at given coordinates."""
import copy
import functools
import operator
import re
import warnings
import lena.core
import lena.context
import lena.flow
class graph():
    """Numeric arrays of equal size."""

    def __init__(self, coords, field_names=("x", "y"), scale=None):
        """This structure generally corresponds
        to the graph of a function
        and represents arrays of coordinates and the function values
        of arbitrary dimensions.
        *coords* is a list of one-dimensional
        coordinate and value sequences (usually lists).
        There is little to no distinction between them,
        and "values" can also be called "coordinates".
        *field_names* provide the meaning of these arrays.
        For example, a 3-dimensional graph could be distinguished
        from a 2-dimensional graph with errors by its fields
        ("x", "y", "z") versus ("x", "y", "error_y").
        Field names don't affect drawing graphs:
        for that :class:`~Variable`-s should be used.
        Default field names,
        provided for the most used 2-dimensional graphs,
        are "x" and "y".
        *field_names* can be a string separated by whitespace
        and/or commas or a tuple of strings, such as ("x", "y").
        *field_names* must have as many elements
        as *coords* and each field name must be unique.
        Otherwise field names are arbitrary.
        Error fields must go after all other coordinates.
        Name of a coordinate error is "error\\_"
        appended by coordinate name. Further error details
        are appended after '_'. They could be arbitrary depending
        on the problem: "low", "high", "low_90%_cl", etc. Example:
        ("E", "time", "error_E_low", "error_time").
        *scale* of the graph is a kind of its norm. It could be
        the integral of the function or its other property.
        A scale of a normalised probability density
        function would be one.
        An initialized *scale* is required if one needs
        to renormalise the graph in :meth:`scale`
        (for example, to plot it with other graphs).
        Coordinates of a function graph would usually be arrays
        of increasing values, which is not required here.
        Neither is it checked that coordinates indeed
        contain one-dimensional numeric values.
        However, non-standard graphs
        will likely lead to errors during plotting
        and will require more programmer's work and caution,
        so use them only if you understand what you are doing.
        A graph can be iterated yielding tuples of numbers
        for each point.
        **Attributes**
        :attr:`coords` is a list \
        of one-dimensional lists of coordinates.
        :attr:`field_names`
        :attr:`dim` is the dimension of the graph,
        that is of all its coordinates without errors.
        In case of incorrect initialization arguments,
        :exc:`~.LenaTypeError` or :exc:`~.LenaValueError` is raised.
        .. versionadded:: 0.5
        """
        if not coords:
            raise lena.core.LenaValueError(
                "coords must be a non-empty sequence "
                "of coordinate sequences"
            )
        # require coords to be of the same size
        pt_len = len(coords[0])
        for arr in coords[1:]:
            if len(arr) != pt_len:
                raise lena.core.LenaValueError(
                    "coords must have subsequences of equal lengths"
                )
        # Unicode (Python 2) field names would be just bad,
        # so we don't check for it here.
        if isinstance(field_names, str):
            # split(', ') won't work.
            # From https://stackoverflow.com/a/44785447/952234:
            # \s stands for whitespace.
            field_names = tuple(re.findall(r'[^,\s]+', field_names))
        elif not isinstance(field_names, tuple):
            # a tuple (not a list) is required for uniformity
            # and cheap comparisons
            raise lena.core.LenaTypeError(
                "field_names must be a string or a tuple"
            )
        if len(field_names) != len(coords):
            raise lena.core.LenaValueError(
                "field_names must have the same size as coords"
            )
        if len(set(field_names)) != len(field_names):
            raise lena.core.LenaValueError(
                "field_names contains duplicates"
            )
        self.coords = coords
        self._scale = scale
        # field_names are better than fields,
        # because they are unambiguous (as in namedtuple).
        self.field_names = field_names
        # error field naming is "error_x_low" (like in ROOT).
        # May raise LenaValueError for malformed error fields.
        self._parsed_error_names = self._parse_error_names(field_names)
        # dimension counts only proper coordinates, not error fields
        dim = len(field_names) - len(self._parsed_error_names)
        self._coord_names = field_names[:dim]
        self.dim = dim

    def __eq__(self, other):
        """Two graphs are equal, if and only if they have
        equal coordinates, field names and scales.
        If *other* is not a :class:`.graph`, return ``False``.
        Note that floating numbers should be compared
        approximately (using :func:`math.isclose`).
        Therefore this comparison may give false negatives.
        """
        if not isinstance(other, graph):
            # in Python comparison between different types is allowed
            return False
        return (self.coords == other.coords and self._scale == other._scale
                and self.field_names == other.field_names)

    def _get_err_indices(self, coord_name):
        """Get error indices corresponding to a coordinate."""
        err_indices = []
        dim = self.dim
        for ind, err in enumerate(self._parsed_error_names):
            if err[1] == coord_name:
                # error fields are stored after the dim coordinates
                err_indices.append(ind + dim)
        return err_indices

    def __iter__(self):
        """Iterate graph coords one by one."""
        for val in zip(*self.coords):
            yield val

    def __repr__(self):
        return """graph({}, field_names={}, scale={})""".format(
            self.coords, self.field_names, self._scale
        )

    def scale(self, other=None):
        """Get or set the scale of the graph.
        If *other* is ``None``, return the scale of this graph.
        If a numeric *other* is provided, rescale to that value.
        If the graph has unknown or zero scale,
        rescaling that will raise :exc:`~.LenaValueError`.
        To get meaningful results, graph's fields are used.
        Only the last coordinate is rescaled.
        For example, if the graph has *x* and *y* coordinates,
        then *y* will be rescaled, and for a 3-dimensional graph
        *z* will be rescaled.
        All errors are rescaled together with their coordinate.
        """
        # this method is called scale() for uniformity with histograms.
        # The graph is modified in place: creating a new graph
        # only to change the scale would be redundant.
        if other is None:
            return self._scale
        if not self._scale:
            raise lena.core.LenaValueError(
                "can't rescale a graph with zero or unknown scale"
            )
        last_coord_ind = self.dim - 1
        last_coord_name = self.field_names[last_coord_ind]
        # rescale the last coordinate together with its errors
        last_coord_indices = ([last_coord_ind] +
                              self._get_err_indices(last_coord_name))
        # float() protects from Python 2 integer division.
        # Integer-valued graphs become floating, which is fine.
        rescale = float(other) / self._scale
        mul = operator.mul
        partial = functools.partial
        # partial(mul, rescale) is measurably faster than
        # an equivalent lambda in map()
        for ind, arr in enumerate(self.coords):
            if ind in last_coord_indices:
                # Python lists are faster than arrays here,
                # https://stackoverflow.com/a/62399645/952234
                # (taking a value from an array creates
                # a Python object each time)
                self.coords[ind] = list(map(partial(mul, rescale), arr))
        self._scale = other
        # as suggested in PEP 8
        return None

    def _parse_error_names(self, field_names):
        """Parse error fields into ("error", coord, suffix, index) tuples.

        *field_names* is a parameter for easier testing;
        usually the object's field_names are used.
        Raises LenaValueError for misplaced or unmatched error fields.
        """
        errors = []
        # collect all error fields and check that they are
        # strictly after other fields
        in_error_fields = False
        # there is at least one field
        last_coord_ind = 0
        for ind, field in enumerate(field_names):
            if field.startswith("error_"):
                in_error_fields = True
                errors.append((field, ind))
            else:
                last_coord_ind = ind
                if in_error_fields:
                    raise lena.core.LenaValueError(
                        "errors must go after coordinate fields"
                    )
        coords = set(field_names[:last_coord_ind+1])
        parsed_errors = []
        for err, ind in errors:
            err_main = err[6:]  # all after "error_"
            err_coords = []
            err_tail = ""
            for coord in coords:
                if err_main == coord or err_main.startswith(coord + "_"):
                    err_coords.append(coord)
                    err_tail = err_main[len(coord)+1:]
            if not err_coords:
                raise lena.core.LenaValueError(
                    "no coordinate corresponding to {} given".format(err)
                )
            elif len(err_coords) > 1:
                raise lena.core.LenaValueError(
                    "ambiguous error " + err +
                    " corresponding to several coordinates given"
                )
            # "error" may be redundant, but it is explicit.
            parsed_errors.append(("error", err_coords[0], err_tail, ind))
        return parsed_errors

    def _update_context(self, context):
        """Update *context* with the properties of this graph.
        *context.error* is appended with indices of errors.
        Example subcontext for a graph with fields "E,t,error_E_low":
        {"error": {"x_low": {"index": 2}}}.
        Note that error names are called "x", "y" and "z"
        (this corresponds to first three coordinates,
        if they are present), which allows to simplify plotting.
        Existing values are not removed
        from *context.value* and its subcontexts.
        Called on "destruction" of the graph (for example,
        in :class:`.ToCSV`). By destruction we mean conversion
        to another structure (like text) in the flow.
        The graph object is not really destroyed in this process.
        """
        # this method is private, because we encourage users to yield
        # graphs into the flow and process them with ToCSV element
        # (not manually).
        if not self._parsed_error_names:
            # no error fields present
            return
        xyz_coord_names = self._coord_names[:3]
        for name, coord_name in zip(["x", "y", "z"], xyz_coord_names):
            for err in self._parsed_error_names:
                if err[1] == coord_name:
                    error_ind = err[3]
                    if err[2]:
                        # add error suffix
                        error_name = name + "_" + err[2]
                    else:
                        error_name = name
                    lena.context.update_recursively(
                        context,
                        "error.{}.index".format(error_name),
                        # error can correspond both to variable and
                        # value, so we put it outside value.
                        error_ind
                    )
# used in deprecated Graph
def _rescale_value(rescale, value):
    """Multiply the data part of *value* by *rescale*.

    *value* may be a (data, context) pair; only the data part
    (assumed numeric) is used.
    """
    return rescale * lena.flow.get_data(value)
class Graph(object):
    """
    .. deprecated:: 0.5
    use :class:`graph`.
    This class may be used in the future,
    but with a changed interface.
    Function at given coordinates (arbitrary dimensions).
    Graph points can be set during the initialization and
    during :meth:`fill`. It can be rescaled (producing a new :class:`Graph`).
    A point is a tuple of *(coordinate, value)*, where both *coordinate*
    and *value* can be tuples of numbers.
    *Coordinate* corresponds to a point in N-dimensional space,
    while *value* is some function's value at this point
    (the function can take a value in M-dimensional space).
    Coordinate and value dimensions must be the same for all points.
    One can get graph points as :attr:`Graph.points` attribute.
    They will be sorted each time before return
    if *sort* was set to ``True``.
    An attempt to change points
    (use :attr:`Graph.points` on the left of '=')
    will raise Python's :exc:`AttributeError`.
    """
    def __init__(self, points=None, context=None, scale=None, sort=True):
        """*points* is an array of *(coordinate, value)* tuples.
        *context* is the same as the most recent context
        during *fill*. Use it to provide a context
        when initializing a :class:`Graph` from existing points.
        *scale* sets the scale of the graph.
        It is used during plotting if rescaling is needed.
        Graph coordinates are sorted by default.
        This is usually needed to plot graphs of functions.
        If you need to keep the order of insertion, set *sort* to ``False``.
        By default, sorting is done using standard Python
        lists and functions. You can disable *sort* and provide your own
        sorting container for *points*.
        Some implementations are compared
        `here <http://www.grantjenks.com/docs/sortedcontainers/performance.html>`_.
        Note that a rescaled graph uses a default list.
        Note that :class:`Graph` does not reduce data.
        All filled values will be stored in it.
        To reduce data, use histograms.
        """
        warnings.warn("Graph is deprecated since Lena 0.5. Use graph.",
                      DeprecationWarning, stacklevel=2)
        self._points = points if points is not None else []
        # todo: add some sanity checks for points
        self._scale = scale
        self._init_context = {"scale": scale}
        if context is None:
            self._cur_context = {}
        elif not isinstance(context, dict):
            raise lena.core.LenaTypeError(
                "context must be a dict, {} provided".format(context)
            )
        else:
            self._cur_context = context
        self._sort = sort
        # todo: probably, scale from context is not needed.
        ## probably this function is not needed.
        ## it can't be copied, graphs won't be possible to compare.
        # *rescale_value* is a function, which can be used to scale
        # complex graph values.
        # It must accept a rescale parameter and the value at a data point.
        # By default, it is multiplication of rescale and the value
        # (which must be a number).
        # if rescale_value is None:
        #     self._rescale_value = _rescale_value
        self._rescale_value = _rescale_value
        # sort points and set self._context right away
        self._update()
    def fill(self, value):
        """Fill the graph with *value*.
        *Value* can be a *(data, context)* tuple.
        *Data* part must be a *(coordinates, value)* pair,
        where both coordinates and value are also tuples.
        For example, *value* can contain the principal number
        and its precision.
        """
        # the context part replaces the current context
        point, self._cur_context = lena.flow.get_data_context(value)
        # coords, val = point
        self._points.append(point)
    def request(self):
        """Yield graph with context.
        If *sort* was initialized ``True``, graph points will be sorted.
        """
        # If flow contained *scale* in the context, it is set now.
        self._update()
        yield (self, self._context)
    # compute method shouldn't be in this class,
    # because it is a pure FillRequest.
    # def compute(self):
    #     """Yield graph with context (as in :meth:`request`),
    #     and :meth:`reset`."""
    #     self._update()
    #     yield (self, self._context)
    #     self.reset()
    @property
    def points(self):
        """Get graph points (read only)."""
        # sort points before giving them
        self._update()
        return self._points
    def reset(self):
        """Reset points to an empty list
        and current context to an empty dict.
        """
        self._points = []
        self._cur_context = {}
    def __repr__(self):
        """Return a string with points, scale and sort settings."""
        self._update()
        return ("Graph(points={}, scale={}, sort={})"
                .format(self._points, self._scale, self._sort))
    def scale(self, other=None):
        """Get or set the scale.
        Graph's scale comes from an external source.
        For example, if the graph was computed from a function,
        this may be its integral passed via context during :meth:`fill`.
        Once the scale is set, it is stored in the graph.
        If one attempts to use scale which was not set,
        :exc:`.LenaAttributeError` is raised.
        If *other* is None, return the scale.
        If a ``float`` *other* is provided, rescale to *other*.
        A new graph with the scale equal to *other*
        is returned, the original one remains unchanged.
        Note that in this case its *points* will be a simple list
        and the new graph inherits this graph's *sort* parameter.
        Graphs with scale equal to zero can't be rescaled.
        Attempts to do that raise :exc:`.LenaValueError`.
        """
        if other is None:
            # return scale
            self._update()
            if self._scale is None:
                raise lena.core.LenaAttributeError(
                    "scale must be explicitly set before using that"
                )
            return self._scale
        else:
            # rescale from other
            scale = self.scale()
            if scale == 0:
                raise lena.core.LenaValueError(
                    "can't rescale graph with 0 scale"
                )
            # new_init_context = copy.deepcopy(self._init_context)
            # new_init_context.update({"scale": other})
            rescale = float(other) / scale
            new_points = []
            for coord, val in self._points:
                # probably not needed, because tuples are immutable:
                # make a deep copy so that new values
                # are completely independent from old ones.
                new_points.append((coord, self._rescale_value(rescale, val)))
            # todo: should it inherit context?
            # Probably yes, but watch out scale.
            new_graph = Graph(points=new_points, scale=other,
                              sort=self._sort)
            return new_graph
    def to_csv(self, separator=",", header=None):
        """.. deprecated:: 0.5 in Lena 0.5 to_csv is not used.
        Iterables are converted to tables.
        Convert graph's points to CSV.
        *separator* delimits values, the default is comma.
        *header*, if not ``None``, is the first string of the output
        (new line is added automatically).
        Since a graph can be multidimensional,
        for each point first its coordinate is converted to string
        (separated by *separator*), then each part of its value.
        To convert :class:`Graph` to CSV inside a Lena sequence,
        use :class:`lena.output.ToCSV`.
        """
        if self._sort:
            self._update()
        def unpack_pt(pt):
            # flatten a point: coordinate parts first, then value parts
            coord = pt[0]
            value = pt[1]
            if isinstance(coord, tuple):
                unpacked = list(coord)
            else:
                unpacked = [coord]
            if isinstance(value, tuple):
                unpacked += list(value)
            else:
                unpacked.append(value)
            return unpacked
        def pt_to_str(pt, separ):
            return separ.join([str(val) for val in unpack_pt(pt)])
        if header is not None:
            # if one needs an empty header line, they may provide ""
            lines = header + "\n"
        else:
            lines = ""
        lines += "\n".join([pt_to_str(pt, separator) for pt in self.points])
        return lines
    # *context* will be added to graph context.
    # If it contains "scale", :meth:`scale` method will be available.
    # Otherwise, if "scale" is contained in the context
    # during :meth:`fill`, it will be used.
    # In this case it is assumed that this scale
    # is same for all values (only the last filled context is checked).
    # Context from flow takes precedence over the initialized one.
    def _update(self):
        """Sort points if needed, update context."""
        # todo: probably remove this context_scale?
        context_scale = self._cur_context.get("scale")
        if context_scale is not None:
            # this complex check is fine with rescale,
            # because that returns a new graph (this scale unchanged).
            if self._scale is not None and self._scale != context_scale:
                raise lena.core.LenaRuntimeError(
                    "Initialization and context scale differ, "
                    "{} and {} from context {}"
                    .format(self._scale, context_scale, self._cur_context)
                )
            self._scale = context_scale
        if self._sort:
            self._points = sorted(self._points)
        self._context = copy.deepcopy(self._cur_context)
        self._context.update(self._init_context)
        # why this? Not *graph.scale*?
        self._context.update({"scale": self._scale})
        # self._context.update(lena.context.make_context(self, "_scale"))
        # todo: make this check during fill. Probably initialize self._dim
        # with kwarg dim. (dim of coordinates or values?)
        if self._points:
            # check points correctness
            points = self._points
            def coord_dim(coord):
                # scalars count as one-dimensional coordinates
                if not hasattr(coord, "__len__"):
                    return 1
                return len(coord)
            first_coord = points[0][0]
            dim = coord_dim(first_coord)
            same_dim = all(coord_dim(point[0]) == dim for point in points)
            if not same_dim:
                raise lena.core.LenaValueError(
                    "coordinates tuples must have same dimension, "
                    "{} given".format(points)
                )
            self.dim = dim
            self._context["dim"] = self.dim
    def __eq__(self, other):
        """Graphs are equal if their points are equal
        and their scales coincide (or are both unset).
        """
        if not isinstance(other, Graph):
            return False
        if self.points != other.points:
            return False
        if self._scale is None and other._scale is None:
            return True
        try:
            result = self.scale() == other.scale()
        except lena.core.LenaAttributeError:
            # one scale couldn't be computed
            return False
        else:
            return result
|
lena/structures/graph.py
|
codereval_python_data_49
|
Get error indices corresponding to a coordinate.
def _get_err_indices(self, coord_name):
"""Get error indices corresponding to a coordinate."""
err_indices = []
dim = self.dim
for ind, err in enumerate(self._parsed_error_names):
if err[1] == coord_name:
err_indices.append(ind+dim)
return err_indices
"""A graph is a function at given coordinates."""
import copy
import functools
import operator
import re
import warnings
import lena.core
import lena.context
import lena.flow
class graph():
    """Numeric arrays of equal size."""

    def __init__(self, coords, field_names=("x", "y"), scale=None):
        """This structure generally corresponds
        to the graph of a function
        and represents arrays of coordinates and the function values
        of arbitrary dimensions.
        *coords* is a list of one-dimensional
        coordinate and value sequences (usually lists).
        There is little to no distinction between them,
        and "values" can also be called "coordinates".
        *field_names* provide the meaning of these arrays.
        For example, a 3-dimensional graph could be distinguished
        from a 2-dimensional graph with errors by its fields
        ("x", "y", "z") versus ("x", "y", "error_y").
        Field names don't affect drawing graphs:
        for that :class:`~Variable`-s should be used.
        Default field names,
        provided for the most used 2-dimensional graphs,
        are "x" and "y".
        *field_names* can be a string separated by whitespace
        and/or commas or a tuple of strings, such as ("x", "y").
        *field_names* must have as many elements
        as *coords* and each field name must be unique.
        Otherwise field names are arbitrary.
        Error fields must go after all other coordinates.
        Name of a coordinate error is "error\\_"
        appended by coordinate name. Further error details
        are appended after '_'. They could be arbitrary depending
        on the problem: "low", "high", "low_90%_cl", etc. Example:
        ("E", "time", "error_E_low", "error_time").
        *scale* of the graph is a kind of its norm. It could be
        the integral of the function or its other property.
        A scale of a normalised probability density
        function would be one.
        An initialized *scale* is required if one needs
        to renormalise the graph in :meth:`scale`
        (for example, to plot it with other graphs).
        Coordinates of a function graph would usually be arrays
        of increasing values, which is not required here.
        Neither is it checked that coordinates indeed
        contain one-dimensional numeric values.
        However, non-standard graphs
        will likely lead to errors during plotting
        and will require more programmer's work and caution,
        so use them only if you understand what you are doing.
        A graph can be iterated yielding tuples of numbers
        for each point.
        **Attributes**
        :attr:`coords` is a list \
        of one-dimensional lists of coordinates.
        :attr:`field_names`
        :attr:`dim` is the dimension of the graph,
        that is of all its coordinates without errors.
        In case of incorrect initialization arguments,
        :exc:`~.LenaTypeError` or :exc:`~.LenaValueError` is raised.
        .. versionadded:: 0.5
        """
        if not coords:
            raise lena.core.LenaValueError(
                "coords must be a non-empty sequence "
                "of coordinate sequences"
            )
        # require coords to be of the same size
        pt_len = len(coords[0])
        for arr in coords[1:]:
            if len(arr) != pt_len:
                raise lena.core.LenaValueError(
                    "coords must have subsequences of equal lengths"
                )
        # Unicode (Python 2) field names would be just bad,
        # so we don't check for it here.
        if isinstance(field_names, str):
            # split(', ') won't work.
            # From https://stackoverflow.com/a/44785447/952234:
            # \s stands for whitespace.
            field_names = tuple(re.findall(r'[^,\s]+', field_names))
        elif not isinstance(field_names, tuple):
            # a tuple (not a list) is required for uniformity
            # and cheap comparisons
            raise lena.core.LenaTypeError(
                "field_names must be a string or a tuple"
            )
        if len(field_names) != len(coords):
            raise lena.core.LenaValueError(
                "field_names must have the same size as coords"
            )
        if len(set(field_names)) != len(field_names):
            raise lena.core.LenaValueError(
                "field_names contains duplicates"
            )
        self.coords = coords
        self._scale = scale
        # field_names are better than fields,
        # because they are unambiguous (as in namedtuple).
        self.field_names = field_names
        # error field naming is "error_x_low" (like in ROOT).
        # May raise LenaValueError for malformed error fields.
        self._parsed_error_names = self._parse_error_names(field_names)
        # dimension counts only proper coordinates, not error fields
        dim = len(field_names) - len(self._parsed_error_names)
        self._coord_names = field_names[:dim]
        self.dim = dim

    def __eq__(self, other):
        """Two graphs are equal, if and only if they have
        equal coordinates, field names and scales.
        If *other* is not a :class:`.graph`, return ``False``.
        Note that floating numbers should be compared
        approximately (using :func:`math.isclose`).
        Therefore this comparison may give false negatives.
        """
        if not isinstance(other, graph):
            # in Python comparison between different types is allowed
            return False
        return (self.coords == other.coords and self._scale == other._scale
                and self.field_names == other.field_names)

    def _get_err_indices(self, coord_name):
        """Get error indices corresponding to a coordinate."""
        err_indices = []
        dim = self.dim
        for ind, err in enumerate(self._parsed_error_names):
            if err[1] == coord_name:
                # error fields are stored after the dim coordinates
                err_indices.append(ind + dim)
        return err_indices

    def __iter__(self):
        """Iterate graph coords one by one."""
        for val in zip(*self.coords):
            yield val

    def __repr__(self):
        return """graph({}, field_names={}, scale={})""".format(
            self.coords, self.field_names, self._scale
        )

    def scale(self, other=None):
        """Get or set the scale of the graph.
        If *other* is ``None``, return the scale of this graph.
        If a numeric *other* is provided, rescale to that value.
        If the graph has unknown or zero scale,
        rescaling that will raise :exc:`~.LenaValueError`.
        To get meaningful results, graph's fields are used.
        Only the last coordinate is rescaled.
        For example, if the graph has *x* and *y* coordinates,
        then *y* will be rescaled, and for a 3-dimensional graph
        *z* will be rescaled.
        All errors are rescaled together with their coordinate.
        """
        # this method is called scale() for uniformity with histograms.
        # The graph is modified in place: creating a new graph
        # only to change the scale would be redundant.
        if other is None:
            return self._scale
        if not self._scale:
            raise lena.core.LenaValueError(
                "can't rescale a graph with zero or unknown scale"
            )
        last_coord_ind = self.dim - 1
        last_coord_name = self.field_names[last_coord_ind]
        # rescale the last coordinate together with its errors
        last_coord_indices = ([last_coord_ind] +
                              self._get_err_indices(last_coord_name))
        # float() protects from Python 2 integer division.
        # Integer-valued graphs become floating, which is fine.
        rescale = float(other) / self._scale
        mul = operator.mul
        partial = functools.partial
        # partial(mul, rescale) is measurably faster than
        # an equivalent lambda in map()
        for ind, arr in enumerate(self.coords):
            if ind in last_coord_indices:
                # Python lists are faster than arrays here,
                # https://stackoverflow.com/a/62399645/952234
                # (taking a value from an array creates
                # a Python object each time)
                self.coords[ind] = list(map(partial(mul, rescale), arr))
        self._scale = other
        # as suggested in PEP 8
        return None

    def _parse_error_names(self, field_names):
        """Parse error fields into ("error", coord, suffix, index) tuples.

        *field_names* is a parameter for easier testing;
        usually the object's field_names are used.
        Raises LenaValueError for misplaced or unmatched error fields.
        """
        errors = []
        # collect all error fields and check that they are
        # strictly after other fields
        in_error_fields = False
        # there is at least one field
        last_coord_ind = 0
        for ind, field in enumerate(field_names):
            if field.startswith("error_"):
                in_error_fields = True
                errors.append((field, ind))
            else:
                last_coord_ind = ind
                if in_error_fields:
                    raise lena.core.LenaValueError(
                        "errors must go after coordinate fields"
                    )
        coords = set(field_names[:last_coord_ind+1])
        parsed_errors = []
        for err, ind in errors:
            err_main = err[6:]  # all after "error_"
            err_coords = []
            err_tail = ""
            for coord in coords:
                if err_main == coord or err_main.startswith(coord + "_"):
                    err_coords.append(coord)
                    err_tail = err_main[len(coord)+1:]
            if not err_coords:
                raise lena.core.LenaValueError(
                    "no coordinate corresponding to {} given".format(err)
                )
            elif len(err_coords) > 1:
                raise lena.core.LenaValueError(
                    "ambiguous error " + err +
                    " corresponding to several coordinates given"
                )
            # "error" may be redundant, but it is explicit.
            parsed_errors.append(("error", err_coords[0], err_tail, ind))
        return parsed_errors

    def _update_context(self, context):
        """Update *context* with the properties of this graph.
        *context.error* is appended with indices of errors.
        Example subcontext for a graph with fields "E,t,error_E_low":
        {"error": {"x_low": {"index": 2}}}.
        Note that error names are called "x", "y" and "z"
        (this corresponds to first three coordinates,
        if they are present), which allows to simplify plotting.
        Existing values are not removed
        from *context.value* and its subcontexts.
        Called on "destruction" of the graph (for example,
        in :class:`.ToCSV`). By destruction we mean conversion
        to another structure (like text) in the flow.
        The graph object is not really destroyed in this process.
        """
        # this method is private, because we encourage users to yield
        # graphs into the flow and process them with ToCSV element
        # (not manually).
        if not self._parsed_error_names:
            # no error fields present
            return
        xyz_coord_names = self._coord_names[:3]
        for name, coord_name in zip(["x", "y", "z"], xyz_coord_names):
            for err in self._parsed_error_names:
                if err[1] == coord_name:
                    error_ind = err[3]
                    if err[2]:
                        # add error suffix
                        error_name = name + "_" + err[2]
                    else:
                        error_name = name
                    lena.context.update_recursively(
                        context,
                        "error.{}.index".format(error_name),
                        # error can correspond both to variable and
                        # value, so we put it outside value.
                        error_ind
                    )
# used in deprecated Graph
def _rescale_value(rescale, value):
    """Multiply the data part of *value* by *rescale*.

    *value* may be a (data, context) pair; only the data part
    (assumed numeric) is used.
    """
    return rescale * lena.flow.get_data(value)
class Graph(object):
    """
    .. deprecated:: 0.5
        use :class:`graph`.
        This class may be used in the future,
        but with a changed interface.

    Function at given coordinates (arbitrary dimensions).

    Graph points can be set during the initialization and
    during :meth:`fill`. It can be rescaled (producing a new :class:`Graph`).
    A point is a tuple of *(coordinate, value)*, where both *coordinate*
    and *value* can be tuples of numbers.
    *Coordinate* corresponds to a point in N-dimensional space,
    while *value* is some function's value at this point
    (the function can take a value in M-dimensional space).
    Coordinate and value dimensions must be the same for all points.

    One can get graph points as :attr:`Graph.points` attribute.
    They will be sorted each time before return
    if *sort* was set to ``True``.
    An attempt to change points
    (use :attr:`Graph.points` on the left of '=')
    will raise Python's :exc:`AttributeError`.
    """

    def __init__(self, points=None, context=None, scale=None, sort=True):
        """*points* is an array of *(coordinate, value)* tuples.

        *context* is the same as the most recent context
        during *fill*. Use it to provide a context
        when initializing a :class:`Graph` from existing points.

        *scale* sets the scale of the graph.
        It is used during plotting if rescaling is needed.

        Graph coordinates are sorted by default.
        This is usually needed to plot graphs of functions.
        If you need to keep the order of insertion, set *sort* to ``False``.

        By default, sorting is done using standard Python
        lists and functions. You can disable *sort* and provide your own
        sorting container for *points*.
        Some implementations are compared
        `here <http://www.grantjenks.com/docs/sortedcontainers/performance.html>`_.
        Note that a rescaled graph uses a default list.

        Note that :class:`Graph` does not reduce data.
        All filled values will be stored in it.
        To reduce data, use histograms.
        """
        warnings.warn("Graph is deprecated since Lena 0.5. Use graph.",
                      DeprecationWarning, stacklevel=2)
        self._points = points if points is not None else []
        # todo: add some sanity checks for points
        self._scale = scale
        self._init_context = {"scale": scale}
        if context is None:
            self._cur_context = {}
        elif not isinstance(context, dict):
            raise lena.core.LenaTypeError(
                "context must be a dict, {} provided".format(context)
            )
        else:
            self._cur_context = context
        self._sort = sort
        # todo: probably, scale from context is not needed.
        ## probably this function is not needed.
        ## it can't be copied, graphs won't be possible to compare.
        # *rescale_value* is a function, which can be used to scale
        # complex graph values.
        # It must accept a rescale parameter and the value at a data point.
        # By default, it is multiplication of rescale and the value
        # (which must be a number).
        # if rescale_value is None:
        #     self._rescale_value = _rescale_value
        self._rescale_value = _rescale_value
        self._update()

    def fill(self, value):
        """Fill the graph with *value*.

        *Value* can be a *(data, context)* tuple.
        *Data* part must be a *(coordinates, value)* pair,
        where both coordinates and value are also tuples.
        For example, *value* can contain the principal number
        and its precision.
        """
        point, self._cur_context = lena.flow.get_data_context(value)
        # coords, val = point
        self._points.append(point)

    def request(self):
        """Yield graph with context.

        If *sort* was initialized ``True``, graph points will be sorted.
        """
        # If flow contained *scale* in the context, it is set now.
        self._update()
        yield (self, self._context)

    # compute method shouldn't be in this class,
    # because it is a pure FillRequest.
    # def compute(self):
    #     """Yield graph with context (as in :meth:`request`),
    #     and :meth:`reset`."""
    #     self._update()
    #     yield (self, self._context)
    #     self.reset()

    @property
    def points(self):
        """Get graph points (read only)."""
        # sort points before giving them
        self._update()
        return self._points

    def reset(self):
        """Reset points to an empty list
        and current context to an empty dict.
        """
        self._points = []
        self._cur_context = {}

    def __repr__(self):
        self._update()
        return ("Graph(points={}, scale={}, sort={})"
                .format(self._points, self._scale, self._sort))

    def scale(self, other=None):
        """Get or set the scale.

        Graph's scale comes from an external source.
        For example, if the graph was computed from a function,
        this may be its integral passed via context during :meth:`fill`.
        Once the scale is set, it is stored in the graph.
        If one attempts to use scale which was not set,
        :exc:`.LenaAttributeError` is raised.

        If *other* is None, return the scale.

        If a ``float`` *other* is provided, rescale to *other*.
        A new graph with the scale equal to *other*
        is returned, the original one remains unchanged.
        Note that in this case its *points* will be a simple list
        and the new graph inherits this graph's *sort* parameter.

        Graphs with scale equal to zero can't be rescaled.
        Attempts to do that raise :exc:`.LenaValueError`.
        """
        if other is None:
            # return scale
            self._update()
            if self._scale is None:
                raise lena.core.LenaAttributeError(
                    "scale must be explicitly set before using that"
                )
            return self._scale
        else:
            # rescale from other
            scale = self.scale()
            if scale == 0:
                raise lena.core.LenaValueError(
                    "can't rescale graph with 0 scale"
                )
            # new_init_context = copy.deepcopy(self._init_context)
            # new_init_context.update({"scale": other})
            rescale = float(other) / scale
            new_points = []
            for coord, val in self._points:
                # probably not needed, because tuples are immutable:
                # make a deep copy so that new values
                # are completely independent from old ones.
                new_points.append((coord, self._rescale_value(rescale, val)))
            # todo: should it inherit context?
            # Probably yes, but watch out scale.
            new_graph = Graph(points=new_points, scale=other,
                              sort=self._sort)
            return new_graph

    def to_csv(self, separator=",", header=None):
        """.. deprecated:: 0.5 in Lena 0.5 to_csv is not used.
            Iterables are converted to tables.

        Convert graph's points to CSV.

        *separator* delimits values, the default is comma.

        *header*, if not ``None``, is the first string of the output
        (new line is added automatically).

        Since a graph can be multidimensional,
        for each point first its coordinate is converted to string
        (separated by *separator*), then each part of its value.

        To convert :class:`Graph` to CSV inside a Lena sequence,
        use :class:`lena.output.ToCSV`.
        """
        if self._sort:
            self._update()

        def unpack_pt(pt):
            # flatten one (coordinate, value) pair into a flat list
            coord = pt[0]
            value = pt[1]
            if isinstance(coord, tuple):
                unpacked = list(coord)
            else:
                unpacked = [coord]
            if isinstance(value, tuple):
                unpacked += list(value)
            else:
                unpacked.append(value)
            return unpacked

        def pt_to_str(pt, separ):
            return separ.join([str(val) for val in unpack_pt(pt)])

        if header is not None:
            # if one needs an empty header line, they may provide ""
            lines = header + "\n"
        else:
            lines = ""
        lines += "\n".join([pt_to_str(pt, separator) for pt in self.points])
        return lines

    # *context* will be added to graph context.
    # If it contains "scale", :meth:`scale` method will be available.
    # Otherwise, if "scale" is contained in the context
    # during :meth:`fill`, it will be used.
    # In this case it is assumed that this scale
    # is same for all values (only the last filled context is checked).
    # Context from flow takes precedence over the initialized one.
    def _update(self):
        """Sort points if needed, update context.

        Raise :exc:`.LenaRuntimeError` if the initialization scale
        conflicts with a different scale found in the current context.
        """
        # todo: probably remove this context_scale?
        context_scale = self._cur_context.get("scale")
        if context_scale is not None:
            # this complex check is fine with rescale,
            # because that returns a new graph (this scale unchanged).
            if self._scale is not None and self._scale != context_scale:
                raise lena.core.LenaRuntimeError(
                    "Initialization and context scale differ, "
                    "{} and {} from context {}"
                    .format(self._scale, context_scale, self._cur_context)
                )
            self._scale = context_scale
        if self._sort:
            self._points = sorted(self._points)
        self._context = copy.deepcopy(self._cur_context)
        self._context.update(self._init_context)
        # why this? Not *graph.scale*?
        self._context.update({"scale": self._scale})
        # self._context.update(lena.context.make_context(self, "_scale"))
        # todo: make this check during fill. Probably initialize self._dim
        # with kwarg dim. (dim of coordinates or values?)
        if self._points:
            # check points correctness
            points = self._points

            def coord_dim(coord):
                # a scalar (non-sized) coordinate is one-dimensional
                if not hasattr(coord, "__len__"):
                    return 1
                return len(coord)

            first_coord = points[0][0]
            dim = coord_dim(first_coord)
            same_dim = all(coord_dim(point[0]) == dim for point in points)
            if not same_dim:
                raise lena.core.LenaValueError(
                    "coordinates tuples must have same dimension, "
                    "{} given".format(points)
                )
            self.dim = dim
            self._context["dim"] = self.dim

    def __eq__(self, other):
        """Two Graphs are equal if their points coincide and their
        scales coincide (or are both unset)."""
        if not isinstance(other, Graph):
            return False
        if self.points != other.points:
            return False
        if self._scale is None and other._scale is None:
            return True
        try:
            result = self.scale() == other.scale()
        except lena.core.LenaAttributeError:
            # one scale couldn't be computed
            return False
        else:
            return result
|
lena/structures/graph.py
|
codereval_python_data_50
|
Update *context* with the properties of this graph.
*context.error* is appended with indices of errors.
Example subcontext for a graph with fields "E,t,error_E_low":
{"error": {"x_low": {"index": 2}}}.
Note that error names are called "x", "y" and "z"
(this corresponds to the first three coordinates,
if they are present), which simplifies plotting.
Existing values are not removed
from *context.value* and its subcontexts.
Called on "destruction" of the graph (for example,
in :class:`.ToCSV`). By destruction we mean conversion
to another structure (like text) in the flow.
The graph object is not really destroyed in this process.
def _update_context(self, context):
    """Update *context* with the properties of this graph.

    *context.error* is appended with indices of errors.
    Example subcontext for a graph with fields "E,t,error_E_low":
    {"error": {"x_low": {"index": 2}}}.

    Note that error names are called "x", "y" and "z"
    (this corresponds to the first three coordinates,
    if they are present), which simplifies plotting.

    Existing values are not removed
    from *context.value* and its subcontexts.

    Called on "destruction" of the graph (for example,
    in :class:`.ToCSV`). By destruction we mean conversion
    to another structure (like text) in the flow.
    The graph object is not really destroyed in this process.
    """
    # this method is private, because we encourage users to yield
    # graphs into the flow and process them with ToCSV element
    # (not manually).
    if not self._parsed_error_names:
        # no error fields present
        return
    # NOTE(review): dim is not used below in this method.
    dim = self.dim
    # only the first three coordinates get the generic names x, y, z
    xyz_coord_names = self._coord_names[:3]
    for name, coord_name in zip(["x", "y", "z"], xyz_coord_names):
        for err in self._parsed_error_names:
            # err is a tuple ("error", coord_name, suffix, index),
            # as produced by _parse_error_names
            if err[1] == coord_name:
                error_ind = err[3]
                if err[2]:
                    # add error suffix
                    error_name = name + "_" + err[2]
                else:
                    error_name = name
                lena.context.update_recursively(
                    context,
                    "error.{}.index".format(error_name),
                    # error can correspond both to variable and
                    # value, so we put it outside value.
                    # "value.error.{}.index".format(error_name),
                    error_ind
                )
"""A graph is a function at given coordinates."""
import copy
import functools
import operator
import re
import warnings
import lena.core
import lena.context
import lena.flow
class graph():
    """Numeric arrays of equal size."""

    def __init__(self, coords, field_names=("x", "y"), scale=None):
        """This structure generally corresponds
        to the graph of a function
        and represents arrays of coordinates and the function values
        of arbitrary dimensions.

        *coords* is a list of one-dimensional
        coordinate and value sequences (usually lists).
        There is little to no distinction between them,
        and "values" can also be called "coordinates".

        *field_names* provide the meaning of these arrays.
        For example, a 3-dimensional graph could be distinguished
        from a 2-dimensional graph with errors by its fields
        ("x", "y", "z") versus ("x", "y", "error_y").
        Field names don't affect drawing graphs:
        for that :class:`~Variable`-s should be used.
        Default field names,
        provided for the most used 2-dimensional graphs,
        are "x" and "y".

        *field_names* can be a string separated by whitespace
        and/or commas or a tuple of strings, such as ("x", "y").
        *field_names* must have as many elements
        as *coords* and each field name must be unique.
        Otherwise field names are arbitrary.
        Error fields must go after all other coordinates.
        Name of a coordinate error is "error\\_"
        appended by coordinate name. Further error details
        are appended after '_'. They could be arbitrary depending
        on the problem: "low", "high", "low_90%_cl", etc. Example:
        ("E", "time", "error_E_low", "error_time").

        *scale* of the graph is a kind of its norm. It could be
        the integral of the function or its other property.
        A scale of a normalised probability density
        function would be one.
        An initialized *scale* is required if one needs
        to renormalise the graph in :meth:`scale`
        (for example, to plot it with other graphs).

        Coordinates of a function graph would usually be arrays
        of increasing values, which is not required here.
        Neither is it checked that coordinates indeed
        contain one-dimensional numeric values.
        However, non-standard graphs
        will likely lead to errors during plotting
        and will require more programmer's work and caution,
        so use them only if you understand what you are doing.

        A graph can be iterated yielding tuples of numbers
        for each point.

        **Attributes**

        :attr:`coords` is a list \
        of one-dimensional lists of coordinates.

        :attr:`field_names`

        :attr:`dim` is the dimension of the graph,
        that is of all its coordinates without errors.

        In case of incorrect initialization arguments,
        :exc:`~.LenaTypeError` or :exc:`~.LenaValueError` is raised.

        .. versionadded:: 0.5
        """
        if not coords:
            raise lena.core.LenaValueError(
                "coords must be a non-empty sequence "
                "of coordinate sequences"
            )
        # require coords to be of the same size
        pt_len = len(coords[0])
        for arr in coords[1:]:
            if len(arr) != pt_len:
                raise lena.core.LenaValueError(
                    "coords must have subsequences of equal lengths"
                )

        # Unicode (Python 2) field names would be just bad,
        # so we don't check for it here.
        if isinstance(field_names, str):
            # split(', ') won't work.
            # From https://stackoverflow.com/a/44785447/952234:
            # \s stands for whitespace.
            field_names = tuple(re.findall(r'[^,\s]+', field_names))
        elif not isinstance(field_names, tuple):
            # todo: why field_names are a tuple,
            # while coords are a list?
            # It might be non-Pythonic to require a tuple
            # (to prohibit a list), but it's important
            # for comparisons and uniformity
            raise lena.core.LenaTypeError(
                "field_names must be a string or a tuple"
            )

        if len(field_names) != len(coords):
            # fixed a duplicated "must have" in this error message
            raise lena.core.LenaValueError(
                "field_names must have the same size as coords"
            )
        if len(set(field_names)) != len(field_names):
            raise lena.core.LenaValueError(
                "field_names contains duplicates"
            )

        self.coords = coords
        self._scale = scale
        # field_names are better than fields,
        # because they are unambigous (as in namedtuple).
        self.field_names = field_names

        # decided to use "error_x_low" (like in ROOT).
        # Other versions were x_error (looked better than x_err),
        # but x_err_low looked much better than x_error_low).
        # May raise LenaValueError for malformed error fields.
        # (A try/except that only re-raised the same exception
        # was removed here as a no-op.)
        parsed_error_names = self._parse_error_names(field_names)
        self._parsed_error_names = parsed_error_names

        dim = len(field_names) - len(parsed_error_names)
        self._coord_names = field_names[:dim]
        self.dim = dim

        # todo: add subsequences of coords as attributes
        # with field names.
        # In case if someone wants to create a graph of another function
        # at the same coordinates.
        # Should a) work when we rescale the graph
        # b) not interfere with other fields and methods
        # Probably we won't add methods __del__(n), __add__(*coords),
        # since it might change the scale.

    def __eq__(self, other):
        """Two graphs are equal, if and only if they have
        equal coordinates, field names and scales.

        If *other* is not a :class:`.graph`, return ``False``.

        Note that floating numbers should be compared
        approximately (using :func:`math.isclose`).
        Therefore this comparison may give false negatives.
        """
        if not isinstance(other, graph):
            # in Python comparison between different types is allowed
            return False
        return (self.coords == other.coords and self._scale == other._scale
                and self.field_names == other.field_names)

    def _get_err_indices(self, coord_name):
        """Get error indices corresponding to a coordinate."""
        err_indices = []
        dim = self.dim
        for ind, err in enumerate(self._parsed_error_names):
            # err is ("error", coord_name, suffix, index);
            # error columns follow the dim coordinate columns
            if err[1] == coord_name:
                err_indices.append(ind+dim)
        return err_indices

    def __iter__(self):
        """Iterate graph coords one by one."""
        for val in zip(*self.coords):
            yield val

    def __repr__(self):
        return """graph({}, field_names={}, scale={})""".format(
            self.coords, self.field_names, self._scale
        )

    def scale(self, other=None):
        """Get or set the scale of the graph.

        If *other* is ``None``, return the scale of this graph.

        If a numeric *other* is provided, rescale to that value.
        If the graph has unknown or zero scale,
        rescaling that will raise :exc:`~.LenaValueError`.

        To get meaningful results, graph's fields are used.
        Only the last coordinate is rescaled.
        For example, if the graph has *x* and *y* coordinates,
        then *y* will be rescaled, and for a 3-dimensional graph
        *z* will be rescaled.
        All errors are rescaled together with their coordinate.
        """
        # this method is called scale() for uniformity with histograms.
        # We modify the graph in place, because it would be redundant
        # (not optimal) to create a new graph if we only want
        # to change the scale of the existing one.
        if other is None:
            return self._scale

        if not self._scale:
            raise lena.core.LenaValueError(
                "can't rescale a graph with zero or unknown scale"
            )

        last_coord_ind = self.dim - 1
        last_coord_name = self.field_names[last_coord_ind]
        last_coord_indices = ([last_coord_ind] +
                              self._get_err_indices(last_coord_name))

        # In Python 2 3/2 is 1, so we want to be safe;
        # the downside is that integer-valued graphs
        # will become floating, but that is doubtfully an issue.
        # Remove when/if dropping support for Python 2.
        rescale = float(other) / self._scale

        # partial(mul, rescale) is measurably faster than
        # an equivalent lambda in map (see timeit in repo history)
        mul = operator.mul
        partial = functools.partial

        # rescale arrays of values and errors
        for ind, arr in enumerate(self.coords):
            if ind in last_coord_indices:
                # Python lists are faster than arrays,
                # https://stackoverflow.com/a/62399645/952234
                # (because each time taking a value from an array
                # creates a Python object)
                self.coords[ind] = list(map(partial(mul, rescale),
                                            arr))

        self._scale = other
        # as suggested in PEP 8
        return None

    def _parse_error_names(self, field_names):
        """Parse *field_names* into a list of tuples
        ("error", coord_name, suffix, index).

        Raise :exc:`~.LenaValueError` if an error field precedes
        a coordinate, matches no coordinate, or matches several.
        """
        # field_names is a parameter for easier testing,
        # usually object's field_names are used.
        errors = []
        # collect all error fields and check that they are
        # strictly after other fields
        in_error_fields = False
        # there is at least one field
        last_coord_ind = 0
        for ind, field in enumerate(field_names):
            if field.startswith("error_"):
                in_error_fields = True
                errors.append((field, ind))
            else:
                last_coord_ind = ind
                if in_error_fields:
                    raise lena.core.LenaValueError(
                        "errors must go after coordinate fields"
                    )

        coords = set(field_names[:last_coord_ind+1])
        parsed_errors = []
        for err, ind in errors:
            err_coords = []
            for coord in coords:
                err_main = err[6:]  # all after "error_"
                if err_main == coord or err_main.startswith(coord + "_"):
                    err_coords.append(coord)
                    err_tail = err_main[len(coord)+1:]
            if not err_coords:
                raise lena.core.LenaValueError(
                    "no coordinate corresponding to {} given".format(err)
                )
            elif len(err_coords) > 1:
                raise lena.core.LenaValueError(
                    "ambiguous error " + err +
                    " corresponding to several coordinates given"
                )
            # "error" may be redundant, but it is explicit.
            parsed_errors.append(("error", err_coords[0], err_tail, ind))
        return parsed_errors

    def _update_context(self, context):
        """Update *context* with the properties of this graph.

        *context.error* is appended with indices of errors.
        Example subcontext for a graph with fields "E,t,error_E_low":
        {"error": {"x_low": {"index": 2}}}.

        Note that error names are called "x", "y" and "z"
        (this corresponds to the first three coordinates,
        if they are present), which simplifies plotting.

        Existing values are not removed
        from *context.value* and its subcontexts.

        Called on "destruction" of the graph (for example,
        in :class:`.ToCSV`). By destruction we mean conversion
        to another structure (like text) in the flow.
        The graph object is not really destroyed in this process.
        """
        # this method is private, because we encourage users to yield
        # graphs into the flow and process them with ToCSV element
        # (not manually).
        if not self._parsed_error_names:
            # no error fields present
            return
        # only the first three coordinates get generic names x, y, z
        xyz_coord_names = self._coord_names[:3]
        for name, coord_name in zip(["x", "y", "z"], xyz_coord_names):
            for err in self._parsed_error_names:
                # err is ("error", coord_name, suffix, index)
                if err[1] == coord_name:
                    error_ind = err[3]
                    if err[2]:
                        # add error suffix
                        error_name = name + "_" + err[2]
                    else:
                        error_name = name
                    lena.context.update_recursively(
                        context,
                        "error.{}.index".format(error_name),
                        # error can correspond both to variable and
                        # value, so we put it outside value.
                        # "value.error.{}.index".format(error_name),
                        error_ind
                    )
# used in deprecated Graph
def _rescale_value(rescale, value):
    """Multiply the data part of *value* by *rescale*.

    *value* may be a plain number or a *(data, context)* pair;
    only the data part is used.
    """
    data = lena.flow.get_data(value)
    return rescale * data
class Graph(object):
    """
    .. deprecated:: 0.5
        use :class:`graph`.
        This class may be used in the future,
        but with a changed interface.

    Function at given coordinates (arbitrary dimensions).

    Graph points can be set during the initialization and
    during :meth:`fill`. It can be rescaled (producing a new :class:`Graph`).
    A point is a tuple of *(coordinate, value)*, where both *coordinate*
    and *value* can be tuples of numbers.
    *Coordinate* corresponds to a point in N-dimensional space,
    while *value* is some function's value at this point
    (the function can take a value in M-dimensional space).
    Coordinate and value dimensions must be the same for all points.

    One can get graph points as :attr:`Graph.points` attribute.
    They will be sorted each time before return
    if *sort* was set to ``True``.
    An attempt to change points
    (use :attr:`Graph.points` on the left of '=')
    will raise Python's :exc:`AttributeError`.
    """

    def __init__(self, points=None, context=None, scale=None, sort=True):
        """*points* is an array of *(coordinate, value)* tuples.

        *context* is the same as the most recent context
        during *fill*. Use it to provide a context
        when initializing a :class:`Graph` from existing points.

        *scale* sets the scale of the graph.
        It is used during plotting if rescaling is needed.

        Graph coordinates are sorted by default.
        This is usually needed to plot graphs of functions.
        If you need to keep the order of insertion, set *sort* to ``False``.

        By default, sorting is done using standard Python
        lists and functions. You can disable *sort* and provide your own
        sorting container for *points*.
        Some implementations are compared
        `here <http://www.grantjenks.com/docs/sortedcontainers/performance.html>`_.
        Note that a rescaled graph uses a default list.

        Note that :class:`Graph` does not reduce data.
        All filled values will be stored in it.
        To reduce data, use histograms.
        """
        warnings.warn("Graph is deprecated since Lena 0.5. Use graph.",
                      DeprecationWarning, stacklevel=2)
        self._points = points if points is not None else []
        # todo: add some sanity checks for points
        self._scale = scale
        self._init_context = {"scale": scale}
        if context is None:
            self._cur_context = {}
        elif not isinstance(context, dict):
            raise lena.core.LenaTypeError(
                "context must be a dict, {} provided".format(context)
            )
        else:
            self._cur_context = context
        self._sort = sort
        # todo: probably, scale from context is not needed.
        ## probably this function is not needed.
        ## it can't be copied, graphs won't be possible to compare.
        # *rescale_value* is a function, which can be used to scale
        # complex graph values.
        # It must accept a rescale parameter and the value at a data point.
        # By default, it is multiplication of rescale and the value
        # (which must be a number).
        # if rescale_value is None:
        #     self._rescale_value = _rescale_value
        self._rescale_value = _rescale_value
        self._update()

    def fill(self, value):
        """Fill the graph with *value*.

        *Value* can be a *(data, context)* tuple.
        *Data* part must be a *(coordinates, value)* pair,
        where both coordinates and value are also tuples.
        For example, *value* can contain the principal number
        and its precision.
        """
        point, self._cur_context = lena.flow.get_data_context(value)
        # coords, val = point
        self._points.append(point)

    def request(self):
        """Yield graph with context.

        If *sort* was initialized ``True``, graph points will be sorted.
        """
        # If flow contained *scale* in the context, it is set now.
        self._update()
        yield (self, self._context)

    # compute method shouldn't be in this class,
    # because it is a pure FillRequest.
    # def compute(self):
    #     """Yield graph with context (as in :meth:`request`),
    #     and :meth:`reset`."""
    #     self._update()
    #     yield (self, self._context)
    #     self.reset()

    @property
    def points(self):
        """Get graph points (read only)."""
        # sort points before giving them
        self._update()
        return self._points

    def reset(self):
        """Reset points to an empty list
        and current context to an empty dict.
        """
        self._points = []
        self._cur_context = {}

    def __repr__(self):
        self._update()
        return ("Graph(points={}, scale={}, sort={})"
                .format(self._points, self._scale, self._sort))

    def scale(self, other=None):
        """Get or set the scale.

        Graph's scale comes from an external source.
        For example, if the graph was computed from a function,
        this may be its integral passed via context during :meth:`fill`.
        Once the scale is set, it is stored in the graph.
        If one attempts to use scale which was not set,
        :exc:`.LenaAttributeError` is raised.

        If *other* is None, return the scale.

        If a ``float`` *other* is provided, rescale to *other*.
        A new graph with the scale equal to *other*
        is returned, the original one remains unchanged.
        Note that in this case its *points* will be a simple list
        and the new graph inherits this graph's *sort* parameter.

        Graphs with scale equal to zero can't be rescaled.
        Attempts to do that raise :exc:`.LenaValueError`.
        """
        if other is None:
            # return scale
            self._update()
            if self._scale is None:
                raise lena.core.LenaAttributeError(
                    "scale must be explicitly set before using that"
                )
            return self._scale
        else:
            # rescale from other
            scale = self.scale()
            if scale == 0:
                raise lena.core.LenaValueError(
                    "can't rescale graph with 0 scale"
                )
            # new_init_context = copy.deepcopy(self._init_context)
            # new_init_context.update({"scale": other})
            rescale = float(other) / scale
            new_points = []
            for coord, val in self._points:
                # probably not needed, because tuples are immutable:
                # make a deep copy so that new values
                # are completely independent from old ones.
                new_points.append((coord, self._rescale_value(rescale, val)))
            # todo: should it inherit context?
            # Probably yes, but watch out scale.
            new_graph = Graph(points=new_points, scale=other,
                              sort=self._sort)
            return new_graph

    def to_csv(self, separator=",", header=None):
        """.. deprecated:: 0.5 in Lena 0.5 to_csv is not used.
            Iterables are converted to tables.

        Convert graph's points to CSV.

        *separator* delimits values, the default is comma.

        *header*, if not ``None``, is the first string of the output
        (new line is added automatically).

        Since a graph can be multidimensional,
        for each point first its coordinate is converted to string
        (separated by *separator*), then each part of its value.

        To convert :class:`Graph` to CSV inside a Lena sequence,
        use :class:`lena.output.ToCSV`.
        """
        if self._sort:
            self._update()

        def unpack_pt(pt):
            # flatten one (coordinate, value) pair into a flat list
            coord = pt[0]
            value = pt[1]
            if isinstance(coord, tuple):
                unpacked = list(coord)
            else:
                unpacked = [coord]
            if isinstance(value, tuple):
                unpacked += list(value)
            else:
                unpacked.append(value)
            return unpacked

        def pt_to_str(pt, separ):
            return separ.join([str(val) for val in unpack_pt(pt)])

        if header is not None:
            # if one needs an empty header line, they may provide ""
            lines = header + "\n"
        else:
            lines = ""
        lines += "\n".join([pt_to_str(pt, separator) for pt in self.points])
        return lines

    # *context* will be added to graph context.
    # If it contains "scale", :meth:`scale` method will be available.
    # Otherwise, if "scale" is contained in the context
    # during :meth:`fill`, it will be used.
    # In this case it is assumed that this scale
    # is same for all values (only the last filled context is checked).
    # Context from flow takes precedence over the initialized one.
    def _update(self):
        """Sort points if needed, update context.

        Raise :exc:`.LenaRuntimeError` if the initialization scale
        conflicts with a different scale found in the current context.
        """
        # todo: probably remove this context_scale?
        context_scale = self._cur_context.get("scale")
        if context_scale is not None:
            # this complex check is fine with rescale,
            # because that returns a new graph (this scale unchanged).
            if self._scale is not None and self._scale != context_scale:
                raise lena.core.LenaRuntimeError(
                    "Initialization and context scale differ, "
                    "{} and {} from context {}"
                    .format(self._scale, context_scale, self._cur_context)
                )
            self._scale = context_scale
        if self._sort:
            self._points = sorted(self._points)
        self._context = copy.deepcopy(self._cur_context)
        self._context.update(self._init_context)
        # why this? Not *graph.scale*?
        self._context.update({"scale": self._scale})
        # self._context.update(lena.context.make_context(self, "_scale"))
        # todo: make this check during fill. Probably initialize self._dim
        # with kwarg dim. (dim of coordinates or values?)
        if self._points:
            # check points correctness
            points = self._points

            def coord_dim(coord):
                # a scalar (non-sized) coordinate is one-dimensional
                if not hasattr(coord, "__len__"):
                    return 1
                return len(coord)

            first_coord = points[0][0]
            dim = coord_dim(first_coord)
            same_dim = all(coord_dim(point[0]) == dim for point in points)
            if not same_dim:
                raise lena.core.LenaValueError(
                    "coordinates tuples must have same dimension, "
                    "{} given".format(points)
                )
            self.dim = dim
            self._context["dim"] = self.dim

    def __eq__(self, other):
        """Two Graphs are equal if their points coincide and their
        scales coincide (or are both unset)."""
        if not isinstance(other, Graph):
            return False
        if self.points != other.points:
            return False
        if self._scale is None and other._scale is None:
            return True
        try:
            result = self.scale() == other.scale()
        except lena.core.LenaAttributeError:
            # one scale couldn't be computed
            return False
        else:
            return result
|
lena/structures/graph.py
|
codereval_python_data_51
|
Compute integral (scale for a histogram).
*bins* contain values, and *edges* form the mesh
for the integration.
Their format is defined in :class:`.histogram` description.
def integral(bins, edges):
    """Compute integral (scale for a histogram).

    *bins* contain values, and *edges* form the mesh
    for the integration.
    Their format is defined in :class:`.histogram` description.
    """
    result = 0
    for index, value in iter_bins(bins):
        # width of this bin along every coordinate axis
        widths = [edges[axis][i+1] - edges[axis][i]
                  for axis, i in enumerate(index)]
        # bin volume is the product of its widths
        volume = _reduce(operator.mul, widths, 1)
        result += volume * value
    return result
"""Functions for histograms.
These functions are used for low-level work
with histograms and their contents.
They are not needed for normal usage.
"""
import collections
import copy
import itertools
import operator
import re
import sys
# reduce is a builtin in Python 2 but lives in functools on Python 3;
# _reduce gives a single name for both.
if sys.version_info.major == 3:
    from functools import reduce as _reduce
else:
    _reduce = reduce
import lena.core
from .graph import graph as _graph
class HistCell(collections.namedtuple("HistCell", ["edges", "bin", "index"])):
    """A namedtuple with fields *edges, bin, index*."""
    # The original passed one comma-separated string wrapped in
    # parentheses, which read like a tuple; an explicit list of names
    # is unambiguous and creates the same fields.
    # from Aaron Hall's answer https://stackoverflow.com/a/28568351/952234
    __slots__ = ()
def cell_to_string(
        cell_edges, var_context=None, coord_names=None,
        coord_fmt="{}_lte_{}_lt_{}", coord_join="_", reverse=False):
    """Transform cell edges into a string.

    *cell_edges* is a tuple of pairs *(lower bound, upper bound)*
    for each coordinate.

    *coord_names* is a list of coordinates names.
    If missing, it is derived from *var_context*
    (or defaults to "coord0", "coord1", ...).

    *coord_fmt* is a string,
    which defines how to format individual coordinates.

    *coord_join* is a string, which joins coordinate pairs.

    If *reverse* is True, coordinates are joined in reverse order.
    """
    # todo: do we really need var_context?
    # todo: even if so, why isn't that a {}? Is that dangerous?
    if coord_names is None:
        if var_context is None:
            coord_names = [
                "coord{}".format(num) for num in range(len(cell_edges))
            ]
        elif "combine" in var_context:
            coord_names = [var["name"] for var in var_context["combine"]]
        else:
            coord_names = [var_context["name"]]
    if len(cell_edges) != len(coord_names):
        raise lena.core.LenaValueError(
            "coord_names must have same length as cell_edges, "
            "{} and {} given".format(coord_names, cell_edges)
        )
    parts = [
        coord_fmt.format(low, name, high)
        for (low, high), name in zip(cell_edges, coord_names)
    ]
    if reverse:
        parts = list(reversed(parts))
    return coord_join.join(parts)
def _check_edges_increasing_1d(arr):
if len(arr) <= 1:
raise lena.core.LenaValueError("size of edges should be more than one,"
" {} provided".format(arr))
increasing = (tup[0] < tup[1] for tup in zip(arr, arr[1:]))
if not all(increasing):
raise lena.core.LenaValueError(
"expected strictly increasing values, "
"{} provided".format(arr)
)
def check_edges_increasing(edges):
    """Assure that multidimensional *edges* are increasing.

    If length of *edges* or its subarray is less than 2
    or if some subarray of *edges*
    contains not strictly increasing values,
    :exc:`.LenaValueError` is raised.
    """
    if not len(edges):
        raise lena.core.LenaValueError("edges must be non-empty")
    if not hasattr(edges[0], '__iter__'):
        # one-dimensional edges
        _check_edges_increasing_1d(edges)
        return
    # multidimensional edges: validate each coordinate axis
    for axis in edges:
        # report a too-short axis with the multidimensional wording
        if len(axis) <= 1:
            raise lena.core.LenaValueError(
                "size of edges should be more than one. "
                "{} provided".format(axis)
            )
        _check_edges_increasing_1d(axis)
def get_bin_edges(index, edges):
    """Return edges of the bin for the given *edges* of a histogram.

    In one-dimensional case *index* must be an integer and a tuple
    of *(x_low_edge, x_high_edge)* for that bin is returned.

    In a multidimensional case *index* is a container of numeric indices
    in each dimension.
    A list of bin edges in each dimension is returned.
    """
    # todo: maybe give up this 1- and multidimensional unification
    # and write separate functions for each case.
    if hasattr(edges[0], '__iter__'):
        # multidimensional edges: one (low, high) pair per coordinate
        return [(edges[coord][i], edges[coord][i + 1])
                for coord, i in enumerate(index)]
    # one-dimensional edges
    if hasattr(index, '__iter__'):
        # allow a one-element container as the index
        index = index[0]
    return (edges[index], edges[index + 1])
def get_bin_on_index(index, bins):
    """Return bin corresponding to multidimensional *index*.

    *index* can be a number or a list/tuple.
    If *index* length is less than dimension of *bins*,
    a subarray of *bins* is returned.

    In case of an index error, :exc:`.LenaIndexError` is raised.

    Example:

    >>> from lena.structures import histogram, get_bin_on_index
    >>> hist = histogram([0, 1], [0])
    >>> get_bin_on_index(0, hist.bins)
    0
    >>> get_bin_on_index((0, 1), [[0, 1], [0, 0]])
    1
    >>> get_bin_on_index(0, [[0, 1], [0, 0]])
    [0, 1]
    """
    # a scalar index selects along the first axis only
    if not isinstance(index, (list, tuple)):
        index = [index]
    current = bins
    try:
        # descend one axis per index component
        for i in index:
            current = current[i]
    except IndexError:
        raise lena.core.LenaIndexError(
            "bad index: {}, bins = {}".format(index, bins)
        )
    return current
def get_bin_on_value_1d(val, arr):
    """Return index for value in one-dimensional array.

    *arr* must contain strictly increasing values
    (not necessarily equidistant),
    it is not checked.

    "Linear binary search" is used,
    that is our array search by default assumes
    the array to be split on equidistant steps.

    Example:
    >>> from lena.structures import get_bin_on_value_1d
    >>> arr = [0, 1, 4, 5, 7, 10]
    >>> get_bin_on_value_1d(0, arr)
    0
    >>> get_bin_on_value_1d(4.5, arr)
    2
    >>> # upper range is excluded
    >>> get_bin_on_value_1d(10, arr)
    5
    >>> # underflow
    >>> get_bin_on_value_1d(-10, arr)
    -1
    """
    # may also use numpy.searchsorted
    # https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.searchsorted.html
    # Invariant: the answer lies within [ind_min, ind_max]
    # (underflow/overflow are resolved explicitly inside the loop).
    ind_min = 0
    ind_max = len(arr) - 1
    while True:
        if ind_max - ind_min <= 1:
            # the search interval shrank to one bin: resolve and return
            # lower bound is close
            if val < arr[ind_min]:
                return ind_min - 1
            # upper bound is open
            elif val >= arr[ind_max]:
                return ind_max
            else:
                return ind_min
        if val == arr[ind_min]:
            return ind_min
        if val < arr[ind_min]:
            # underflow: one less than the lowest valid index
            return ind_min - 1
        elif val >= arr[ind_max]:
            # overflow (the upper edge is excluded from the last bin)
            return ind_max
        else:
            # interpolation step: guess the index assuming values
            # between ind_min and ind_max are roughly equidistant
            shift = int(
                (ind_max - ind_min) * (
                    float(val - arr[ind_min]) / (arr[ind_max] - arr[ind_min])
                ))
            ind_guess = ind_min + shift
            if ind_min == ind_guess:
                # guarantee progress when the guess did not move
                ind_min += 1
                continue
            # ind_max is always more that ind_guess,
            # because val < arr[ind_max] (see the formula for shift).
            # This branch is not needed and can't be tested.
            # But for the sake of numerical inaccuracies, let us keep this
            # so that we never get into an infinite loop.
            elif ind_max == ind_guess:
                ind_max -= 1
                continue
            # shrink the interval towards the side containing val
            if val < arr[ind_guess]:
                ind_max = ind_guess
            else:
                ind_min = ind_guess
def get_bin_on_value(arg, edges):
    """Get the bin index for *arg* in a multidimensional array *edges*.

    *arg* is a 1-dimensional array of numbers
    (or a number for 1-dimensional *edges*),
    and corresponds to a point in N-dimensional space.

    *edges* is an array of N-1 dimensional arrays (lists or tuples) of numbers.
    Each 1-dimensional subarray consists of increasing numbers.

    *arg* and *edges* must have the same length
    (otherwise :exc:`.LenaValueError` is raised).
    *arg* and *edges* must be iterable and support *len()*.

    Return list of indices in *edges* corresponding to *arg*.

    If any coordinate is out of its corresponding edge range,
    its index will be ``-1`` for underflow
    or ``len(edge)-1`` for overflow.

    Examples:
    >>> from lena.structures import get_bin_on_value
    >>> edges = [[1, 2, 3], [1, 3.5]]
    >>> get_bin_on_value((1.5, 2), edges)
    [0, 0]
    >>> get_bin_on_value((1.5, 0), edges)
    [0, -1]
    >>> # the upper edge is excluded
    >>> get_bin_on_value((3, 2), edges)
    [2, 0]
    >>> # one-dimensional edges
    >>> edges = [1, 2, 3]
    >>> get_bin_on_value(2, edges)
    [1]
    """
    # a scalar argument means one-dimensional edges
    if not isinstance(arg, (tuple, list)):
        return [get_bin_on_value_1d(arg, edges)]
    if len(arg) != len(edges):
        raise lena.core.LenaValueError(
            "argument should have same dimension as edges. "
            "arg = {}, edges = {}".format(arg, edges)
        )
    # search each coordinate independently
    return [get_bin_on_value_1d(coord, axis)
            for coord, axis in zip(arg, edges)]
def get_example_bin(struct):
    """Return bin with zero index on each axis of the histogram bins.

    For example, if the histogram is two-dimensional, return hist[0][0].

    *struct* can be a :class:`.histogram`
    or an array of bins.
    """
    if isinstance(struct, lena.structures.histogram):
        zero_index = [0] * struct.dim
        return lena.structures.get_bin_on_index(zero_index, struct.bins)
    # a bare (possibly nested) list of bins:
    # follow the first element until a non-list cell is reached
    cell = struct
    while isinstance(cell, list):
        cell = cell[0]
    return cell
def hist_to_graph(hist, make_value=None, get_coordinate="left",
                  field_names=("x", "y"), scale=None):
    """Convert a :class:`.histogram` to a :class:`.graph`.

    *make_value* is a function to set the value of a graph's point.
    By default it is bin content.
    *make_value* accepts a single value (bin content) without context.

    This option could be used to create graph's error bars.
    For example, to create a graph with errors
    from a histogram where bins contain
    a named tuple with fields *mean*, *mean_error* and a context
    one could use

    >>> make_value = lambda bin_: (bin_.mean, bin_.mean_error)

    *get_coordinate* defines what the coordinate
    of a graph point created from a histogram bin will be.
    It can be "left" (default), "right" and "middle".

    *field_names* set field names of the graph. Their number
    must be the same as the dimension of the result.
    For a *make_value* above they would be
    *("x", "y_mean", "y_mean_error")*.

    *scale* becomes the graph's scale (unknown by default).
    If it is ``True``, it uses the histogram scale.

    *hist* must contain only numeric bins (without context)
    or *make_value* must remove context when creating a numeric graph.

    Return the resulting graph.
    """
    ## Could have allowed get_coordinate to be callable
    # (for generality), but 1) first find a use case,
    # 2) histogram bins could be adjusted in the first place.
    # -- don't understand 2.
    # Select how a bin's edge pairs are turned into point coordinates.
    if get_coordinate == "left":
        get_coord = lambda edges: tuple(coord[0] for coord in edges)
    elif get_coordinate == "right":
        get_coord = lambda edges: tuple(coord[1] for coord in edges)
    # *middle* between the two edges, not the *center* of the bin
    # as a whole (because the graph corresponds to a point)
    elif get_coordinate == "middle":
        get_coord = lambda edges: tuple(0.5*(coord[0] + coord[1])
                                        for coord in edges)
    else:
        raise lena.core.LenaValueError(
            'get_coordinate must be one of "left", "right" or "middle"; '
            '"{}" provided'.format(get_coordinate)
        )

    # todo: make_value may be bad design.
    # Maybe allow to change the graph in the sequence.
    # However, make_value allows not to recreate a graph
    # or its coordinates (if that is not needed).
    if isinstance(field_names, str):
        # copied from graph.__init__
        # a comma/whitespace-separated string becomes a tuple of names
        field_names = tuple(re.findall(r'[^,\s]+', field_names))
    elif not isinstance(field_names, tuple):
        raise lena.core.LenaTypeError(
            "field_names must be a string or a tuple"
        )

    # one output column per graph field
    coords = [[] for _ in field_names]
    chain = itertools.chain

    if scale is True:
        scale = hist.scale()

    for value, edges in iter_bins_with_edges(hist.bins, hist.edges):
        coord = get_coord(edges)
        # Since we never use contexts here, it will be optimal
        # to ignore them completely (remove them elsewhere).
        # bin_value = lena.flow.get_data(value)
        bin_value = value
        if make_value is None:
            graph_value = bin_value
        else:
            graph_value = make_value(bin_value)

        # for iteration below
        if not hasattr(graph_value, "__iter__"):
            graph_value = (graph_value,)

        # add each coordinate to respective array
        for arr, coord_ in zip(coords, chain(coord, graph_value)):
            arr.append(coord_)

    return _graph(coords, field_names=field_names, scale=scale)
def init_bins(edges, value=0, deepcopy=False):
    """Initialize cells of the form *edges* with the given *value*.

    Return bins filled with copies of *value*.

    *Value* must be copyable, usual numbers will suit.
    If the value is mutable, use *deepcopy =* ``True``
    (or the content of cells will be identical).

    Examples:

    >>> edges = [[0, 1], [0, 1]]
    >>> # one cell
    >>> init_bins(edges)
    [[0]]
    >>> # no need to use floats,
    >>> # because integers will automatically be cast to floats
    >>> # when used together
    >>> init_bins(edges, 0.0)
    [[0.0]]
    >>> init_bins([[0, 1, 2], [0, 1, 2]])
    [[0, 0], [0, 0]]
    >>> init_bins([0, 1, 2])
    [0, 0]
    """
    def _fill_row(ncells):
        # innermost row: ncells copies of value
        if deepcopy:
            return [copy.deepcopy(value) for _ in range(ncells)]
        return [value] * ncells

    if not isinstance(edges[0], (list, tuple)):
        # one-dimensional edges: n edges give n-1 cells
        return _fill_row(len(edges) - 1)
    if len(edges) == 1:
        # the last (or only) axis of multidimensional edges
        return _fill_row(len(edges[0]) - 1)
    # recurse: one nested structure per cell of the first axis
    return [init_bins(edges[1:], value, deepcopy)
            for _ in range(len(edges[0]) - 1)]
def integral(bins, edges):
    """Compute integral (scale for a histogram).

    *bins* contain values, and *edges* form the mesh
    for the integration.
    Their format is defined in :class:`.histogram` description.
    """
    total = 0
    for index, content in iter_bins(bins):
        # cell volume is the product of its edge lengths
        volume = 1
        for coord, i in enumerate(index):
            volume *= edges[coord][i+1] - edges[coord][i]
        total += volume * content
    return total
def iter_bins(bins):
    """Iterate on *bins*. Yield *(index, bin content)*.

    Edges with higher index are iterated first
    (that is z, then y, then x for a 3-dimensional histogram).
    """
    # if not isinstance(bins, (list, tuple)):
    if not hasattr(bins, '__iter__'):
        # a single cell: empty index prefix
        yield ((), bins)
        return
    for ind, subbins in enumerate(bins):
        # prepend this axis' index to every deeper index
        for sub_ind, val in iter_bins(subbins):
            yield ((ind,) + sub_ind, val)
def iter_bins_with_edges(bins, edges):
    """Generate *(bin content, bin edges)* pairs.

    Bin edges is a tuple, such that
    its item at index i is *(lower bound, upper bound)*
    of the bin at i-th coordinate.

    Examples:

    >>> from lena.math import mesh
    >>> list(iter_bins_with_edges([0, 1, 2], edges=mesh((0, 3), 3)))
    [(0, ((0, 1.0),)), (1, ((1.0, 2.0),)), (2, ((2.0, 3),))]
    >>>
    >>> # 2-dimensional histogram
    >>> list(iter_bins_with_edges(
    ...     bins=[[2]], edges=mesh(((0, 1), (0, 1)), (1, 1))
    ... ))
    [(2, ((0, 1), (0, 1)))]

    .. versionadded:: 0.5
       made public.
    """
    # todo: only a list or also a tuple, an array?
    if not isinstance(edges[0], list):
        # unify one-dimensional edges to the multidimensional form
        edges = [edges]
    nbins = [len(axis) - 1 for axis in edges]
    # walk every cell index of the multidimensional mesh
    for index in itertools.product(*(range(n) for n in nbins)):
        bin_ = lena.structures.get_bin_on_index(index, bins)
        bin_edges = tuple(
            (edges[coord][i], edges[coord][i + 1])
            for coord, i in enumerate(index)
        )
        yield (bin_, bin_edges)
def iter_cells(hist, ranges=None, coord_ranges=None):
    """Iterate cells of a histogram *hist*, possibly in a subrange.

    For each bin, yield a :class:`HistCell`
    containing *bin edges, bin content* and *bin index*.
    The order of iteration is the same as for :func:`iter_bins`.

    *ranges* are the ranges of bin indices to be used
    for each coordinate
    (the lower value is included, the upper value is excluded).

    *coord_ranges* set real coordinate ranges based on histogram edges.
    Obviously, they can be not exactly bin edges.
    If one of the ranges for the given coordinate
    is outside the histogram edges,
    then only existing histogram edges within the range are selected.
    If the coordinate range is completely outside histogram edges,
    nothing is yielded.
    If a lower or upper *coord_range*
    falls within a bin, this bin is yielded.
    Note that if a coordinate range falls on a bin edge,
    the number of generated bins can be unstable
    because of limited float precision.

    *ranges* and *coord_ranges* are tuples of tuples of limits
    in corresponding dimensions.
    For one-dimensional histogram it must be a tuple
    containing a tuple, for example
    *((None, None),)*.

    ``None`` as an upper or lower *range* means no limit
    (*((None, None),)* is equivalent to *((0, len(bins)),)*
    for a 1-dimensional histogram).

    If a *range* index is lower than 0 or higher than possible index,
    :exc:`.LenaValueError` is raised.
    If both *coord_ranges* and *ranges* are provided,
    :exc:`.LenaTypeError` is raised.
    """
    # for bin_ind, bin_ in iter_bins(hist.bins):
    #     yield HistCell(get_bin_edges(bin_ind, hist.edges), bin_, bin_ind)
    # if bins and edges are calculated each time, save the result now
    bins, edges = hist.bins, hist.edges
    # todo: hist.edges must be same
    # for 1- and multidimensional histograms.
    # unify edges to the multidimensional form (a tuple of axes)
    if hist.dim == 1:
        edges = (edges,)
    # translate real-coordinate limits into bin-index limits
    if coord_ranges is not None:
        if ranges is not None:
            raise lena.core.LenaTypeError(
                "only ranges or coord_ranges can be provided, not both"
            )
        ranges = []
        if not isinstance(coord_ranges[0], (tuple, list)):
            coord_ranges = (coord_ranges, )
        for coord, coord_range in enumerate(coord_ranges):
            # todo: (dis?)allow None as an infinite range.
            # todo: raise or transpose unordered coordinates?
            # todo: change the order of function arguments.
            lower_bin_ind = get_bin_on_value_1d(coord_range[0], edges[coord])
            if lower_bin_ind == -1:
                # underflow: clip the lower limit to the first bin
                lower_bin_ind = 0
            upper_bin_ind = get_bin_on_value_1d(coord_range[1], edges[coord])
            max_ind = len(edges[coord])
            if upper_bin_ind == max_ind:
                # overflow: clip the upper limit to the last bin
                upper_bin_ind -= 1
            if lower_bin_ind >= max_ind or upper_bin_ind <= 0:
                # histogram edges are outside the range.
                return
            ranges.append((lower_bin_ind, upper_bin_ind))

    if not ranges:
        # no subrange requested: iterate the full histogram
        ranges = ((None, None),) * hist.dim

    # expand (low, up) limits into explicit index lists per coordinate
    real_ind_ranges = []
    for coord, coord_range in enumerate(ranges):
        low, up = coord_range
        if low is None:
            low = 0
        else:
            # negative indices should not be supported
            if low < 0:
                raise lena.core.LenaValueError(
                    "low must be not less than 0 if provided"
                )
        max_ind = len(edges[coord]) - 1
        if up is None:
            up = max_ind
        else:
            # huge indices should not be supported as well.
            if up > max_ind:
                raise lena.core.LenaValueError(
                    "up must not be greater than len(edges)-1, if provided"
                )
        real_ind_ranges.append(list(range(low, up)))
    indices = list(itertools.product(*real_ind_ranges))
    for ind in indices:
        yield HistCell(get_bin_edges(ind, edges),
                       get_bin_on_index(ind, bins),
                       ind)
def make_hist_context(hist, context):
    """Update a deep copy of *context* with the context
    of a :class:`.histogram` *hist*.

    .. deprecated:: 0.5
       histogram context is updated automatically
       during conversion in :class:`~.output.ToCSV`.
       Use histogram._update_context explicitly if needed.
    """
    # deep copy so that the caller's context stays untouched
    new_context = copy.deepcopy(context)
    new_context.update({
        "histogram": {
            "dim": hist.dim,
            "nbins": hist.nbins,
            "ranges": hist.ranges
        }
    })
    return new_context
def unify_1_md(bins, edges):
    """Unify 1- and multidimensional bins and edges.

    Return a tuple of *(bins, edges)*.
    Bins and multidimensional *edges* return unchanged,
    while one-dimensional *edges* are inserted into a list.
    """
    # if isinstance(edges[0], (list, tuple)):
    if hasattr(edges[0], '__iter__'):
        # already multidimensional
        return (bins, edges)
    # wrap the single axis into a list of axes
    return (bins, [edges])
|
lena/structures/hist_functions.py
|
codereval_python_data_52
|
Test whether *seq* can be converted to a FillRequestSeq.
True only if it is a FillRequest element
or contains at least one such,
and it is not a Source sequence.
def is_fill_request_seq(seq):
"""Test whether *seq* can be converted to a FillRequestSeq.
True only if it is a FillRequest element
or contains at least one such,
and it is not a Source sequence.
"""
if is_source(seq):
return False
is_fcseq = False
if hasattr(seq, "__iter__"):
is_fcseq = any(map(is_fill_request_el, seq))
if is_fill_request_el(seq):
is_fcseq = True
return is_fcseq
"""Check whether a sequence can be converted to a Lena Sequence."""
# otherwise import errors arise
# from . import source
def is_fill_compute_el(obj):
    """Object contains executable methods 'fill' and 'compute'."""
    fill = getattr(obj, "fill", None)
    compute = getattr(obj, "compute", None)
    return callable(fill) and callable(compute)
def is_fill_compute_seq(seq):
    """Test whether *seq* can be converted to a FillComputeSeq.

    True only if it is a FillCompute element
    or contains at least one such,
    and it is not a Source sequence.
    """
    if is_source(seq):
        return False
    found = False
    try:
        # iterate first (as before), then check seq itself,
        # to keep iterator-consumption behaviour unchanged
        found = any(map(is_fill_compute_el, seq))
    except TypeError:
        # seq is non-iterable
        pass
    return found or is_fill_compute_el(seq)
def is_fill_request_el(obj):
    """Object contains executable methods 'fill' and 'request'."""
    fill = getattr(obj, "fill", None)
    request = getattr(obj, "request", None)
    return callable(fill) and callable(request)
def is_fill_request_seq(seq):
    """Test whether *seq* can be converted to a FillRequestSeq.

    True only if it is a FillRequest element
    or contains at least one such,
    and it is not a Source sequence.
    """
    if is_source(seq):
        return False
    # local renamed from the copy-pasted *is_fcseq* of is_fill_compute_seq
    is_frseq = False
    try:
        # EAFP, consistent with is_fill_compute_seq: this also accepts
        # iterables implementing only __getitem__ (no __iter__),
        # which the previous hasattr(seq, "__iter__") check rejected.
        is_frseq = any(map(is_fill_request_el, seq))
    except TypeError:
        # seq is non-iterable
        pass
    if is_fill_request_el(seq):
        is_frseq = True
    return is_frseq
def is_run_el(obj):
    """Object contains executable method 'run'."""
    return callable(getattr(obj, "run", None))
def is_source(seq):
    """Sequence is a Source, if and only if its type is Source."""
    # Otherwise lambdas would be counted as Source,
    # but they must be converted to Sequences.
    # Moreover: this makes Source elements explicit and visible in code.
    # Local import avoids a circular dependency at module load time.
    from . import source
    return isinstance(seq, source.Source)
|
lena/core/check_sequence_type.py
|
codereval_python_data_53
|
Object contains executable methods 'fill' and 'request'.
def is_fill_request_el(obj):
"""Object contains executable methods 'fill' and 'request'."""
return hasattr(obj, "fill") and hasattr(obj, "request") \
and callable(obj.fill) and callable(obj.request)
"""Check whether a sequence can be converted to a Lena Sequence."""
# otherwise import errors arise
# from . import source
def is_fill_compute_el(obj):
"""Object contains executable methods 'fill' and 'compute'."""
return (hasattr(obj, "fill")
and hasattr(obj, "compute")
and callable(obj.fill)
and callable(obj.compute))
def is_fill_compute_seq(seq):
"""Test whether *seq* can be converted to a FillComputeSeq.
True only if it is a FillCompute element
or contains at least one such,
and it is not a Source sequence.
"""
if is_source(seq):
return False
is_fcseq = False
try:
is_fcseq = any(map(is_fill_compute_el, seq))
except TypeError:
# seq is non-iterable
pass
if is_fill_compute_el(seq):
is_fcseq = True
return is_fcseq
def is_fill_request_el(obj):
"""Object contains executable methods 'fill' and 'request'."""
return hasattr(obj, "fill") and hasattr(obj, "request") \
and callable(obj.fill) and callable(obj.request)
def is_fill_request_seq(seq):
"""Test whether *seq* can be converted to a FillRequestSeq.
True only if it is a FillRequest element
or contains at least one such,
and it is not a Source sequence.
"""
if is_source(seq):
return False
is_fcseq = False
if hasattr(seq, "__iter__"):
is_fcseq = any(map(is_fill_request_el, seq))
if is_fill_request_el(seq):
is_fcseq = True
return is_fcseq
def is_run_el(obj):
"""Object contains executable method 'run'."""
return hasattr(obj, "run") and callable(obj.run)
def is_source(seq):
"""Sequence is a Source, if and only if its type is Source."""
# Otherwise lambdas would be counted as Source,
# but they must be converted to Sequences.
# Moreover: this makes Source elements explicit and visible in code.
from . import source
return isinstance(seq, source.Source)
|
lena/core/check_sequence_type.py
|
codereval_python_data_54
|
Object contains executable method 'run'.
def is_run_el(obj):
"""Object contains executable method 'run'."""
return hasattr(obj, "run") and callable(obj.run)
"""Check whether a sequence can be converted to a Lena Sequence."""
# otherwise import errors arise
# from . import source
def is_fill_compute_el(obj):
"""Object contains executable methods 'fill' and 'compute'."""
return (hasattr(obj, "fill")
and hasattr(obj, "compute")
and callable(obj.fill)
and callable(obj.compute))
def is_fill_compute_seq(seq):
"""Test whether *seq* can be converted to a FillComputeSeq.
True only if it is a FillCompute element
or contains at least one such,
and it is not a Source sequence.
"""
if is_source(seq):
return False
is_fcseq = False
try:
is_fcseq = any(map(is_fill_compute_el, seq))
except TypeError:
# seq is non-iterable
pass
if is_fill_compute_el(seq):
is_fcseq = True
return is_fcseq
def is_fill_request_el(obj):
"""Object contains executable methods 'fill' and 'request'."""
return hasattr(obj, "fill") and hasattr(obj, "request") \
and callable(obj.fill) and callable(obj.request)
def is_fill_request_seq(seq):
"""Test whether *seq* can be converted to a FillRequestSeq.
True only if it is a FillRequest element
or contains at least one such,
and it is not a Source sequence.
"""
if is_source(seq):
return False
is_fcseq = False
if hasattr(seq, "__iter__"):
is_fcseq = any(map(is_fill_request_el, seq))
if is_fill_request_el(seq):
is_fcseq = True
return is_fcseq
def is_run_el(obj):
"""Object contains executable method 'run'."""
return hasattr(obj, "run") and callable(obj.run)
def is_source(seq):
"""Sequence is a Source, if and only if its type is Source."""
# Otherwise lambdas would be counted as Source,
# but they must be converted to Sequences.
# Moreover: this makes Source elements explicit and visible in code.
from . import source
return isinstance(seq, source.Source)
|
lena/core/check_sequence_type.py
|
codereval_python_data_55
|
Object contains executable methods 'fill' and 'compute'.
def is_fill_compute_el(obj):
"""Object contains executable methods 'fill' and 'compute'."""
return (hasattr(obj, "fill")
and hasattr(obj, "compute")
and callable(obj.fill)
and callable(obj.compute))
"""Check whether a sequence can be converted to a Lena Sequence."""
# otherwise import errors arise
# from . import source
def is_fill_compute_el(obj):
"""Object contains executable methods 'fill' and 'compute'."""
return (hasattr(obj, "fill")
and hasattr(obj, "compute")
and callable(obj.fill)
and callable(obj.compute))
def is_fill_compute_seq(seq):
"""Test whether *seq* can be converted to a FillComputeSeq.
True only if it is a FillCompute element
or contains at least one such,
and it is not a Source sequence.
"""
if is_source(seq):
return False
is_fcseq = False
try:
is_fcseq = any(map(is_fill_compute_el, seq))
except TypeError:
# seq is non-iterable
pass
if is_fill_compute_el(seq):
is_fcseq = True
return is_fcseq
def is_fill_request_el(obj):
"""Object contains executable methods 'fill' and 'request'."""
return hasattr(obj, "fill") and hasattr(obj, "request") \
and callable(obj.fill) and callable(obj.request)
def is_fill_request_seq(seq):
"""Test whether *seq* can be converted to a FillRequestSeq.
True only if it is a FillRequest element
or contains at least one such,
and it is not a Source sequence.
"""
if is_source(seq):
return False
is_fcseq = False
if hasattr(seq, "__iter__"):
is_fcseq = any(map(is_fill_request_el, seq))
if is_fill_request_el(seq):
is_fcseq = True
return is_fcseq
def is_run_el(obj):
"""Object contains executable method 'run'."""
return hasattr(obj, "run") and callable(obj.run)
def is_source(seq):
"""Sequence is a Source, if and only if its type is Source."""
# Otherwise lambdas would be counted as Source,
# but they must be converted to Sequences.
# Moreover: this makes Source elements explicit and visible in code.
from . import source
return isinstance(seq, source.Source)
|
lena/core/check_sequence_type.py
|
codereval_python_data_56
|
Return a dictionary with items from *d1* not contained in *d2*.
*level* sets the maximum depth of recursion. For infinite recursion,
set that to -1. For level 1,
if a key is present both in *d1* and *d2* but has different values,
it is included into the difference.
See :func:`intersection` for more details.
*d1* and *d2* remain unchanged. However, *d1* or some of its
subdictionaries may be returned directly.
Make a deep copy of the result when appropriate.
.. versionadded:: 0.5
add keyword argument *level*.
def difference(d1, d2, level=-1):
"""Return a dictionary with items from *d1* not contained in *d2*.
*level* sets the maximum depth of recursion. For infinite recursion,
set that to -1. For level 1,
if a key is present both in *d1* and *d2* but has different values,
it is included into the difference.
See :func:`intersection` for more details.
*d1* and *d2* remain unchanged. However, *d1* or some of its
subdictionaries may be returned directly.
Make a deep copy of the result when appropriate.
.. versionadded:: 0.5
add keyword argument *level*.
"""
# can become not dicts during the recursion
if not isinstance(d1, dict) or not isinstance(d2, dict):
return d1
if d1 == d2:
return {}
elif level == 0:
return d1
# some keys differ
result = {}
for key in d1:
if key not in d2:
result[key] = d1[key]
elif d1[key] != d2[key]:
res = difference(d1[key], d2[key], level-1)
# if d2[key] contains all d1[key] elements,
# the difference will be empty
if res:
result[key] = res
return result
"""Functions to work with context (dictionary)."""
import copy
import lena.core
# pylint: disable=invalid-name
# d is a good name for dictionary,
# used in Python documentation for dict.
def contains(d, s):
    """Check that a dictionary *d* contains a subdictionary
    defined by a string *s*.

    True if *d* contains a subdictionary that is represented by *s*.
    Dots in *s* mean nested subdictionaries.
    A string without dots means a key in *d*.

    Example:

    >>> d = {'fit': {'coordinate': 'x'}}
    >>> contains(d, "fit")
    True
    >>> contains(d, "fit.coordinate.x")
    True
    >>> contains(d, "fit.coordinate.y")
    False

    If the most nested element of *d* to be compared with *s*
    is not a string, its string representation is used for comparison.
    See also :func:`str_to_dict`.
    """
    # todo: s can be a list, or a dict?
    keys = s.split(".")
    if len(keys) < 2:
        # no dots: a plain membership test
        return s in d
    # descend along all but the last key
    subdict = d
    for key in keys[:-1]:
        if key not in subdict:
            return False
        subdict = subdict[key]
    last = keys[-1]
    if isinstance(subdict, dict):
        return last in subdict
    # a plain value: compare its string representation
    # (allows e.g. "dim.1" against an integer)
    try:
        return str(subdict) == last
    except Exception:
        return False
def difference(d1, d2, level=-1):
    """Return a dictionary with items from *d1* not contained in *d2*.

    *level* sets the maximum depth of recursion. For infinite recursion,
    set that to -1. For level 1,
    if a key is present both in *d1* and *d2* but has different values,
    it is included into the difference.
    See :func:`intersection` for more details.

    *d1* and *d2* remain unchanged. However, *d1* or some of its
    subdictionaries may be returned directly.
    Make a deep copy of the result when appropriate.

    .. versionadded:: 0.5
       add keyword argument *level*.
    """
    # during recursion the values may be of any type, not only dicts
    if not isinstance(d1, dict) or not isinstance(d2, dict):
        return d1
    if d1 == d2:
        return {}
    if level == 0:
        # recursion depth exhausted: report the whole differing value
        return d1
    # some keys differ: collect them
    result = {}
    for key, val in d1.items():
        if key not in d2:
            result[key] = val
        elif val != d2[key]:
            subdiff = difference(val, d2[key], level - 1)
            # an empty subdifference means d2[key]
            # contains all elements of val
            if subdiff:
                result[key] = subdiff
    return result
def format_context(format_str):
    """Create a function that formats a given string using a context.

    It is recommended to use jinja2.Template.
    Use this function only if you don't have jinja2.

    *format_str* is a Python format string with double braces
    instead of single ones.
    It must contain all non-empty replacement fields,
    and only simplest formatting without attribute lookup.

    Example:

    >>> f = format_context("{{x}}")
    >>> f({"x": 10})
    '10'

    When calling *format_context*, arguments are bound and
    a new function is returned. When called with a context,
    its keys are extracted and formatted in *format_str*.

    Keys can be nested using a dot, for example:

    >>> f = format_context("{{x.y}}_{{z}}")
    >>> f({"x": {"y": 10}, "z": 1})
    '10_1'

    This function does not work with unbalanced braces.
    If a simple check fails, :exc:`.LenaValueError` is raised.
    If *format_str* is not a string, :exc:`.LenaTypeError` is raised.
    All other errors are raised only during formatting.
    If context doesn't contain the needed key,
    :exc:`.LenaKeyError` is raised.

    Note that string formatting can also raise a :exc:`ValueError`,
    so it is recommended to test your formatters before using them.
    """
    if not isinstance(format_str, str):
        raise lena.core.LenaTypeError(
            "format_str must be a string, {} given".format(format_str)
        )
    # prohibit single or unbalanced braces
    if format_str.count('{') != format_str.count('}'):
        raise lena.core.LenaValueError("unbalanced braces in '{}'".format(format_str))
    if '{' in format_str and not '{{' in format_str:
        raise lena.core.LenaValueError(
            "double braces must be used for formatting instead of '{}'"
            .format(format_str)
        )
    # new format: now double braces instead of single ones.
    # but the algorithm may be left unchanged.
    format_str = format_str.replace("{{", "{").replace("}}", "}")
    # Hand-written scanner: rebuild the format string into new_str
    # while collecting the replacement-field names (dotted context
    # keys) into new_args.
    new_str = []
    new_args = []
    prev_char = ''
    ind = 0
    within_field = False
    while ind < len(format_str):
        c = format_str[ind]
        if c != '{' and not within_field:
            # ordinary character outside of a replacement field
            prev_char = c
            new_str.append(c)
            ind += 1
            continue
        # consume the opening brace(s) of a replacement field
        while c == '{' and ind < len(format_str):
            new_str.append(c)
            # literal formatting { are not allowed
            # if prev_char == '{':
            #     prev_char = ''
            #     within_field = False
            # else:
            prev_char = c
            within_field = True
            ind += 1
            # NOTE(review): if the string ends right after this '{'
            # (e.g. "}}{{" after brace collapsing), this index is out
            # of range and raises IndexError — the brace-balance check
            # above does not cover this case. TODO confirm and guard.
            c = format_str[ind]
        if within_field:
            # accumulate the field name up to '}', '!' or ':'
            new_arg = []
            while ind < len(format_str):
                if c in '}!:':
                    prev_char = c
                    within_field = False
                    new_args.append(''.join(new_arg))
                    break
                new_arg.append(c)
                ind += 1
                c = format_str[ind]
    format_str = ''.join(new_str)
    args = new_args
    def _format_context(context):
        # resolve every collected dotted key in the given context
        new_args = []
        for arg in args:
            # LenaKeyError may be raised
            new_args.append(lena.context.get_recursively(context, arg))
        # other exceptions, like ValueError
        # (for bad string formatting) may be raised.
        s = format_str.format(*new_args)
        return s
    return _format_context
# Unique module-private sentinel: lets get_recursively distinguish
# "no default was given" from an explicit default of None.
_sentinel = object()
def get_recursively(d, keys, default=_sentinel):
"""Get value from a dictionary *d* recursively.
*keys* can be a list of simple keys (strings),
a dot-separated string
or a dictionary with at most one key at each level.
A string is split by dots and used as a list.
A list of keys is searched in the dictionary recursively
(it represents nested dictionaries).
If any of them is not found, *default* is returned
if "default" is given,
otherwise :exc:`.LenaKeyError` is raised.
If *keys* is empty, *d* is returned.
Examples:
>>> context = {"output": {"latex": {"name": "x"}}}
>>> get_recursively(context, ["output", "latex", "name"], default="y")
'x'
>>> get_recursively(context, "output.latex.name")
'x'
.. note::
Python's dict.get in case of a missing value
returns ``None`` and never raises an error.
We implement it differently,
because it allows more flexibility.
If *d* is not a dictionary or if *keys* is not a string, a dict
or a list, :exc:`.LenaTypeError` is raised.
If *keys* is a dictionary with more than one key at some level,
:exc:`.LenaValueError` is raised.
"""
has_default = default is not _sentinel
if not isinstance(d, dict):
raise lena.core.LenaTypeError(
"need a dictionary, {} provided".format(d)
)
if isinstance(keys, str):
# here empty substrings are skipped, but this is undefined.
keys = [key for key in keys.split('.') if key]
# todo: create dict_to_list and disable dict keys here?
elif isinstance(keys, dict):
new_keys = []
while keys:
if isinstance(keys, dict) and len(keys) != 1:
raise lena.core.LenaValueError(
"keys must have exactly one key at each level, "
"{} given".format(keys)
)
else:
if not isinstance(keys, dict):
new_keys.append(keys)
break
for key in keys:
new_keys.append(key)
keys = keys[key]
break
keys = new_keys
elif isinstance(keys, list):
if not all(isinstance(k, str) for k in keys):
raise lena.core.LenaTypeError(
"all simple keys must be strings, "
"{} given".format(keys)
)
else:
raise lena.core.LenaTypeError(
"keys must be a dict, a string or a list of keys, "
"{} given".format(keys)
)
for key in keys[:-1]:
if key in d and isinstance(d.get(key), dict):
d = d[key]
elif has_default:
return default
else:
raise lena.core.LenaKeyError(
"nested dict {} not found in {}".format(key, d)
)
if not keys:
return d
if keys[-1] in d:
return d[keys[-1]]
elif has_default:
return default
else:
raise lena.core.LenaKeyError(
"nested key {} not found in {}".format(keys[-1], d)
)
def intersection(*dicts, **kwargs):
    """Return a dictionary, such that each of its items
    are contained in all *dicts* (recursively).

    *dicts* are several dictionaries.
    If *dicts* is empty, an empty dictionary is returned.

    A keyword argument *level* sets maximum number of recursions.
    For example, if *level* is 0, all *dicts* must be equal
    (otherwise an empty dict is returned).
    If *level* is 1, the result contains those subdictionaries
    which are equal.
    For arbitrarily nested subdictionaries set *level* to -1 (default).

    Example:

    >>> from lena.context import intersection
    >>> d1 = {1: "1", 2: {3: "3", 4: "4"}}
    >>> d2 = {2: {4: "4"}}
    >>> # by default level is -1, which means infinite recursion
    >>> intersection(d1, d2) == d2
    True
    >>> intersection(d1, d2, level=0)
    {}
    >>> intersection(d1, d2, level=1)
    {}
    >>> intersection(d1, d2, level=2)
    {2: {4: '4'}}

    This function always returns a dictionary
    or its subtype (copied from dicts[0]).
    All values are deeply copied.
    No dictionary or subdictionary is changed.

    If any of *dicts* is not a dictionary
    or if some *kwargs* are unknown,
    :exc:`.LenaTypeError` is raised.
    """
    # generator instead of a list: no intermediate list is built
    if not all(isinstance(d, dict) for d in dicts):
        raise lena.core.LenaTypeError(
            "all dicts must be dictionaries, "
            "{} given".format(dicts)
        )
    # *level* is taken from kwargs to keep *dicts purely positional
    level = kwargs.pop("level", -1)
    if kwargs:
        raise lena.core.LenaTypeError(
            "unknown kwargs {}".format(kwargs)
        )
    if not dicts:
        return {}

    # deep copy so that no input (sub)dictionary is ever mutated
    res = copy.deepcopy(dicts[0])
    for d in dicts[1:]:
        if level == 0:
            # no recursion allowed: dicts must be equal and non-empty,
            # otherwise the intersection is empty
            if d != res or not d:
                return {}
            continue
        to_delete = []
        for key in res:
            if key not in d:
                # keys can't be deleted during iteration
                to_delete.append(key)
            elif d[key] != res[key]:
                if level != 1 and isinstance(res[key], dict) \
                        and isinstance(d[key], dict):
                    # recurse one level deeper
                    res[key] = intersection(res[key], d[key], level=level-1)
                else:
                    to_delete.append(key)
        for key in to_delete:
            del res[key]
    # dead code removed here: the original ended with
    # "if not res: return res" followed by "return res",
    # which is an unconditional return either way.
    return res
def iterate_update(d, updates):
    """Iterate on updates of *d* with *updates*.

    *d* is a dictionary. It remains unchanged.
    *updates* is a list of dictionaries.
    For each element *update*
    a copy of *d* updated with *update* is yielded.

    If *updates* is empty, nothing is yielded.
    """
    # todo: do I need this function?
    for upd in updates:
        # deep copy keeps the original *d* untouched
        updated = copy.deepcopy(d)
        update_recursively(updated, upd)
        yield updated
def make_context(obj, *attrs):
    """Return context for object *obj*.

    *attrs* is a list of attributes of *obj* to be inserted
    into the context.
    If an attribute starts with an underscore '_',
    it is inserted without the underscore.
    If an attribute is absent or None, it is skipped.
    """
    # todo: rename to to_dict
    # not used anywhere, change it freely.
    context = {}
    for name in attrs:
        value = getattr(obj, name, None)
        if value is None:
            # absent attributes also yield None here
            continue
        # strip exactly one leading underscore
        key = name[1:] if name.startswith("_") else name
        context[key] = value
    return context
def str_to_dict(s, value=_sentinel):
    """Create a dictionary from a dot-separated string *s*.

    If the *value* is provided, it becomes the value of
    the deepest key represented by *s*.
    Dots represent nested dictionaries.

    If *s* is non-empty and *value* is not provided,
    then *s* must have at least two dot-separated parts
    (*"a.b"*), otherwise :exc:`.LenaValueError` is raised.
    If a *value* is provided, *s* must be non-empty.
    If *s* is empty, an empty dictionary is returned.

    Examples:

    >>> str_to_dict("a.b.c d")
    {'a': {'b': 'c d'}}
    >>> str_to_dict("output.changed", True)
    {'output': {'changed': True}}
    """
    if s == "":
        if value is _sentinel:
            return {}
        raise lena.core.LenaValueError(
            "to make a dict with a value, "
            "provide at least one dot-separated key"
        )

    parts = s.split(".")
    if value is not _sentinel:
        # the explicit value becomes the deepest element
        parts.append(value)
    if len(parts) < 2:
        raise lena.core.LenaValueError(
            "to make a dict, provide at least two dot-separated values"
        )

    # fold from the right: the last part is the value,
    # every preceding key wraps the accumulated result
    # into a new dictionary (iterative form of the old recursion)
    result = parts[-1]
    for key in reversed(parts[:-1]):
        result = {key: result}
    return result
def str_to_list(s):
    """Like :func:`str_to_dict`, but return a flat list.

    If the string *s* is empty, an empty list is returned.
    This is different from *str.split*: the latter would
    return a list with one empty string.
    Contrarily to :func:`str_to_dict`, this function allows
    an arbitrary number of dots in *s* (or none).
    """
    # *s* may contain empty substrings, like in "a..b";
    # they are preserved here (only the fully empty string
    # is special-cased).
    return [] if s == "" else s.split(".")
def update_nested(key, d, other):
    """Update *d[key]* with the *other* dictionary preserving data.

    If *d* doesn't contain the *key*, it is updated with *{key: other}*.
    If *d* contains the *key*, *d[key]* is inserted into *other[key]*
    (so that it is not overriden).
    If *other* contains *key* (and possibly more nested *key*-s),
    then *d[key]* is inserted into the deepest level
    of *other.key.key...* Finally, *d[key]* becomes *other*.

    Example:

    >>> context = {"variable": {"name": "x"}}
    >>> new_var_context = {"name": "n"}
    >>> update_nested("variable", context, copy.deepcopy(new_var_context))
    >>> context == {'variable': {'name': 'n', 'variable': {'name': 'x'}}}
    True

    *other* is modified in general. Create that on the fly
    or use *copy.deepcopy* when appropriate.

    Recursive dictionaries (containing references to themselves)
    are strongly discouraged and meaningless when nesting.
    If *other[key]* is recursive, :exc:`.LenaValueError` may be raised.
    """
    # Only one key is nested, which encourages designs where
    # an element changes exactly one context key.
    def _deepest_holder(key, node):
        # walk down node[key][key]... and return the first
        # dictionary that does not contain *key*
        seen = []
        while key in node:
            if node in seen:
                raise lena.core.LenaValueError(
                    "recursive *other* is forbidden"
                )
            seen.append(node)
            node = node[key]
        return node

    if key in d:
        # preserve the old value at the lowest other.key.key...
        _deepest_holder(key, other)[key] = d[key]
    d[key] = other
def update_recursively(d, other, value=_sentinel):
    """Update dictionary *d* with items from *other* dictionary.

    *other* can be a dot-separated string. In this case
    :func:`str_to_dict` is used to convert it and the *value*
    to a dictionary.
    A *value* argument is allowed only when *other* is a string,
    otherwise :exc:`.LenaValueError` is raised.

    Existing values are updated recursively,
    that is including nested subdictionaries.

    Example:

    >>> d1 = {"a": 1, "b": {"c": 3}}
    >>> d2 = {"b": {"d": 4}}
    >>> update_recursively(d1, d2)
    >>> d1 == {'a': 1, 'b': {'c': 3, 'd': 4}}
    True
    >>> # Usual update would have made d1["b"] = {"d": 4}, erasing "c".

    Non-dictionary items from *other* overwrite those in *d*:

    >>> update_recursively(d1, {"b": 2})
    >>> d1 == {'a': 1, 'b': 2}
    True
    """
    # a string *other* is convenient shorthand, e.g.
    # update_recursively(context, "output.changed", True)
    if isinstance(other, str):
        other = str_to_dict(other, value)
    elif value is not _sentinel:
        raise lena.core.LenaValueError(
            "explicit value is allowed only when other is a string"
        )

    if not isinstance(d, dict) or not isinstance(other, dict):
        raise lena.core.LenaTypeError(
            "d and other must be dicts, {} and {} provided".format(d, other)
        )

    for key, val in other.items():
        if isinstance(val, dict):
            if key in d:
                # a non-dict value in *d* is replaced
                # by a fresh dict before merging
                if not isinstance(d[key], dict):
                    d[key] = {}
                update_recursively(d[key], val)
            else:
                d[key] = val
        else:
            # plain values simply overwrite
            d[key] = val
|
lena/context/functions.py
|
codereval_python_data_57
|
Fill histogram at *coord* with the given *weight*.
Coordinates outside the histogram edges are ignored.
def fill(self, coord, weight=1):
    """Fill histogram at *coord* with the given *weight*.

    Coordinates outside the histogram edges are ignored.
    """
    indices = hf.get_bin_on_value(coord, self.edges)
    # a negative index on any axis means underflow there;
    # it must not be used for indexing (it would wrap around)
    if any(ind < 0 for ind in indices):
        return
    cell = self.bins
    try:
        # descend into nested bins for all axes but the last
        for ind in indices[:-1]:
            cell = cell[ind]
        # increment the innermost cell
        cell[indices[-1]] += weight
    except IndexError:
        # overflow along some axis
        return
"""Histogram structure *histogram* and element *Histogram*."""
import copy
import lena.context
import lena.core
import lena.flow
import lena.math
from . import hist_functions as hf
class histogram():
    """A multidimensional histogram.

    Arbitrary dimension, variable bin size and weights are supported.
    Lower bin edge is included, upper edge is excluded.
    Underflow and overflow values are skipped.
    Bin content can be of arbitrary type,
    which is defined during initialization.

    Examples:

    >>> # a two-dimensional histogram
    >>> hist = histogram([[0, 1, 2], [0, 1, 2]])
    >>> hist.fill([0, 1])
    >>> hist.bins
    [[0, 1], [0, 0]]
    >>> values = [[0, 0], [1, 0], [1, 1]]
    >>> # fill the histogram with values
    >>> for v in values:
    ...     hist.fill(v)
    >>> hist.bins
    [[1, 1], [1, 1]]
    """
    # Note the differences from existing packages.
    # Numpy 1.16 (numpy.histogram): all but the last
    # (righthand-most) bin is half-open.
    # This histogram class has bin limits as in ROOT
    # (but without overflow and underflow).
    # Numpy: the first element of the range must be less than or equal to the second.
    # This histogram requires strictly increasing edges.
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
    # https://root.cern.ch/root/htmldoc/guides/users-guide/Histograms.html#bin-numbering

    def __init__(self, edges, bins=None, initial_value=0):
        """*edges* is a sequence of one-dimensional arrays,
        each containing strictly increasing bin edges.

        Histogram's bins by default
        are initialized with *initial_value*.
        It can be any object that supports addition with *weight*
        during *fill* (but that is not necessary
        if you don't plan to fill the histogram).
        If the *initial_value* is compound and requires special copying,
        create initial bins yourself (see :func:`.init_bins`).

        A histogram can be created from existing *bins* and *edges*.
        In this case a simple check of the shape of *bins* is done
        (raising :exc:`.LenaValueError` if failed).

        **Attributes**

        :attr:`edges` is a list of edges on each dimension.
        Edges mark the borders of the bin.
        Edges along each dimension are one-dimensional lists,
        and the multidimensional bin is the result of all intersections
        of one-dimensional edges.
        For example, a 3-dimensional histogram has edges of the form
        *[x_edges, y_edges, z_edges]*,
        and the 0th bin has borders
        *((x[0], x[1]), (y[0], y[1]), (z[0], z[1]))*.

        Index in the edges is a tuple, where a given position corresponds
        to a dimension, and the content at that position
        to the bin along that dimension.
        For example, index *(0, 1, 3)* corresponds to the bin
        with lower edges *(x[0], y[1], z[3])*.

        :attr:`bins` is a list of nested lists.
        Same index as for edges can be used to get bin content:
        bin at *(0, 1, 3)* can be obtained as *bins[0][1][3]*.
        Most nested arrays correspond to highest
        (further from x) coordinates.
        For example, for a 3-dimensional histogram bins equal to
        *[[[1, 1], [0, 0]], [[0, 0], [0, 0]]]*
        mean that the only filled bins are those
        where x and y indices are 0, and z index is 0 and 1.

        :attr:`dim` is the dimension of a histogram
        (length of its *edges* for a multidimensional histogram).

        If subarrays of *edges* are not increasing
        or if any of them has length less than 2,
        :exc:`.LenaValueError` is raised.

        .. admonition:: Programmer's note

            one- and multidimensional histograms
            have different *bins* and *edges* format.
            To be unified, 1-dimensional edges should be
            nested in a list (like *[[1, 2, 3]]*).
            Instead, they are simply the x-edges list,
            because it is more intuitive and one-dimensional histograms
            are used more often.
            To unify the interface for bins and edges in your code,
            use :func:`.unify_1_md` function.
        """
        # todo: allow creation of *edges* from tuples
        # (without lena.math.mesh). Allow bin_size in this case.
        hf.check_edges_increasing(edges)
        self.edges = edges
        # cached scale; computed lazily in scale()
        self._scale = None

        # multidimensional edges are a sequence of sequences,
        # one-dimensional edges are a plain sequence of numbers
        if hasattr(edges[0], "__iter__"):
            self.dim = len(edges)
        else:
            self.dim = 1

        # todo: add a kwarg no_check=False to disable bins testing
        if bins is None:
            self.bins = hf.init_bins(self.edges, initial_value)
        else:
            self.bins = bins
            # We can't make scale for an arbitrary histogram,
            # because it may contain compound values.
            # self._scale = self.make_scale()
            wrong_bins_error = lena.core.LenaValueError(
                "bins of incorrect shape given, {}".format(bins)
            )
            # only the outermost length of user-provided *bins*
            # is checked against the number of bins on the first axis
            if self.dim == 1:
                if len(bins) != len(edges) - 1:
                    raise wrong_bins_error
            else:
                if len(bins) != len(edges[0]) - 1:
                    raise wrong_bins_error

        # per-axis (min, max) ranges and bin counts
        if self.dim > 1:
            self.ranges = [(axis[0], axis[-1]) for axis in edges]
            self.nbins = [len(axis) - 1 for axis in edges]
        else:
            self.ranges = [(edges[0], edges[-1])]
            self.nbins = [len(edges)-1]

    def __eq__(self, other):
        """Two histograms are equal, if and only if they have
        equal bins and equal edges.

        If *other* is not a :class:`.histogram`, return ``False``.

        Note that floating numbers should be compared
        approximately (using :func:`math.isclose`).
        """
        if not isinstance(other, histogram):
            # in Python comparison between different types is allowed
            return False
        return self.bins == other.bins and self.edges == other.edges

    def fill(self, coord, weight=1):
        """Fill histogram at *coord* with the given *weight*.

        Coordinates outside the histogram edges are ignored.
        """
        indices = hf.get_bin_on_value(coord, self.edges)
        subarr = self.bins
        # descend into nested bins, checking each axis index
        for ind in indices[:-1]:
            # underflow
            if ind < 0:
                return
            try:
                subarr = subarr[ind]
            # overflow
            except IndexError:
                return
        ind = indices[-1]
        # underflow
        if ind < 0:
            return
        # fill
        try:
            subarr[ind] += weight
        except IndexError:
            return

    def __repr__(self):
        return "histogram({}, bins={})".format(self.edges, self.bins)

    def scale(self, other=None, recompute=False):
        """Compute or set scale (integral of the histogram).

        If *other* is ``None``, return scale of this histogram.
        If its scale was not computed before,
        it is computed and stored for subsequent use
        (unless explicitly asked to *recompute*).
        Note that after changing (filling) the histogram
        one must explicitly recompute the scale
        if it was computed before.

        If a float *other* is provided, rescale self to *other*.

        Histograms with scale equal to zero can't be rescaled.
        :exc:`.LenaValueError` is raised if one tries to do that.
        """
        # see graph.scale comments why this is called simply "scale"
        # (not set_scale, get_scale, etc.)
        if other is None:
            # return scale
            if self._scale is None or recompute:
                self._scale = hf.integral(
                    *hf.unify_1_md(self.bins, self.edges)
                )
            return self._scale
        else:
            # rescale from other
            scale = self.scale()
            if scale == 0:
                raise lena.core.LenaValueError(
                    "can not rescale histogram with zero scale"
                )
            # multiply every bin by other/scale
            self.bins = lena.math.md_map(lambda binc: binc*float(other) / scale,
                                         self.bins)
            self._scale = other
            return None

    def _update_context(self, context):
        """Update *context* with the properties of this histogram.

        *context.histogram* is updated with "dim", "nbins"
        and "ranges" with values for this histogram.
        If this histogram has a computed scale, it is also added
        to the context.

        Called on "destruction" of the histogram structure (for example,
        in :class:`.ToCSV`). See graph._update_context for more details.
        """
        hist_context = {
            "dim": self.dim,
            "nbins": self.nbins,
            "ranges": self.ranges
        }
        # only an already-computed scale is propagated
        if self._scale is not None:
            hist_context["scale"] = self._scale
        lena.context.update_recursively(context, {"histogram": hist_context})
class Histogram():
    """An element to produce histograms."""

    def __init__(self, edges, bins=None, make_bins=None, initial_value=0):
        """*edges*, *bins* and *initial_value* have the same meaning
        as during creation of a :class:`histogram`.

        *make_bins* is a function without arguments
        that creates new bins
        (it will be called during :meth:`__init__` and :meth:`reset`).
        *initial_value* in this case is ignored, but bin check is made.

        If both *bins* and *make_bins* are provided,
        :exc:`.LenaTypeError` is raised.
        """
        # validate the mutually exclusive arguments before doing any work
        if make_bins is not None and bins is not None:
            raise lena.core.LenaTypeError(
                "either initial bins or make_bins must be provided, "
                "not both: {} and {}".format(bins, make_bins)
            )
        # may be None
        self._initial_bins = copy.deepcopy(bins)
        # remembered for reset() when neither bins nor make_bins was given
        self._initial_value = initial_value
        self._make_bins = make_bins
        if make_bins is not None:
            # bug fix: previously the result of make_bins() was assigned
            # to a local variable after self._hist had already been
            # created, so the made bins were silently discarded
            bins = make_bins()
        # bug fix: initial_value was previously not forwarded
        self._hist = histogram(edges, bins, initial_value)
        self._cur_context = {}

    def fill(self, value):
        """Fill the histogram with *value*.

        *value* can be a *(data, context)* pair.
        Values outside the histogram edges are ignored.
        """
        data, self._cur_context = lena.flow.get_data_context(value)
        self._hist.fill(data)
        # filling with weight is only allowed in histogram structure
        # self._hist.fill(data, weight)

    def compute(self):
        """Yield histogram with context."""
        yield (self._hist, self._cur_context)

    def reset(self):
        """Reset the histogram.

        Current context is reset to an empty dict.
        Bins are reinitialized with the *initial_value*
        or with *make_bins()* (depending on the initialization).
        """
        # bug fix: reset previously assigned self.bins (an attribute
        # that nothing reads) instead of resetting the underlying
        # histogram, and referenced self.edges / self._initial_value,
        # which did not exist on this element
        if self._make_bins is not None:
            self._hist.bins = self._make_bins()
        elif self._initial_bins is not None:
            self._hist.bins = copy.deepcopy(self._initial_bins)
        else:
            self._hist.bins = hf.init_bins(self._hist.edges,
                                           self._initial_value)
        self._cur_context = {}
|
lena/structures/histogram.py
|
codereval_python_data_58
|
Check that keys and values in the given labels match against their corresponding
regular expressions.
Args:
labels (dict): the different labels to validate.
Raises:
ValidationError: if any of the keys and labels does not match their respective
regular expression. The error contains as message the list of all errors
which occurred in the labels. Each element of the list is a dictionary with
one key-value pair:
- key: the label key or label value for which an error occurred as string.
- value: the error message.
.. code:: python
# Example:
labels = {
"key1": "valid",
"key2": ["invalid"],
"$$": "invalid",
True: True,
}
try:
_validate_labels(labels)
except ValidationError as err:
assert err.messages == [
{"['invalid']": 'expected string or bytes-like object'},
{'$$': "Label key '$$' does not match the regex [...]"},
{'True': 'expected string or bytes-like object'},
{'True': 'expected string or bytes-like object'},
]
def _validate_labels(labels):
    """Check that keys and values in the given labels match against their
    corresponding regular expressions.

    Args:
        labels (dict): the different labels to validate.

    Raises:
        ValidationError: if any of the keys and labels does not match their
            respective regular expression. The error contains as message the
            list of all errors which occurred in the labels. Each element of
            the list is a dictionary with one key-value pair:
            - key: the label key or label value for which an error occurred
              as string.
            - value: the error message.
    """
    # every (key, value) pair is checked by its own validator;
    # all failures are collected before raising, so the caller
    # sees every problem at once
    collected = []
    for key, value in labels.items():
        for validate, item in ((validate_key, key), (validate_value, value)):
            try:
                validate(item)
            except (ValidationError, TypeError) as err:
                collected.append({str(item): str(err)})
    if collected:
        raise ValidationError(list(collected))
import re
from enum import Enum, IntEnum, auto
from datetime import datetime
from dataclasses import field
from typing import List, Dict
from marshmallow import ValidationError
from . import persistent
from .serializable import Serializable, ApiObject, PolymorphicContainer
class ResourceRef(Serializable):
    """Reference to another API resource, identified by its API group,
    namespace, kind and name.
    """
    api: str
    namespace: str = field(default=None)
    kind: str
    name: str

    def __hash__(self):
        # hash over all identifying fields so references can be used
        # as dict keys and set members
        return hash((self.api, self.namespace, self.kind, self.name))

    def __repr__(self):
        message = f"{self.kind}(api='{self.api}', "
        return message + f"namespace='{self.namespace}', name='{self.name}')"
_label_key_pattern = None
_label_value_pattern = None
_label_key_regex = None
_label_value_regex = None
def _get_labels_regex():
"""Build or return the regular expressions that are used to validate the key and
value of the labels of the Krake resources.
The first call builds the expressions, while a second returns the already built
ones.
Returns:
(re.Pattern, re.Pattern): a tuple that contains the compiled regular,
expressions, the first element to validate the key and the second to
validate the value.
"""
global _label_key_pattern, _label_value_pattern
global _label_key_regex, _label_value_regex
if _label_key_regex and _label_value_regex:
return _label_key_regex, _label_value_regex
# Build the patterns only if not already built
max_prefix_size = 253
max_key_size = 63
max_value_size = max_key_size
# First and last characters must be alphanumeric. The rest of the string must be
# alphanumeric, "-", "_" or "."
base_alphanumeric_pattern = "\\w|(\\w[\\w\\-_.]{{0,{length}}}\\w)"
key_pattern = base_alphanumeric_pattern.format(length=max_key_size - 2)
value_pattern = base_alphanumeric_pattern.format(length=max_value_size - 2)
prefix_pattern = base_alphanumeric_pattern.format(length=max_prefix_size - 2)
# The key can be a string of length 63 with the specifications described above,
# or have a prefix, then one "/" character, then the string of length 63 (called
# name).
# The prefix itself should have a max length of 253, but otherwise follows the
# specifications described above.
_label_key_pattern = f"^(({prefix_pattern})\\/)?({key_pattern})$"
# The value can be a string of length 63 with the specifications described
# above.
_label_value_pattern = value_pattern
_label_key_regex = re.compile(_label_key_pattern, re.ASCII)
_label_value_regex = re.compile(_label_value_pattern, re.ASCII)
return _label_key_regex, _label_value_regex
def validate_key(key):
    """Validate the given key against the corresponding regular expression.

    Args:
        key: the string to validate

    Raises:
        ValidationError: if the given key is not conform to the regular
            expression.
    """
    key_regex = _get_labels_regex()[0]
    if key_regex.fullmatch(key):
        return
    raise ValidationError(
        f"Label key {key!r} does not match the regex {_label_key_pattern!r}."
    )
def validate_value(value):
    """Validate the given value against the corresponding regular expression.

    Args:
        value: the string to validate

    Raises:
        ValidationError: if the given value is not conform to the regular
            expression.
    """
    value_regex = _get_labels_regex()[1]
    if value_regex.fullmatch(value):
        return
    raise ValidationError(
        f"Label value {value!r} does not match"
        f" the regex {_label_value_pattern!r}."
    )
def _validate_labels(labels):
    """Check that keys and values in the given labels match against their
    corresponding regular expressions.

    Args:
        labels (dict): the different labels to validate.

    Raises:
        ValidationError: if any of the keys and labels does not match their
            respective regular expression. The error contains as message the
            list of all errors which occurred in the labels. Each element of
            the list is a dictionary with one key-value pair:
            - key: the label key or label value for which an error occurred
              as string.
            - value: the error message.
    """
    def _collect(check, item, out):
        # run one validator, recording a failure as {str(item): message}
        try:
            check(item)
        except (ValidationError, TypeError) as err:
            out.append({str(item): str(err)})

    # gather all failures so the caller sees every problem at once
    failures = []
    for key, value in labels.items():
        _collect(validate_key, key, failures)
        _collect(validate_value, value, failures)
    if failures:
        raise ValidationError(list(failures))
_resource_name_pattern = None
_resource_name_regex = None
def _get_resource_name_regex():
"""Build or return the regular expressions that are used to validate
the name of the Krake resources.
Returns:
(re.Pattern): the compiled regular expressions, to validate
the resource name.
"""
global _resource_name_regex, _resource_name_pattern
# Build the patterns only if not already built
if _resource_name_regex:
return _resource_name_regex
# First and last characters must be alphanumeric. The rest of the string must be
# alphanumeric, "-", "_" or "." and without whitespace as well as have a
# max length of 255 and a min length of 1
max_name_size = 253 # reduced by 2 for the regex
min_name_size = 0 # reduced by 1 for the regex
base_alphanumeric_pattern = "\\w|(\\w[\\w\\-_.:]{{{min_length},{length}}}\\w)"
resource_name_pattern = base_alphanumeric_pattern.format(
min_length=min_name_size, length=max_name_size
)
_resource_name_pattern = resource_name_pattern
_resource_name_regex = re.compile(_resource_name_pattern, re.ASCII)
return _resource_name_regex
def _validate_resource_name(name):
    """Each Krake resource name is checked against a specific pattern.
    Which characters are not allowed is defined in _get_resource_name_regex.

    Args:
        name(str): the resource name to validate.

    Raises:
        ValidationError: if the resource name does not match the
            regular expression.
    """
    if _get_resource_name_regex().fullmatch(name) is None:
        raise ValidationError("Invalid character in resource name.")
def _validate_resource_namespace(namespace):
    """Each Krake resource namespace is checked against a specific pattern.
    Which characters are not allowed is defined in _get_resource_name_regex.

    Args:
        namespace(str): the resource namespace to validate.

    Raises:
        ValidationError: if the resource namespace does not match the
            regular expression.
    """
    # the same pattern is shared between names and namespaces
    if _get_resource_name_regex().fullmatch(namespace) is None:
        raise ValidationError("Invalid character in resource namespace.")
class Metadata(Serializable):
    """Metadata attached to every namespaced Krake resource.

    ``name`` and ``namespace`` are immutable and validated by
    :func:`_validate_resource_name` / :func:`_validate_resource_namespace`;
    ``labels`` are validated by :func:`_validate_labels`.
    Fields marked ``readonly`` are managed by the API, not by clients.
    """
    name: str = field(metadata={"immutable": True, "validate": _validate_resource_name})
    namespace: str = field(
        default=None,
        metadata={"immutable": True, "validate": _validate_resource_namespace},
    )
    labels: dict = field(default_factory=dict, metadata={"validate": _validate_labels})
    finalizers: List[str] = field(default_factory=list)
    uid: str = field(metadata={"readonly": True})
    created: datetime = field(metadata={"readonly": True})
    modified: datetime = field(metadata={"readonly": True})
    deleted: datetime = field(default=None, metadata={"readonly": True})
    owners: List[ResourceRef] = field(default_factory=list)
class CoreMetadata(Serializable):
    """Minimal metadata (name and uid only) for non-namespaced objects."""
    name: str
    uid: str


class ListMetadata(Serializable):
    """Metadata of list resources. Currently empty."""
    pass  # TODO
class ReasonCode(IntEnum):
    """Machine-readable codes describing why a resource failed.

    Used as the ``code`` field of :class:`Reason`.
    """
    INTERNAL_ERROR = 1  # Default error

    INVALID_RESOURCE = 10  # Invalid values in the Manifest
    # Kubernetes' resource is not supported by the Kubernetes controller
    UNSUPPORTED_RESOURCE = 11
    # The custom resource provided does not exist or is invalid
    INVALID_CUSTOM_RESOURCE = 12

    CLUSTER_NOT_REACHABLE = 20  # Connectivity issue with the Kubernetes deployment

    NO_SUITABLE_RESOURCE = 50  # Scheduler issue

    KUBERNETES_ERROR = 60

    CREATE_FAILED = 70
    RECONCILE_FAILED = 71
    DELETE_FAILED = 72

    OPENSTACK_ERROR = 80
    INVALID_CLUSTER_TEMPLATE = 81

    # Related to Metrics and Metric Provider
    INVALID_METRIC = 91
    UNREACHABLE_METRICS_PROVIDER = 92
    UNKNOWN_METRIC = 93
    UNKNOWN_METRICS_PROVIDER = 94
class Reason(Serializable):
    """A failure reason: a :class:`ReasonCode` plus a human-readable message."""
    code: ReasonCode
    message: str


class WatchEventType(Enum):
    """Kind of change reported by a watch event."""
    ADDED = auto()
    MODIFIED = auto()
    DELETED = auto()


class Status(Serializable):
    """Base status with an optional failure :class:`Reason`."""
    reason: Reason = None


class WatchEvent(Serializable):
    """A single watch notification: the event type and the object's data."""
    type: WatchEventType
    object: dict


class Verb(Enum):
    """Actions that can be allowed on resources by RBAC rules."""
    create = auto()
    list = auto()
    list_all = auto()
    get = auto()
    update = auto()
    delete = auto()


class RoleRule(Serializable):
    """One RBAC rule: allowed verbs on resources of APIs in namespaces."""
    api: str
    resources: List[str]
    namespaces: List[str]
    verbs: List[Verb]
@persistent("/core/roles/{name}")
class Role(ApiObject):
    """An RBAC role: a named collection of :class:`RoleRule` entries."""
    api: str = "core"
    kind: str = "Role"
    metadata: Metadata
    rules: List[RoleRule]


class RoleList(ApiObject):
    """A list of :class:`Role` objects."""
    api: str = "core"
    kind: str = "RoleList"
    metadata: ListMetadata
    items: List[Role]


@persistent("/core/rolebindings/{name}")
class RoleBinding(ApiObject):
    """Binds a set of users to a set of roles."""
    api: str = "core"
    kind: str = "RoleBinding"
    metadata: Metadata
    users: List[str]
    roles: List[str]


class RoleBindingList(ApiObject):
    """A list of :class:`RoleBinding` objects."""
    api: str = "core"
    kind: str = "RoleBindingList"
    metadata: ListMetadata
    items: List[RoleBinding]


class Conflict(Serializable):
    """A source resource together with the resources conflicting with it."""
    source: ResourceRef
    conflicting: List[ResourceRef]
def resource_ref(resource):
    """Build the :class:`ResourceRef` that identifies the given API object.

    Args:
        resource (.serializable.ApiObject): API object that should be
            referenced

    Returns:
        ResourceRef: Corresponding reference to the API object
    """
    # Only the identifying attributes are copied; the reference does not
    # keep a link to the original object.
    meta = resource.metadata
    return ResourceRef(
        api=resource.api, kind=resource.kind, namespace=meta.namespace, name=meta.name
    )
class MetricSpecProvider(Serializable):
name: str
metric: str
class MetricSpec(Serializable):
min: float
max: float
provider: MetricSpecProvider
class BaseMetric(ApiObject):
api: str = "core"
kind: str = None
metadata: Metadata
spec: MetricSpec
@persistent("/core/globalmetrics/{name}")
class GlobalMetric(BaseMetric):
api: str = "core"
kind: str = "GlobalMetric"
metadata: Metadata
spec: MetricSpec
@persistent("/core/metrics/{namespace}/{name}")
class Metric(BaseMetric):
api: str = "core"
kind: str = "Metric"
metadata: Metadata
spec: MetricSpec
class MetricList(ApiObject):
api: str = "core"
kind: str = "MetricList"
metadata: ListMetadata
items: List[Metric]
class GlobalMetricList(ApiObject):
api: str = "core"
kind: str = "GlobalMetricList"
metadata: ListMetadata
items: List[GlobalMetric]
class MetricsProviderSpec(PolymorphicContainer):
type: str
@MetricsProviderSpec.register("prometheus")
class PrometheusSpec(Serializable):
url: str
@MetricsProviderSpec.register("kafka")
class KafkaSpec(Serializable):
"""Specifications to connect to a KSQL database, and retrieve a specific row from a
specific table.
Attributes:
comparison_column (str): name of the column where the value will be compared to
the metric name, to select the right metric.
value_column (str): name of the column where the value of a metric is stored.
table (str): the name of the KSQL table where the metric is defined.
url (str): endpoint of the KSQL database.
"""
comparison_column: str
value_column: str
table: str
url: str
@MetricsProviderSpec.register("static")
class StaticSpec(Serializable):
metrics: Dict[str, float]
class BaseMetricsProvider(ApiObject):
api: str = "core"
kind: str = None
metadata: Metadata
spec: MetricsProviderSpec
@persistent("/core/globalmetricsproviders/{name}")
class GlobalMetricsProvider(BaseMetricsProvider):
api: str = "core"
kind: str = "GlobalMetricsProvider"
metadata: Metadata
spec: MetricsProviderSpec
@persistent("/core/metricsproviders/{namespace}/{name}")
class MetricsProvider(BaseMetricsProvider):
api: str = "core"
kind: str = "MetricsProvider"
metadata: Metadata
spec: MetricsProviderSpec
class MetricsProviderList(ApiObject):
api: str = "core"
kind: str = "MetricsProviderList"
metadata: ListMetadata
items: List[MetricsProvider]
class GlobalMetricsProviderList(ApiObject):
api: str = "core"
kind: str = "GlobalMetricsProviderList"
metadata: ListMetadata
items: List[GlobalMetricsProvider]
class MetricRef(Serializable):
name: str
weight: float
namespaced: bool = False
|
krake/krake/data/core.py
|
codereval_python_data_59
|
Build or return the regular expressions that are used to validate
the name of the Krake resources.
Returns:
(re.Pattern): the compiled regular expressions, to validate
the resource name.
def _get_resource_name_regex():
    """Lazily build and cache the compiled regular expression used to
    validate the name of the Krake resources.

    The expression is compiled on the first call and stored in the module
    globals; every later call returns the cached object.

    Returns:
        (re.Pattern): the compiled regular expression, to validate
            the resource name.
    """
    global _resource_name_regex, _resource_name_pattern

    # Reuse the cached expression when it was already compiled.
    if _resource_name_regex:
        return _resource_name_regex

    # First and last characters must be alphanumeric. The characters in
    # between must be alphanumeric, "-", "_", "." or ":" without whitespace.
    # The total length lies between 1 and 255 characters.
    upper_bound = 253  # reduced by 2 for the regex
    lower_bound = 0  # reduced by 1 for the regex
    template = "\\w|(\\w[\\w\\-_.:]{{{min_length},{length}}}\\w)"
    _resource_name_pattern = template.format(
        min_length=lower_bound, length=upper_bound
    )
    _resource_name_regex = re.compile(_resource_name_pattern, re.ASCII)
    return _resource_name_regex
import re
from enum import Enum, IntEnum, auto
from datetime import datetime
from dataclasses import field
from typing import List, Dict
from marshmallow import ValidationError
from . import persistent
from .serializable import Serializable, ApiObject, PolymorphicContainer
class ResourceRef(Serializable):
    """Reference to a single API object, identified by its API group,
    namespace, kind and name.
    """

    api: str
    namespace: str = field(default=None)
    kind: str
    name: str

    def __hash__(self):
        # Hash over the full identity so references can be used as dict keys
        # and set members.
        return hash((self.api, self.namespace, self.kind, self.name))

    def __repr__(self):
        return (
            f"{self.kind}(api='{self.api}', "
            f"namespace='{self.namespace}', name='{self.name}')"
        )
# Lazily-built label validation patterns and their compiled forms.
# All four are populated by _get_labels_regex() on its first call.
_label_key_pattern = None
_label_value_pattern = None
_label_key_regex = None
_label_value_regex = None
def _get_labels_regex():
    """Lazily build and cache the regular expressions used to validate the
    key and the value of the labels of the Krake resources.

    The expressions are compiled on the first call and stored in the module
    globals; every later call returns the cached objects.

    Returns:
        (re.Pattern, re.Pattern): a tuple that contains the compiled regular
            expressions, the first element to validate the key and the
            second to validate the value.
    """
    global _label_key_pattern, _label_value_pattern
    global _label_key_regex, _label_value_regex

    # Reuse the cached expressions when they were already compiled.
    if _label_key_regex and _label_value_regex:
        return _label_key_regex, _label_value_regex

    prefix_limit = 253
    key_limit = 63
    value_limit = key_limit

    # Every segment starts and ends with an alphanumeric character; the
    # characters in between must be alphanumeric, "-", "_" or ".".
    segment_template = "\\w|(\\w[\\w\\-_.]{{0,{length}}}\\w)"
    key_pattern = segment_template.format(length=key_limit - 2)
    value_pattern = segment_template.format(length=value_limit - 2)
    prefix_pattern = segment_template.format(length=prefix_limit - 2)

    # A key is either a name of at most 63 characters following the rules
    # above, or such a name preceded by an optional prefix (at most 253
    # characters, same character rules) and a single "/" separator.
    _label_key_pattern = f"^(({prefix_pattern})\\/)?({key_pattern})$"
    # A value follows the same rules as the name part of a key.
    _label_value_pattern = value_pattern

    _label_key_regex = re.compile(_label_key_pattern, re.ASCII)
    _label_value_regex = re.compile(_label_value_pattern, re.ASCII)
    return _label_key_regex, _label_value_regex
def validate_key(key):
    """Ensure that the given label key matches the expected pattern.

    Args:
        key: the string to validate

    Raises:
        ValidationError: if the given key is not conform to the regular
            expression.
    """
    regex, _ = _get_labels_regex()
    if regex.fullmatch(key):
        return
    raise ValidationError(
        f"Label key {key!r} does not match the regex {_label_key_pattern!r}."
    )
def validate_value(value):
    """Ensure that the given label value matches the expected pattern.

    Args:
        value: the string to validate

    Raises:
        ValidationError: if the given value is not conform to the regular
            expression.
    """
    _, regex = _get_labels_regex()
    if regex.fullmatch(value):
        return
    raise ValidationError(
        f"Label value {value!r} does not match"
        f" the regex {_label_value_pattern!r}."
    )
def _validate_labels(labels):
    """Check that keys and values in the given labels match against their
    corresponding regular expressions.

    All key-value pairs are checked, so that a single call reports every
    problem at once instead of only the first one encountered.

    Args:
        labels (dict): the different labels to validate.

    Raises:
        ValidationError: if any of the keys and values does not match its
            respective regular expression. The error contains as message the
            list of all errors which occurred in the labels. Each element of
            the list is a dictionary with one key-value pair:

            - key: the label key or label value for which an error occurred
              as string.
            - value: the error message.

        .. code:: python

            # Example:
            labels = {
                "key1": "valid",
                "key2": ["invalid"],
                "$$": "invalid",
                True: True,
            }
            try:
                _validate_labels(labels)
            except ValidationError as err:
                assert err.messages == [
                    {"['invalid']": 'expected string or bytes-like object'},
                    {'$$': "Label key '$$' does not match the regex [...]"},
                    {'True': 'expected string or bytes-like object'},
                    {'True': 'expected string or bytes-like object'},
                ]
    """
    errors = []
    for key, value in labels.items():
        # TypeError is caught as well: non-string keys/values make
        # re.fullmatch raise it, and they should be reported, not crash.
        try:
            validate_key(key)
        except (ValidationError, TypeError) as err:
            errors.append({str(key): str(err)})
        try:
            validate_value(value)
        except (ValidationError, TypeError) as err:
            errors.append({str(value): str(err)})
    if errors:
        # "errors" is a locally-built list already; raise it directly
        # instead of making a redundant copy with list().
        raise ValidationError(errors)
# Lazily-built resource name pattern and its compiled form.
# Both are populated by _get_resource_name_regex() on its first call.
_resource_name_pattern = None
_resource_name_regex = None
def _get_resource_name_regex():
    """Build or return the regular expressions that are used to validate
    the name of the Krake resources.

    The expression is compiled on the first call and cached in the module
    globals; every later call returns the cached object.

    Returns:
        (re.Pattern): the compiled regular expressions, to validate
            the resource name.
    """
    global _resource_name_regex, _resource_name_pattern
    # Build the patterns only if not already built
    if _resource_name_regex:
        return _resource_name_regex
    # First and last characters must be alphanumeric. The rest of the string
    # must be alphanumeric, "-", "_", "." or ":" and without whitespace, for
    # a total length between 1 and 255 characters.
    max_name_size = 253  # reduced by 2 for the regex (bounds the inner run)
    min_name_size = 0  # reduced by 1 for the regex
    base_alphanumeric_pattern = "\\w|(\\w[\\w\\-_.:]{{{min_length},{length}}}\\w)"
    resource_name_pattern = base_alphanumeric_pattern.format(
        min_length=min_name_size, length=max_name_size
    )
    _resource_name_pattern = resource_name_pattern
    _resource_name_regex = re.compile(_resource_name_pattern, re.ASCII)
    return _resource_name_regex
def _validate_resource_name(name):
    """Check a Krake resource name against the shared name pattern.

    Which characters are not allowed is defined in _get_resource_name_regex.

    Args:
        name(str): the resource name to validate.

    Raises:
        ValidationError: if the resource name does not match the regular
            expression.
    """
    if not _get_resource_name_regex().fullmatch(name):
        raise ValidationError("Invalid character in resource name.")
def _validate_resource_namespace(namespace):
    """Check a Krake resource namespace against the shared name pattern.

    Which characters are not allowed is defined in _get_resource_name_regex.

    Args:
        namespace(str): the resource namespace to validate.

    Raises:
        ValidationError: if the resource namespace does not match the
            regular expression.
    """
    if not _get_resource_name_regex().fullmatch(namespace):
        raise ValidationError("Invalid character in resource namespace.")
class Metadata(Serializable):
    """Metadata common to the Krake API resources.

    Field behavior is declared through each field's ``metadata`` mapping
    (``immutable``, ``readonly``, ``validate``) — presumably interpreted by
    :class:`Serializable`; confirm against its implementation.
    """

    # Resource name; immutable, checked against the resource name regex.
    name: str = field(metadata={"immutable": True, "validate": _validate_resource_name})
    # Optional namespace; immutable, checked like the name.
    namespace: str = field(
        default=None,
        metadata={"immutable": True, "validate": _validate_resource_namespace},
    )
    # Free-form labels; keys and values validated against the label regexes.
    labels: dict = field(default_factory=dict, metadata={"validate": _validate_labels})
    finalizers: List[str] = field(default_factory=list)
    # The fields below are flagged read-only via their field metadata.
    uid: str = field(metadata={"readonly": True})
    created: datetime = field(metadata={"readonly": True})
    modified: datetime = field(metadata={"readonly": True})
    deleted: datetime = field(default=None, metadata={"readonly": True})
    owners: List[ResourceRef] = field(default_factory=list)
class CoreMetadata(Serializable):
name: str
uid: str
class ListMetadata(Serializable):
pass # TODO
class ReasonCode(IntEnum):
INTERNAL_ERROR = 1 # Default error
INVALID_RESOURCE = 10 # Invalid values in the Manifest
# Kubernetes' resource is not supported by the Kubernetes controller
UNSUPPORTED_RESOURCE = 11
# The custom resource provided does not exist or is invalid
INVALID_CUSTOM_RESOURCE = 12
CLUSTER_NOT_REACHABLE = 20 # Connectivity issue with the Kubernetes deployment
NO_SUITABLE_RESOURCE = 50 # Scheduler issue
KUBERNETES_ERROR = 60
CREATE_FAILED = 70
RECONCILE_FAILED = 71
DELETE_FAILED = 72
OPENSTACK_ERROR = 80
INVALID_CLUSTER_TEMPLATE = 81
# Related to Metrics and Metric Provider
INVALID_METRIC = 91
UNREACHABLE_METRICS_PROVIDER = 92
UNKNOWN_METRIC = 93
UNKNOWN_METRICS_PROVIDER = 94
class Reason(Serializable):
code: ReasonCode
message: str
class WatchEventType(Enum):
ADDED = auto()
MODIFIED = auto()
DELETED = auto()
class Status(Serializable):
reason: Reason = None
class WatchEvent(Serializable):
type: WatchEventType
object: dict
class Verb(Enum):
create = auto()
list = auto()
list_all = auto()
get = auto()
update = auto()
delete = auto()
class RoleRule(Serializable):
api: str
resources: List[str]
namespaces: List[str]
verbs: List[Verb]
@persistent("/core/roles/{name}")
class Role(ApiObject):
api: str = "core"
kind: str = "Role"
metadata: Metadata
rules: List[RoleRule]
class RoleList(ApiObject):
api: str = "core"
kind: str = "RoleList"
metadata: ListMetadata
items: List[Role]
@persistent("/core/rolebindings/{name}")
class RoleBinding(ApiObject):
api: str = "core"
kind: str = "RoleBinding"
metadata: Metadata
users: List[str]
roles: List[str]
class RoleBindingList(ApiObject):
api: str = "core"
kind: str = "RoleBindingList"
metadata: ListMetadata
items: List[RoleBinding]
class Conflict(Serializable):
source: ResourceRef
conflicting: List[ResourceRef]
def resource_ref(resource):
    """Create a :class:`ResourceRef` from a :class:`ApiObject`

    Args:
        resource (.serializable.ApiObject): API object that should be
            referenced

    Returns:
        ResourceRef: Corresponding reference to the API object
    """
    # Only the identifying attributes are copied; the reference does not
    # keep a link to the original object.
    return ResourceRef(
        api=resource.api,
        kind=resource.kind,
        namespace=resource.metadata.namespace,
        name=resource.metadata.name,
    )
class MetricSpecProvider(Serializable):
name: str
metric: str
class MetricSpec(Serializable):
min: float
max: float
provider: MetricSpecProvider
class BaseMetric(ApiObject):
api: str = "core"
kind: str = None
metadata: Metadata
spec: MetricSpec
@persistent("/core/globalmetrics/{name}")
class GlobalMetric(BaseMetric):
api: str = "core"
kind: str = "GlobalMetric"
metadata: Metadata
spec: MetricSpec
@persistent("/core/metrics/{namespace}/{name}")
class Metric(BaseMetric):
api: str = "core"
kind: str = "Metric"
metadata: Metadata
spec: MetricSpec
class MetricList(ApiObject):
api: str = "core"
kind: str = "MetricList"
metadata: ListMetadata
items: List[Metric]
class GlobalMetricList(ApiObject):
api: str = "core"
kind: str = "GlobalMetricList"
metadata: ListMetadata
items: List[GlobalMetric]
class MetricsProviderSpec(PolymorphicContainer):
type: str
@MetricsProviderSpec.register("prometheus")
class PrometheusSpec(Serializable):
url: str
@MetricsProviderSpec.register("kafka")
class KafkaSpec(Serializable):
"""Specifications to connect to a KSQL database, and retrieve a specific row from a
specific table.
Attributes:
comparison_column (str): name of the column where the value will be compared to
the metric name, to select the right metric.
value_column (str): name of the column where the value of a metric is stored.
table (str): the name of the KSQL table where the metric is defined.
url (str): endpoint of the KSQL database.
"""
comparison_column: str
value_column: str
table: str
url: str
@MetricsProviderSpec.register("static")
class StaticSpec(Serializable):
metrics: Dict[str, float]
class BaseMetricsProvider(ApiObject):
api: str = "core"
kind: str = None
metadata: Metadata
spec: MetricsProviderSpec
@persistent("/core/globalmetricsproviders/{name}")
class GlobalMetricsProvider(BaseMetricsProvider):
api: str = "core"
kind: str = "GlobalMetricsProvider"
metadata: Metadata
spec: MetricsProviderSpec
@persistent("/core/metricsproviders/{namespace}/{name}")
class MetricsProvider(BaseMetricsProvider):
api: str = "core"
kind: str = "MetricsProvider"
metadata: Metadata
spec: MetricsProviderSpec
class MetricsProviderList(ApiObject):
api: str = "core"
kind: str = "MetricsProviderList"
metadata: ListMetadata
items: List[MetricsProvider]
class GlobalMetricsProviderList(ApiObject):
api: str = "core"
kind: str = "GlobalMetricsProviderList"
metadata: ListMetadata
items: List[GlobalMetricsProvider]
class MetricRef(Serializable):
name: str
weight: float
namespaced: bool = False
|
krake/krake/data/core.py
|
codereval_python_data_60
|
Validate the given value against the corresponding regular expression.
Args:
value: the string to validate
Raises:
ValidationError: if the given value is not conform to the regular expression.
def validate_value(value):
    """Validate the given value against the corresponding regular expression.

    Args:
        value: the string to validate

    Raises:
        ValidationError: if the given value is not conform to the regular expression.
    """
    # Fetch the cached, compiled label-value pattern (built on first use).
    _, value_regex = _get_labels_regex()
    if not value_regex.fullmatch(value):
        raise ValidationError(
            f"Label value {value!r} does not match"
            f" the regex {_label_value_pattern!r}."
        )
import re
from enum import Enum, IntEnum, auto
from datetime import datetime
from dataclasses import field
from typing import List, Dict
from marshmallow import ValidationError
from . import persistent
from .serializable import Serializable, ApiObject, PolymorphicContainer
class ResourceRef(Serializable):
api: str
namespace: str = field(default=None)
kind: str
name: str
def __hash__(self):
return hash((self.api, self.namespace, self.kind, self.name))
def __repr__(self):
message = f"{self.kind}(api='{self.api}', "
return message + f"namespace='{self.namespace}', name='{self.name}')"
_label_key_pattern = None
_label_value_pattern = None
_label_key_regex = None
_label_value_regex = None
def _get_labels_regex():
"""Build or return the regular expressions that are used to validate the key and
value of the labels of the Krake resources.
The first call builds the expressions, while a second returns the already built
ones.
Returns:
(re.Pattern, re.Pattern): a tuple that contains the compiled regular,
expressions, the first element to validate the key and the second to
validate the value.
"""
global _label_key_pattern, _label_value_pattern
global _label_key_regex, _label_value_regex
if _label_key_regex and _label_value_regex:
return _label_key_regex, _label_value_regex
# Build the patterns only if not already built
max_prefix_size = 253
max_key_size = 63
max_value_size = max_key_size
# First and last characters must be alphanumeric. The rest of the string must be
# alphanumeric, "-", "_" or "."
base_alphanumeric_pattern = "\\w|(\\w[\\w\\-_.]{{0,{length}}}\\w)"
key_pattern = base_alphanumeric_pattern.format(length=max_key_size - 2)
value_pattern = base_alphanumeric_pattern.format(length=max_value_size - 2)
prefix_pattern = base_alphanumeric_pattern.format(length=max_prefix_size - 2)
# The key can be a string of length 63 with the specifications described above,
# or have a prefix, then one "/" character, then the string of length 63 (called
# name).
# The prefix itself should have a max length of 253, but otherwise follows the
# specifications described above.
_label_key_pattern = f"^(({prefix_pattern})\\/)?({key_pattern})$"
# The value can be a string of length 63 with the specifications described
# above.
_label_value_pattern = value_pattern
_label_key_regex = re.compile(_label_key_pattern, re.ASCII)
_label_value_regex = re.compile(_label_value_pattern, re.ASCII)
return _label_key_regex, _label_value_regex
def validate_key(key):
"""Validate the given key against the corresponding regular expression.
Args:
key: the string to validate
Raises:
ValidationError: if the given key is not conform to the regular expression.
"""
key_regex, _ = _get_labels_regex()
if not key_regex.fullmatch(key):
raise ValidationError(
f"Label key {key!r} does not match the regex {_label_key_pattern!r}."
)
def validate_value(value):
"""Validate the given value against the corresponding regular expression.
Args:
value: the string to validate
Raises:
ValidationError: if the given value is not conform to the regular expression.
"""
_, value_regex = _get_labels_regex()
if not value_regex.fullmatch(value):
raise ValidationError(
f"Label value {value!r} does not match"
f" the regex {_label_value_pattern!r}."
)
def _validate_labels(labels):
"""Check that keys and values in the given labels match against their corresponding
regular expressions.
Args:
labels (dict): the different labels to validate.
Raises:
ValidationError: if any of the keys and labels does not match their respective
regular expression. The error contains as message the list of all errors
which occurred in the labels. Each element of the list is a dictionary with
one key-value pair:
- key: the label key or label value for which an error occurred as string.
- value: the error message.
.. code:: python
# Example:
labels = {
"key1": "valid",
"key2": ["invalid"],
"$$": "invalid",
True: True,
}
try:
_validate_labels(labels)
except ValidationError as err:
assert err.messages == [
{"['invalid']": 'expected string or bytes-like object'},
{'$$': "Label key '$$' does not match the regex [...]"},
{'True': 'expected string or bytes-like object'},
{'True': 'expected string or bytes-like object'},
]
"""
errors = []
for key, value in labels.items():
try:
validate_key(key)
except (ValidationError, TypeError) as err:
errors.append({str(key): str(err)})
try:
validate_value(value)
except (ValidationError, TypeError) as err:
errors.append({str(value): str(err)})
if errors:
raise ValidationError(list(errors))
_resource_name_pattern = None
_resource_name_regex = None
def _get_resource_name_regex():
"""Build or return the regular expressions that are used to validate
the name of the Krake resources.
Returns:
(re.Pattern): the compiled regular expressions, to validate
the resource name.
"""
global _resource_name_regex, _resource_name_pattern
# Build the patterns only if not already built
if _resource_name_regex:
return _resource_name_regex
# First and last characters must be alphanumeric. The rest of the string must be
# alphanumeric, "-", "_" or "." and without whitespace as well as have a
# max length of 255 and a min length of 1
max_name_size = 253 # reduced by 2 for the regex
min_name_size = 0 # reduced by 1 for the regex
base_alphanumeric_pattern = "\\w|(\\w[\\w\\-_.:]{{{min_length},{length}}}\\w)"
resource_name_pattern = base_alphanumeric_pattern.format(
min_length=min_name_size, length=max_name_size
)
_resource_name_pattern = resource_name_pattern
_resource_name_regex = re.compile(_resource_name_pattern, re.ASCII)
return _resource_name_regex
def _validate_resource_name(name):
"""Each Krake resource name is checked against a specific pattern.
Which characters are not allowed is defined in _get_resource_name_regex
Args:
name(str): the different resource names to validate.
Raises:
ValidationError: if any resource name does not match their respective
regular expression.
"""
resource_name_regex = _get_resource_name_regex()
if not resource_name_regex.fullmatch(name):
raise ValidationError("Invalid character in resource name.")
def _validate_resource_namespace(namespace):
"""Each Krake resource namespace is checked against a specific pattern.
Which characters are not allowed is defined in _get_resource_name_regex
Args:
namespace(str): the different resource namespaces to validate.
Raises:
ValidationError: if any resource namespace does not match their respective
regular expression.
"""
resource_namespace_regex = _get_resource_name_regex()
if not resource_namespace_regex.fullmatch(namespace):
raise ValidationError("Invalid character in resource namespace.")
class Metadata(Serializable):
name: str = field(metadata={"immutable": True, "validate": _validate_resource_name})
namespace: str = field(
default=None,
metadata={"immutable": True, "validate": _validate_resource_namespace},
)
labels: dict = field(default_factory=dict, metadata={"validate": _validate_labels})
finalizers: List[str] = field(default_factory=list)
uid: str = field(metadata={"readonly": True})
created: datetime = field(metadata={"readonly": True})
modified: datetime = field(metadata={"readonly": True})
deleted: datetime = field(default=None, metadata={"readonly": True})
owners: List[ResourceRef] = field(default_factory=list)
class CoreMetadata(Serializable):
name: str
uid: str
class ListMetadata(Serializable):
pass # TODO
class ReasonCode(IntEnum):
INTERNAL_ERROR = 1 # Default error
INVALID_RESOURCE = 10 # Invalid values in the Manifest
# Kubernetes' resource is not supported by the Kubernetes controller
UNSUPPORTED_RESOURCE = 11
# The custom resource provided does not exist or is invalid
INVALID_CUSTOM_RESOURCE = 12
CLUSTER_NOT_REACHABLE = 20 # Connectivity issue with the Kubernetes deployment
NO_SUITABLE_RESOURCE = 50 # Scheduler issue
KUBERNETES_ERROR = 60
CREATE_FAILED = 70
RECONCILE_FAILED = 71
DELETE_FAILED = 72
OPENSTACK_ERROR = 80
INVALID_CLUSTER_TEMPLATE = 81
# Related to Metrics and Metric Provider
INVALID_METRIC = 91
UNREACHABLE_METRICS_PROVIDER = 92
UNKNOWN_METRIC = 93
UNKNOWN_METRICS_PROVIDER = 94
class Reason(Serializable):
code: ReasonCode
message: str
class WatchEventType(Enum):
ADDED = auto()
MODIFIED = auto()
DELETED = auto()
class Status(Serializable):
reason: Reason = None
class WatchEvent(Serializable):
type: WatchEventType
object: dict
class Verb(Enum):
create = auto()
list = auto()
list_all = auto()
get = auto()
update = auto()
delete = auto()
class RoleRule(Serializable):
api: str
resources: List[str]
namespaces: List[str]
verbs: List[Verb]
@persistent("/core/roles/{name}")
class Role(ApiObject):
api: str = "core"
kind: str = "Role"
metadata: Metadata
rules: List[RoleRule]
class RoleList(ApiObject):
api: str = "core"
kind: str = "RoleList"
metadata: ListMetadata
items: List[Role]
@persistent("/core/rolebindings/{name}")
class RoleBinding(ApiObject):
api: str = "core"
kind: str = "RoleBinding"
metadata: Metadata
users: List[str]
roles: List[str]
class RoleBindingList(ApiObject):
api: str = "core"
kind: str = "RoleBindingList"
metadata: ListMetadata
items: List[RoleBinding]
class Conflict(Serializable):
source: ResourceRef
conflicting: List[ResourceRef]
def resource_ref(resource):
    """Create a :class:`ResourceRef` from a :class:`ApiObject`

    Args:
        resource (.serializable.ApiObject): API object that should be
            referenced

    Returns:
        ResourceRef: Corresponding reference to the API object
    """
    # Only the identifying attributes are copied; the reference does not
    # keep a link to the original object.
    return ResourceRef(
        api=resource.api,
        kind=resource.kind,
        namespace=resource.metadata.namespace,
        name=resource.metadata.name,
    )
class MetricSpecProvider(Serializable):
name: str
metric: str
class MetricSpec(Serializable):
min: float
max: float
provider: MetricSpecProvider
class BaseMetric(ApiObject):
api: str = "core"
kind: str = None
metadata: Metadata
spec: MetricSpec
@persistent("/core/globalmetrics/{name}")
class GlobalMetric(BaseMetric):
api: str = "core"
kind: str = "GlobalMetric"
metadata: Metadata
spec: MetricSpec
@persistent("/core/metrics/{namespace}/{name}")
class Metric(BaseMetric):
api: str = "core"
kind: str = "Metric"
metadata: Metadata
spec: MetricSpec
class MetricList(ApiObject):
api: str = "core"
kind: str = "MetricList"
metadata: ListMetadata
items: List[Metric]
class GlobalMetricList(ApiObject):
api: str = "core"
kind: str = "GlobalMetricList"
metadata: ListMetadata
items: List[GlobalMetric]
class MetricsProviderSpec(PolymorphicContainer):
type: str
@MetricsProviderSpec.register("prometheus")
class PrometheusSpec(Serializable):
url: str
@MetricsProviderSpec.register("kafka")
class KafkaSpec(Serializable):
"""Specifications to connect to a KSQL database, and retrieve a specific row from a
specific table.
Attributes:
comparison_column (str): name of the column where the value will be compared to
the metric name, to select the right metric.
value_column (str): name of the column where the value of a metric is stored.
table (str): the name of the KSQL table where the metric is defined.
url (str): endpoint of the KSQL database.
"""
comparison_column: str
value_column: str
table: str
url: str
@MetricsProviderSpec.register("static")
class StaticSpec(Serializable):
metrics: Dict[str, float]
class BaseMetricsProvider(ApiObject):
api: str = "core"
kind: str = None
metadata: Metadata
spec: MetricsProviderSpec
@persistent("/core/globalmetricsproviders/{name}")
class GlobalMetricsProvider(BaseMetricsProvider):
api: str = "core"
kind: str = "GlobalMetricsProvider"
metadata: Metadata
spec: MetricsProviderSpec
@persistent("/core/metricsproviders/{namespace}/{name}")
class MetricsProvider(BaseMetricsProvider):
api: str = "core"
kind: str = "MetricsProvider"
metadata: Metadata
spec: MetricsProviderSpec
class MetricsProviderList(ApiObject):
api: str = "core"
kind: str = "MetricsProviderList"
metadata: ListMetadata
items: List[MetricsProvider]
class GlobalMetricsProviderList(ApiObject):
api: str = "core"
kind: str = "GlobalMetricsProviderList"
metadata: ListMetadata
items: List[GlobalMetricsProvider]
class MetricRef(Serializable):
name: str
weight: float
namespaced: bool = False
|
krake/krake/data/core.py
|
codereval_python_data_61
|
Validate the given key against the corresponding regular expression.
Args:
key: the string to validate
Raises:
ValidationError: if the given key is not conform to the regular expression.
def validate_key(key):
    """Validate the given key against the corresponding regular expression.

    Args:
        key: the string to validate

    Raises:
        ValidationError: if the given key is not conform to the regular expression.
    """
    # Fetch the cached, compiled label-key pattern (built on first use).
    key_regex, _ = _get_labels_regex()
    if not key_regex.fullmatch(key):
        raise ValidationError(
            f"Label key {key!r} does not match the regex {_label_key_pattern!r}."
        )
import re
from enum import Enum, IntEnum, auto
from datetime import datetime
from dataclasses import field
from typing import List, Dict
from marshmallow import ValidationError
from . import persistent
from .serializable import Serializable, ApiObject, PolymorphicContainer
class ResourceRef(Serializable):
    # Identifying attributes of an API object: API group, namespace, kind
    # and name. "namespace" defaults to None — presumably for resources that
    # are not namespaced; confirm against resource_ref callers.
    api: str
    namespace: str = field(default=None)
    kind: str
    name: str

    def __hash__(self):
        # Hash over the full identity so references can be used as dict keys
        # and set members.
        return hash((self.api, self.namespace, self.kind, self.name))

    def __repr__(self):
        message = f"{self.kind}(api='{self.api}', "
        return message + f"namespace='{self.namespace}', name='{self.name}')"
_label_key_pattern = None
_label_value_pattern = None
_label_key_regex = None
_label_value_regex = None
def _get_labels_regex():
"""Build or return the regular expressions that are used to validate the key and
value of the labels of the Krake resources.
The first call builds the expressions, while a second returns the already built
ones.
Returns:
(re.Pattern, re.Pattern): a tuple that contains the compiled regular,
expressions, the first element to validate the key and the second to
validate the value.
"""
global _label_key_pattern, _label_value_pattern
global _label_key_regex, _label_value_regex
if _label_key_regex and _label_value_regex:
return _label_key_regex, _label_value_regex
# Build the patterns only if not already built
max_prefix_size = 253
max_key_size = 63
max_value_size = max_key_size
# First and last characters must be alphanumeric. The rest of the string must be
# alphanumeric, "-", "_" or "."
base_alphanumeric_pattern = "\\w|(\\w[\\w\\-_.]{{0,{length}}}\\w)"
key_pattern = base_alphanumeric_pattern.format(length=max_key_size - 2)
value_pattern = base_alphanumeric_pattern.format(length=max_value_size - 2)
prefix_pattern = base_alphanumeric_pattern.format(length=max_prefix_size - 2)
# The key can be a string of length 63 with the specifications described above,
# or have a prefix, then one "/" character, then the string of length 63 (called
# name).
# The prefix itself should have a max length of 253, but otherwise follows the
# specifications described above.
_label_key_pattern = f"^(({prefix_pattern})\\/)?({key_pattern})$"
# The value can be a string of length 63 with the specifications described
# above.
_label_value_pattern = value_pattern
_label_key_regex = re.compile(_label_key_pattern, re.ASCII)
_label_value_regex = re.compile(_label_value_pattern, re.ASCII)
return _label_key_regex, _label_value_regex
def validate_key(key):
"""Validate the given key against the corresponding regular expression.
Args:
key: the string to validate
Raises:
ValidationError: if the given key is not conform to the regular expression.
"""
key_regex, _ = _get_labels_regex()
if not key_regex.fullmatch(key):
raise ValidationError(
f"Label key {key!r} does not match the regex {_label_key_pattern!r}."
)
def validate_value(value):
    """Check that a label value conforms to the label-value regular expression.

    Args:
        value: the string to validate

    Raises:
        ValidationError: if the given value does not conform to the regular
            expression.
    """
    regex = _get_labels_regex()[1]
    if regex.fullmatch(value) is not None:
        return
    raise ValidationError(
        f"Label value {value!r} does not match the regex {_label_value_pattern!r}."
    )
def _validate_labels(labels):
    """Check every key and value of the given labels against their
    corresponding regular expressions.

    Args:
        labels (dict): the different labels to validate.

    Raises:
        ValidationError: if any key or value does not match its regular
            expression. The error message is the list of all errors which
            occurred in the labels. Each element of the list is a dictionary
            with one key-value pair:

            - key: the label key or label value for which an error occurred,
              as string.
            - value: the error message.

    .. code:: python

        # Example:
        labels = {
            "key1": "valid",
            "key2": ["invalid"],
            "$$": "invalid",
            True: True,
        }

        try:
            _validate_labels(labels)
        except ValidationError as err:
            assert err.messages == [
                {"['invalid']": 'expected string or bytes-like object'},
                {'$$': "Label key '$$' does not match the regex [...]"},
                {'True': 'expected string or bytes-like object'},
                {'True': 'expected string or bytes-like object'},
            ]

    """
    collected = []
    for label_key, label_value in labels.items():
        # Validate the key first, then the value, so errors keep the same
        # order as the iteration over the labels.
        for validator, target in (
            (validate_key, label_key),
            (validate_value, label_value),
        ):
            try:
                validator(target)
            except (ValidationError, TypeError) as err:
                collected.append({str(target): str(err)})
    if collected:
        raise ValidationError(list(collected))
# Module-level cache for the resource-name validation regex: the pattern
# string and its compiled form are built lazily by _get_resource_name_regex().
_resource_name_pattern = None
_resource_name_regex = None
def _get_resource_name_regex():
    """Lazily build, cache and return the regular expression used to validate
    the name of the Krake resources.

    Returns:
        (re.Pattern): the compiled regular expression, to validate the
            resource name.
    """
    global _resource_name_regex, _resource_name_pattern

    # Reuse the compiled expression if it was already built.
    if _resource_name_regex is not None:
        return _resource_name_regex

    # A name must start and end with an alphanumeric character; in between,
    # alphanumerics, "-", "_", "." and ":" are allowed (no whitespace).
    middle_max = 253  # maximum total length of 255, minus the two edge chars
    middle_min = 0  # minimum total length of 1, minus one mandatory char
    template = "\\w|(\\w[\\w\\-_.:]{{{min_length},{length}}}\\w)"
    _resource_name_pattern = template.format(min_length=middle_min, length=middle_max)
    _resource_name_regex = re.compile(_resource_name_pattern, re.ASCII)
    return _resource_name_regex
def _validate_resource_name(name):
    """Check a Krake resource name against the resource-name pattern.

    The set of allowed characters is defined in _get_resource_name_regex.

    Args:
        name (str): the resource name to validate.

    Raises:
        ValidationError: if the name does not match the regular expression.
    """
    if _get_resource_name_regex().fullmatch(name) is None:
        raise ValidationError("Invalid character in resource name.")
def _validate_resource_namespace(namespace):
    """Check a Krake resource namespace against the resource-name pattern.

    The set of allowed characters is defined in _get_resource_name_regex.

    Args:
        namespace (str): the resource namespace to validate.

    Raises:
        ValidationError: if the namespace does not match the regular
            expression.
    """
    if _get_resource_name_regex().fullmatch(namespace) is None:
        raise ValidationError("Invalid character in resource namespace.")
class Metadata(Serializable):
    """Standard metadata attached to namespaced Krake resources.

    NOTE: the semantics of the ``immutable``/``readonly``/``validate`` field
    metadata are implemented by the Serializable machinery (not shown here).
    """

    # Resource name, checked by _validate_resource_name.
    name: str = field(metadata={"immutable": True, "validate": _validate_resource_name})
    # Namespace of the resource, checked by _validate_resource_namespace.
    namespace: str = field(
        default=None,
        metadata={"immutable": True, "validate": _validate_resource_namespace},
    )
    # Free-form labels; keys and values are checked by _validate_labels.
    labels: dict = field(default_factory=dict, metadata={"validate": _validate_labels})
    # Finalizer names — presumably block deletion until removed, as in
    # Kubernetes; TODO confirm against the garbage-collection code.
    finalizers: List[str] = field(default_factory=list)
    # Unique identifier of the resource, set by the API only.
    uid: str = field(metadata={"readonly": True})
    # Lifecycle timestamps, set by the API only.
    created: datetime = field(metadata={"readonly": True})
    modified: datetime = field(metadata={"readonly": True})
    deleted: datetime = field(default=None, metadata={"readonly": True})
    # References to the resources that own this one.
    owners: List[ResourceRef] = field(default_factory=list)
class CoreMetadata(Serializable):
    """Reduced metadata carrying only a name and a unique identifier."""

    name: str
    uid: str
class ListMetadata(Serializable):
    """Metadata for ``*List`` API objects; no fields defined yet."""

    pass # TODO
class ReasonCode(IntEnum):
    """Numeric codes identifying why an operation on a resource failed.

    Values are grouped by component: 1 generic, 10s manifest issues, 20s
    connectivity, 50s scheduling, 60–72 Kubernetes, 80s OpenStack, 90s
    metrics and metrics providers (see the inline comments).
    """

    INTERNAL_ERROR = 1 # Default error
    INVALID_RESOURCE = 10 # Invalid values in the Manifest
    # Kubernetes' resource is not supported by the Kubernetes controller
    UNSUPPORTED_RESOURCE = 11
    # The custom resource provided does not exist or is invalid
    INVALID_CUSTOM_RESOURCE = 12
    CLUSTER_NOT_REACHABLE = 20 # Connectivity issue with the Kubernetes deployment
    NO_SUITABLE_RESOURCE = 50 # Scheduler issue
    KUBERNETES_ERROR = 60
    CREATE_FAILED = 70
    RECONCILE_FAILED = 71
    DELETE_FAILED = 72
    OPENSTACK_ERROR = 80
    INVALID_CLUSTER_TEMPLATE = 81
    # Related to Metrics and Metric Provider
    INVALID_METRIC = 91
    UNREACHABLE_METRICS_PROVIDER = 92
    UNKNOWN_METRIC = 93
    UNKNOWN_METRICS_PROVIDER = 94
class Reason(Serializable):
    """Machine-readable code plus human-readable message for a failure."""

    code: ReasonCode
    message: str
class WatchEventType(Enum):
    """Kind of change reported by a watch event."""

    ADDED = auto()
    MODIFIED = auto()
    DELETED = auto()
class Status(Serializable):
    """Base status of a resource; carries the failure reason, if any."""

    reason: Reason = None
class WatchEvent(Serializable):
    """Event emitted on a watch: the type of change and the changed object
    (serialized as a plain dict)."""

    type: WatchEventType
    object: dict
class Verb(Enum):
    """Operations that RBAC role rules (see :class:`RoleRule`) can allow on
    API resources."""

    create = auto()
    list = auto()
    list_all = auto()
    get = auto()
    update = auto()
    delete = auto()
class RoleRule(Serializable):
    """Single RBAC rule: which verbs are allowed on which resources of which
    API, in which namespaces."""

    api: str
    resources: List[str]
    namespaces: List[str]
    verbs: List[Verb]
@persistent("/core/roles/{name}")
class Role(ApiObject):
    """RBAC role: a named collection of :class:`RoleRule`, persisted under
    ``/core/roles/{name}``."""

    api: str = "core"
    kind: str = "Role"
    metadata: Metadata
    rules: List[RoleRule]
class RoleList(ApiObject):
    """List wrapper around :class:`Role` objects."""

    api: str = "core"
    kind: str = "RoleList"
    metadata: ListMetadata
    items: List[Role]
@persistent("/core/rolebindings/{name}")
class RoleBinding(ApiObject):
    """Associates users with roles, persisted under
    ``/core/rolebindings/{name}``."""

    api: str = "core"
    kind: str = "RoleBinding"
    metadata: Metadata
    users: List[str]
    roles: List[str]
class RoleBindingList(ApiObject):
    """List wrapper around :class:`RoleBinding` objects."""

    api: str = "core"
    kind: str = "RoleBindingList"
    metadata: ListMetadata
    items: List[RoleBinding]
class Conflict(Serializable):
    """A source resource together with the resources conflicting with it."""

    source: ResourceRef
    conflicting: List[ResourceRef]
def resource_ref(resource):
    """Create a :class:`ResourceRef` from a :class:`ApiObject`

    Args:
        resource (.serializable.ApiObject): API object that should be
            referenced

    Returns:
        ResourceRef: Corresponding reference to the API object
    """
    meta = resource.metadata
    return ResourceRef(
        api=resource.api,
        kind=resource.kind,
        namespace=meta.namespace,
        name=meta.name,
    )
class MetricSpecProvider(Serializable):
    """Reference from a metric to its provider."""

    # Name of the metrics provider serving this metric.
    name: str
    # Identifier of the metric on the provider side — presumably; confirm
    # against the metrics-provider client code.
    metric: str
class MetricSpec(Serializable):
    """Specification of a metric: its value bounds and its provider."""

    min: float
    max: float
    provider: MetricSpecProvider
class BaseMetric(ApiObject):
    """Common shape of metric API objects; subclasses set ``kind``."""

    api: str = "core"
    kind: str = None
    metadata: Metadata
    spec: MetricSpec
@persistent("/core/globalmetrics/{name}")
class GlobalMetric(BaseMetric):
    """Non-namespaced metric, persisted under ``/core/globalmetrics/{name}``."""

    api: str = "core"
    kind: str = "GlobalMetric"
    metadata: Metadata
    spec: MetricSpec
@persistent("/core/metrics/{namespace}/{name}")
class Metric(BaseMetric):
    """Namespaced metric, persisted under
    ``/core/metrics/{namespace}/{name}``."""

    api: str = "core"
    kind: str = "Metric"
    metadata: Metadata
    spec: MetricSpec
class MetricList(ApiObject):
    """List wrapper around :class:`Metric` objects."""

    api: str = "core"
    kind: str = "MetricList"
    metadata: ListMetadata
    items: List[Metric]
class GlobalMetricList(ApiObject):
    """List wrapper around :class:`GlobalMetric` objects."""

    api: str = "core"
    kind: str = "GlobalMetricList"
    metadata: ListMetadata
    items: List[GlobalMetric]
class MetricsProviderSpec(PolymorphicContainer):
    """Polymorphic specification of a metrics provider: the concrete variant
    (``prometheus``, ``kafka``, ``static``) is selected by the ``type``
    discriminator; variants register themselves via ``register``."""

    type: str
@MetricsProviderSpec.register("prometheus")
class PrometheusSpec(Serializable):
    """Connection details for a Prometheus metrics provider."""

    # Endpoint of the Prometheus server.
    url: str
@MetricsProviderSpec.register("kafka")
class KafkaSpec(Serializable):
    """Specifications to connect to a KSQL database, and retrieve a specific
    row from a specific table.

    Attributes:
        comparison_column (str): name of the column where the value will be
            compared to the metric name, to select the right metric.
        value_column (str): name of the column where the value of a metric is
            stored.
        table (str): the name of the KSQL table where the metric is defined.
        url (str): endpoint of the KSQL database.

    """

    comparison_column: str
    value_column: str
    table: str
    url: str
@MetricsProviderSpec.register("static")
class StaticSpec(Serializable):
    """Metrics provider defined by a fixed mapping of metric name to value."""

    metrics: Dict[str, float]
class BaseMetricsProvider(ApiObject):
    """Common shape of metrics-provider API objects; subclasses set ``kind``."""

    api: str = "core"
    kind: str = None
    metadata: Metadata
    spec: MetricsProviderSpec
@persistent("/core/globalmetricsproviders/{name}")
class GlobalMetricsProvider(BaseMetricsProvider):
    """Non-namespaced metrics provider, persisted under
    ``/core/globalmetricsproviders/{name}``."""

    api: str = "core"
    kind: str = "GlobalMetricsProvider"
    metadata: Metadata
    spec: MetricsProviderSpec
@persistent("/core/metricsproviders/{namespace}/{name}")
class MetricsProvider(BaseMetricsProvider):
    """Namespaced metrics provider, persisted under
    ``/core/metricsproviders/{namespace}/{name}``."""

    api: str = "core"
    kind: str = "MetricsProvider"
    metadata: Metadata
    spec: MetricsProviderSpec
class MetricsProviderList(ApiObject):
    """List wrapper around :class:`MetricsProvider` objects."""

    api: str = "core"
    kind: str = "MetricsProviderList"
    metadata: ListMetadata
    items: List[MetricsProvider]
class GlobalMetricsProviderList(ApiObject):
    """List wrapper around :class:`GlobalMetricsProvider` objects."""

    api: str = "core"
    kind: str = "GlobalMetricsProviderList"
    metadata: ListMetadata
    items: List[GlobalMetricsProvider]
class MetricRef(Serializable):
    """Weighted reference to a metric by name."""

    name: str
    weight: float
    # Whether the referenced metric is namespaced — presumably selects
    # :class:`Metric` over :class:`GlobalMetric`; confirm against scheduler
    # usage.
    namespaced: bool = False
|
krake/krake/data/core.py
|
codereval_python_data_62
|
Together with :func:``generate_default_observer_schema_list``, this function is
called recursively to generate part of a default ``observer_schema`` from part of a
Kubernetes resource, defined respectively by ``manifest_dict`` or ``manifest_list``.
Args:
manifest_dict (dict): Partial Kubernetes resources
first_level (bool, optional): If True, indicates that the dictionary represents
the whole observer schema of a Kubernetes resource
Returns:
dict: Generated partial observer_schema
This function creates a new dictionary from ``manifest_dict`` and replaces all
non-list and non-dict values by ``None``.
In case of ``first_level`` dictionary (i.e. complete ``observer_schema`` for a
resource), the values of the identifying fields are copied from the manifest file.
def generate_default_observer_schema_dict(manifest_dict, first_level=False):
    """Together with :func:``generate_default_observer_schema_list``, this
    function is called recursively to generate part of a default
    ``observer_schema`` from part of a Kubernetes resource.

    Args:
        manifest_dict (dict): Partial Kubernetes resources
        first_level (bool, optional): If True, indicates that the dictionary
            represents the whole observer schema of a Kubernetes resource

    Returns:
        dict: Generated partial observer_schema

    A new dictionary is built from ``manifest_dict`` where every non-container
    value becomes ``None``. For a ``first_level`` dictionary (i.e. a complete
    ``observer_schema`` for a resource), the values of the identifying fields
    are copied over from the manifest file.
    """

    def default_for(manifest_value):
        # Containers are recursed into; every other value is unobserved (None).
        if isinstance(manifest_value, dict):
            return generate_default_observer_schema_dict(manifest_value)
        if isinstance(manifest_value, list):
            return generate_default_observer_schema_list(manifest_value)
        return None

    schema = {key: default_for(value) for key, value in manifest_dict.items()}

    if not first_level:
        return schema

    # Copy the identifying fields of the resource from the manifest.
    schema["apiVersion"] = manifest_dict["apiVersion"]
    schema["kind"] = manifest_dict["kind"]
    schema["metadata"]["name"] = manifest_dict["metadata"]["name"]

    if (
        "spec" in manifest_dict
        and "type" in manifest_dict["spec"]
        and manifest_dict["spec"]["type"] == "LoadBalancer"
    ):
        schema["status"] = {"load_balancer": {"ingress": None}}

    return schema
"""This module defines the Hook Dispatcher and listeners for registering and
executing hooks. Hook Dispatcher emits hooks based on :class:`Hook` attributes which
define when the hook will be executed.
"""
import asyncio
import logging
import random
from base64 import b64encode
from collections import defaultdict
from contextlib import suppress
from copy import deepcopy
from datetime import datetime
from functools import reduce
from operator import getitem
from enum import Enum, auto
from inspect import iscoroutinefunction
from OpenSSL import crypto
from typing import NamedTuple
import yarl
from aiohttp import ClientConnectorError
from krake.controller import Observer
from krake.controller.kubernetes.client import KubernetesClient, InvalidManifestError
from krake.utils import camel_to_snake_case, get_kubernetes_resource_idx
from kubernetes_asyncio.client.rest import ApiException
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio import client
from krake.data.kubernetes import ClusterState, Application, Cluster
from yarl import URL
from secrets import token_urlsafe
from kubernetes_asyncio.client import (
Configuration,
V1Secret,
V1EnvVar,
V1VolumeMount,
V1Volume,
V1SecretKeySelector,
V1EnvVarSource,
)
from kubernetes_asyncio.config.kube_config import KubeConfigLoader
logger = logging.getLogger(__name__)
class HookType(Enum):
    """Events on which handlers can be registered with a
    :class:`HookDispatcher`.

    The ``Resource*`` members bracket operations on individual Kubernetes
    resources, the ``Application*`` members bracket whole-application
    operations, and the ``Cluster*`` members bracket cluster lifecycle
    operations.
    """

    ResourcePreCreate = auto()
    ResourcePostCreate = auto()
    ResourcePreUpdate = auto()
    ResourcePostUpdate = auto()
    ResourcePreDelete = auto()
    ResourcePostDelete = auto()
    ApplicationMangling = auto()
    ApplicationPreMigrate = auto()
    ApplicationPostMigrate = auto()
    ApplicationPreReconcile = auto()
    ApplicationPostReconcile = auto()
    ApplicationPreDelete = auto()
    ApplicationPostDelete = auto()
    ClusterCreation = auto()
    ClusterDeletion = auto()
class HookDispatcher(object):
    """Simple wrapper around a registry of handlers associated to :class:`Hook`
    attributes. Each :class:`Hook` attribute defines when the handler will be
    executed.

    Listeners for certain hooks can be registered via :meth:`on`. Registered
    listeners are executed via :meth:`hook`.

    Example:
        .. code:: python

            listen = HookDispatcher()

            @listen.on(HookType.ResourcePreCreate)
            def to_perform_before_resource_creation(app, cluster, resource, controller):
                # Do Stuff

            @listen.on(HookType.ResourcePostCreate)
            def another_to_perform_after_resource_creation(app, cluster, resource, resp):
                # Do Stuff

            @listen.on(HookType.ResourcePostDelete)
            def to_perform_after_resource_deletion(app, cluster, resource, resp):
                # Do Stuff

    """

    def __init__(self):
        # Maps a hook attribute to the list of handlers registered for it.
        self.registry = defaultdict(list)

    def on(self, hook):
        """Decorator function to add a new handler to the registry.

        Args:
            hook (HookType): Hook attribute for which to register the handler.

        Returns:
            callable: Decorator for registering listeners for the specified
            hook.
        """

        def decorator(handler):
            self.registry[hook].append(handler)
            return handler

        return decorator

    async def hook(self, hook, **kwargs):
        """Execute the list of handlers associated to the provided :class:`Hook`
        attribute.

        Args:
            hook (HookType): The hook attribute for which to execute handlers.
        """
        # ``registry`` is a defaultdict, so indexing can never raise KeyError
        # (the previous try/except KeyError was dead code); ``get`` also
        # avoids inserting an empty list for hooks without handlers.
        for handler in self.registry.get(hook, []):
            if iscoroutinefunction(handler):
                await handler(**kwargs)
            else:
                handler(**kwargs)
# Module-level dispatcher on which all the hook handlers below are registered
# via the ``@listen.on(...)`` decorators.
listen = HookDispatcher()
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
async def register_service(app, cluster, resource, response):
    """Register endpoint of Kubernetes Service object on creation and update.

    Args:
        app (krake.data.kubernetes.Application): Application the service
            belongs to
        cluster (krake.data.kubernetes.Cluster): The cluster on which the
            application is running
        resource (dict): Kubernetes object description as specified in the
            specification of the application.
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    """
    if resource["kind"] != "Service":
        return

    name = resource["metadata"]["name"]
    spec = response.spec

    if spec and spec.type == "LoadBalancer":
        # For a "LoadBalancer" type of Service, an external IP is given in the
        # cluster by a load balancer controller to the service. In this case,
        # the "port" specified in the spec is reachable from the outside.
        status = response.status
        if status.load_balancer and status.load_balancer.ingress:
            external_ip = status.load_balancer.ingress[0].ip
        else:
            # When a "LoadBalancer" type of service is created, the IP is given
            # by an additional controller (e.g. a controller that requests a
            # floating IP to an OpenStack infrastructure). This process can
            # take some time, but the Service itself already exists before the
            # IP is assigned. In the case of an error with the controller, the
            # IP is also not given. This "<pending>" IP just expresses that the
            # Service exists, but the IP is not ready yet.
            external_ip = "<pending>"
        external_port = spec.ports[0].port if spec.ports else "<pending>"
        app.status.services[name] = f"{external_ip}:{external_port}"
        return

    # Non-LoadBalancer Service: take the node port of the first declared port,
    # if any.
    node_port = spec.ports[0].node_port if spec and spec.ports else None

    # If the service does not have a node port, remove a potential reference
    # and return.
    if node_port is None:
        app.status.services.pop(name, None)
        return

    # Determine URL of Kubernetes cluster API
    loader = KubeConfigLoader(cluster.spec.kubeconfig)
    config = Configuration()
    await loader.load_and_set(config)
    host = yarl.URL(config.host).host

    app.status.services[name] = f"{host}:{node_port}"
@listen.on(HookType.ResourcePostDelete)
async def unregister_service(app, resource, **kwargs):
    """Unregister endpoint of Kubernetes Service object on deletion.

    Args:
        app (krake.data.kubernetes.Application): Application the service
            belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    if resource["kind"] == "Service":
        # Removing an endpoint that was never registered is not an error.
        app.status.services.pop(resource["metadata"]["name"], None)
@listen.on(HookType.ResourcePostDelete)
async def remove_resource_from_last_observed_manifest(app, resource, **kwargs):
    """Remove a given resource from the last_observed_manifest after its
    deletion.

    Args:
        app (krake.data.kubernetes.Application): Application the resource
            belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    manifest = app.status.last_observed_manifest
    try:
        position = get_kubernetes_resource_idx(manifest, resource)
    except IndexError:
        # The resource was never observed: nothing to clean up.
        return
    del manifest[position]
def update_last_applied_manifest_dict_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_list_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response

    Args:
        last_applied_manifest (dict): partial ``last_applied_manifest`` being
            updated
        observer_schema (dict): partial ``observer_schema``
        response (dict): partial response from the Kubernetes API.

    Raises:
        KeyError: If the observed field is not present in the Kubernetes
            response

    This function goes through all observed fields, and initializes their
    value in ``last_applied_manifest`` if they are not yet present
    """
    for key, value in observer_schema.items():
        # The observer schema uses the manifest's camelCase keys, while the
        # deserialized Kubernetes response uses snake_case attribute names:
        # convert for the lookup. (Previously misnamed ``camel_key``.)
        snake_key = camel_to_snake_case(key)
        if snake_key not in response:
            # An observed key should always be present in the k8s response
            raise KeyError(
                f"Observed key {snake_key} is not present in response {response}"
            )
        if isinstance(value, dict):
            if key not in last_applied_manifest:
                # The dictionary is observed, but not present in
                # last_applied_manifest
                last_applied_manifest[key] = {}
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest[key], observer_schema[key], response[snake_key]
            )
        elif isinstance(value, list):
            if key not in last_applied_manifest:
                # The list is observed, but not present in last_applied_manifest
                last_applied_manifest[key] = []
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest[key], observer_schema[key], response[snake_key]
            )
        elif key not in last_applied_manifest:
            # If key not present in last_applied_manifest, and value is neither a
            # dict nor a list, simply add it.
            last_applied_manifest[key] = response[snake_key]
def update_last_applied_manifest_list_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_dict_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response

    Args:
        last_applied_manifest (list): partial ``last_applied_manifest`` being
            updated
        observer_schema (list): partial ``observer_schema``
        response (list): partial response from the Kubernetes API.

    This function goes through all observed fields, and initializes their
    value in ``last_applied_manifest`` if they are not yet present
    """
    # Looping over the observed resource, except the last element which is the
    # special control dictionary
    for idx, val in enumerate(observer_schema[:-1]):
        if idx >= len(response):
            # Element is observed but not present in k8s response, so following
            # elements will also not exist.
            #
            # This doesn't raise an Exception as observing the element of a list
            # doesn't ensure its presence. The list length is controlled by the
            # special control dictionary
            return
        if isinstance(val, dict):
            if idx >= len(last_applied_manifest):
                # The dict is observed, but not present in last_applied_manifest
                last_applied_manifest.append({})
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest[idx], observer_schema[idx], response[idx]
            )
        elif isinstance(val, list):
            # The schema element (``val``) — not the response element — decides
            # the recursion, mirroring the dict branch above and the companion
            # dict function. Dispatching on ``response[idx]`` could recurse
            # with a scalar schema entry and crash on ``None[:-1]``.
            if idx >= len(last_applied_manifest):
                # The list is observed, but not present in last_applied_manifest
                last_applied_manifest.append([])
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest[idx], observer_schema[idx], response[idx]
            )
        elif idx >= len(last_applied_manifest):
            # Element is not yet present in last_applied_manifest. Adding it.
            last_applied_manifest.append(response[idx])
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_applied_manifest_from_resp(app, response, **kwargs):
    """Hook run after the creation or update of an application in order to
    update the `status.last_applied_manifest` using the k8s response.

    Args:
        app (krake.data.kubernetes.Application): Application the service
            belongs to
        response (kubernetes_asyncio.client.V1Status): Response of the
            Kubernetes API

    All fields already initialized (either from the mangling of
    `spec.manifest`, or by a previous call to this function) are left
    untouched: only observed fields which are not yet present in
    `status.last_applied_manifest` are initialized.
    """
    # A plain dict means the Kubernetes client could not deserialize the k8s
    # response into an object; otherwise convert the object into a dict.
    resp = response if isinstance(response, dict) else response.to_dict()

    applied_idx = get_kubernetes_resource_idx(app.status.last_applied_manifest, resp)
    observed_idx = get_kubernetes_resource_idx(app.status.mangled_observer_schema, resp)

    update_last_applied_manifest_dict_from_resp(
        app.status.last_applied_manifest[applied_idx],
        app.status.mangled_observer_schema[observed_idx],
        resp,
    )
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_observed_manifest_from_resp(app, response, **kwargs):
    """Handler to run after the creation or update of a Kubernetes resource to
    update the last_observed_manifest from the response of the Kubernetes API.

    Args:
        app (krake.data.kubernetes.Application): Application the service
            belongs to
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    Raises:
        IndexError: if the resource is absent from the observer schema; all
            created resources should be observed.

    The target last_observed_manifest holds the value of all observed fields
    plus the special control dictionaries for the list length
    """
    if isinstance(response, dict):
        # The Kubernetes API couldn't deserialize the k8s response into an object
        resp = response
    else:
        # The Kubernetes API deserialized the k8s response into an object
        resp = response.to_dict()

    # All created resources should be observed, so a failed lookup
    # (IndexError) is simply propagated. (The previous try/except that only
    # re-raised was a no-op and has been removed.)
    idx_observed = get_kubernetes_resource_idx(
        app.status.mangled_observer_schema,
        resp,
    )

    try:
        idx_last_observed = get_kubernetes_resource_idx(
            app.status.last_observed_manifest,
            resp,
        )
    except IndexError:
        # If the resource is not yet present in last_observed_manifest, append it.
        idx_last_observed = len(app.status.last_observed_manifest)
        app.status.last_observed_manifest.append({})

    # Overwrite the last_observed_manifest for this resource
    app.status.last_observed_manifest[
        idx_last_observed
    ] = update_last_observed_manifest_dict(
        app.status.mangled_observer_schema[idx_observed], resp
    )
def update_last_observed_manifest_dict(observed_resource, response):
    """Together with :func:``update_last_observed_manifest_list``, recursively
    crafts the ``last_observed_manifest`` from the Kubernetes :attr:``response``.

    Args:
        observed_resource (dict): The schema to observe for the partial given
            resource
        response (dict): The partial Kubernetes response for this resource.

    Raises:
        KeyError: If an observed key is not present in the Kubernetes response

    Returns:
        dict: The dictionary of observed keys and their value

    Get the value of all observed fields from the Kubernetes response
    """
    res = {}
    for key, value in observed_resource.items():
        # The observer schema uses the manifest's camelCase keys, while the
        # deserialized Kubernetes response uses snake_case attribute names:
        # convert for the lookup. (Previously misnamed ``camel_key``.)
        snake_key = camel_to_snake_case(key)
        if snake_key not in response:
            raise KeyError(
                f"Observed key {snake_key} is not present in response {response}"
            )
        if isinstance(value, dict):
            res[key] = update_last_observed_manifest_dict(value, response[snake_key])
        elif isinstance(value, list):
            res[key] = update_last_observed_manifest_list(value, response[snake_key])
        else:
            res[key] = response[snake_key]
    return res
def update_last_observed_manifest_list(observed_resource, response):
    """Together with :func:``update_last_observed_manifest_dict``, recursively
    crafts the ``last_observed_manifest`` from the Kubernetes :attr:``response``.

    Args:
        observed_resource (list): the schema to observe for the partial given
            resource
        response (list): the partial Kubernetes response for this resource.

    Returns:
        list: The list of observed elements, plus the special list length
            control dictionary

    Get the value of all observed elements from the Kubernetes response
    """
    if not response:
        # Nothing to observe: only record that the list is currently empty.
        return [{"observer_schema_list_current_length": 0}]

    res = []
    # Looping over the observed resource, except the last element which is the
    # special control dictionary
    for idx, val in enumerate(observed_resource[:-1]):
        if idx >= len(response):
            # Element is not present in the Kubernetes response, nothing more
            # to do
            break
        # isinstance instead of ``type(...) ==``: idiomatic, and also accepts
        # dict/list subclasses that a client may return.
        if isinstance(response[idx], dict):
            res.append(update_last_observed_manifest_dict(val, response[idx]))
        elif isinstance(response[idx], list):
            res.append(update_last_observed_manifest_list(val, response[idx]))
        else:
            res.append(response[idx])

    # Append the special control dictionary to the list
    res.append({"observer_schema_list_current_length": len(response)})
    return res
def update_last_applied_manifest_dict_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Together with :func:``update_last_applied_manifest_list_from_spec``, this
    function is called recursively to update a partial ``last_applied_manifest``

    Args:
        resource_status_new (dict): partial ``last_applied_manifest`` being
            updated
        resource_status_old (dict): partial of the current
            ``last_applied_manifest``
        resource_observed (dict): partial observer_schema for the manifest file
            being updated

    Observed fields present in the current ``last_applied_manifest`` but
    absent from the new one are carried over; containers are recursed into.
    """
    for key, observed_value in resource_observed.items():
        # Only fields known in the current last_applied_manifest can be copied.
        if key not in resource_status_old:
            continue

        absent = key not in resource_status_new
        if isinstance(observed_value, dict):
            if absent:
                # Initialize the container before recursing into it.
                resource_status_new[key] = {}
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[key],
                resource_status_old[key],
                observed_value,
            )
        elif isinstance(observed_value, list):
            if absent:
                # Initialize the container before recursing into it.
                resource_status_new[key] = []
            update_last_applied_manifest_list_from_spec(
                resource_status_new[key],
                resource_status_old[key],
                observed_value,
            )
        elif absent:
            # Plain observed value missing from the new manifest: keep the
            # currently applied value.
            resource_status_new[key] = resource_status_old[key]
def update_last_applied_manifest_list_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Together with :func:``update_last_applied_manifest_dict_from_spec``, this
    function is called recursively to update a partial ``last_applied_manifest``

    Args:
        resource_status_new (list): partial ``last_applied_manifest`` being
            updated
        resource_status_old (list): partial of the current
            ``last_applied_manifest``
        resource_observed (list): partial observer_schema for the manifest file
            being updated

    Observed elements present in the current ``last_applied_manifest`` but
    absent from the new one are appended; containers are recursed into.
    """
    # Skip the trailing element of the observer schema: it is the special
    # list-length control dictionary.
    for idx, observed_value in enumerate(resource_observed[:-1]):
        if idx >= len(resource_status_old):
            # Neither this element nor the following ones exist in the current
            # last_applied_manifest: nothing left to carry over.
            break

        absent = idx >= len(resource_status_new)
        if isinstance(observed_value, dict):
            if absent:
                # Initialize the container before recursing into it.
                resource_status_new.append({})
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[idx],
                resource_status_old[idx],
                observed_value,
            )
        elif isinstance(observed_value, list):
            if absent:
                # Initialize the container before recursing into it.
                resource_status_new.append([])
            update_last_applied_manifest_list_from_spec(
                resource_status_new[idx],
                resource_status_old[idx],
                observed_value,
            )
        elif absent:
            # Plain observed element missing from the new manifest: keep the
            # currently applied value.
            resource_status_new.append(resource_status_old[idx])
def update_last_applied_manifest_from_spec(app):
    """Update the status.last_applied_manifest of an application from
    spec.manifests

    Args:
        app (krake.data.kubernetes.Application): Application to update

    Called on application creation and update: the new
    ``last_applied_manifest`` starts as a copy of ``spec.manifest`` and is
    augmented with every known observed field which is present in the current
    ``last_applied_manifest`` but not in ``spec.manifest``.
    """
    updated = deepcopy(app.spec.manifest)
    current = app.status.last_applied_manifest

    for observed in app.status.mangled_observer_schema:
        # A resource absent from the current last_applied_manifest contributes
        # nothing, whether or not it appears in spec.manifest.
        try:
            idx_current = get_kubernetes_resource_idx(current, observed)
        except IndexError:
            continue

        # The resource exists in the current last_applied_manifest: observed
        # fields missing from spec.manifest must be carried over.
        try:
            idx_updated = get_kubernetes_resource_idx(updated, observed)
        except IndexError:
            # The resource is observed but absent from spec.manifest: start
            # from an empty resource, filled below with the observed and known
            # fields.
            updated.append({})
            idx_updated = len(updated) - 1

        update_last_applied_manifest_dict_from_spec(
            updated[idx_updated], current[idx_current], observed
        )

    app.status.last_applied_manifest = updated
class KubernetesApplicationObserver(Observer):
    """Observer specific for Kubernetes Applications. One observer is created for each
    Application managed by the Controller, but not one per Kubernetes resource
    (Deployment, Service...). If several resources are defined by an Application, they
    are all monitored by the same observer.

    The observer gets the actual status of the resources on the cluster using the
    Kubernetes API, and compare it to the status stored in the API.

    The observer is:
     * started at initial Krake resource creation;
     * deleted when a resource needs to be updated, then started again when it is done;
     * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster on which the observed
            Application is created.
        resource (krake.data.kubernetes.Application): the application that will be
            observed.
        on_res_update (coroutine): a coroutine called when a resource's actual status
            differs from the status sent by the database. Its signature is:
            ``(resource) -> updated_resource``. ``updated_resource`` is the instance of
            the resource that is up-to-date with the API. The Observer internal instance
            of the resource to observe will be updated. If the API cannot be contacted,
            ``None`` can be returned. In this case the internal instance of the Observer
            will not be updated.
        time_step (int, optional): how frequently the Observer should watch the actual
            status of the resources.
    """

    def __init__(self, cluster, resource, on_res_update, time_step=2):
        super().__init__(resource, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Application monitored by the Observer.

        Returns:
            krake.data.core.Status: the status object created using information from the
                real world Applications resource.

        Raises:
            ApiException: if the Kubernetes API answers with an unexpected error
                (anything other than a 404 for a missing resource).
        """
        app = self.resource

        status = deepcopy(app.status)
        status.last_observed_manifest = []

        # For each observed kubernetes resource of the Application,
        # get its current status on the cluster.
        for desired_resource in app.status.last_applied_manifest:
            kube = KubernetesClient(self.cluster.spec.kubeconfig)

            idx_observed = get_kubernetes_resource_idx(
                app.status.mangled_observer_schema, desired_resource
            )
            observed_resource = app.status.mangled_observer_schema[idx_observed]

            async with kube:
                try:
                    group, version, kind, name, namespace = kube.get_immutables(
                        desired_resource
                    )
                    resource_api = await kube.get_resource_api(group, version, kind)
                    resp = await resource_api.read(kind, name, namespace)
                except ApiException as err:
                    if err.status == 404:
                        # Resource does not exist on the cluster: simply skip it.
                        continue
                    # BUGFIX: the previous implementation only logged the error
                    # and then fell through to use the unbound ``resp``, which
                    # raised an UnboundLocalError. Log and propagate the real
                    # error instead.
                    logger.error(err)
                    raise

            # Only observed fields of the live resource are kept.
            observed_manifest = update_last_observed_manifest_dict(
                observed_resource, resp.to_dict()
            )
            status.last_observed_manifest.append(observed_manifest)

        return status
class KubernetesClusterObserver(Observer):
    """Observer specific for Kubernetes Clusters. One observer is created for each
    Cluster managed by the Controller.

    The observer gets the actual status of the cluster using the
    Kubernetes API, and compare it to the status stored in the API.

    The observer is:
     * started at initial Krake resource creation;
     * deleted when a resource needs to be updated, then started again when it is done;
     * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster which will be observed.
        on_res_update (coroutine): a coroutine called when a resource's actual status
            differs from the status sent by the database. Its signature is:
            ``(resource) -> updated_resource``. ``updated_resource`` is the instance of
            the resource that is up-to-date with the API. The Observer internal instance
            of the resource to observe will be updated. If the API cannot be contacted,
            ``None`` can be returned. In this case the internal instance of the Observer
            will not be updated.
        time_step (int, optional): how frequently the Observer should watch the actual
            status of the resources.
    """

    def __init__(self, cluster, on_res_update, time_step=2):
        super().__init__(cluster, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Cluster monitored by the Observer.

        The state is derived from the node conditions reported by the cluster:
        any memory/disk/PID pressure marks the cluster UNHEALTHY, a Ready
        cluster is ONLINE (or CONNECTING if it was previously OFFLINE), and a
        connection error marks it OFFLINE.

        Returns:
            krake.data.core.Status: the status object created using information from the
                real world Cluster.
        """
        status = deepcopy(self.cluster.status)

        # For each observed kubernetes cluster registered in Krake,
        # get its current node status.
        loader = KubeConfigLoader(self.cluster.spec.kubeconfig)
        config = Configuration()
        await loader.load_and_set(config)
        kube = ApiClient(config)

        async with kube as api:
            v1 = client.CoreV1Api(api)
            try:
                response = await v1.list_node()
            except ClientConnectorError as err:
                status.state = ClusterState.OFFLINE
                self.cluster.status.state = ClusterState.OFFLINE
                # The cluster being unreachable is an expected situation, so
                # only log it at debug level.
                logger.debug(err)
                return status

            condition_dict = {
                "MemoryPressure": [],
                "DiskPressure": [],
                "PIDPressure": [],
                "Ready": [],
            }
            for item in response.items:
                for condition in item.status.conditions:
                    # BUGFIX: nodes can report condition types not tracked
                    # here (e.g. "NetworkUnavailable"), which previously
                    # raised a KeyError. Ignore them.
                    if condition.type in condition_dict:
                        condition_dict[condition.type].append(condition.status)

            # NOTE(review): the comparisons with ``["True"]`` only match a
            # single-node cluster reporting exactly one status per condition
            # — confirm whether multi-node clusters are expected here.
            if (
                condition_dict["MemoryPressure"] == ["True"]
                or condition_dict["DiskPressure"] == ["True"]
                or condition_dict["PIDPressure"] == ["True"]
            ):
                status.state = ClusterState.UNHEALTHY
                self.cluster.status.state = ClusterState.UNHEALTHY
                return status
            elif (
                condition_dict["Ready"] == ["True"]
                and status.state is ClusterState.OFFLINE
            ):
                status.state = ClusterState.CONNECTING
                self.cluster.status.state = ClusterState.CONNECTING
                return status
            elif condition_dict["Ready"] == ["True"]:
                status.state = ClusterState.ONLINE
                self.cluster.status.state = ClusterState.ONLINE
                return status
            else:
                status.state = ClusterState.NOTREADY
                self.cluster.status.state = ClusterState.NOTREADY
                return status
@listen.on(HookType.ApplicationPostReconcile)
@listen.on(HookType.ApplicationPostMigrate)
@listen.on(HookType.ClusterCreation)
async def register_observer(controller, resource, start=True, **kwargs):
    """Create an observer for the given Application or Cluster, and start it as a
    background task if wanted.

    If an observer already existed for this Application or Cluster, it is stopped
    and deleted.

    Args:
        controller (KubernetesController): the controller for which the observer will be
            added in the list of working observers.
        resource (krake.data.kubernetes.Application): the Application to observe or
        resource (krake.data.kubernetes.Cluster): the Cluster to observe.
        start (bool, optional): if False, does not start the observer as background
            task.
    """
    if resource.kind == Application.kind:
        # An Application observer needs the cluster the Application runs on,
        # in order to reach its Kubernetes API.
        cluster = await controller.kubernetes_api.read_cluster(
            namespace=resource.status.running_on.namespace,
            name=resource.status.running_on.name,
        )
        observer = KubernetesApplicationObserver(
            cluster,
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    elif resource.kind == Cluster.kind:
        observer = KubernetesClusterObserver(
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    else:
        # BUGFIX: ``resource`` was previously passed as a logging argument
        # without a format placeholder, which made the logging call fail to
        # render the message.
        logger.debug("Unknown resource kind %r. No observer was registered.", resource)
        return

    logger.debug(f"Start observer for {resource.kind} %r", resource.metadata.name)
    task = None
    if start:
        task = controller.loop.create_task(observer.run())

    controller.observers[resource.metadata.uid] = (observer, task)
@listen.on(HookType.ApplicationPreReconcile)
@listen.on(HookType.ApplicationPreMigrate)
@listen.on(HookType.ApplicationPreDelete)
@listen.on(HookType.ClusterDeletion)
async def unregister_observer(controller, resource, **kwargs):
    """Stop and delete the observer for the given Application or Cluster. If no observer
    is started, do nothing.

    Args:
        controller (KubernetesController): the controller for which the observer will be
            removed from the list of working observers.
        resource (krake.data.kubernetes.Application): the Application whose observer
            will be stopped or
        resource (krake.data.kubernetes.Cluster): the Cluster whose observer will be
            stopped.
    """
    uid = resource.metadata.uid

    # Nothing to do if no observer was registered for this resource.
    if uid not in controller.observers:
        return

    logger.debug(f"Stop observer for {resource.kind} %r", resource.metadata.name)

    # Remove the observer from the registry and cancel its background task.
    observer_entry = controller.observers.pop(uid)
    task = observer_entry[1]
    task.cancel()
    with suppress(asyncio.CancelledError):
        await task
def utc_difference():
    """Get the difference in seconds between the current time and the current UTC time.

    Returns:
        int: the time difference in seconds.
    """
    delta = datetime.now() - datetime.utcnow()
    # NOTE(review): ``timedelta.seconds`` is always in [0, 86400) and ignores
    # the ``days`` component, so for timezones behind UTC (where ``delta`` is
    # negative) this does NOT return the real signed offset. The only visible
    # caller, generate_certificate(), negates this value to push a
    # certificate's notBefore into the past, and relies on it being
    # non-negative — confirm that expectation before changing this to
    # ``delta.total_seconds()``.
    return delta.seconds
def generate_certificate(config):
    """Create and sign a new certificate using the one defined in the complete hook
    configuration as intermediate certificate.

    Args:
        config (krake.data.config.CompleteHookConfiguration): the configuration of the
            complete hook.

    Returns:
        CertificatePair: the content of the certificate created and its corresponding
            key.
    """
    with open(config.intermediate_src, "rb") as f:
        intermediate_src = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())

    with open(config.intermediate_key_src, "rb") as f:
        intermediate_key_src = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())

    client_cert = crypto.X509()

    # Set general information.
    # BUGFIX: X.509 versions are zero-based: the value 2 encodes an X.509 v3
    # certificate. The previous ``set_version(3)`` emitted the invalid
    # version number 4.
    client_cert.set_version(2)
    # NOTE(review): ``random`` is not a cryptographically secure generator;
    # consider ``secrets`` if the serial number must be unpredictable.
    client_cert.set_serial_number(random.randint(50000000000000, 100000000000000))

    # If not set before, TLS will not accept to use this certificate in UTC cases, as
    # the server time may be earlier.
    time_offset = utc_difference() * -1
    client_cert.gmtime_adj_notBefore(time_offset)
    # The certificate is valid for one year.
    client_cert.gmtime_adj_notAfter(1 * 365 * 24 * 60 * 60)

    # Set issuer and subject
    intermediate_subject = intermediate_src.get_subject()
    client_cert.set_issuer(intermediate_subject)

    # The subject is copied from the intermediate certificate, with the CN
    # replaced by the configured hook user.
    client_subj = crypto.X509Name(intermediate_subject)
    client_subj.CN = config.hook_user
    client_cert.set_subject(client_subj)

    # Create and set the private key
    client_key = crypto.PKey()
    client_key.generate_key(crypto.TYPE_RSA, 2048)
    client_cert.set_pubkey(client_key)

    client_cert.sign(intermediate_key_src, "sha256")

    cert_dump = crypto.dump_certificate(crypto.FILETYPE_PEM, client_cert).decode()
    key_dump = crypto.dump_privatekey(crypto.FILETYPE_PEM, client_key).decode()
    return CertificatePair(cert=cert_dump, key=key_dump)
def generate_default_observer_schema(app):
    """Generate the default observer schema for each Kubernetes resource present in
    ``spec.manifest`` for which a custom observer schema hasn't been specified.

    Args:
        app (krake.data.kubernetes.Application): The application for which to generate a
            default observer schema
    """
    # Start from the user-provided custom schemas, then fill the gaps.
    mangled = deepcopy(app.spec.observer_schema)
    app.status.mangled_observer_schema = mangled

    for manifest in app.spec.manifest:
        try:
            # A matching entry means the user already provided a custom
            # schema for this resource: keep it as is.
            get_kubernetes_resource_idx(mangled, manifest)
        except IndexError:
            # No custom schema for this resource: fall back to the default.
            mangled.append(
                generate_default_observer_schema_dict(manifest, first_level=True)
            )
def generate_default_observer_schema_dict(manifest_dict, first_level=False):
    """Recursively derive a default observer schema from part of a Kubernetes
    resource.

    Together with :func:``generate_default_observer_schema_list``, this function
    walks ``manifest_dict`` and builds a new dictionary in which every non-dict,
    non-list value is replaced by ``None`` (i.e. "observe this field, whatever
    its value").

    Args:
        manifest_dict (dict): Partial Kubernetes resources
        first_level (bool, optional): If True, indicates that the dictionary
            represents the whole observer schema of a Kubernetes resource

    Returns:
        dict: Generated partial observer_schema

    For a ``first_level`` dictionary (i.e. the complete observer schema of a
    resource), the identifying fields (``apiVersion``, ``kind`` and the resource
    name) keep their values from the manifest, and the load balancer ingress of
    ``LoadBalancer`` services is additionally observed.
    """
    schema = {}
    for field, content in manifest_dict.items():
        if isinstance(content, dict):
            schema[field] = generate_default_observer_schema_dict(content)
        elif isinstance(content, list):
            schema[field] = generate_default_observer_schema_list(content)
        else:
            schema[field] = None

    if first_level:
        # Identifying fields keep their manifest values: they are used to
        # match a schema entry against a manifest resource.
        schema["apiVersion"] = manifest_dict["apiVersion"]
        schema["kind"] = manifest_dict["kind"]
        schema["metadata"]["name"] = manifest_dict["metadata"]["name"]

        is_load_balancer = (
            "spec" in manifest_dict
            and manifest_dict["spec"].get("type") == "LoadBalancer"
        )
        if is_load_balancer:
            schema["status"] = {"load_balancer": {"ingress": None}}

    return schema
def generate_default_observer_schema_list(manifest_list):
    """Recursively derive a default observer schema from a list that is part of
    a Kubernetes resource.

    Counterpart of :func:``generate_default_observer_schema_dict``: every
    non-dict, non-list element is replaced by ``None``, and a list control
    dictionary is appended, using the current length of the list as default
    minimum and maximum accepted lengths.

    Args:
        manifest_list (list): Partial Kubernetes resources

    Returns:
        list: Generated partial observer_schema
    """
    schema = []
    for element in manifest_list:
        if isinstance(element, dict):
            schema.append(generate_default_observer_schema_dict(element))
        elif isinstance(element, list):
            schema.append(generate_default_observer_schema_list(element))
        else:
            schema.append(None)

    # The control dictionary is always the last element of the schema list.
    length = len(manifest_list)
    schema.append(
        {
            "observer_schema_list_min_length": length,
            "observer_schema_list_max_length": length,
        }
    )
    return schema
@listen.on(HookType.ApplicationMangling)
async def complete(app, api_endpoint, ssl_context, config):
    """Execute application complete hook defined by :class:`Complete`.

    Hook mangles given application and injects complete hooks variables.

    Application complete hook is disabled by default.
    User enables this hook by the --hook-complete argument in rok cli.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API endpoint
        config (krake.data.config.HooksConfiguration): Complete hook
            configuration.
    """
    if "complete" not in app.spec.hooks:
        return

    # The configured external endpoint takes precedence over the API endpoint.
    if config.complete.external_endpoint:
        api_endpoint = config.complete.external_endpoint

    # The token is only generated once per Application.
    app.status.complete_token = app.status.complete_token or token_urlsafe()

    # Likewise, the certificate and key are only generated once.
    generated_cert = CertificatePair(
        cert=app.status.complete_cert, key=app.status.complete_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(config.complete)
        app.status.complete_cert = generated_cert.cert
        app.status.complete_key = generated_cert.key

    hook = Complete(
        api_endpoint,
        ssl_context,
        hook_user=config.complete.hook_user,
        cert_dest=config.complete.cert_dest,
        env_token=config.complete.env_token,
        env_url=config.complete.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.complete_token,
        app.status.last_applied_manifest,
        config.complete.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "complete"
    )
@listen.on(HookType.ApplicationMangling)
async def shutdown(app, api_endpoint, ssl_context, config):
    """Executes an application shutdown hook defined by :class:`Shutdown`.

    The hook mangles the given application and injects shutdown hooks variables.

    Application shutdown hook is disabled by default.
    User enables this hook by the --hook-shutdown argument in rok cli.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API endpoint
        config (krake.data.config.HooksConfiguration): Shutdown hook
            configuration.
    """
    if "shutdown" not in app.spec.hooks:
        return

    # The configured external endpoint takes precedence over the API endpoint.
    if config.shutdown.external_endpoint:
        api_endpoint = config.shutdown.external_endpoint

    # The token is only generated once per Application.
    app.status.shutdown_token = app.status.shutdown_token or token_urlsafe()

    # Likewise, the certificate and key are only generated once.
    generated_cert = CertificatePair(
        cert=app.status.shutdown_cert, key=app.status.shutdown_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(config.shutdown)
        app.status.shutdown_cert = generated_cert.cert
        app.status.shutdown_key = generated_cert.key

    hook = Shutdown(
        api_endpoint,
        ssl_context,
        hook_user=config.shutdown.hook_user,
        cert_dest=config.shutdown.cert_dest,
        env_token=config.shutdown.env_token,
        env_url=config.shutdown.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.shutdown_token,
        app.status.last_applied_manifest,
        config.shutdown.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "shutdown"
    )
@listen.on(HookType.ResourcePreDelete)
async def pre_shutdown(controller, app, **kwargs):
    """Hook called before an Application is deleted.

    Currently a placeholder: it only checks whether the shutdown hook is
    enabled for the Application and performs no further action.

    Args:
        controller (KubernetesController): the controller processing the
            resource deletion.
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
    """
    # BUGFIX: a second, unreachable-by-effect ``return`` statement after this
    # guard was removed (dead code).
    if "shutdown" not in app.spec.hooks:
        return
class SubResource(NamedTuple):
    """Piece of a Kubernetes resource that a hook injects into an
    Application's manifest (see :meth:`Hook.mangle`).
    """

    # Key of the manifest section the sub-resource belongs to,
    # e.g. "volumes", "volumeMounts" or "env".
    group: str
    # Name of the sub-resource, used to decide between updating an existing
    # entry of the group and appending a new one.
    name: str
    # The content to inject, as a Kubernetes manifest fragment.
    body: dict
    # Candidate key-paths (tuples of keys) under which ``group`` may live in a
    # resource; the first path present in the resource is used.
    path: tuple
class CertificatePair(NamedTuple):
    """Tuple which contains a certificate and its corresponding key.

    As a NamedTuple it compares equal to a plain tuple, e.g.
    ``CertificatePair(cert=None, key=None) == (None, None)``.

    Attributes:
        cert (str): content of a certificate.
        key (str): content of the key that corresponds to the certificate.
    """

    cert: str
    key: str
class Hook(object):
hook_resources = ()
ca_name = "ca-bundle.pem"
cert_name = "cert.pem"
key_name = "key.pem"
def __init__(
self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
):
self.api_endpoint = api_endpoint
self.ssl_context = ssl_context
self.hook_user = hook_user
self.cert_dest = cert_dest
self.env_token = env_token
self.env_url = env_url
def mangle_app(
self,
name,
namespace,
token,
last_applied_manifest,
intermediate_src,
generated_cert,
mangled_observer_schema,
hook_type="",
):
"""Mangle a given application and inject complete hook resources and
sub-resources into the :attr:`last_applied_manifest` object by :meth:`mangle`.
Also mangle the observer_schema as new resources and sub-resources should
be observed.
:attr:`last_applied_manifest` is created as a deep copy of the desired
application resources, as defined by user. It can be updated by custom hook
resources or modified by custom hook sub-resources. It is used as a desired
state for the Krake deployment process.
Args:
name (str): Application name
namespace (str): Application namespace
token (str): Complete hook authentication token
last_applied_manifest (list): Application resources
intermediate_src (str): content of the certificate that is used to sign new
certificates for the complete hook.
generated_cert (CertificatePair): tuple that contains the content of the
new signed certificate for the Application, and the content of its
corresponding key.
mangled_observer_schema (list): Observed fields
hook_type (str, optional): Name of the hook the app should be mangled for
"""
secret_certs_name = "-".join([name, "krake", hook_type, "secret", "certs"])
secret_token_name = "-".join([name, "krake", hook_type, "secret", "token"])
volume_name = "-".join([name, "krake", hook_type, "volume"])
ca_certs = (
self.ssl_context.get_ca_certs(binary_form=True)
if self.ssl_context
else None
)
# Extract all different namespaces
# FIXME: too many assumptions here: do we create one ConfigMap for each
# namespace?
resource_namespaces = {
resource["metadata"].get("namespace", "default")
for resource in last_applied_manifest
}
hook_resources = []
hook_sub_resources = []
if ca_certs:
hook_resources.extend(
[
self.secret_certs(
secret_certs_name,
resource_namespace,
intermediate_src=intermediate_src,
generated_cert=generated_cert,
ca_certs=ca_certs,
)
for resource_namespace in resource_namespaces
]
)
hook_sub_resources.extend(
[*self.volumes(secret_certs_name, volume_name, self.cert_dest)]
)
hook_resources.extend(
[
self.secret_token(
secret_token_name,
name,
namespace,
resource_namespace,
self.api_endpoint,
token,
)
for resource_namespace in resource_namespaces
]
)
hook_sub_resources.extend(
[
*self.env_vars(secret_token_name),
]
)
self.mangle(
hook_resources,
last_applied_manifest,
mangled_observer_schema,
)
self.mangle(
hook_sub_resources,
last_applied_manifest,
mangled_observer_schema,
is_sub_resource=True,
)
def mangle(
self,
items,
last_applied_manifest,
mangled_observer_schema,
is_sub_resource=False,
):
"""Mangle applications desired state with custom hook resources or
sub-resources.
Example:
.. code:: python
last_applied_manifest = [
{
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {'name': 'test', 'namespace': 'default'},
'spec': {'containers': [{'name': 'test'}]}
}
]
mangled_observer_schema = [
{
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {'name': 'test', 'namespace': 'default'},
'spec': {
'containers': [
{'name': None},
{
'observer_schema_list_max_length': 1,
'observer_schema_list_min_length': 1,
},
]
},
}
]
hook_resources = [
{
'apiVersion': 'v1',
'kind': 'Secret',
'metadata': {'name': 'sct', 'namespace': 'default'}
}
]
hook_sub_resources = [
SubResource(
group='env', name='env', body={'name': 'test', 'value': 'test'},
path=(('spec', 'containers'),)
)
]
mangle(
hook_resources,
last_applied_manifest,
mangled_observer_schema,
)
mangle(
hook_sub_resources,
last_applied_manifest,
mangled_observer_schema,
is_sub_resource=True
)
assert last_applied_manifest == [
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {"name": "test", 'namespace': 'default'},
"spec": {
"containers": [
{
"name": "test",
"env": [{"name": "test", "value": "test"}]
}
]
},
},
{"apiVersion": "v1", "kind": "Secret", "metadata": {"name": "sct"}},
]
assert mangled_observer_schema == [
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {"name": "test", "namespace": None},
"spec": {
"containers": [
{
"name": None,
"env": [
{"name": None, "value": None},
{
"observer_schema_list_max_length": 1,
"observer_schema_list_min_length": 1,
},
],
},
{
"observer_schema_list_max_length": 1,
"observer_schema_list_min_length": 1,
},
]
},
},
{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {"name": "sct", "namespace": None},
},
]
Args:
items (list[SubResource]): Custom hook resources or sub-resources
last_applied_manifest (list): Application resources
mangled_observer_schema (list): Observed resources
is_sub_resource (bool, optional): if False, the function only extend the
list of Kubernetes resources defined in :attr:`last_applied_manifest`
with new hook resources. Otherwise, the function injects each new hook
sub-resource into the :attr:`last_applied_manifest` object
sub-resources. Defaults to False.
"""
if not items:
return
if not is_sub_resource:
last_applied_manifest.extend(items)
for sub_resource in items:
# Generate the default observer schema for each resource
mangled_observer_schema.append(
generate_default_observer_schema_dict(
sub_resource,
first_level=True,
)
)
return
def inject(sub_resource, sub_resource_to_mangle, observed_resource_to_mangle):
"""Inject a hooks defined sub-resource into a Kubernetes sub-resource.
Args:
sub_resource (SubResource): Hook sub-resource that needs to be injected
into :attr:`last_applied_manifest`
sub_resource_to_mangle (object): Kubernetes sub-resources from
:attr:`last_applied_manifest` which need to be processed
observed_resource_to_mangle (dict): partial mangled_observer_schema
corresponding to the Kubernetes sub-resource.
Raises:
InvalidManifestError: if the sub-resource which will be mangled is not a
list or a dict.
"""
# Create sub-resource group if not present in the Kubernetes sub-resource
if sub_resource.group not in sub_resource_to_mangle:
# FIXME: This assumes the subresource group contains a list
sub_resource_to_mangle.update({sub_resource.group: []})
# Create sub-resource group if not present in the observed fields
if sub_resource.group not in observed_resource_to_mangle:
observed_resource_to_mangle.update(
{
sub_resource.group: [
{
"observer_schema_list_min_length": 0,
"observer_schema_list_max_length": 0,
}
]
}
)
# Inject sub-resource
# If sub-resource name is already there update it, if not, append it
if sub_resource.name in [
g["name"] for g in sub_resource_to_mangle[sub_resource.group]
]:
# FIXME: Assuming we are dealing with a list
for idx, item in enumerate(sub_resource_to_mangle[sub_resource.group]):
if item["name"]:
if hasattr(item, "body"):
sub_resource_to_mangle[item.group][idx] = item["body"]
else:
sub_resource_to_mangle[sub_resource.group].append(sub_resource.body)
# Make sure the value is observed
if sub_resource.name not in [
g["name"] for g in observed_resource_to_mangle[sub_resource.group][:-1]
]:
observed_resource_to_mangle[sub_resource.group].insert(
-1, generate_default_observer_schema_dict(sub_resource.body)
)
observed_resource_to_mangle[sub_resource.group][-1][
"observer_schema_list_min_length"
] += 1
observed_resource_to_mangle[sub_resource.group][-1][
"observer_schema_list_max_length"
] += 1
for resource in last_applied_manifest:
# Complete hook is applied only on defined Kubernetes resources
if resource["kind"] not in self.hook_resources:
continue
for sub_resource in items:
sub_resources_to_mangle = None
idx_observed = get_kubernetes_resource_idx(
mangled_observer_schema, resource
)
for keys in sub_resource.path:
try:
sub_resources_to_mangle = reduce(getitem, keys, resource)
except KeyError:
continue
break
# Create the path to the observed sub-resource, if it doesn't yet exist
try:
observed_sub_resources = reduce(
getitem, keys, mangled_observer_schema[idx_observed]
)
except KeyError:
Complete.create_path(
mangled_observer_schema[idx_observed], list(keys)
)
observed_sub_resources = reduce(
getitem, keys, mangled_observer_schema[idx_observed]
)
if isinstance(sub_resources_to_mangle, list):
for idx, sub_resource_to_mangle in enumerate(
sub_resources_to_mangle
):
# Ensure that each element of the list is observed.
idx_observed = idx
if idx >= len(observed_sub_resources[:-1]):
idx_observed = len(observed_sub_resources[:-1])
# FIXME: Assuming each element of the list contains a
# dictionary, therefore initializing new elements with an
# empty dict
observed_sub_resources.insert(-1, {})
observed_sub_resource = observed_sub_resources[idx_observed]
# FIXME: This is assuming a list always contains dict
inject(
sub_resource, sub_resource_to_mangle, observed_sub_resource
)
elif isinstance(sub_resources_to_mangle, dict):
inject(
sub_resource, sub_resources_to_mangle, observed_sub_resources
)
else:
message = (
f"The sub-resource to mangle {sub_resources_to_mangle!r} has an"
"invalid type, should be in '[dict, list]'"
)
raise InvalidManifestError(message)
@staticmethod
def attribute_map(obj):
"""Convert a Kubernetes object to dict based on its attribute mapping
Example:
.. code:: python
from kubernetes_asyncio.client import V1VolumeMount
d = attribute_map(
V1VolumeMount(name="name", mount_path="path")
)
assert d == {'mountPath': 'path', 'name': 'name'}
Args:
obj (object): Kubernetes object
Returns:
dict: Converted Kubernetes object
"""
return {
obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in obj.to_dict().items()
if getattr(obj, attr) is not None
}
@staticmethod
def create_path(mangled_observer_schema, keys):
"""Create the path to the observed field in the observer schema.
When a sub-resource is mangled, it should be observed. This function creates
the path to the subresource to observe.
Args:
mangled_observer_schema (dict): Partial observer schema of a resource
keys (list): list of keys forming the path to the sub-resource to
observe
FIXME: This assumes we are only adding keys to dict. We don't consider lists
"""
# Unpack the first key first, as it contains the base directory
key = keys.pop(0)
# If the key is the last of the list, we reached the end of the path.
if len(keys) == 0:
mangled_observer_schema[key] = None
return
if key not in mangled_observer_schema:
mangled_observer_schema[key] = {}
Hook.create_path(mangled_observer_schema[key], keys)
def secret_certs(
self,
secret_name,
namespace,
ca_certs=None,
intermediate_src=None,
generated_cert=None,
):
"""Create a complete hooks secret resource.
Complete hook secret stores Krake CAs and client certificates to communicate
with the Krake API.
Args:
secret_name (str): Secret name
namespace (str): Kubernetes namespace where the Secret will be created.
ca_certs (list): Krake CA list
intermediate_src (str): content of the certificate that is used to sign new
certificates for the complete hook.
generated_cert (CertificatePair): tuple that contains the content of the
new signed certificate for the Application, and the content of its
corresponding key.
Returns:
dict: complete hook secret resource
"""
ca_certs_pem = ""
for ca_cert in ca_certs:
x509 = crypto.load_certificate(crypto.FILETYPE_ASN1, ca_cert)
ca_certs_pem += crypto.dump_certificate(crypto.FILETYPE_PEM, x509).decode()
# Add the intermediate certificate into the chain
with open(intermediate_src, "r") as f:
intermediate_src_content = f.read()
ca_certs_pem += intermediate_src_content
data = {
self.ca_name: self._encode_to_64(ca_certs_pem),
self.cert_name: self._encode_to_64(generated_cert.cert),
self.key_name: self._encode_to_64(generated_cert.key),
}
return self.secret(secret_name, data, namespace)
def secret_token(
self, secret_name, name, namespace, resource_namespace, api_endpoint, token
):
"""Create a hooks secret resource.
The hook secret stores Krake authentication token
and hook URL for given application.
Args:
secret_name (str): Secret name
name (str): Application name
namespace (str): Application namespace
resource_namespace (str): Kubernetes namespace where the
Secret will be created.
api_endpoint (str): Krake API endpoint
token (str): Complete hook authentication token
Returns:
dict: complete hook secret resource
"""
pass
def volumes(self, secret_name, volume_name, mount_path):
"""Create complete hooks volume and volume mount sub-resources
Complete hook volume gives access to hook's secret, which stores
Krake CAs and client certificates to communicate with the Krake API.
Complete hook volume mount puts the volume into the application
Args:
secret_name (str): Secret name
volume_name (str): Volume name
mount_path (list): Volume mount path
Returns:
list: List of complete hook volume and volume mount sub-resources
"""
volume = V1Volume(name=volume_name, secret={"secretName": secret_name})
volume_mount = V1VolumeMount(name=volume_name, mount_path=mount_path)
return [
SubResource(
group="volumes",
name=volume.name,
body=self.attribute_map(volume),
path=(("spec", "template", "spec"), ("spec",)),
),
SubResource(
group="volumeMounts",
name=volume_mount.name,
body=self.attribute_map(volume_mount),
path=(
("spec", "template", "spec", "containers"),
("spec", "containers"), # kind: Pod
),
),
]
@staticmethod
def _encode_to_64(string):
"""Compute the base 64 encoding of a string.
Args:
string (str): the string to encode.
Returns:
str: the result of the encoding.
"""
return b64encode(string.encode()).decode()
def secret(self, secret_name, secret_data, namespace, _type="Opaque"):
"""Create a secret resource.
Args:
secret_name (str): Secret name
secret_data (dict): Secret data
namespace (str): Kubernetes namespace where the Secret will be created.
_type (str, optional): Secret type. Defaults to Opaque.
Returns:
dict: secret resource
"""
return self.attribute_map(
V1Secret(
api_version="v1",
kind="Secret",
data=secret_data,
metadata={"name": secret_name, "namespace": namespace},
type=_type,
)
)
@staticmethod
def create_hook_url(name, namespace, api_endpoint):
"""Create an applications' hook URL.
Function needs to be specified for each hook.
Args:
name (str): Application name
namespace (str): Application namespace
api_endpoint (str): Krake API endpoint
Returns:
str: Application shutdown url
"""
pass
    def env_vars(self, secret_name):
        """Create the hooks' environment variables sub-resources.

        Abstract placeholder: each concrete hook subclass must override this
        method and create the environment variables that store the Krake
        authentication token and the hook URL for the given application.
        The base implementation intentionally does nothing.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of hook environment variables sub-resources

        """
        pass
class Complete(Hook):
    """Mangle given application and inject complete hooks variables into it.

    The hook injects a Kubernetes secret, which stores the Krake
    authentication token and the Krake complete hook URL for the given
    application. The variables from the Kubernetes secret are imported as
    environment variables into the application resource definition. Only
    resources defined in :args:`hook_resources` can be modified.
    Names of environment variables are defined in the application controller
    configuration file.

    If TLS is enabled on the Krake API, the complete hook injects a Kubernetes
    secret, and its corresponding volume and volume mount definitions for the
    Krake CA, the client certificate with the right CN, and its key. The
    directory where the secret is mounted is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API endpoint
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which stores Krake
            authentication token.
        env_url (str, optional): Name of the environment variable,
            which stores Krake complete hook URL.

    """

    # Kinds of Kubernetes resources this hook is allowed to mangle.
    hook_resources = ("Pod", "Deployment", "ReplicationController")

    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        super().__init__(
            api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
        )
        self.env_url = env_url

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create complete hooks secret resource.

        The secret stores the Krake authentication token and the complete
        hook URL for the given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Complete hook authentication token

        Returns:
            dict: complete hook secret resource

        """
        complete_url = self.create_hook_url(name, namespace, api_endpoint)
        secret_data = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(complete_url),
        }
        return self.secret(secret_name, secret_data, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' complete URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application complete url

        """
        hook_path = f"/kubernetes/namespaces/{namespace}/applications/{name}/complete"
        return str(URL(api_endpoint).with_path(hook_path))

    def _secret_env_var(self, env_name, secret_name):
        """Build one environment variable sourced from the hook's secret.

        Args:
            env_name (str): name of the environment variable.
            secret_name (str): name of the secret holding its value.

        Returns:
            V1EnvVar: the environment variable definition.

        """
        selector = V1SecretKeySelector(name=secret_name, key=env_name.lower())
        source = V1EnvVarSource(secret_key_ref=self.attribute_map(selector))
        return V1EnvVar(name=env_name, value_from=self.attribute_map(source))

    def env_vars(self, secret_name):
        """Create complete hooks environment variables sub-resources.

        The environment variables expose the Krake authentication token and
        the complete hook URL for the given application.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of complete hook environment variables sub-resources

        """
        sub_resources = []
        for env_name in (self.env_token, self.env_url):
            variable = self._secret_env_var(env_name, secret_name)
            sub_resources.append(
                SubResource(
                    group="env",
                    name=variable.name,
                    body=self.attribute_map(variable),
                    path=(
                        ("spec", "template", "spec", "containers"),
                        ("spec", "containers"),  # kind: Pod
                    ),
                )
            )
        return sub_resources
class Shutdown(Hook):
    """Mangle given application and inject shutdown hooks variables into it.

    The hook injects a Kubernetes secret, which stores the Krake
    authentication token and the Krake shutdown hook URL for the given
    application. The variables from the Kubernetes secret are imported as
    environment variables into the application resource definition. Only
    resources defined in :args:`hook_resources` can be modified.
    Names of environment variables are defined in the application controller
    configuration file.

    If TLS is enabled on the Krake API, the shutdown hook injects a Kubernetes
    secret, and its corresponding volume and volume mount definitions for the
    Krake CA, the client certificate with the right CN, and its key. The
    directory where the secret is mounted is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API endpoint
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which stores Krake
            authentication token.
        env_url (str, optional): Name of the environment variable,
            which stores Krake shutdown hook URL.

    """

    # Kinds of Kubernetes resources this hook is allowed to mangle.
    hook_resources = ("Pod", "Deployment", "ReplicationController")

    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        super().__init__(
            api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
        )
        self.env_url = env_url

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create shutdown hooks secret resource.

        The secret stores the Krake authentication token and the shutdown
        hook URL for the given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Shutdown hook authentication token

        Returns:
            dict: shutdown hook secret resource

        """
        shutdown_url = self.create_hook_url(name, namespace, api_endpoint)
        secret_data = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(shutdown_url),
        }
        return self.secret(secret_name, secret_data, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' shutdown URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application shutdown url

        """
        hook_path = f"/kubernetes/namespaces/{namespace}/applications/{name}/shutdown"
        return str(URL(api_endpoint).with_path(hook_path))

    def _secret_env_var(self, env_name, secret_name):
        """Build one environment variable sourced from the hook's secret.

        Args:
            env_name (str): name of the environment variable.
            secret_name (str): name of the secret holding its value.

        Returns:
            V1EnvVar: the environment variable definition.

        """
        selector = V1SecretKeySelector(name=secret_name, key=env_name.lower())
        source = V1EnvVarSource(secret_key_ref=self.attribute_map(selector))
        return V1EnvVar(name=env_name, value_from=self.attribute_map(source))

    def env_vars(self, secret_name):
        """Create shutdown hooks environment variables sub-resources.

        The environment variables expose the Krake authentication token and
        the shutdown hook URL for the given application.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of shutdown hook environment variables sub-resources

        """
        sub_resources = []
        for env_name in (self.env_token, self.env_url):
            variable = self._secret_env_var(env_name, secret_name)
            sub_resources.append(
                SubResource(
                    group="env",
                    name=variable.name,
                    body=self.attribute_map(variable),
                    path=(
                        ("spec", "template", "spec", "containers"),
                        ("spec", "containers"),  # kind: Pod
                    ),
                )
            )
        return sub_resources
|
krake/krake/controller/kubernetes/hooks.py
|
codereval_python_data_63
|
Together with :func:``update_last_applied_manifest_dict_from_resp``, this
function is called recursively to update a partial ``last_applied_manifest``
from a partial Kubernetes response
Args:
last_applied_manifest (list): partial ``last_applied_manifest`` being
updated
observer_schema (list): partial ``observer_schema``
response (list): partial response from the Kubernetes API.
This function goes through all observed fields and initializes their values in
last_applied_manifest if they are not yet present.
def update_last_applied_manifest_list_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_dict_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response.

    Goes through all observed fields and initializes their value in
    ``last_applied_manifest`` if they are not yet present.

    Args:
        last_applied_manifest (list): partial ``last_applied_manifest`` being
            updated
        observer_schema (list): partial ``observer_schema``
        response (list): partial response from the Kubernetes API.

    """
    # The last element of the observed list is the special control
    # dictionary, which is skipped here.
    for position, schema_elt in enumerate(observer_schema[:-1]):
        if position >= len(response):
            # The element is observed but absent from the k8s response, so
            # the rest of the list is absent as well. This is not an error:
            # observing a list element doesn't guarantee its presence, and
            # the list length is governed by the special control dictionary.
            return

        known = position < len(last_applied_manifest)

        if isinstance(schema_elt, dict):
            if not known:
                # Observed dict not yet present in last_applied_manifest
                last_applied_manifest.append({})
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest[position],
                observer_schema[position],
                response[position],
            )
        elif isinstance(response[position], list):
            if not known:
                # Observed list not yet present in last_applied_manifest
                last_applied_manifest.append([])
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest[position],
                observer_schema[position],
                response[position],
            )
        elif not known:
            # Scalar element not yet present: take it from the response.
            last_applied_manifest.append(response[position])
"""This module defines the Hook Dispatcher and listeners for registering and
executing hooks. Hook Dispatcher emits hooks based on :class:`Hook` attributes which
define when the hook will be executed.
"""
import asyncio
import logging
import random
from base64 import b64encode
from collections import defaultdict
from contextlib import suppress
from copy import deepcopy
from datetime import datetime
from functools import reduce
from operator import getitem
from enum import Enum, auto
from inspect import iscoroutinefunction
from OpenSSL import crypto
from typing import NamedTuple
import yarl
from aiohttp import ClientConnectorError
from krake.controller import Observer
from krake.controller.kubernetes.client import KubernetesClient, InvalidManifestError
from krake.utils import camel_to_snake_case, get_kubernetes_resource_idx
from kubernetes_asyncio.client.rest import ApiException
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio import client
from krake.data.kubernetes import ClusterState, Application, Cluster
from yarl import URL
from secrets import token_urlsafe
from kubernetes_asyncio.client import (
Configuration,
V1Secret,
V1EnvVar,
V1VolumeMount,
V1Volume,
V1SecretKeySelector,
V1EnvVarSource,
)
from kubernetes_asyncio.config.kube_config import KubeConfigLoader
logger = logging.getLogger(__name__)
class HookType(Enum):
    """Events for which handlers can be registered on a :class:`HookDispatcher`."""

    # Lifecycle of a single Kubernetes resource of an application
    ResourcePreCreate = auto()
    ResourcePostCreate = auto()
    ResourcePreUpdate = auto()
    ResourcePostUpdate = auto()
    ResourcePreDelete = auto()
    ResourcePostDelete = auto()
    # Lifecycle of an application as a whole
    ApplicationMangling = auto()
    ApplicationPreMigrate = auto()
    ApplicationPostMigrate = auto()
    ApplicationPreReconcile = auto()
    ApplicationPostReconcile = auto()
    ApplicationPreDelete = auto()
    ApplicationPostDelete = auto()
    # Lifecycle of a cluster
    ClusterCreation = auto()
    ClusterDeletion = auto()
class HookDispatcher(object):
    """Simple wrapper around a registry of handlers associated to :class:`Hook`
    attributes. Each :class:`Hook` attribute defines when the handler will be
    executed.

    Listeners for certain hooks can be registered via :meth:`on`. Registered
    listeners are executed via :meth:`hook`.

    Example:
        .. code:: python

            listen = HookDispatcher()

            @listen.on(HookType.ResourcePreCreate)
            def to_perform_before_resource_creation(app, cluster, resource, controller):
                # Do Stuff

            @listen.on(HookType.ResourcePostCreate)
            def to_perform_after_resource_creation(app, cluster, resource, resp):
                # Do Stuff

            @listen.on(HookType.ResourcePostDelete)
            def to_perform_after_resource_deletion(app, resource, resp):
                # Do Stuff

    """

    def __init__(self):
        # Maps a HookType to the list of registered handlers. As a
        # defaultdict, looking up an unknown hook yields an empty list.
        self.registry = defaultdict(list)

    def on(self, hook):
        """Decorator function to add a new handler to the registry.

        Args:
            hook (HookType): Hook attribute for which to register the handler.

        Returns:
            callable: Decorator for registering listeners for the specified
            hook.

        """

        def decorator(handler):
            self.registry[hook].append(handler)
            return handler

        return decorator

    async def hook(self, hook, **kwargs):
        """Execute the list of handlers associated to the provided :class:`Hook`
        attribute.

        Both synchronous and asynchronous handlers are supported; coroutine
        functions are awaited.

        Args:
            hook (HookType): The hook attribute for which to execute handlers.

        """
        # `registry` is a defaultdict, so indexing it can never raise KeyError
        # (the previous try/except KeyError was dead code) and would even
        # insert an empty list as a side effect. `.get` looks the hook up
        # without mutating the registry.
        for handler in self.registry.get(hook, []):
            if iscoroutinefunction(handler):
                await handler(**kwargs)
            else:
                handler(**kwargs)
# Module-level dispatcher: all hook handlers in this module register on it.
listen = HookDispatcher()
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
async def register_service(app, cluster, resource, response):
    """Register endpoint of Kubernetes Service object on creation and update.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        cluster (krake.data.kubernetes.Cluster): The cluster on which the
            application is running
        resource (dict): Kubernetes object description as specified in the
            specification of the application.
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    """
    # Only Service resources expose an endpoint to register.
    if resource["kind"] != "Service":
        return
    service_name = resource["metadata"]["name"]
    if response.spec and response.spec.type == "LoadBalancer":
        # For a "LoadBalancer" type of Service, an external IP is given in the cluster
        # by a load balancer controller to the service. In this case, the "port"
        # specified in the spec is reachable from the outside.
        if (
            not response.status.load_balancer
            or not response.status.load_balancer.ingress
        ):
            # When a "LoadBalancer" type of service is created, the IP is given by an
            # additional controller (e.g. a controller that requests a floating IP to an
            # OpenStack infrastructure). This process can take some time, but the
            # Service itself already exist before the IP is assigned. In the case of an
            # error with the controller, the IP is also not given. This "<pending>" IP
            # just expresses that the Service exists, but the IP is not ready yet.
            external_ip = "<pending>"
        else:
            # Only the first ingress entry is registered — NOTE(review):
            # presumably a single ingress is expected here; confirm.
            external_ip = response.status.load_balancer.ingress[0].ip
        if not response.spec.ports:
            external_port = "<pending>"
        else:
            # Only the first declared port is registered.
            external_port = response.spec.ports[0].port
        app.status.services[service_name] = f"{external_ip}:{external_port}"
        return
    node_port = None
    # Ensure that ports are specified
    if response.spec and response.spec.ports:
        node_port = response.spec.ports[0].node_port
    # If the service does not have a node port, remove a potential reference
    # and return.
    if node_port is None:
        try:
            del app.status.services[service_name]
        except KeyError:
            pass
        return
    # Determine URL of Kubernetes cluster API; a NodePort service is reachable
    # on the cluster host itself.
    loader = KubeConfigLoader(cluster.spec.kubeconfig)
    config = Configuration()
    await loader.load_and_set(config)
    cluster_url = yarl.URL(config.host)
    app.status.services[service_name] = f"{cluster_url.host}:{node_port}"
@listen.on(HookType.ResourcePostDelete)
async def unregister_service(app, resource, **kwargs):
    """Unregister endpoint of Kubernetes Service object on deletion.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    # Only Service resources have a registered endpoint to remove.
    if resource["kind"] != "Service":
        return

    service_name = resource["metadata"]["name"]
    # If the service was never registered, there is nothing to do.
    with suppress(KeyError):
        del app.status.services[service_name]
@listen.on(HookType.ResourcePostDelete)
async def remove_resource_from_last_observed_manifest(app, resource, **kwargs):
    """Remove a given resource from the last_observed_manifest after its deletion.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    manifest = app.status.last_observed_manifest
    try:
        position = get_kubernetes_resource_idx(manifest, resource)
    except IndexError:
        # The resource was never observed; nothing to remove.
        return
    manifest.pop(position)
def update_last_applied_manifest_dict_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_list_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response.

    Goes through all observed fields and initializes their value in
    ``last_applied_manifest`` if they are not yet present.

    Args:
        last_applied_manifest (dict): partial ``last_applied_manifest`` being
            updated
        observer_schema (dict): partial ``observer_schema``
        response (dict): partial response from the Kubernetes API.

    Raises:
        KeyError: If an observed field is not present in the Kubernetes response

    """
    for key, observed_value in observer_schema.items():
        # Manifest keys are camelCase, while the deserialized Kubernetes
        # response uses snake_case keys — hence the conversion.
        response_key = camel_to_snake_case(key)
        if response_key not in response:
            # An observed key should always be present in the k8s response
            raise KeyError(
                f"Observed key {response_key} is not present in response {response}"
            )

        if isinstance(observed_value, dict):
            # Observed dict: make sure it exists, then recurse into it.
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest.setdefault(key, {}),
                observer_schema[key],
                response[response_key],
            )
        elif isinstance(observed_value, list):
            # Observed list: make sure it exists, then recurse into it.
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest.setdefault(key, []),
                observer_schema[key],
                response[response_key],
            )
        else:
            # Scalar observed field: only initialize it if not yet present.
            last_applied_manifest.setdefault(key, response[response_key])
def update_last_applied_manifest_list_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_dict_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response.

    Goes through all observed fields and initializes their value in
    ``last_applied_manifest`` if they are not yet present.

    Args:
        last_applied_manifest (list): partial ``last_applied_manifest`` being
            updated
        observer_schema (list): partial ``observer_schema``
        response (list): partial response from the Kubernetes API.

    """
    # The last element of the observed list is the special control
    # dictionary, which is skipped here.
    for position, schema_elt in enumerate(observer_schema[:-1]):
        if position >= len(response):
            # The element is observed but absent from the k8s response, so
            # the rest of the list is absent as well. This is not an error:
            # observing a list element doesn't guarantee its presence, and
            # the list length is governed by the special control dictionary.
            return

        known = position < len(last_applied_manifest)

        if isinstance(schema_elt, dict):
            if not known:
                # Observed dict not yet present in last_applied_manifest
                last_applied_manifest.append({})
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest[position],
                observer_schema[position],
                response[position],
            )
        elif isinstance(response[position], list):
            if not known:
                # Observed list not yet present in last_applied_manifest
                last_applied_manifest.append([])
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest[position],
                observer_schema[position],
                response[position],
            )
        elif not known:
            # Scalar element not yet present: take it from the response.
            last_applied_manifest.append(response[position])
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_applied_manifest_from_resp(app, response, **kwargs):
    """Hook run after the creation or update of an application in order to update the
    `status.last_applied_manifest` using the k8s response.

    After a Kubernetes resource has been created/updated, the
    `status.last_applied_manifest` has to be updated. All fields already initialized
    (either from the mangling of `spec.manifest`, or by a previous call to this
    function) should be left untouched. Only observed fields which are not present in
    `status.last_applied_manifest` should be initialized.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        response (kubernetes_asyncio.client.V1Status): Response of the Kubernetes API

    """
    # Normalize into a plain dict, whether or not the Kubernetes client
    # managed to deserialize the response into an object.
    resp = response if isinstance(response, dict) else response.to_dict()

    idx_applied = get_kubernetes_resource_idx(app.status.last_applied_manifest, resp)
    idx_observed = get_kubernetes_resource_idx(app.status.mangled_observer_schema, resp)

    update_last_applied_manifest_dict_from_resp(
        app.status.last_applied_manifest[idx_applied],
        app.status.mangled_observer_schema[idx_observed],
        resp,
    )
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_observed_manifest_from_resp(app, response, **kwargs):
    """Handler to run after the creation or update of a Kubernetes resource to update
    the last_observed_manifest from the response of the Kubernetes API.

    The target last_observed_manifest holds the value of all observed fields plus the
    special control dictionaries for the list length.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    Raises:
        IndexError: if the resource is not observed. All created resources should
            be observed, so this indicates an inconsistency.

    """
    if isinstance(response, dict):
        # The Kubernetes API couldn't deserialize the k8s response into an object
        resp = response
    else:
        # The Kubernetes API deserialized the k8s response into an object
        resp = response.to_dict()

    # All created resources should be observed; let a potential IndexError
    # propagate. (The previous `try/except IndexError: raise` around this call
    # was a no-op and has been removed.)
    idx_observed = get_kubernetes_resource_idx(app.status.mangled_observer_schema, resp)

    try:
        idx_last_observed = get_kubernetes_resource_idx(
            app.status.last_observed_manifest, resp
        )
    except IndexError:
        # If the resource is not yet present in last_observed_manifest, append it.
        idx_last_observed = len(app.status.last_observed_manifest)
        app.status.last_observed_manifest.append({})

    # Overwrite the last_observed_manifest for this resource
    app.status.last_observed_manifest[
        idx_last_observed
    ] = update_last_observed_manifest_dict(
        app.status.mangled_observer_schema[idx_observed], resp
    )
def update_last_observed_manifest_dict(observed_resource, response):
    """Together with :func:``update_last_observed_manifest_list``, recursively
    crafts the ``last_observed_manifest`` from the Kubernetes :attr:``response``.

    Gets the value of all observed fields from the Kubernetes response.

    Args:
        observed_resource (dict): The schema to observe for the partial given resource
        response (dict): The partial Kubernetes response for this resource.

    Raises:
        KeyError: If an observed key is not present in the Kubernetes response

    Returns:
        dict: The dictionary of observed keys and their value

    """
    observed = {}
    for key, sub_schema in observed_resource.items():
        # Manifest keys are camelCase, while the deserialized response uses
        # snake_case keys — hence the conversion.
        response_key = camel_to_snake_case(key)
        if response_key not in response:
            raise KeyError(
                f"Observed key {response_key} is not present in response {response}"
            )
        response_value = response[response_key]

        if isinstance(sub_schema, dict):
            observed[key] = update_last_observed_manifest_dict(
                sub_schema, response_value
            )
        elif isinstance(sub_schema, list):
            observed[key] = update_last_observed_manifest_list(
                sub_schema, response_value
            )
        else:
            observed[key] = response_value
    return observed
def update_last_observed_manifest_list(observed_resource, response):
    """Together with :func:``update_last_observed_manifest_dict``, recursively
    crafts the ``last_observed_manifest`` from the Kubernetes :attr:``response``.

    Gets the value of all observed elements from the Kubernetes response.

    Args:
        observed_resource (list): the schema to observe for the partial given resource
        response (list): the partial Kubernetes response for this resource.

    Returns:
        list: The list of observed elements, plus the special list length control
        dictionary

    """
    if not response:
        return [{"observer_schema_list_current_length": 0}]

    res = []
    # Looping over the observed resource, except the last element which is the
    # special control dictionary
    for idx, val in enumerate(observed_resource[:-1]):
        if idx >= len(response):
            # Element is not present in the Kubernetes response, nothing more to do
            break
        # `isinstance` instead of the previous `type(...) ==` comparison, so
        # dict/list subclasses are handled correctly as well.
        if isinstance(response[idx], dict):
            res.append(update_last_observed_manifest_dict(val, response[idx]))
        elif isinstance(response[idx], list):
            res.append(update_last_observed_manifest_list(val, response[idx]))
        else:
            res.append(response[idx])

    # Append the special control dictionary to the list
    res.append({"observer_schema_list_current_length": len(response)})
    return res
def update_last_applied_manifest_dict_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Together with :func:``update_last_applied_manifest_list_from_spec``, this
    function is called recursively to update a partial ``last_applied_manifest``.

    Args:
        resource_status_new (dict): partial ``last_applied_manifest`` being updated
        resource_status_old (dict): partial of the current ``last_applied_manifest``
        resource_observed (dict): partial observer_schema for the manifest file
            being updated

    """
    for key, observed_value in resource_observed.items():
        if key not in resource_status_old:
            # Nothing is known for this key; it cannot be carried over.
            continue

        if key not in resource_status_new:
            # The key is observed and known, but absent from spec.manifest:
            # initialize it before (possibly) recursing into it.
            if isinstance(observed_value, dict):
                resource_status_new[key] = {}
            elif isinstance(observed_value, list):
                resource_status_new[key] = []
            else:
                # Scalar field: carry over the currently known value.
                resource_status_new[key] = resource_status_old[key]
                continue

        if isinstance(observed_value, dict):
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[key],
                resource_status_old[key],
                resource_observed[key],
            )
        elif isinstance(observed_value, list):
            update_last_applied_manifest_list_from_spec(
                resource_status_new[key],
                resource_status_old[key],
                resource_observed[key],
            )
def update_last_applied_manifest_list_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Together with :func:``update_last_applied_manifest_dict_from_spec``, this
    function is called recursively to update a partial ``last_applied_manifest``.

    Args:
        resource_status_new (list): partial ``last_applied_manifest`` being updated
        resource_status_old (list): partial of the current ``last_applied_manifest``
        resource_observed (list): partial observer_schema for the manifest file
            being updated

    """
    # Skip the trailing special control dictionary of the observed list.
    for idx, observed_value in enumerate(resource_observed[:-1]):
        if idx >= len(resource_status_old):
            # Neither this element nor the rest of the list is present in the
            # current last_applied_manifest; nothing more to carry over.
            break

        if idx >= len(resource_status_new):
            # The element is absent from spec.manifest: initialize it before
            # (possibly) recursing into it.
            if isinstance(observed_value, dict):
                resource_status_new.append({})
            elif isinstance(observed_value, list):
                resource_status_new.append([])
            else:
                # Scalar element: carry over the currently known value.
                resource_status_new.append(resource_status_old[idx])
                continue

        if isinstance(observed_value, dict):
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[idx],
                resource_status_old[idx],
                resource_observed[idx],
            )
        elif isinstance(observed_value, list):
            update_last_applied_manifest_list_from_spec(
                resource_status_new[idx],
                resource_status_old[idx],
                resource_observed[idx],
            )
def update_last_applied_manifest_from_spec(app):
    """Update the status.last_applied_manifest of an application from spec.manifests.

    This function is called on application creation and updates. The
    last_applied_manifest of an application is initialized as a copy of
    spec.manifest, and is augmented by all known observed fields not yet
    initialized (i.e. all observed fields or resources which are present in
    the current last_applied_manifest but not in the spec.manifest).

    Args:
        app (krake.data.kubernetes.Application): Application to update

    """
    # Start from a copy of spec.manifest and augment it with every observed
    # field which is present in the current last_applied_manifest but not in
    # the original spec.manifest.
    updated = deepcopy(app.spec.manifest)

    for resource_observed in app.status.mangled_observer_schema:
        try:
            idx_status_old = get_kubernetes_resource_idx(
                app.status.last_applied_manifest, resource_observed
            )
        except IndexError:
            # The resource is absent from the current last_applied_manifest:
            # nothing can be carried over, regardless of whether it appears in
            # spec.manifest.
            continue

        try:
            # Check if the observed resource is present in spec.manifest
            idx_status_new = get_kubernetes_resource_idx(updated, resource_observed)
        except IndexError:
            # The resource is observed but missing from spec.manifest: start
            # from an empty resource, to be filled below with the observed and
            # known fields.
            updated.append({})
            idx_status_new = len(updated) - 1

        # Carry over the observed fields which are known in the current
        # last_applied_manifest but absent from spec.manifest.
        update_last_applied_manifest_dict_from_spec(
            updated[idx_status_new],
            app.status.last_applied_manifest[idx_status_old],
            resource_observed,
        )

    app.status.last_applied_manifest = updated
class KubernetesApplicationObserver(Observer):
    """Observer specific for Kubernetes Applications. One observer is created for each
    Application managed by the Controller, but not one per Kubernetes resource
    (Deployment, Service...). If several resources are defined by an Application, they
    are all monitored by the same observer.

    The observer gets the actual status of the resources on the cluster using the
    Kubernetes API, and compare it to the status stored in the API.

    The observer is:
     * started at initial Krake resource creation;
     * deleted when a resource needs to be updated, then started again when it is done;
     * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster on which the observed
            Application is created.
        resource (krake.data.kubernetes.Application): the application that will be
            observed.
        on_res_update (coroutine): a coroutine called when a resource's actual status
            differs from the status sent by the database. Its signature is:
            ``(resource) -> updated_resource``. ``updated_resource`` is the instance of
            the resource that is up-to-date with the API. The Observer internal instance
            of the resource to observe will be updated. If the API cannot be contacted,
            ``None`` can be returned. In this case the internal instance of the Observer
            will not be updated.
        time_step (int, optional): how frequently the Observer should watch the actual
            status of the resources.

    """

    def __init__(self, cluster, resource, on_res_update, time_step=2):
        super().__init__(resource, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Application monitored by the Observer.

        Returns:
            krake.data.core.Status: the status object created using information from the
                real world Applications resource.

        """
        app = self.resource

        status = deepcopy(app.status)
        status.last_observed_manifest = []

        # For each observed kubernetes resource of the Application,
        # get its current status on the cluster.
        for desired_resource in app.status.last_applied_manifest:
            kube = KubernetesClient(self.cluster.spec.kubeconfig)
            idx_observed = get_kubernetes_resource_idx(
                app.status.mangled_observer_schema, desired_resource
            )
            observed_resource = app.status.mangled_observer_schema[idx_observed]
            async with kube:
                try:
                    group, version, kind, name, namespace = kube.get_immutables(
                        desired_resource
                    )
                    resource_api = await kube.get_resource_api(group, version, kind)
                    resp = await resource_api.read(kind, name, namespace)
                except ApiException as err:
                    if err.status == 404:
                        # Resource does not exist
                        continue
                    # On any other API error, log it and skip this resource:
                    # `resp` was never assigned, so falling through (as the
                    # previous implementation did) would have raised an
                    # UnboundLocalError instead of reporting the real error.
                    logger.error(err)
                    continue

            observed_manifest = update_last_observed_manifest_dict(
                observed_resource, resp.to_dict()
            )
            status.last_observed_manifest.append(observed_manifest)
        return status
class KubernetesClusterObserver(Observer):
    """Observer specific for Kubernetes Clusters. One observer is created for each
    Cluster managed by the Controller.

    The observer gets the actual status of the cluster using the
    Kubernetes API, and compare it to the status stored in the API.

    The observer is:

    * started at initial Krake resource creation;
    * deleted when a resource needs to be updated, then started again when it is done;
    * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster which will be observed.
        on_res_update (coroutine): a coroutine called when a resource's actual status
            differs from the status sent by the database. Its signature is:
            ``(resource) -> updated_resource``. ``updated_resource`` is the instance of
            the resource that is up-to-date with the API. The Observer internal instance
            of the resource to observe will be updated. If the API cannot be contacted,
            ``None`` can be returned. In this case the internal instance of the Observer
            will not be updated.
        time_step (int, optional): how frequently the Observer should watch the actual
            status of the resources.

    """

    def __init__(self, cluster, on_res_update, time_step=2):
        super().__init__(cluster, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Cluster monitored by the Observer.

        The state is derived from the conditions reported by the cluster nodes:
        unreachable cluster -> OFFLINE; any pressure condition -> UNHEALTHY;
        Ready after being OFFLINE -> CONNECTING; Ready -> ONLINE; otherwise
        NOTREADY.

        Returns:
            krake.data.core.Status: the status object created using information from the
                real world Cluster.

        """
        status = deepcopy(self.cluster.status)

        # For each observed kubernetes cluster registered in Krake,
        # get its current node status.
        loader = KubeConfigLoader(self.cluster.spec.kubeconfig)
        config = Configuration()
        await loader.load_and_set(config)
        kube = ApiClient(config)

        async with kube as api:
            v1 = client.CoreV1Api(api)
            try:
                response = await v1.list_node()
            except ClientConnectorError as err:
                # The cluster cannot be reached at all.
                status.state = ClusterState.OFFLINE
                self.cluster.status.state = ClusterState.OFFLINE
                # Log the error
                logger.debug(err)
                return status

        condition_dict = {
            "MemoryPressure": [],
            "DiskPressure": [],
            "PIDPressure": [],
            "Ready": [],
        }
        for item in response.items:
            for condition in item.status.conditions:
                # Nodes may report additional condition types (e.g.
                # "NetworkUnavailable"); only collect the types evaluated
                # below. Previously an unknown type raised a KeyError and
                # aborted the poll.
                if condition.type in condition_dict:
                    condition_dict[condition.type].append(condition.status)

        # NOTE(review): the ``== ["True"]`` comparisons assume a single-node
        # cluster; with several nodes these lists hold one entry per node and
        # the branches fall through to NOTREADY — confirm this is intended.
        if (
            condition_dict["MemoryPressure"] == ["True"]
            or condition_dict["DiskPressure"] == ["True"]
            or condition_dict["PIDPressure"] == ["True"]
        ):
            status.state = ClusterState.UNHEALTHY
            self.cluster.status.state = ClusterState.UNHEALTHY
            return status
        elif (
            condition_dict["Ready"] == ["True"]
            and status.state is ClusterState.OFFLINE
        ):
            status.state = ClusterState.CONNECTING
            self.cluster.status.state = ClusterState.CONNECTING
            return status
        elif condition_dict["Ready"] == ["True"]:
            status.state = ClusterState.ONLINE
            self.cluster.status.state = ClusterState.ONLINE
            return status
        else:
            status.state = ClusterState.NOTREADY
            self.cluster.status.state = ClusterState.NOTREADY
            return status
@listen.on(HookType.ApplicationPostReconcile)
@listen.on(HookType.ApplicationPostMigrate)
@listen.on(HookType.ClusterCreation)
async def register_observer(controller, resource, start=True, **kwargs):
    """Create an observer for the given Application or Cluster, and start it as a
    background task if wanted.

    If an observer already existed for this Application or Cluster, it is stopped
    and deleted.

    Args:
        controller (KubernetesController): the controller for which the observer will be
            added in the list of working observers.
        resource (krake.data.kubernetes.Application): the Application to observe or
        resource (krake.data.kubernetes.Cluster): the Cluster to observe.
        start (bool, optional): if False, does not start the observer as background
            task.

    """
    if resource.kind == Application.kind:
        # An Application observer also needs the Cluster it runs on.
        cluster = await controller.kubernetes_api.read_cluster(
            namespace=resource.status.running_on.namespace,
            name=resource.status.running_on.name,
        )
        observer = KubernetesApplicationObserver(
            cluster,
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    elif resource.kind == Cluster.kind:
        observer = KubernetesClusterObserver(
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    else:
        # The previous call passed ``resource`` as a positional logging
        # argument without a matching placeholder, which made the logging
        # module raise a formatting error instead of emitting the message.
        logger.debug("Unknown resource kind %r. No observer was registered.", resource)
        return

    # Lazy %-style formatting throughout (no f-string/%-style mix).
    logger.debug("Start observer for %s %r", resource.kind, resource.metadata.name)
    task = None
    if start:
        task = controller.loop.create_task(observer.run())

    controller.observers[resource.metadata.uid] = (observer, task)
@listen.on(HookType.ApplicationPreReconcile)
@listen.on(HookType.ApplicationPreMigrate)
@listen.on(HookType.ApplicationPreDelete)
@listen.on(HookType.ClusterDeletion)
async def unregister_observer(controller, resource, **kwargs):
    """Stop and delete the observer for the given Application or Cluster. If no observer
    is started, do nothing.

    Args:
        controller (KubernetesController): the controller for which the observer will be
            removed from the list of working observers.
        resource (krake.data.kubernetes.Application): the Application whose observer
            will be stopped or
        resource (krake.data.kubernetes.Cluster): the Cluster whose observer will be
            stopped.

    """
    # Atomically remove the entry; a missing key means no observer runs for
    # this resource and there is nothing to stop.
    entry = controller.observers.pop(resource.metadata.uid, None)
    if entry is None:
        return

    logger.debug(f"Stop observer for {resource.kind} %r", resource.metadata.name)
    _observer, task = entry
    task.cancel()
    # Cancellation surfaces as CancelledError when awaiting the task; this is
    # the expected shutdown path, so swallow it.
    with suppress(asyncio.CancelledError):
        await task
def utc_difference():
    """Get the difference in seconds between the current local time and the current
    UTC time.

    Returns:
        int: the time difference in seconds. Negative when the local timezone is
            behind UTC.

    """
    delta = datetime.now() - datetime.utcnow()
    # ``timedelta.seconds`` is always in [0, 86400) and ignores the ``days``
    # component, so it silently corrupts negative offsets (timezones behind
    # UTC, where the delta has days == -1). ``total_seconds`` keeps the sign
    # and the full magnitude.
    return round(delta.total_seconds())
def generate_certificate(config):
    """Create and sign a new certificate using the one defined in the complete hook
    configuration as intermediate certificate.

    Args:
        config (krake.data.config.CompleteHookConfiguration): the configuration of the
            complete hook.

    Returns:
        CertificatePair: the content of the certificate created and its corresponding
            key.

    """
    with open(config.intermediate_src, "rb") as f:
        intermediate_src = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
    with open(config.intermediate_key_src, "rb") as f:
        intermediate_key_src = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())

    client_cert = crypto.X509()

    # Set general information.
    # The X.509 version field is zero-based (RFC 5280): the value 2 denotes an
    # X.509 v3 certificate. The previous ``set_version(3)`` produced a
    # non-existent "v4" certificate, which strict TLS stacks may reject.
    client_cert.set_version(2)
    # NOTE(review): ``random`` is not a CSPRNG. Serial numbers are not secret,
    # but consider ``secrets.randbelow`` if collision/prediction resistance
    # matters here.
    client_cert.set_serial_number(random.randint(50000000000000, 100000000000000))

    # If not set before, TLS will not accept to use this certificate in UTC cases, as
    # the server time may be earlier.
    time_offset = utc_difference() * -1
    client_cert.gmtime_adj_notBefore(time_offset)
    # Validity: one year from now.
    client_cert.gmtime_adj_notAfter(1 * 365 * 24 * 60 * 60)

    # Set issuer and subject
    intermediate_subject = intermediate_src.get_subject()
    client_cert.set_issuer(intermediate_subject)
    # Copy the issuer's subject and only change the CN to the hook user.
    client_subj = crypto.X509Name(intermediate_subject)
    client_subj.CN = config.hook_user
    client_cert.set_subject(client_subj)

    # Create and set the private key
    client_key = crypto.PKey()
    client_key.generate_key(crypto.TYPE_RSA, 2048)
    client_cert.set_pubkey(client_key)
    client_cert.sign(intermediate_key_src, "sha256")

    cert_dump = crypto.dump_certificate(crypto.FILETYPE_PEM, client_cert).decode()
    key_dump = crypto.dump_privatekey(crypto.FILETYPE_PEM, client_key).decode()
    return CertificatePair(cert=cert_dump, key=key_dump)
def generate_default_observer_schema(app):
    """Generate the default observer schema for each Kubernetes resource present in
    ``spec.manifest`` for which a custom observer schema hasn't been specified.

    Args:
        app (krake.data.kubernetes.Application): The application for which to generate a
            default observer schema

    """
    # Start from the user-provided schemas; resources not covered by them get
    # a generated default appended below.
    app.status.mangled_observer_schema = deepcopy(app.spec.observer_schema)

    for manifest in app.spec.manifest:
        try:
            get_kubernetes_resource_idx(
                app.status.mangled_observer_schema, manifest
            )
        except IndexError:
            # No custom observer schema was provided for this resource: fall
            # back to the generated default.
            default_schema = generate_default_observer_schema_dict(
                manifest, first_level=True
            )
            app.status.mangled_observer_schema.append(default_schema)
def generate_default_observer_schema_dict(manifest_dict, first_level=False):
    """Recursively build the default observer schema for a (partial) Kubernetes
    resource given as a dictionary.

    Works hand in hand with :func:``generate_default_observer_schema_list``:
    dictionary values are handled here, list values there.

    Every scalar value of ``manifest_dict`` is replaced by ``None`` in the
    returned copy. When ``first_level`` is True (i.e. the dictionary is the
    complete observer schema of a resource), the identifying fields
    (``apiVersion``, ``kind``, ``metadata.name``) keep their manifest values,
    and the ingress of ``LoadBalancer`` Services is additionally observed.

    Args:
        manifest_dict (dict): Partial Kubernetes resources
        first_level (bool, optional): If True, indicates that the dictionary represents
            the whole observer schema of a Kubernetes resource

    Returns:
        dict: Generated partial observer_schema

    """

    def _observed(value):
        # Containers are recursed into; scalars are observed as ``None``.
        if isinstance(value, dict):
            return generate_default_observer_schema_dict(value)
        if isinstance(value, list):
            return generate_default_observer_schema_list(value)
        return None

    schema = {key: _observed(value) for key, value in manifest_dict.items()}

    if first_level:
        # Identifying fields keep their values from the manifest.
        schema["apiVersion"] = manifest_dict["apiVersion"]
        schema["kind"] = manifest_dict["kind"]
        schema["metadata"]["name"] = manifest_dict["metadata"]["name"]

        if (
            "spec" in manifest_dict
            and "type" in manifest_dict["spec"]
            and manifest_dict["spec"]["type"] == "LoadBalancer"
        ):
            schema["status"] = {"load_balancer": {"ingress": None}}

    return schema
def generate_default_observer_schema_list(manifest_list):
    """Recursively build the default observer schema for a (partial) Kubernetes
    resource given as a list.

    Works hand in hand with :func:``generate_default_observer_schema_dict``:
    list elements are handled here, dictionary values there.

    Every scalar element of ``manifest_list`` is replaced by ``None``, and a
    list control dictionary is appended, using the current list length as both
    the default minimum and maximum length.

    Args:
        manifest_list (list): Partial Kubernetes resources

    Returns:
        list: Generated partial observer_schema

    """

    def _observed(element):
        # Containers are recursed into; scalars are observed as ``None``.
        if isinstance(element, dict):
            return generate_default_observer_schema_dict(element)
        if isinstance(element, list):
            return generate_default_observer_schema_list(element)
        return None

    length = len(manifest_list)
    return [_observed(element) for element in manifest_list] + [
        {
            "observer_schema_list_min_length": length,
            "observer_schema_list_max_length": length,
        }
    ]
@listen.on(HookType.ApplicationMangling)
async def complete(app, api_endpoint, ssl_context, config):
    """Execute the application complete hook defined by :class:`Complete`.

    The hook mangles the given application and injects the complete hook
    variables. It is disabled by default; the user enables it with the
    --hook-complete argument in the rok cli.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API endpoint
        config (krake.data.config.HooksConfiguration): Complete hook
            configuration.

    """
    if "complete" not in app.spec.hooks:
        return

    # A configured external endpoint takes precedence over the API endpoint.
    api_endpoint = config.complete.external_endpoint or api_endpoint

    # The token is generated only once per Application.
    if not app.status.complete_token:
        app.status.complete_token = token_urlsafe()

    # Generate only once the certificate and key for a specific Application.
    generated_cert = CertificatePair(
        cert=app.status.complete_cert, key=app.status.complete_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(config.complete)
        app.status.complete_cert = generated_cert.cert
        app.status.complete_key = generated_cert.key

    hook = Complete(
        api_endpoint,
        ssl_context,
        hook_user=config.complete.hook_user,
        cert_dest=config.complete.cert_dest,
        env_token=config.complete.env_token,
        env_url=config.complete.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.complete_token,
        app.status.last_applied_manifest,
        config.complete.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "complete",
    )
@listen.on(HookType.ApplicationMangling)
async def shutdown(app, api_endpoint, ssl_context, config):
    """Execute the application shutdown hook defined by :class:`Shutdown`.

    The hook mangles the given application and injects the shutdown hook
    variables. It is disabled by default; the user enables it with the
    --hook-shutdown argument in the rok cli.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API endpoint
        config (krake.data.config.HooksConfiguration): Shutdown hook
            configuration.

    """
    if "shutdown" not in app.spec.hooks:
        return

    # A configured external endpoint takes precedence over the API endpoint.
    api_endpoint = config.shutdown.external_endpoint or api_endpoint

    # The token is generated only once per Application.
    if not app.status.shutdown_token:
        app.status.shutdown_token = token_urlsafe()

    # Generate only once the certificate and key for a specific Application.
    generated_cert = CertificatePair(
        cert=app.status.shutdown_cert, key=app.status.shutdown_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(config.shutdown)
        app.status.shutdown_cert = generated_cert.cert
        app.status.shutdown_key = generated_cert.key

    hook = Shutdown(
        api_endpoint,
        ssl_context,
        hook_user=config.shutdown.hook_user,
        cert_dest=config.shutdown.cert_dest,
        env_token=config.shutdown.env_token,
        env_url=config.shutdown.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.shutdown_token,
        app.status.last_applied_manifest,
        config.shutdown.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "shutdown",
    )
@listen.on(HookType.ResourcePreDelete)
async def pre_shutdown(controller, app, **kwargs):
    """Hook called before an Application is deleted.

    Currently a no-op placeholder: it only checks whether the "shutdown" hook
    is enabled for the Application, and performs no further action either way.

    Args:
        controller (KubernetesController): the controller processing the
            resource deletion.
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called

    """
    if "shutdown" not in app.spec.hooks:
        return
    # Removed an unreachable duplicated ``return`` that followed here; the
    # hook intentionally does nothing (yet) when the shutdown hook is enabled.
class SubResource(NamedTuple):
    """Description of a hook sub-resource to inject into a Kubernetes resource
    (e.g. an environment variable, a volume or a volume mount).
    """

    # Key of the list in the Kubernetes manifest that receives ``body``
    # (e.g. "env", "volumes", "volumeMounts").
    group: str
    # Value of the "name" field of ``body``; used to detect whether an entry
    # with the same name already exists in the manifest.
    name: str
    # Manifest snippet to append under ``group``.
    body: dict
    # Candidate key paths to the location of ``group`` inside the resource
    # (e.g. Deployment pod template vs. bare Pod spec); tried in order.
    path: tuple
class CertificatePair(NamedTuple):
    """Tuple which contains a certificate and its corresponding key.

    A pair equal to ``(None, None)`` is used to signal that the certificate
    has not been generated yet (a NamedTuple compares equal to a plain tuple).

    Attributes:
        cert (str): content of a certificate.
        key (str): content of the key that corresponds to the certificate.

    """

    cert: str
    key: str
class Hook(object):
    """Base class for the Application hooks.

    A hook mangles a given Application: it injects extra Kubernetes resources
    (Secrets) and sub-resources (environment variables, volumes, volume
    mounts) into the Application's ``last_applied_manifest``, and extends the
    ``mangled_observer_schema`` so the injected fields are observed.
    Subclasses provide the hook-specific :meth:`secret_token`,
    :meth:`create_hook_url` and :meth:`env_vars`.
    """

    # Kubernetes resource kinds the hook is allowed to mangle; overridden by
    # subclasses (e.g. ("Pod", "Deployment", "ReplicationController")).
    hook_resources = ()
    # File names of the CA bundle, client certificate and key inside the
    # mounted certificate Secret.
    ca_name = "ca-bundle.pem"
    cert_name = "cert.pem"
    key_name = "key.pem"

    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        # Endpoint of the Krake API the mangled Application will call back.
        self.api_endpoint = api_endpoint
        # SSL context of the Krake API; None when TLS is disabled.
        self.ssl_context = ssl_context
        # CN to set in the certificates generated for the Application.
        self.hook_user = hook_user
        # Directory in the containers where the certificate Secret is mounted.
        self.cert_dest = cert_dest
        # Names of the environment variables storing the token and the URL.
        self.env_token = env_token
        self.env_url = env_url

    def mangle_app(
        self,
        name,
        namespace,
        token,
        last_applied_manifest,
        intermediate_src,
        generated_cert,
        mangled_observer_schema,
        hook_type="",
    ):
        """Mangle a given application and inject complete hook resources and
        sub-resources into the :attr:`last_applied_manifest` object by :meth:`mangle`.

        Also mangle the observer_schema as new resources and sub-resources should
        be observed.

        :attr:`last_applied_manifest` is created as a deep copy of the desired
        application resources, as defined by user. It can be updated by custom hook
        resources or modified by custom hook sub-resources. It is used as a desired
        state for the Krake deployment process.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            token (str): Complete hook authentication token
            last_applied_manifest (list): Application resources
            intermediate_src (str): content of the certificate that is used to sign new
                certificates for the complete hook.
            generated_cert (CertificatePair): tuple that contains the content of the
                new signed certificate for the Application, and the content of its
                corresponding key.
            mangled_observer_schema (list): Observed fields
            hook_type (str, optional): Name of the hook the app should be mangled for

        """
        # Names of the injected Secrets and volume, derived from the
        # Application name and the hook type.
        secret_certs_name = "-".join([name, "krake", hook_type, "secret", "certs"])
        secret_token_name = "-".join([name, "krake", hook_type, "secret", "token"])
        volume_name = "-".join([name, "krake", hook_type, "volume"])

        # CA certificates of the Krake API; only available when TLS is on.
        ca_certs = (
            self.ssl_context.get_ca_certs(binary_form=True)
            if self.ssl_context
            else None
        )

        # Extract all different namespaces
        # FIXME: too many assumptions here: do we create one ConfigMap for each
        # namespace?
        resource_namespaces = {
            resource["metadata"].get("namespace", "default")
            for resource in last_applied_manifest
        }

        hook_resources = []
        hook_sub_resources = []

        # With TLS enabled, add one certificate Secret per namespace plus the
        # volume/volume-mount pair that exposes it to the containers.
        if ca_certs:
            hook_resources.extend(
                [
                    self.secret_certs(
                        secret_certs_name,
                        resource_namespace,
                        intermediate_src=intermediate_src,
                        generated_cert=generated_cert,
                        ca_certs=ca_certs,
                    )
                    for resource_namespace in resource_namespaces
                ]
            )
            hook_sub_resources.extend(
                [*self.volumes(secret_certs_name, volume_name, self.cert_dest)]
            )

        # Always add the token Secret (one per namespace) and the environment
        # variables referencing it.
        hook_resources.extend(
            [
                self.secret_token(
                    secret_token_name,
                    name,
                    namespace,
                    resource_namespace,
                    self.api_endpoint,
                    token,
                )
                for resource_namespace in resource_namespaces
            ]
        )
        hook_sub_resources.extend(
            [
                *self.env_vars(secret_token_name),
            ]
        )

        self.mangle(
            hook_resources,
            last_applied_manifest,
            mangled_observer_schema,
        )
        self.mangle(
            hook_sub_resources,
            last_applied_manifest,
            mangled_observer_schema,
            is_sub_resource=True,
        )

    def mangle(
        self,
        items,
        last_applied_manifest,
        mangled_observer_schema,
        is_sub_resource=False,
    ):
        """Mangle applications desired state with custom hook resources or
        sub-resources.

        Example:

        .. code:: python

            last_applied_manifest = [
                {
                    'apiVersion': 'v1',
                    'kind': 'Pod',
                    'metadata': {'name': 'test', 'namespace': 'default'},
                    'spec': {'containers': [{'name': 'test'}]}
                }
            ]
            mangled_observer_schema = [
                {
                    'apiVersion': 'v1',
                    'kind': 'Pod',
                    'metadata': {'name': 'test', 'namespace': 'default'},
                    'spec': {
                        'containers': [
                            {'name': None},
                            {
                                'observer_schema_list_max_length': 1,
                                'observer_schema_list_min_length': 1,
                            },
                        ]
                    },
                }
            ]
            hook_resources = [
                {
                    'apiVersion': 'v1',
                    'kind': 'Secret',
                    'metadata': {'name': 'sct', 'namespace': 'default'}
                }
            ]
            hook_sub_resources = [
                SubResource(
                    group='env', name='env', body={'name': 'test', 'value': 'test'},
                    path=(('spec', 'containers'),)
                )
            ]

            mangle(
                hook_resources,
                last_applied_manifest,
                mangled_observer_schema,
            )
            mangle(
                hook_sub_resources,
                last_applied_manifest,
                mangled_observer_schema,
                is_sub_resource=True
            )

            assert last_applied_manifest == [
                {
                    "apiVersion": "v1",
                    "kind": "Pod",
                    "metadata": {"name": "test", 'namespace': 'default'},
                    "spec": {
                        "containers": [
                            {
                                "name": "test",
                                "env": [{"name": "test", "value": "test"}]
                            }
                        ]
                    },
                },
                {"apiVersion": "v1", "kind": "Secret", "metadata": {"name": "sct"}},
            ]
            assert mangled_observer_schema == [
                {
                    "apiVersion": "v1",
                    "kind": "Pod",
                    "metadata": {"name": "test", "namespace": None},
                    "spec": {
                        "containers": [
                            {
                                "name": None,
                                "env": [
                                    {"name": None, "value": None},
                                    {
                                        "observer_schema_list_max_length": 1,
                                        "observer_schema_list_min_length": 1,
                                    },
                                ],
                            },
                            {
                                "observer_schema_list_max_length": 1,
                                "observer_schema_list_min_length": 1,
                            },
                        ]
                    },
                },
                {
                    "apiVersion": "v1",
                    "kind": "Secret",
                    "metadata": {"name": "sct", "namespace": None},
                },
            ]

        Args:
            items (list[SubResource]): Custom hook resources or sub-resources
            last_applied_manifest (list): Application resources
            mangled_observer_schema (list): Observed resources
            is_sub_resource (bool, optional): if False, the function only extend the
                list of Kubernetes resources defined in :attr:`last_applied_manifest`
                with new hook resources. Otherwise, the function injects each new hook
                sub-resource into the :attr:`last_applied_manifest` object
                sub-resources. Defaults to False.

        """
        if not items:
            return

        # Whole resources (e.g. Secrets) are simply appended, together with
        # their generated default observer schema.
        if not is_sub_resource:
            last_applied_manifest.extend(items)
            for sub_resource in items:
                # Generate the default observer schema for each resource
                mangled_observer_schema.append(
                    generate_default_observer_schema_dict(
                        sub_resource,
                        first_level=True,
                    )
                )
            return

        def inject(sub_resource, sub_resource_to_mangle, observed_resource_to_mangle):
            """Inject a hooks defined sub-resource into a Kubernetes sub-resource.

            Args:
                sub_resource (SubResource): Hook sub-resource that needs to be injected
                    into :attr:`last_applied_manifest`
                sub_resource_to_mangle (object): Kubernetes sub-resources from
                    :attr:`last_applied_manifest` which need to be processed
                observed_resource_to_mangle (dict): partial mangled_observer_schema
                    corresponding to the Kubernetes sub-resource.

            Raises:
                InvalidManifestError: if the sub-resource which will be mangled is not a
                    list or a dict.

            """
            # Create sub-resource group if not present in the Kubernetes sub-resource
            if sub_resource.group not in sub_resource_to_mangle:
                # FIXME: This assumes the subresource group contains a list
                sub_resource_to_mangle.update({sub_resource.group: []})
            # Create sub-resource group if not present in the observed fields,
            # starting with an empty list control dictionary.
            if sub_resource.group not in observed_resource_to_mangle:
                observed_resource_to_mangle.update(
                    {
                        sub_resource.group: [
                            {
                                "observer_schema_list_min_length": 0,
                                "observer_schema_list_max_length": 0,
                            }
                        ]
                    }
                )

            # Inject sub-resource
            # If sub-resource name is already there update it, if not, append it
            if sub_resource.name in [
                g["name"] for g in sub_resource_to_mangle[sub_resource.group]
            ]:
                # FIXME(review): this update branch is effectively a no-op:
                # ``item`` is a dict, so ``hasattr(item, "body")`` is always
                # False and the existing entry is never replaced with
                # ``sub_resource.body`` (and ``item.group`` would raise
                # AttributeError if it were reached). Also note the condition
                # tests truthiness of ``item["name"]`` rather than comparing
                # it with ``sub_resource.name``.
                # FIXME: Assuming we are dealing with a list
                for idx, item in enumerate(sub_resource_to_mangle[sub_resource.group]):
                    if item["name"]:
                        if hasattr(item, "body"):
                            sub_resource_to_mangle[item.group][idx] = item["body"]
            else:
                sub_resource_to_mangle[sub_resource.group].append(sub_resource.body)

            # Make sure the value is observed: insert the generated schema
            # just before the trailing list control dictionary, and widen the
            # accepted list length bounds by one.
            if sub_resource.name not in [
                g["name"] for g in observed_resource_to_mangle[sub_resource.group][:-1]
            ]:
                observed_resource_to_mangle[sub_resource.group].insert(
                    -1, generate_default_observer_schema_dict(sub_resource.body)
                )
                observed_resource_to_mangle[sub_resource.group][-1][
                    "observer_schema_list_min_length"
                ] += 1
                observed_resource_to_mangle[sub_resource.group][-1][
                    "observer_schema_list_max_length"
                ] += 1

        for resource in last_applied_manifest:
            # Complete hook is applied only on defined Kubernetes resources
            if resource["kind"] not in self.hook_resources:
                continue

            for sub_resource in items:
                sub_resources_to_mangle = None
                idx_observed = get_kubernetes_resource_idx(
                    mangled_observer_schema, resource
                )
                # Try each candidate path in order; the first one present in
                # the resource wins.
                # NOTE(review): if no path matches, ``sub_resources_to_mangle``
                # stays None and ``keys`` keeps the last tried path; the
                # isinstance checks below then raise InvalidManifestError —
                # confirm this is the intended failure mode.
                for keys in sub_resource.path:
                    try:
                        sub_resources_to_mangle = reduce(getitem, keys, resource)
                    except KeyError:
                        continue
                    break

                # Create the path to the observed sub-resource, if it doesn't yet exist
                try:
                    observed_sub_resources = reduce(
                        getitem, keys, mangled_observer_schema[idx_observed]
                    )
                except KeyError:
                    # NOTE(review): ``create_path`` is called via the
                    # ``Complete`` subclass although it is defined on ``Hook``.
                    Complete.create_path(
                        mangled_observer_schema[idx_observed], list(keys)
                    )
                    observed_sub_resources = reduce(
                        getitem, keys, mangled_observer_schema[idx_observed]
                    )

                if isinstance(sub_resources_to_mangle, list):
                    # NOTE: ``idx_observed`` is reused here for the list index;
                    # it is recomputed at the top of each sub_resource loop.
                    for idx, sub_resource_to_mangle in enumerate(
                        sub_resources_to_mangle
                    ):
                        # Ensure that each element of the list is observed.
                        idx_observed = idx
                        if idx >= len(observed_sub_resources[:-1]):
                            idx_observed = len(observed_sub_resources[:-1])
                            # FIXME: Assuming each element of the list contains a
                            # dictionary, therefore initializing new elements with an
                            # empty dict
                            observed_sub_resources.insert(-1, {})
                        observed_sub_resource = observed_sub_resources[idx_observed]

                        # FIXME: This is assuming a list always contains dict
                        inject(
                            sub_resource, sub_resource_to_mangle, observed_sub_resource
                        )
                elif isinstance(sub_resources_to_mangle, dict):
                    inject(
                        sub_resource, sub_resources_to_mangle, observed_sub_resources
                    )
                else:
                    message = (
                        f"The sub-resource to mangle {sub_resources_to_mangle!r} has an"
                        "invalid type, should be in '[dict, list]'"
                    )
                    raise InvalidManifestError(message)

    @staticmethod
    def attribute_map(obj):
        """Convert a Kubernetes object to dict based on its attribute mapping

        Example:

        .. code:: python

            from kubernetes_asyncio.client import V1VolumeMount

            d = attribute_map(
                V1VolumeMount(name="name", mount_path="path")
            )
            assert d == {'mountPath': 'path', 'name': 'name'}

        Args:
            obj (object): Kubernetes object

        Returns:
            dict: Converted Kubernetes object

        """
        # ``attribute_map`` translates snake_case attribute names to their
        # camelCase manifest keys; ``None`` attributes are omitted.
        return {
            obj.attribute_map[attr]: getattr(obj, attr)
            for attr, _ in obj.to_dict().items()
            if getattr(obj, attr) is not None
        }

    @staticmethod
    def create_path(mangled_observer_schema, keys):
        """Create the path to the observed field in the observer schema.

        When a sub-resource is mangled, it should be observed. This function creates
        the path to the subresource to observe.

        Args:
            mangled_observer_schema (dict): Partial observer schema of a resource
            keys (list): list of keys forming the path to the sub-resource to
                observe

        FIXME: This assumes we are only adding keys to dict. We don't consider lists

        """
        # Unpack the first key first, as it contains the base directory
        # NOTE: ``keys`` is consumed in place (pop) by the recursion.
        key = keys.pop(0)

        # If the key is the last of the list, we reached the end of the path.
        if len(keys) == 0:
            mangled_observer_schema[key] = None
            return

        if key not in mangled_observer_schema:
            mangled_observer_schema[key] = {}
        Hook.create_path(mangled_observer_schema[key], keys)

    def secret_certs(
        self,
        secret_name,
        namespace,
        ca_certs=None,
        intermediate_src=None,
        generated_cert=None,
    ):
        """Create a complete hooks secret resource.

        Complete hook secret stores Krake CAs and client certificates to communicate
        with the Krake API.

        Args:
            secret_name (str): Secret name
            namespace (str): Kubernetes namespace where the Secret will be created.
            ca_certs (list): Krake CA list
            intermediate_src (str): content of the certificate that is used to sign new
                certificates for the complete hook.
            generated_cert (CertificatePair): tuple that contains the content of the
                new signed certificate for the Application, and the content of its
                corresponding key.

        Returns:
            dict: complete hook secret resource

        """
        # Convert each DER-encoded CA to PEM and concatenate them into one
        # bundle.
        ca_certs_pem = ""
        for ca_cert in ca_certs:
            x509 = crypto.load_certificate(crypto.FILETYPE_ASN1, ca_cert)
            ca_certs_pem += crypto.dump_certificate(crypto.FILETYPE_PEM, x509).decode()

        # Add the intermediate certificate into the chain
        with open(intermediate_src, "r") as f:
            intermediate_src_content = f.read()

        ca_certs_pem += intermediate_src_content

        data = {
            self.ca_name: self._encode_to_64(ca_certs_pem),
            self.cert_name: self._encode_to_64(generated_cert.cert),
            self.key_name: self._encode_to_64(generated_cert.key),
        }
        return self.secret(secret_name, data, namespace)

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create a hooks secret resource.

        The hook secret stores Krake authentication token
        and hook URL for given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Complete hook authentication token

        Returns:
            dict: complete hook secret resource

        """
        # Hook-specific; implemented by subclasses (see Complete.secret_token).
        pass

    def volumes(self, secret_name, volume_name, mount_path):
        """Create complete hooks volume and volume mount sub-resources

        Complete hook volume gives access to hook's secret, which stores
        Krake CAs and client certificates to communicate with the Krake API.
        Complete hook volume mount puts the volume into the application

        Args:
            secret_name (str): Secret name
            volume_name (str): Volume name
            mount_path (list): Volume mount path

        Returns:
            list: List of complete hook volume and volume mount sub-resources

        """
        volume = V1Volume(name=volume_name, secret={"secretName": secret_name})
        volume_mount = V1VolumeMount(name=volume_name, mount_path=mount_path)
        # Two candidate paths per sub-resource: templated workloads
        # (Deployment, ReplicationController) and bare Pods.
        return [
            SubResource(
                group="volumes",
                name=volume.name,
                body=self.attribute_map(volume),
                path=(("spec", "template", "spec"), ("spec",)),
            ),
            SubResource(
                group="volumeMounts",
                name=volume_mount.name,
                body=self.attribute_map(volume_mount),
                path=(
                    ("spec", "template", "spec", "containers"),
                    ("spec", "containers"),  # kind: Pod
                ),
            ),
        ]

    @staticmethod
    def _encode_to_64(string):
        """Compute the base 64 encoding of a string.

        Args:
            string (str): the string to encode.

        Returns:
            str: the result of the encoding.

        """
        return b64encode(string.encode()).decode()

    def secret(self, secret_name, secret_data, namespace, _type="Opaque"):
        """Create a secret resource.

        Args:
            secret_name (str): Secret name
            secret_data (dict): Secret data
            namespace (str): Kubernetes namespace where the Secret will be created.
            _type (str, optional): Secret type. Defaults to Opaque.

        Returns:
            dict: secret resource

        """
        return self.attribute_map(
            V1Secret(
                api_version="v1",
                kind="Secret",
                data=secret_data,
                metadata={"name": secret_name, "namespace": namespace},
                type=_type,
            )
        )

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' hook URL.

        Function needs to be specified for each hook.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application shutdown url

        """
        # Hook-specific; implemented by subclasses (see Complete.create_hook_url).
        pass

    def env_vars(self, secret_name):
        """Create the hooks' environment variables sub-resources.

        Function needs to be specified for each hook.
        Creates hook environment variables to store Krake authentication token
        and a hook URL for the given applications.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of shutdown hook environment variables sub-resources

        """
        # Hook-specific; implemented by subclasses (see Complete.env_vars).
        pass
class Complete(Hook):
    """Mangle given application and inject complete hook variables into it.

    The hook injects a Kubernetes secret, which stores the Krake
    authentication token and the Krake complete hook URL for the given
    application. The variables from the Kubernetes secret are imported as
    environment variables into the application resource definition. Only
    resources defined in :args:`hook_resources` can be modified.

    The names of the environment variables are defined in the application
    controller configuration file.

    If TLS is enabled on the Krake API, the complete hook injects a Kubernetes
    secret, and its corresponding volume and volume mount definitions for the
    Krake CA, the client certificate with the right CN, and its key. The
    directory where the secret is mounted is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which
            stores the Krake authentication token.
        env_url (str, optional): Name of the environment variable, which
            stores the Krake complete hook URL.

    """

    hook_resources = ("Pod", "Deployment", "ReplicationController")

    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        super().__init__(
            api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
        )
        self.env_url = env_url

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create the complete hook secret resource.

        The secret stores the Krake authentication token and the complete hook
        URL for the given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Complete hook authentication token

        Returns:
            dict: complete hook secret resource

        """
        hook_url = self.create_hook_url(name, namespace, api_endpoint)
        # Secret values are base64 encoded, keyed by the lower-cased
        # environment variable names.
        payload = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(hook_url),
        }
        return self.secret(secret_name, payload, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' complete URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application complete url

        """
        path = f"/kubernetes/namespaces/{namespace}/applications/{name}/complete"
        return str(URL(api_endpoint).with_path(path))

    def env_vars(self, secret_name):
        """Create complete hook environment variables sub-resources.

        The environment variables expose the Krake authentication token and
        the complete hook URL (both stored in the given Secret) to the
        application.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of complete hook environment variables sub-resources

        """

        def secret_env(var_name):
            # Build an env var whose value is read from the hook Secret.
            selector = V1SecretKeySelector(name=secret_name, key=var_name.lower())
            source = V1EnvVarSource(secret_key_ref=self.attribute_map(selector))
            return V1EnvVar(name=var_name, value_from=self.attribute_map(source))

        return [
            SubResource(
                group="env",
                name=env.name,
                body=self.attribute_map(env),
                path=(
                    ("spec", "template", "spec", "containers"),
                    ("spec", "containers"),  # kind: Pod
                ),
            )
            for env in (secret_env(self.env_token), secret_env(self.env_url))
        ]
class Shutdown(Hook):
    """Mangle given application and inject shutdown hook variables into it.

    The hook injects a Kubernetes secret, which stores the Krake
    authentication token and the Krake shutdown hook URL for the given
    application. The variables from the Kubernetes secret are imported as
    environment variables into the application resource definition. Only
    resources defined in :args:`hook_resources` can be modified.

    The names of the environment variables are defined in the application
    controller configuration file.

    If TLS is enabled on the Krake API, the shutdown hook injects a Kubernetes
    secret, and its corresponding volume and volume mount definitions for the
    Krake CA, the client certificate with the right CN, and its key. The
    directory where the secret is mounted is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which
            stores the Krake authentication token.
        env_url (str, optional): Name of the environment variable, which
            stores the Krake shutdown hook URL.

    """

    hook_resources = ("Pod", "Deployment", "ReplicationController")

    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        super().__init__(
            api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
        )
        self.env_url = env_url

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create the shutdown hook secret resource.

        The secret stores the Krake authentication token and the shutdown hook
        URL for the given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Shutdown hook authentication token

        Returns:
            dict: shutdown hook secret resource

        """
        hook_url = self.create_hook_url(name, namespace, api_endpoint)
        # Secret values are base64 encoded, keyed by the lower-cased
        # environment variable names.
        payload = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(hook_url),
        }
        return self.secret(secret_name, payload, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' shutdown URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application shutdown url

        """
        path = f"/kubernetes/namespaces/{namespace}/applications/{name}/shutdown"
        return str(URL(api_endpoint).with_path(path))

    def env_vars(self, secret_name):
        """Create shutdown hook environment variables sub-resources.

        The environment variables expose the Krake authentication token and
        the shutdown hook URL (both stored in the given Secret) to the
        application.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of shutdown hook environment variables sub-resources

        """

        def secret_env(var_name):
            # Build an env var whose value is read from the hook Secret.
            selector = V1SecretKeySelector(name=secret_name, key=var_name.lower())
            source = V1EnvVarSource(secret_key_ref=self.attribute_map(selector))
            return V1EnvVar(name=var_name, value_from=self.attribute_map(source))

        return [
            SubResource(
                group="env",
                name=env.name,
                body=self.attribute_map(env),
                path=(
                    ("spec", "template", "spec", "containers"),
                    ("spec", "containers"),  # kind: Pod
                ),
            )
            for env in (secret_env(self.env_token), secret_env(self.env_url))
        ]
|
krake/krake/controller/kubernetes/hooks.py
|
codereval_python_data_64
|
Together with :func:``update_last_applied_manifest_list_from_resp``, this
function is called recursively to update a partial ``last_applied_manifest``
from a partial Kubernetes response
Args:
last_applied_manifest (dict): partial ``last_applied_manifest`` being
updated
observer_schema (dict): partial ``observer_schema``
response (dict): partial response from the Kubernetes API.
Raises:
KeyError: If the observed field is not present in the Kubernetes response
This function goes through all observed fields and initializes their values in
last_applied_manifest if they are not yet present.
def update_last_applied_manifest_dict_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_list_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response.

    It goes through all observed fields and initializes their values in
    ``last_applied_manifest`` if they are not yet present.

    Args:
        last_applied_manifest (dict): partial ``last_applied_manifest`` being
            updated
        observer_schema (dict): partial ``observer_schema``
        response (dict): partial response from the Kubernetes API.

    Raises:
        KeyError: If an observed field is not present in the Kubernetes response

    """
    for key, value in observer_schema.items():
        # Manifest keys are camelCase, while the Kubernetes client serializes
        # response keys as snake_case, hence the conversion. (NOTE: despite
        # its name, ``camel_key`` holds the snake_case form.)
        camel_key = camel_to_snake_case(key)
        if camel_key not in response:
            # An observed key should always be present in the k8s response
            raise KeyError(
                f"Observed key {camel_key} is not present in response {response}"
            )
        if isinstance(value, dict):
            if key not in last_applied_manifest:
                # The dictionary is observed, but not present in
                # last_applied_manifest
                last_applied_manifest[key] = {}
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest[key], observer_schema[key], response[camel_key]
            )
        elif isinstance(value, list):
            if key not in last_applied_manifest:
                # The list is observed, but not present in last_applied_manifest
                last_applied_manifest[key] = []
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest[key], observer_schema[key], response[camel_key]
            )
        elif key not in last_applied_manifest:
            # Scalar observed field not yet initialized: take the live value.
            last_applied_manifest[key] = response[camel_key]
"""This module defines the Hook Dispatcher and listeners for registering and
executing hooks. Hook Dispatcher emits hooks based on :class:`Hook` attributes which
define when the hook will be executed.
"""
import asyncio
import logging
import random
from base64 import b64encode
from collections import defaultdict
from contextlib import suppress
from copy import deepcopy
from datetime import datetime
from functools import reduce
from operator import getitem
from enum import Enum, auto
from inspect import iscoroutinefunction
from OpenSSL import crypto
from typing import NamedTuple
import yarl
from aiohttp import ClientConnectorError
from krake.controller import Observer
from krake.controller.kubernetes.client import KubernetesClient, InvalidManifestError
from krake.utils import camel_to_snake_case, get_kubernetes_resource_idx
from kubernetes_asyncio.client.rest import ApiException
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio import client
from krake.data.kubernetes import ClusterState, Application, Cluster
from yarl import URL
from secrets import token_urlsafe
from kubernetes_asyncio.client import (
Configuration,
V1Secret,
V1EnvVar,
V1VolumeMount,
V1Volume,
V1SecretKeySelector,
V1EnvVarSource,
)
from kubernetes_asyncio.config.kube_config import KubeConfigLoader
logger = logging.getLogger(__name__)
class HookType(Enum):
    """Life-cycle events to which handlers can be attached via
    :meth:`HookDispatcher.on`.

    Member values come from :func:`enum.auto` and carry no semantic meaning;
    only member identity matters.
    """

    # Hooks around a single Kubernetes resource operation
    ResourcePreCreate = auto()
    ResourcePostCreate = auto()
    ResourcePreUpdate = auto()
    ResourcePostUpdate = auto()
    ResourcePreDelete = auto()
    ResourcePostDelete = auto()
    # Hooks around the life cycle of a whole Application
    ApplicationMangling = auto()
    ApplicationPreMigrate = auto()
    ApplicationPostMigrate = auto()
    ApplicationPreReconcile = auto()
    ApplicationPostReconcile = auto()
    ApplicationPreDelete = auto()
    ApplicationPostDelete = auto()
    # Hooks around the life cycle of a Cluster
    ClusterCreation = auto()
    ClusterDeletion = auto()
class HookDispatcher(object):
    """Simple wrapper around a registry of handlers associated to :class:`Hook`
    attributes. Each :class:`Hook` attribute defines when the handler will be
    executed.

    Listeners for certain hooks can be registered via :meth:`on`. Registered
    listeners are executed via :meth:`hook`.

    Example:
        .. code:: python

            listen = HookDispatcher()

            @listen.on(HookType.PreApply)
            def to_perform_before_app_creation(app, cluster, resource, controller):
                # Do Stuff

            @listen.on(HookType.PostApply)
            def another_to_perform_after_app_creation(app, cluster, resource, resp):
                # Do Stuff

            @listen.on(HookType.PostDelete)
            def to_perform_after_app_deletion(app, cluster, resource, resp):
                # Do Stuff

    """

    def __init__(self):
        # Maps hook type -> list of handlers, in registration order.
        self.registry = defaultdict(list)

    def on(self, hook):
        """Decorator function to add a new handler to the registry.

        Args:
            hook (HookType): Hook attribute for which to register the handler.

        Returns:
            callable: Decorator for registering listeners for the specified
            hook.

        """

        def decorator(handler):
            self.registry[hook].append(handler)
            return handler

        return decorator

    async def hook(self, hook, **kwargs):
        """Execute the list of handlers associated to the provided :class:`Hook`
        attribute.

        Handlers run sequentially, in registration order; coroutine handlers
        are awaited.

        Args:
            hook (HookType): The hook attribute for which to execute handlers.

        """
        # ``registry`` is a defaultdict, so the previous ``try/except
        # KeyError`` around the lookup was dead code — indexing could never
        # raise, and it even inserted an empty list into the registry as a
        # side effect. ``get`` avoids both issues.
        for handler in self.registry.get(hook, ()):
            if iscoroutinefunction(handler):
                await handler(**kwargs)
            else:
                handler(**kwargs)
# Module-level dispatcher instance: the @listen.on(...) decorators below
# register all Kubernetes hook handlers on it.
listen = HookDispatcher()
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
async def register_service(app, cluster, resource, response):
    """Register endpoint of Kubernetes Service object on creation and update.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        cluster (krake.data.kubernetes.Cluster): The cluster on which the
            application is running
        resource (dict): Kubernetes object description as specified in the
            specification of the application.
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    """
    if resource["kind"] != "Service":
        return

    service_name = resource["metadata"]["name"]
    spec = response.spec

    if spec and spec.type == "LoadBalancer":
        # For a "LoadBalancer" Service, an external IP is assigned by a load
        # balancer controller, and the "port" from the spec is reachable from
        # the outside.
        lb_status = response.status.load_balancer
        if lb_status and lb_status.ingress:
            external_ip = lb_status.ingress[0].ip
        else:
            # The IP is assigned asynchronously by an additional controller
            # (e.g. one requesting a floating IP from OpenStack). The Service
            # exists before the IP is ready (or the controller failed), which
            # "<pending>" expresses.
            external_ip = "<pending>"
        external_port = spec.ports[0].port if spec.ports else "<pending>"
        app.status.services[service_name] = f"{external_ip}:{external_port}"
        return

    # Non-LoadBalancer Service: expose it via its node port, if any.
    node_port = spec.ports[0].node_port if spec and spec.ports else None
    if node_port is None:
        # No node port: drop a potential stale registration and stop.
        app.status.services.pop(service_name, None)
        return

    # Determine URL of Kubernetes cluster API
    loader = KubeConfigLoader(cluster.spec.kubeconfig)
    config = Configuration()
    await loader.load_and_set(config)
    cluster_host = yarl.URL(config.host).host
    app.status.services[service_name] = f"{cluster_host}:{node_port}"
@listen.on(HookType.ResourcePostDelete)
async def unregister_service(app, resource, **kwargs):
    """Unregister endpoint of Kubernetes Service object on deletion.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    if resource["kind"] != "Service":
        return
    # Forget the endpoint; it may already be absent if it was never registered.
    app.status.services.pop(resource["metadata"]["name"], None)
@listen.on(HookType.ResourcePostDelete)
async def remove_resource_from_last_observed_manifest(app, resource, **kwargs):
    """Remove a given resource from the last_observed_manifest after its deletion.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    manifest = app.status.last_observed_manifest
    try:
        idx = get_kubernetes_resource_idx(manifest, resource)
    except IndexError:
        # Resource was not tracked; nothing to clean up.
        return
    manifest.pop(idx)
def update_last_applied_manifest_dict_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_list_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response.

    It goes through all observed fields and initializes their values in
    ``last_applied_manifest`` if they are not yet present.

    Args:
        last_applied_manifest (dict): partial ``last_applied_manifest`` being
            updated
        observer_schema (dict): partial ``observer_schema``
        response (dict): partial response from the Kubernetes API.

    Raises:
        KeyError: If an observed field is not present in the Kubernetes response

    """
    for field, schema in observer_schema.items():
        # Manifest keys are camelCase, while the Kubernetes client serializes
        # response keys as snake_case, hence the conversion.
        camel_key = camel_to_snake_case(field)
        if camel_key not in response:
            # An observed key should always be present in the k8s response
            raise KeyError(
                f"Observed key {camel_key} is not present in response {response}"
            )
        if isinstance(schema, dict):
            # The dict is observed; create it if not yet initialized, then
            # recurse into it.
            sub_manifest = last_applied_manifest.setdefault(field, {})
            update_last_applied_manifest_dict_from_resp(
                sub_manifest, observer_schema[field], response[camel_key]
            )
        elif isinstance(schema, list):
            # The list is observed; create it if not yet initialized, then
            # recurse into it.
            sub_manifest = last_applied_manifest.setdefault(field, [])
            update_last_applied_manifest_list_from_resp(
                sub_manifest, observer_schema[field], response[camel_key]
            )
        elif field not in last_applied_manifest:
            # Scalar observed field not yet initialized: take the live value.
            last_applied_manifest[field] = response[camel_key]
def update_last_applied_manifest_list_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_dict_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response.

    It goes through all observed elements and initializes their values in
    ``last_applied_manifest`` if they are not yet present.

    Args:
        last_applied_manifest (list): partial ``last_applied_manifest`` being
            updated
        observer_schema (list): partial ``observer_schema``
        response (list): partial response from the Kubernetes API.

    """
    # Looping over the observed resource, except the last element which is the
    # special control dictionary
    for idx, val in enumerate(observer_schema[:-1]):
        if idx >= len(response):
            # Element is observed but not present in k8s response, so following
            # elements will also not exist.
            #
            # This doesn't raise an Exception as observing the element of a list
            # doesn't ensure its presence. The list length is controlled by the
            # special control dictionary
            return
        if isinstance(val, dict):
            if idx >= len(last_applied_manifest):
                # The dict is observed, but not present in last_applied_manifest
                last_applied_manifest.append({})
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest[idx], observer_schema[idx], response[idx]
            )
        elif isinstance(val, list):
            # BUGFIX: dispatch on the observer schema element ``val``, not on
            # ``response[idx]``, mirroring the dict branch above and the dict
            # variant of this function. Dispatching on the response type routed
            # a scalar-observed element whose live value is a list into the
            # list recursion with a non-list schema (TypeError on slicing).
            if idx >= len(last_applied_manifest):
                # The list is observed, but not present in last_applied_manifest
                last_applied_manifest.append([])
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest[idx], observer_schema[idx], response[idx]
            )
        elif idx >= len(last_applied_manifest):
            # Element is not yet present in last_applied_manifest. Adding it.
            last_applied_manifest.append(response[idx])
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_applied_manifest_from_resp(app, response, **kwargs):
    """Hook run after the creation or update of an application in order to update
    the `status.last_applied_manifest` using the k8s response.

    After a Kubernetes resource has been created/updated, the
    `status.last_applied_manifest` has to be updated. All fields already
    initialized (either from the mangling of `spec.manifest`, or by a previous
    call to this function) should be left untouched. Only observed fields which
    are not present in `status.last_applied_manifest` should be initialized.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        response (kubernetes_asyncio.client.V1Status): Response of the Kubernetes API

    """
    # The Kubernetes client may hand back either a deserialized object or a
    # plain dict (when it couldn't deserialize the response).
    resp = response if isinstance(response, dict) else response.to_dict()

    idx_applied = get_kubernetes_resource_idx(app.status.last_applied_manifest, resp)
    idx_observed = get_kubernetes_resource_idx(app.status.mangled_observer_schema, resp)

    update_last_applied_manifest_dict_from_resp(
        app.status.last_applied_manifest[idx_applied],
        app.status.mangled_observer_schema[idx_observed],
        resp,
    )
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_observed_manifest_from_resp(app, response, **kwargs):
    """Handler to run after the creation or update of a Kubernetes resource to update
    the last_observed_manifest from the response of the Kubernetes API.

    The target last_observed_manifest holds the value of all observed fields plus
    the special control dictionaries for the list length.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    """
    # The Kubernetes client may hand back either a deserialized object or a
    # plain dict (when it couldn't deserialize the response).
    resp = response if isinstance(response, dict) else response.to_dict()

    # All created resources should be observed, so an IndexError here is a
    # genuine error and is deliberately allowed to propagate.
    idx_observed = get_kubernetes_resource_idx(
        app.status.mangled_observer_schema, resp
    )

    observed = app.status.last_observed_manifest
    try:
        idx_last_observed = get_kubernetes_resource_idx(observed, resp)
    except IndexError:
        # Resource not yet tracked in last_observed_manifest: reserve a slot.
        idx_last_observed = len(observed)
        observed.append({})

    # Overwrite the tracked manifest of this resource from the live response.
    observed[idx_last_observed] = update_last_observed_manifest_dict(
        app.status.mangled_observer_schema[idx_observed], resp
    )
def update_last_observed_manifest_dict(observed_resource, response):
    """Together with :func:``update_last_observed_manifest_list``, recursively
    crafts the ``last_observed_manifest`` from the Kubernetes :attr:``response``.

    Gets the value of all observed fields from the Kubernetes response.

    Args:
        observed_resource (dict): The schema to observe for the partial given resource
        response (dict): The partial Kubernetes response for this resource.

    Raises:
        KeyError: If an observed key is not present in the Kubernetes response

    Returns:
        dict: The dictionary of observed keys and their value

    """
    result = {}
    for field, schema in observed_resource.items():
        # Manifest keys are camelCase, while the Kubernetes client serializes
        # response keys as snake_case, hence the conversion.
        camel_key = camel_to_snake_case(field)
        if camel_key not in response:
            raise KeyError(
                f"Observed key {camel_key} is not present in response {response}"
            )
        live_value = response[camel_key]
        if isinstance(schema, dict):
            result[field] = update_last_observed_manifest_dict(schema, live_value)
        elif isinstance(schema, list):
            result[field] = update_last_observed_manifest_list(schema, live_value)
        else:
            result[field] = live_value
    return result
def update_last_observed_manifest_list(observed_resource, response):
    """Together with :func:``update_last_observed_manifest_dict``, recursively
    crafts the ``last_observed_manifest`` from the Kubernetes :attr:``response``.

    Gets the value of all observed elements from the Kubernetes response.

    Args:
        observed_resource (list): the schema to observe for the partial given resource
        response (list): the partial Kubernetes response for this resource.

    Returns:
        list: The list of observed elements, plus the special list length control
        dictionary

    """
    if not response:
        # Nothing on the cluster: record only the (zero) list length.
        return [{"observer_schema_list_current_length": 0}]
    res = []
    # Looping over the observed resource, except the last element which is the
    # special control dictionary
    for idx, val in enumerate(observed_resource[:-1]):
        if idx >= len(response):
            # Element is not present in the Kubernetes response, nothing more to do
            break
        # Use isinstance() rather than ``type(...) ==``: it is the idiomatic
        # type check, matches the sibling helpers, and also accepts dict/list
        # subclasses.
        if isinstance(response[idx], dict):
            res.append(update_last_observed_manifest_dict(val, response[idx]))
        elif isinstance(response[idx], list):
            res.append(update_last_observed_manifest_list(val, response[idx]))
        else:
            res.append(response[idx])
    # Append the special control dictionary to the list
    res.append({"observer_schema_list_current_length": len(response)})
    return res
def update_last_applied_manifest_dict_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Together with :func:``update_last_applied_manifest_list_from_spec``, this
    function is called recursively to update a partial ``last_applied_manifest``.

    Observed fields which have a known value (present in the current
    ``last_applied_manifest``) but are absent from the new one are carried
    over.

    Args:
        resource_status_new (dict): partial ``last_applied_manifest`` being updated
        resource_status_old (dict): partial of the current ``last_applied_manifest``
        resource_observed (dict): partial observer_schema for the manifest file
            being updated

    """
    for field, schema in resource_observed.items():
        if field not in resource_status_old:
            # Observed field has no known value yet; nothing to carry over.
            continue
        missing = field not in resource_status_new
        if isinstance(schema, dict):
            if missing:
                # Initialize the container before recursing into it.
                resource_status_new[field] = {}
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[field],
                resource_status_old[field],
                resource_observed[field],
            )
        elif isinstance(schema, list):
            if missing:
                # Initialize the container before recursing into it.
                resource_status_new[field] = []
            update_last_applied_manifest_list_from_spec(
                resource_status_new[field],
                resource_status_old[field],
                resource_observed[field],
            )
        elif missing:
            # Scalar observed field: carry the known value over.
            resource_status_new[field] = resource_status_old[field]
def update_last_applied_manifest_list_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Together with :func:``update_last_applied_manifest_dict_from_spec``, this
    function is called recursively to update a partial ``last_applied_manifest``.

    Observed elements which have a known value (present in the current
    ``last_applied_manifest``) but are absent from the new one are carried
    over.

    Args:
        resource_status_new (list): partial ``last_applied_manifest`` being updated
        resource_status_old (list): partial of the current ``last_applied_manifest``
        resource_observed (list): partial observer_schema for the manifest file
            being updated

    """
    # Skip the trailing element of the observer schema: it is the special
    # list-length control dictionary.
    for idx, schema in enumerate(resource_observed[:-1]):
        if idx >= len(resource_status_old):
            # Neither this element nor any following one has a known value.
            break
        missing = idx >= len(resource_status_new)
        if isinstance(schema, dict):
            if missing:
                # Initialize the container before recursing into it.
                resource_status_new.append({})
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[idx],
                resource_status_old[idx],
                resource_observed[idx],
            )
        elif isinstance(schema, list):
            if missing:
                # Initialize the container before recursing into it.
                resource_status_new.append([])
            update_last_applied_manifest_list_from_spec(
                resource_status_new[idx],
                resource_status_old[idx],
                resource_observed[idx],
            )
        elif missing:
            # Scalar observed element: carry the known value over.
            resource_status_new.append(resource_status_old[idx])
def update_last_applied_manifest_from_spec(app):
    """Update the status.last_applied_manifest of an application from spec.manifests.

    This function is called on application creation and updates. The
    last_applied_manifest of an application is initialized as a copy of
    spec.manifest, and is augmented by all known observed fields not yet
    initialized (i.e. all observed fields or resources which are present in the
    current last_applied_manifest but not in the spec.manifest).

    Args:
        app (krake.data.kubernetes.Application): Application to update

    """
    # Start from a copy of spec.manifest; observed fields which are known
    # (present in the current last_applied_manifest) but absent from
    # spec.manifest are then merged in.
    updated = deepcopy(app.spec.manifest)

    for resource_observed in app.status.mangled_observer_schema:
        try:
            idx_status_old = get_kubernetes_resource_idx(
                app.status.last_applied_manifest, resource_observed
            )
        except IndexError:
            # The resource has no known value in the current
            # last_applied_manifest: nothing to carry over, regardless of
            # whether spec.manifest initializes it.
            continue

        try:
            # Check if the observed resource is present in spec.manifest
            idx_status_new = get_kubernetes_resource_idx(updated, resource_observed)
        except IndexError:
            # Observed but absent from spec.manifest: start from an empty
            # resource and let the dict updater fill in the known observed
            # fields.
            updated.append({})
            idx_status_new = len(updated) - 1

        update_last_applied_manifest_dict_from_spec(
            updated[idx_status_new],
            app.status.last_applied_manifest[idx_status_old],
            resource_observed,
        )

    app.status.last_applied_manifest = updated
class KubernetesApplicationObserver(Observer):
    """Observer specific for Kubernetes Applications. One observer is created for
    each Application managed by the Controller, but not one per Kubernetes
    resource (Deployment, Service...). If several resources are defined by an
    Application, they are all monitored by the same observer.

    The observer gets the actual status of the resources on the cluster using
    the Kubernetes API, and compares it to the status stored in the API.

    The observer is:
     * started at initial Krake resource creation;
     * deleted when a resource needs to be updated, then started again when it
       is done;
     * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster on which the observed
            Application is created.
        resource (krake.data.kubernetes.Application): the application that will be
            observed.
        on_res_update (coroutine): a coroutine called when a resource's actual
            status differs from the status sent by the database. Its signature is:
            ``(resource) -> updated_resource``. ``updated_resource`` is the
            instance of the resource that is up-to-date with the API. The Observer
            internal instance of the resource to observe will be updated. If the
            API cannot be contacted, ``None`` can be returned. In this case the
            internal instance of the Observer will not be updated.
        time_step (int, optional): how frequently the Observer should watch the
            actual status of the resources.

    """

    def __init__(self, cluster, resource, on_res_update, time_step=2):
        super().__init__(resource, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Application monitored by the Observer.

        Resources that do not exist (404) or could not be read are skipped and
        simply left out of ``last_observed_manifest``.

        Returns:
            krake.data.core.Status: the status object created using information
            from the real world Applications resource.

        """
        app = self.resource

        status = deepcopy(app.status)
        status.last_observed_manifest = []

        # For each observed kubernetes resource of the Application,
        # get its current status on the cluster.
        for desired_resource in app.status.last_applied_manifest:
            kube = KubernetesClient(self.cluster.spec.kubeconfig)
            idx_observed = get_kubernetes_resource_idx(
                app.status.mangled_observer_schema, desired_resource
            )
            observed_resource = app.status.mangled_observer_schema[idx_observed]
            async with kube:
                try:
                    group, version, kind, name, namespace = kube.get_immutables(
                        desired_resource
                    )
                    resource_api = await kube.get_resource_api(group, version, kind)
                    resp = await resource_api.read(kind, name, namespace)
                except ApiException as err:
                    if err.status == 404:
                        # Resource does not exist
                        continue
                    # BUGFIX: log the unexpected error and skip the resource.
                    # Previously execution fell through here and used ``resp``,
                    # which was either undefined (NameError on the first
                    # iteration) or stale from the previous loop iteration,
                    # recording the wrong resource's manifest.
                    logger.error(err)
                    continue
            observed_manifest = update_last_observed_manifest_dict(
                observed_resource, resp.to_dict()
            )
            status.last_observed_manifest.append(observed_manifest)

        return status
class KubernetesClusterObserver(Observer):
    """Observer specific for Kubernetes Clusters. One observer is created for
    each Cluster managed by the Controller.

    The observer gets the actual status of the cluster using the
    Kubernetes API, and compares it to the status stored in the API.

    The observer is:

    * started at initial Krake resource creation;
    * deleted when a resource needs to be updated, then started again when it
      is done;
    * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster which will be
            observed.
        on_res_update (coroutine): a coroutine called when a resource's actual
            status differs from the status sent by the database. Its signature
            is: ``(resource) -> updated_resource``. ``updated_resource`` is the
            instance of the resource that is up-to-date with the API. The
            Observer internal instance of the resource to observe will be
            updated. If the API cannot be contacted, ``None`` can be returned.
            In this case the internal instance of the Observer will not be
            updated.
        time_step (int, optional): how frequently the Observer should watch the
            actual status of the resources.
    """

    def __init__(self, cluster, on_res_update, time_step=2):
        super().__init__(cluster, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Cluster monitored by the Observer.

        Returns:
            krake.data.core.Status: the status object created using information
                from the real world Cluster.
        """
        status = deepcopy(self.cluster.status)
        # Build a Kubernetes client from the registered kubeconfig and query
        # the current node status of the cluster.
        loader = KubeConfigLoader(self.cluster.spec.kubeconfig)
        config = Configuration()
        await loader.load_and_set(config)
        kube = ApiClient(config)
        async with kube as api:
            v1 = client.CoreV1Api(api)
            try:
                response = await v1.list_node()
            except ClientConnectorError as err:
                # The cluster cannot be reached at all.
                status.state = ClusterState.OFFLINE
                self.cluster.status.state = ClusterState.OFFLINE
                # Log the error
                logger.debug(err)
                return status
            # Gather, per condition type, the status reported by every node.
            condition_dict = {
                "MemoryPressure": [],
                "DiskPressure": [],
                "PIDPressure": [],
                "Ready": [],
            }
            for item in response.items:
                for condition in item.status.conditions:
                    # Nodes may expose additional condition types (e.g.
                    # "NetworkUnavailable"); ignore them instead of raising a
                    # KeyError on the lookup below.
                    if condition.type in condition_dict:
                        condition_dict[condition.type].append(condition.status)
            # NOTE(review): the ``== ["True"]`` comparisons only ever match a
            # single-node cluster — with several nodes the lists hold one
            # entry per node and the evaluation falls through to NOTREADY.
            # TODO: confirm whether multi-node clusters are in scope here.
            if (
                condition_dict["MemoryPressure"] == ["True"]
                or condition_dict["DiskPressure"] == ["True"]
                or condition_dict["PIDPressure"] == ["True"]
            ):
                status.state = ClusterState.UNHEALTHY
                self.cluster.status.state = ClusterState.UNHEALTHY
                return status
            elif (
                condition_dict["Ready"] == ["True"]
                and status.state is ClusterState.OFFLINE
            ):
                # The cluster just became reachable again.
                status.state = ClusterState.CONNECTING
                self.cluster.status.state = ClusterState.CONNECTING
                return status
            elif condition_dict["Ready"] == ["True"]:
                status.state = ClusterState.ONLINE
                self.cluster.status.state = ClusterState.ONLINE
                return status
            else:
                status.state = ClusterState.NOTREADY
                self.cluster.status.state = ClusterState.NOTREADY
                return status
@listen.on(HookType.ApplicationPostReconcile)
@listen.on(HookType.ApplicationPostMigrate)
@listen.on(HookType.ClusterCreation)
async def register_observer(controller, resource, start=True, **kwargs):
    """Create an observer for the given Application or Cluster, and start it as
    a background task if wanted.

    If an observer already existed for this Application or Cluster, it is
    stopped and deleted.

    Args:
        controller (KubernetesController): the controller for which the
            observer will be added in the list of working observers.
        resource (krake.data.kubernetes.Application): the Application to
            observe or
        resource (krake.data.kubernetes.Cluster): the Cluster to observe.
        start (bool, optional): if False, does not start the observer as
            background task.
    """
    if resource.kind == Application.kind:
        cluster = await controller.kubernetes_api.read_cluster(
            namespace=resource.status.running_on.namespace,
            name=resource.status.running_on.name,
        )
        observer = KubernetesApplicationObserver(
            cluster,
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    elif resource.kind == Cluster.kind:
        observer = KubernetesClusterObserver(
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    else:
        # The previous call passed ``resource`` as an extra positional
        # argument with no matching ``%`` placeholder, which made the logging
        # module emit a formatting error; use lazy %-formatting instead.
        logger.debug("Unknown resource kind %r. No observer was registered.", resource)
        return
    logger.debug(f"Start observer for {resource.kind} %r", resource.metadata.name)
    task = None
    if start:
        task = controller.loop.create_task(observer.run())
    controller.observers[resource.metadata.uid] = (observer, task)
@listen.on(HookType.ApplicationPreReconcile)
@listen.on(HookType.ApplicationPreMigrate)
@listen.on(HookType.ApplicationPreDelete)
@listen.on(HookType.ClusterDeletion)
async def unregister_observer(controller, resource, **kwargs):
    """Stop and delete the observer for the given Application or Cluster. If no
    observer is started, do nothing.

    Args:
        controller (KubernetesController): the controller for which the
            observer will be removed from the list of working observers.
        resource (krake.data.kubernetes.Application): the Application whose
            observer will be stopped or
        resource (krake.data.kubernetes.Cluster): the Cluster whose observer
            will be stopped.
    """
    uid = resource.metadata.uid
    if uid not in controller.observers:
        # No observer registered for this resource: nothing to do.
        return
    logger.debug(f"Stop observer for {resource.kind} %r", resource.metadata.name)
    _observer, task = controller.observers.pop(uid)
    # Cancel the background task and wait for it to actually terminate.
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass
def utc_difference():
    """Get the difference in seconds between the current local time and the
    current UTC time.

    Returns:
        int: the time difference in seconds. Negative when the local timezone
            is behind UTC.
    """
    delta = datetime.now() - datetime.utcnow()
    # ``timedelta.seconds`` is always in ``[0, 86400)`` — the sign lives in
    # ``timedelta.days`` — so for timezones behind UTC (and even at UTC, where
    # the microsecond gap between the two calls makes ``delta`` slightly
    # negative) it returned a wrong, huge value. ``total_seconds`` keeps the
    # sign; rounding absorbs the sub-second call gap.
    return round(delta.total_seconds())
def generate_certificate(config):
    """Create and sign a new certificate using the one defined in the complete
    hook configuration as intermediate certificate.

    Args:
        config (krake.data.config.CompleteHookConfiguration): the configuration
            of the complete hook.

    Returns:
        CertificatePair: the content of the certificate created and its
            corresponding key.
    """
    with open(config.intermediate_src, "rb") as f:
        intermediate_src = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
    with open(config.intermediate_key_src, "rb") as f:
        intermediate_key_src = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())
    client_cert = crypto.X509()
    # Set general information.
    # The X.509 version field is zero-indexed: the value 2 corresponds to a
    # version 3 certificate (the version that supports extensions). The
    # previous value of 3 produced a non-standard "v4" certificate that strict
    # TLS stacks may reject.
    client_cert.set_version(2)
    client_cert.set_serial_number(random.randint(50000000000000, 100000000000000))
    # If not set before, TLS will not accept to use this certificate in UTC
    # cases, as the server time may be earlier.
    time_offset = utc_difference() * -1
    client_cert.gmtime_adj_notBefore(time_offset)
    # Validity period: one year from now.
    client_cert.gmtime_adj_notAfter(1 * 365 * 24 * 60 * 60)
    # Set issuer and subject: the certificate is issued by the intermediate
    # certificate, with the hook user as CN.
    intermediate_subject = intermediate_src.get_subject()
    client_cert.set_issuer(intermediate_subject)
    client_subj = crypto.X509Name(intermediate_subject)
    client_subj.CN = config.hook_user
    client_cert.set_subject(client_subj)
    # Create and set the private key
    client_key = crypto.PKey()
    client_key.generate_key(crypto.TYPE_RSA, 2048)
    client_cert.set_pubkey(client_key)
    client_cert.sign(intermediate_key_src, "sha256")
    cert_dump = crypto.dump_certificate(crypto.FILETYPE_PEM, client_cert).decode()
    key_dump = crypto.dump_privatekey(crypto.FILETYPE_PEM, client_key).decode()
    return CertificatePair(cert=cert_dump, key=key_dump)
def generate_default_observer_schema(app):
    """Generate the default observer schema for each Kubernetes resource
    present in ``spec.manifest`` for which a custom observer schema hasn't been
    specified.

    Args:
        app (krake.data.kubernetes.Application): The application for which to
            generate a default observer schema
    """
    app.status.mangled_observer_schema = deepcopy(app.spec.observer_schema)
    schema = app.status.mangled_observer_schema
    for manifest in app.spec.manifest:
        try:
            get_kubernetes_resource_idx(schema, manifest)
        except IndexError:
            # No custom observer schema was provided for this resource, so
            # fall back to a generated default one.
            schema.append(
                generate_default_observer_schema_dict(manifest, first_level=True)
            )
def generate_default_observer_schema_dict(manifest_dict, first_level=False):
    """Together with :func:``generate_default_observer_schema_list``, this
    function is called recursively to generate part of a default
    ``observer_schema`` from part of a Kubernetes resource, defined
    respectively by ``manifest_dict`` or ``manifest_list``.

    The returned dictionary mirrors ``manifest_dict``, with every scalar value
    replaced by ``None`` (i.e. "observe this field, accept any value"). On a
    ``first_level`` dictionary (i.e. the complete ``observer_schema`` of a
    resource), the identifying fields (``apiVersion``, ``kind`` and the
    resource name) are copied verbatim from the manifest.

    Args:
        manifest_dict (dict): Partial Kubernetes resources
        first_level (bool, optional): If True, indicates that the dictionary
            represents the whole observer schema of a Kubernetes resource

    Returns:
        dict: Generated partial observer_schema
    """
    def _default_for(value):
        # Recurse into containers; scalars are observed with a wildcard.
        if isinstance(value, dict):
            return generate_default_observer_schema_dict(value)
        if isinstance(value, list):
            return generate_default_observer_schema_list(value)
        return None

    schema = {key: _default_for(value) for key, value in manifest_dict.items()}
    if first_level:
        schema["apiVersion"] = manifest_dict["apiVersion"]
        schema["kind"] = manifest_dict["kind"]
        schema["metadata"]["name"] = manifest_dict["metadata"]["name"]
    if (
        "spec" in manifest_dict
        and "type" in manifest_dict["spec"]
        and manifest_dict["spec"]["type"] == "LoadBalancer"
    ):
        # LoadBalancer Services: also observe the ingress assigned by the
        # cloud provider.
        schema["status"] = {"load_balancer": {"ingress": None}}
    return schema
def generate_default_observer_schema_list(manifest_list):
    """Together with :func:``generate_default_observer_schema_dict``, this
    function is called recursively to generate part of a default
    ``observer_schema`` from part of a Kubernetes resource, defined
    respectively by ``manifest_list`` or ``manifest_dict``.

    The returned list mirrors ``manifest_list``, with every scalar element
    replaced by ``None``. A list control dictionary is appended as last
    element, using the current length of the list as both the default minimum
    and maximum accepted lengths.

    Args:
        manifest_list (list): Partial Kubernetes resources

    Returns:
        list: Generated partial observer_schema
    """
    def _default_for(element):
        # Recurse into containers; scalars are observed with a wildcard.
        if isinstance(element, dict):
            return generate_default_observer_schema_dict(element)
        if isinstance(element, list):
            return generate_default_observer_schema_list(element)
        return None

    schema = [_default_for(element) for element in manifest_list]
    length = len(manifest_list)
    # Trailing control dictionary: by default the observed list must keep
    # exactly its current length.
    schema.append(
        {
            "observer_schema_list_min_length": length,
            "observer_schema_list_max_length": length,
        }
    )
    return schema
@listen.on(HookType.ApplicationMangling)
async def complete(app, api_endpoint, ssl_context, config):
    """Execute application complete hook defined by :class:`Complete`.

    The hook mangles the given application and injects the complete hook
    variables into it. The hook is disabled by default and is enabled through
    the --hook-complete argument in the rok cli.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        config (krake.data.config.HooksConfiguration): Complete hook
            configuration.
    """
    if "complete" not in app.spec.hooks:
        return
    # The configured external endpoint, when set, takes precedence over the
    # endpoint of the API.
    api_endpoint = config.complete.external_endpoint or api_endpoint
    # Generate the authentication token only once per Application.
    app.status.complete_token = app.status.complete_token or token_urlsafe()
    # Generate only once the certificate and key for a specific Application
    generated_cert = CertificatePair(
        cert=app.status.complete_cert, key=app.status.complete_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(config.complete)
        app.status.complete_cert, app.status.complete_key = generated_cert
    hook = Complete(
        api_endpoint,
        ssl_context,
        hook_user=config.complete.hook_user,
        cert_dest=config.complete.cert_dest,
        env_token=config.complete.env_token,
        env_url=config.complete.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.complete_token,
        app.status.last_applied_manifest,
        config.complete.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "complete"
    )
@listen.on(HookType.ApplicationMangling)
async def shutdown(app, api_endpoint, ssl_context, config):
    """Execute the application shutdown hook defined by :class:`Shutdown`.

    The hook mangles the given application and injects the shutdown hook
    variables into it. The hook is disabled by default and is enabled through
    the --hook-shutdown argument in the rok cli.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        config (krake.data.config.HooksConfiguration): Shutdown hook
            configuration.
    """
    if "shutdown" not in app.spec.hooks:
        return
    # The configured external endpoint, when set, takes precedence over the
    # endpoint of the API.
    api_endpoint = config.shutdown.external_endpoint or api_endpoint
    # Generate the authentication token only once per Application.
    app.status.shutdown_token = app.status.shutdown_token or token_urlsafe()
    # Generate only once the certificate and key for a specific Application
    generated_cert = CertificatePair(
        cert=app.status.shutdown_cert, key=app.status.shutdown_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(config.shutdown)
        app.status.shutdown_cert, app.status.shutdown_key = generated_cert
    hook = Shutdown(
        api_endpoint,
        ssl_context,
        hook_user=config.shutdown.hook_user,
        cert_dest=config.shutdown.cert_dest,
        env_token=config.shutdown.env_token,
        env_url=config.shutdown.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.shutdown_token,
        app.status.last_applied_manifest,
        config.shutdown.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "shutdown"
    )
@listen.on(HookType.ResourcePreDelete)
async def pre_shutdown(controller, app, **kwargs):
    """Placeholder hook executed before an Application is deleted.

    Currently a no-op: it only checks whether the "shutdown" hook is enabled
    for the Application.

    Args:
        controller (KubernetesController): the controller processing the
            resource deletion.
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
    """
    if "shutdown" not in app.spec.hooks:
        return
class SubResource(NamedTuple):
    """A Kubernetes sub-resource to be injected into an Application manifest.

    Attributes:
        group (str): name of the manifest key the sub-resource belongs to
            (e.g. ``"env"``, ``"volumes"``, ``"volumeMounts"``).
        name (str): name of the sub-resource.
        body (dict): content of the sub-resource.
        path (tuple): tuple of candidate key paths under which ``group`` may
            be found in a resource manifest (e.g. one path for Deployments,
            one for bare Pods).
    """
    group: str
    name: str
    body: dict
    path: tuple
class CertificatePair(NamedTuple):
    """Tuple which contains a certificate and its corresponding key.

    Both fields hold PEM-encoded content as strings (not file paths).

    Attributes:
        cert (str): content of a certificate.
        key (str): content of the key that corresponds to the certificate.
    """
    cert: str
    key: str
class Hook(object):
    """Base class for Application hooks (e.g. "complete" and "shutdown").

    A hook mangles an Application's desired state: it injects additional
    Kubernetes resources (Secrets) and sub-resources (environment variables,
    volumes and volume mounts) into the resources listed in
    ``last_applied_manifest``, and extends the observer schema so the injected
    pieces are observed. Subclasses provide the hook-specific parts: the
    Secret content (:meth:`secret_token`), the hook URL
    (:meth:`create_hook_url`) and the environment variables
    (:meth:`env_vars`).
    """
    # Kubernetes resource kinds a hook is allowed to modify; subclasses
    # override this (e.g. ("Pod", "Deployment", ...)).
    hook_resources = ()
    # File names under which the CA bundle, client certificate and key are
    # stored in the injected Secret (and thus mounted in the container).
    ca_name = "ca-bundle.pem"
    cert_name = "cert.pem"
    key_name = "key.pem"
    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        """
        Args:
            api_endpoint (str): Krake API endpoint.
            ssl_context (ssl.SSLContext): SSL context used to communicate with
                the Krake API; ``None`` when TLS is disabled.
            hook_user (str): user (CN) of the client certificates generated
                for the hook.
            cert_dest (str): directory in the deployed Application where the
                CA, certificate and key are mounted.
            env_token (str): name of the environment variable that stores the
                hook authentication token.
            env_url (str): name of the environment variable that stores the
                hook URL.
        """
        self.api_endpoint = api_endpoint
        self.ssl_context = ssl_context
        self.hook_user = hook_user
        self.cert_dest = cert_dest
        self.env_token = env_token
        self.env_url = env_url
    def mangle_app(
        self,
        name,
        namespace,
        token,
        last_applied_manifest,
        intermediate_src,
        generated_cert,
        mangled_observer_schema,
        hook_type="",
    ):
        """Mangle a given application and inject complete hook resources and
        sub-resources into the :attr:`last_applied_manifest` object by
        :meth:`mangle`. Also mangle the observer_schema as new resources and
        sub-resources should be observed.

        :attr:`last_applied_manifest` is created as a deep copy of the desired
        application resources, as defined by user. It can be updated by custom
        hook resources or modified by custom hook sub-resources. It is used as
        a desired state for the Krake deployment process.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            token (str): Complete hook authentication token
            last_applied_manifest (list): Application resources
            intermediate_src (str): content of the certificate that is used to
                sign new certificates for the complete hook.
            generated_cert (CertificatePair): tuple that contains the content
                of the new signed certificate for the Application, and the
                content of its corresponding key.
            mangled_observer_schema (list): Observed fields
            hook_type (str, optional): Name of the hook the app should be
                mangled for
        """
        # Names of the injected objects, derived from the Application name and
        # the hook type (e.g. "<app>-krake-complete-secret-certs").
        secret_certs_name = "-".join([name, "krake", hook_type, "secret", "certs"])
        secret_token_name = "-".join([name, "krake", hook_type, "secret", "token"])
        volume_name = "-".join([name, "krake", hook_type, "volume"])
        # CA certificates are only available when TLS is enabled.
        ca_certs = (
            self.ssl_context.get_ca_certs(binary_form=True)
            if self.ssl_context
            else None
        )
        # Extract all different namespaces
        # FIXME: too many assumptions here: do we create one ConfigMap for each
        # namespace?
        resource_namespaces = {
            resource["metadata"].get("namespace", "default")
            for resource in last_applied_manifest
        }
        hook_resources = []
        hook_sub_resources = []
        if ca_certs:
            # TLS is enabled: inject one certificate Secret per namespace, and
            # the volume/volume-mount that exposes it to the containers.
            hook_resources.extend(
                [
                    self.secret_certs(
                        secret_certs_name,
                        resource_namespace,
                        intermediate_src=intermediate_src,
                        generated_cert=generated_cert,
                        ca_certs=ca_certs,
                    )
                    for resource_namespace in resource_namespaces
                ]
            )
            hook_sub_resources.extend(
                [*self.volumes(secret_certs_name, volume_name, self.cert_dest)]
            )
        # Always inject the token Secret (one per namespace) and the
        # environment variables that reference it.
        hook_resources.extend(
            [
                self.secret_token(
                    secret_token_name,
                    name,
                    namespace,
                    resource_namespace,
                    self.api_endpoint,
                    token,
                )
                for resource_namespace in resource_namespaces
            ]
        )
        hook_sub_resources.extend(
            [
                *self.env_vars(secret_token_name),
            ]
        )
        self.mangle(
            hook_resources,
            last_applied_manifest,
            mangled_observer_schema,
        )
        self.mangle(
            hook_sub_resources,
            last_applied_manifest,
            mangled_observer_schema,
            is_sub_resource=True,
        )
    def mangle(
        self,
        items,
        last_applied_manifest,
        mangled_observer_schema,
        is_sub_resource=False,
    ):
        """Mangle applications desired state with custom hook resources or
        sub-resources.

        Example:
            .. code:: python

                last_applied_manifest = [
                    {
                        'apiVersion': 'v1',
                        'kind': 'Pod',
                        'metadata': {'name': 'test', 'namespace': 'default'},
                        'spec': {'containers': [{'name': 'test'}]}
                    }
                ]
                mangled_observer_schema = [
                    {
                        'apiVersion': 'v1',
                        'kind': 'Pod',
                        'metadata': {'name': 'test', 'namespace': 'default'},
                        'spec': {
                            'containers': [
                                {'name': None},
                                {
                                    'observer_schema_list_max_length': 1,
                                    'observer_schema_list_min_length': 1,
                                },
                            ]
                        },
                    }
                ]
                hook_resources = [
                    {
                        'apiVersion': 'v1',
                        'kind': 'Secret',
                        'metadata': {'name': 'sct', 'namespace': 'default'}
                    }
                ]
                hook_sub_resources = [
                    SubResource(
                        group='env', name='env',
                        body={'name': 'test', 'value': 'test'},
                        path=(('spec', 'containers'),)
                    )
                ]

                mangle(
                    hook_resources,
                    last_applied_manifest,
                    mangled_observer_schema,
                )
                mangle(
                    hook_sub_resources,
                    last_applied_manifest,
                    mangled_observer_schema,
                    is_sub_resource=True
                )

                assert last_applied_manifest == [
                    {
                        "apiVersion": "v1",
                        "kind": "Pod",
                        "metadata": {"name": "test", 'namespace': 'default'},
                        "spec": {
                            "containers": [
                                {
                                    "name": "test",
                                    "env": [{"name": "test", "value": "test"}]
                                }
                            ]
                        },
                    },
                    {"apiVersion": "v1", "kind": "Secret",
                     "metadata": {"name": "sct"}},
                ]
                assert mangled_observer_schema == [
                    {
                        "apiVersion": "v1",
                        "kind": "Pod",
                        "metadata": {"name": "test", "namespace": None},
                        "spec": {
                            "containers": [
                                {
                                    "name": None,
                                    "env": [
                                        {"name": None, "value": None},
                                        {
                                            "observer_schema_list_max_length": 1,
                                            "observer_schema_list_min_length": 1,
                                        },
                                    ],
                                },
                                {
                                    "observer_schema_list_max_length": 1,
                                    "observer_schema_list_min_length": 1,
                                },
                            ]
                        },
                    },
                    {
                        "apiVersion": "v1",
                        "kind": "Secret",
                        "metadata": {"name": "sct", "namespace": None},
                    },
                ]

        Args:
            items (list[SubResource]): Custom hook resources or sub-resources
            last_applied_manifest (list): Application resources
            mangled_observer_schema (list): Observed resources
            is_sub_resource (bool, optional): if False, the function only
                extend the list of Kubernetes resources defined in
                :attr:`last_applied_manifest` with new hook resources.
                Otherwise, the function injects each new hook sub-resource
                into the :attr:`last_applied_manifest` object sub-resources.
                Defaults to False.
        """
        if not items:
            return
        if not is_sub_resource:
            # Whole resources are simply appended, and each one gets a default
            # observer schema.
            last_applied_manifest.extend(items)
            for sub_resource in items:
                # Generate the default observer schema for each resource
                mangled_observer_schema.append(
                    generate_default_observer_schema_dict(
                        sub_resource,
                        first_level=True,
                    )
                )
            return
        def inject(sub_resource, sub_resource_to_mangle, observed_resource_to_mangle):
            """Inject a hooks defined sub-resource into a Kubernetes
            sub-resource.

            Args:
                sub_resource (SubResource): Hook sub-resource that needs to be
                    injected into :attr:`last_applied_manifest`
                sub_resource_to_mangle (object): Kubernetes sub-resources from
                    :attr:`last_applied_manifest` which need to be processed
                observed_resource_to_mangle (dict): partial
                    mangled_observer_schema corresponding to the Kubernetes
                    sub-resource.

            Raises:
                InvalidManifestError: if the sub-resource which will be mangled
                    is not a list or a dict.
            """
            # Create sub-resource group if not present in the Kubernetes
            # sub-resource
            if sub_resource.group not in sub_resource_to_mangle:
                # FIXME: This assumes the subresource group contains a list
                sub_resource_to_mangle.update({sub_resource.group: []})
            # Create sub-resource group if not present in the observed fields.
            # A fresh group only contains the list control dictionary, with
            # zero minimum/maximum lengths (incremented below on insertion).
            if sub_resource.group not in observed_resource_to_mangle:
                observed_resource_to_mangle.update(
                    {
                        sub_resource.group: [
                            {
                                "observer_schema_list_min_length": 0,
                                "observer_schema_list_max_length": 0,
                            }
                        ]
                    }
                )
            # Inject sub-resource
            # If sub-resource name is already there update it, if not, append it
            if sub_resource.name in [
                g["name"] for g in sub_resource_to_mangle[sub_resource.group]
            ]:
                # FIXME: Assuming we are dealing with a list
                # NOTE(review): this update branch looks broken —
                # ``item["name"]`` is only a truthiness test (it never compares
                # against ``sub_resource.name``), ``hasattr(item, "body")`` is
                # always False for a plain dict, and ``item.group`` /
                # ``item["body"]`` would fail on a dict — so an existing entry
                # is effectively never updated. TODO: confirm and fix.
                for idx, item in enumerate(sub_resource_to_mangle[sub_resource.group]):
                    if item["name"]:
                        if hasattr(item, "body"):
                            sub_resource_to_mangle[item.group][idx] = item["body"]
            else:
                sub_resource_to_mangle[sub_resource.group].append(sub_resource.body)
            # Make sure the value is observed.
            # The last element of an observed list is the list control
            # dictionary, hence the ``[:-1]`` slice and ``insert(-1, ...)``.
            if sub_resource.name not in [
                g["name"] for g in observed_resource_to_mangle[sub_resource.group][:-1]
            ]:
                observed_resource_to_mangle[sub_resource.group].insert(
                    -1, generate_default_observer_schema_dict(sub_resource.body)
                )
                observed_resource_to_mangle[sub_resource.group][-1][
                    "observer_schema_list_min_length"
                ] += 1
                observed_resource_to_mangle[sub_resource.group][-1][
                    "observer_schema_list_max_length"
                ] += 1
        for resource in last_applied_manifest:
            # Complete hook is applied only on defined Kubernetes resources
            if resource["kind"] not in self.hook_resources:
                continue
            for sub_resource in items:
                sub_resources_to_mangle = None
                idx_observed = get_kubernetes_resource_idx(
                    mangled_observer_schema, resource
                )
                # Try each candidate path in turn; ``keys`` keeps the last
                # attempted path after the loop (reused below for the observer
                # schema lookup).
                for keys in sub_resource.path:
                    try:
                        sub_resources_to_mangle = reduce(getitem, keys, resource)
                    except KeyError:
                        continue
                    break
                # Create the path to the observed sub-resource, if it doesn't
                # yet exist
                try:
                    observed_sub_resources = reduce(
                        getitem, keys, mangled_observer_schema[idx_observed]
                    )
                except KeyError:
                    Complete.create_path(
                        mangled_observer_schema[idx_observed], list(keys)
                    )
                    observed_sub_resources = reduce(
                        getitem, keys, mangled_observer_schema[idx_observed]
                    )
                if isinstance(sub_resources_to_mangle, list):
                    for idx, sub_resource_to_mangle in enumerate(
                        sub_resources_to_mangle
                    ):
                        # Ensure that each element of the list is observed.
                        idx_observed = idx
                        if idx >= len(observed_sub_resources[:-1]):
                            idx_observed = len(observed_sub_resources[:-1])
                            # FIXME: Assuming each element of the list contains
                            # a dictionary, therefore initializing new elements
                            # with an empty dict
                            observed_sub_resources.insert(-1, {})
                        observed_sub_resource = observed_sub_resources[idx_observed]
                        # FIXME: This is assuming a list always contains dict
                        inject(
                            sub_resource, sub_resource_to_mangle, observed_sub_resource
                        )
                elif isinstance(sub_resources_to_mangle, dict):
                    inject(
                        sub_resource, sub_resources_to_mangle, observed_sub_resources
                    )
                else:
                    # No candidate path matched (``sub_resources_to_mangle`` is
                    # still None) or the target has an unexpected type.
                    message = (
                        f"The sub-resource to mangle {sub_resources_to_mangle!r} has an"
                        "invalid type, should be in '[dict, list]'"
                    )
                    raise InvalidManifestError(message)
    @staticmethod
    def attribute_map(obj):
        """Convert a Kubernetes object to dict based on its attribute mapping

        Example:
            .. code:: python

                from kubernetes_asyncio.client import V1VolumeMount

                d = attribute_map(
                    V1VolumeMount(name="name", mount_path="path")
                )
                assert d == {'mountPath': 'path', 'name': 'name'}

        Args:
            obj (object): Kubernetes object

        Returns:
            dict: Converted Kubernetes object
        """
        # ``attribute_map`` translates the Python attribute names to the
        # camelCase keys used in Kubernetes manifests; unset (None) attributes
        # are omitted.
        return {
            obj.attribute_map[attr]: getattr(obj, attr)
            for attr, _ in obj.to_dict().items()
            if getattr(obj, attr) is not None
        }
    @staticmethod
    def create_path(mangled_observer_schema, keys):
        """Create the path to the observed field in the observer schema.

        When a sub-resource is mangled, it should be observed. This function
        creates the path to the subresource to observe.

        Args:
            mangled_observer_schema (dict): Partial observer schema of a
                resource
            keys (list): list of keys forming the path to the sub-resource to
                observe

        FIXME: This assumes we are only adding keys to dict. We don't consider
        lists
        """
        # Unpack the first key first, as it contains the base directory
        key = keys.pop(0)
        # If the key is the last of the list, we reached the end of the path.
        if len(keys) == 0:
            mangled_observer_schema[key] = None
            return
        if key not in mangled_observer_schema:
            mangled_observer_schema[key] = {}
        # Recurse into the next level with the remaining keys.
        Hook.create_path(mangled_observer_schema[key], keys)
    def secret_certs(
        self,
        secret_name,
        namespace,
        ca_certs=None,
        intermediate_src=None,
        generated_cert=None,
    ):
        """Create a complete hooks secret resource.

        Complete hook secret stores Krake CAs and client certificates to
        communicate with the Krake API.

        Args:
            secret_name (str): Secret name
            namespace (str): Kubernetes namespace where the Secret will be
                created.
            ca_certs (list): Krake CA list. Despite the ``None`` default, a
                real list is required — it is iterated unconditionally (callers
                only invoke this method when CAs are available).
            intermediate_src (str): content of the certificate that is used to
                sign new certificates for the complete hook.
            generated_cert (CertificatePair): tuple that contains the content
                of the new signed certificate for the Application, and the
                content of its corresponding key.

        Returns:
            dict: complete hook secret resource
        """
        # Convert each DER-encoded CA certificate to PEM and concatenate them
        # into a single bundle.
        ca_certs_pem = ""
        for ca_cert in ca_certs:
            x509 = crypto.load_certificate(crypto.FILETYPE_ASN1, ca_cert)
            ca_certs_pem += crypto.dump_certificate(crypto.FILETYPE_PEM, x509).decode()
        # Add the intermediate certificate into the chain
        with open(intermediate_src, "r") as f:
            intermediate_src_content = f.read()
        ca_certs_pem += intermediate_src_content
        # Secret payload: base64-encoded CA bundle, certificate and key.
        data = {
            self.ca_name: self._encode_to_64(ca_certs_pem),
            self.cert_name: self._encode_to_64(generated_cert.cert),
            self.key_name: self._encode_to_64(generated_cert.key),
        }
        return self.secret(secret_name, data, namespace)
    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create a hooks secret resource.

        The hook secret stores Krake authentication token and hook URL for
        given application. Must be implemented by subclasses.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Complete hook authentication token

        Returns:
            dict: complete hook secret resource
        """
        pass
    def volumes(self, secret_name, volume_name, mount_path):
        """Create complete hooks volume and volume mount sub-resources

        Complete hook volume gives access to hook's secret, which stores
        Krake CAs and client certificates to communicate with the Krake API.
        Complete hook volume mount puts the volume into the application

        Args:
            secret_name (str): Secret name
            volume_name (str): Volume name
            mount_path (list): Volume mount path

        Returns:
            list: List of complete hook volume and volume mount sub-resources
        """
        volume = V1Volume(name=volume_name, secret={"secretName": secret_name})
        volume_mount = V1VolumeMount(name=volume_name, mount_path=mount_path)
        # Two injection points are returned: the volume itself (Pod spec) and
        # the mount (each container); both provide alternative paths for
        # templated kinds (Deployment, ...) and bare Pods.
        return [
            SubResource(
                group="volumes",
                name=volume.name,
                body=self.attribute_map(volume),
                path=(("spec", "template", "spec"), ("spec",)),
            ),
            SubResource(
                group="volumeMounts",
                name=volume_mount.name,
                body=self.attribute_map(volume_mount),
                path=(
                    ("spec", "template", "spec", "containers"),
                    ("spec", "containers"),  # kind: Pod
                ),
            ),
        ]
    @staticmethod
    def _encode_to_64(string):
        """Compute the base 64 encoding of a string.

        Args:
            string (str): the string to encode.

        Returns:
            str: the result of the encoding.
        """
        return b64encode(string.encode()).decode()
    def secret(self, secret_name, secret_data, namespace, _type="Opaque"):
        """Create a secret resource.

        Args:
            secret_name (str): Secret name
            secret_data (dict): Secret data
            namespace (str): Kubernetes namespace where the Secret will be
                created.
            _type (str, optional): Secret type. Defaults to Opaque.

        Returns:
            dict: secret resource
        """
        return self.attribute_map(
            V1Secret(
                api_version="v1",
                kind="Secret",
                data=secret_data,
                metadata={"name": secret_name, "namespace": namespace},
                type=_type,
            )
        )
    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' hook URL.

        Function needs to be specified for each hook (abstract here).

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application shutdown url
        """
        pass
    def env_vars(self, secret_name):
        """Create the hooks' environment variables sub-resources.

        Function needs to be specified for each hook (abstract here).
        Creates hook environment variables to store Krake authentication token
        and a hook URL for the given applications.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of shutdown hook environment variables sub-resources
        """
        pass
class Complete(Hook):
    """Mangle given application and inject complete hooks variables into it.

    Hook injects a Kubernetes secret, which stores Krake authentication token
    and the Krake complete hook URL for the given application. The variables
    from Kubernetes secret are imported as environment variables
    into the application resource definition. Only resources defined in
    :args:`hook_resources` can be modified.
    Names of environment variables are defined in the application controller
    configuration file.

    If TLS is enabled on the Krake API, the complete hook injects a Kubernetes
    secret, and it's corresponding volume and volume mount definitions for the
    Krake CA, the client certificate with the right CN, and its key. The
    directory where the secret is mounted is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        hook_user (str): user (CN) of the client certificates generated for
            the hook.
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which
            stores Krake authentication token.
        env_url (str, optional): Name of the environment variable,
            which stores Krake complete hook URL.
    """

    hook_resources = ("Pod", "Deployment", "ReplicationController")

    # NOTE: no ``__init__`` override — the previous one only forwarded all
    # arguments to ``Hook.__init__`` and then redundantly re-assigned
    # ``self.env_url``, which the base class had already set.

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create complete hooks secret resource.

        Complete hook secret stores Krake authentication token
        and complete hook URL for given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Complete hook authentication token

        Returns:
            dict: complete hook secret resource
        """
        complete_url = self.create_hook_url(name, namespace, api_endpoint)
        # Secret keys are the lower-cased environment variable names; values
        # must be base64-encoded per the Kubernetes Secret contract.
        data = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(complete_url),
        }
        return self.secret(secret_name, data, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' complete URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application complete url
        """
        api_url = URL(api_endpoint)
        return str(
            api_url.with_path(
                f"/kubernetes/namespaces/{namespace}/applications/{name}/complete"
            )
        )

    def env_vars(self, secret_name):
        """Create complete hooks environment variables sub-resources

        Create complete hook environment variables store Krake authentication
        token and complete hook URL for given application.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of complete hook environment variables sub-resources
        """
        sub_resources = []
        # Both variables reference the injected Secret rather than embedding
        # the values directly in the manifest.
        env_token = V1EnvVar(
            name=self.env_token,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(
                            name=secret_name, key=self.env_token.lower()
                        )
                    )
                )
            ),
        )
        env_url = V1EnvVar(
            name=self.env_url,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(name=secret_name, key=self.env_url.lower())
                    )
                )
            ),
        )
        for env in (env_token, env_url):
            sub_resources.append(
                SubResource(
                    group="env",
                    name=env.name,
                    body=self.attribute_map(env),
                    path=(
                        ("spec", "template", "spec", "containers"),
                        ("spec", "containers"),  # kind: Pod
                    ),
                )
            )
        return sub_resources
class Shutdown(Hook):
    """Mangle given application and inject shutdown hooks variables into it.

    Hook injects a Kubernetes secret, which stores the Krake authentication
    token and the Krake shutdown hook URL for the given application. The
    variables from the Kubernetes secret are imported as environment variables
    into the application resource definition. Only resources defined in
    :args:`hook_resources` can be modified.
    Names of environment variables are defined in the application controller
    configuration file.
    If TLS is enabled on the Krake API, the shutdown hook injects a Kubernetes
    secret, and it's corresponding volume and volume mount definitions for the
    Krake CA, the client certificate with the right CN, and its key. The
    directory where the secret is mounted is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which
            stores Krake authentication token.
        env_url (str, optional): Name of the environment variable,
            which stores the Krake shutdown hook URL.
    """

    # Kubernetes resource kinds this hook is allowed to mangle.
    hook_resources = ("Pod", "Deployment", "ReplicationController")

    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        super().__init__(
            api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
        )
        # NOTE(review): ``env_url`` is also passed to the base class above —
        # confirm whether this re-assignment is redundant.
        self.env_url = env_url

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create shutdown hooks secret resource.

        The shutdown hook secret stores the Krake authentication token and the
        shutdown hook URL for the given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Shutdown hook authentication token

        Returns:
            dict: shutdown hook secret resource
        """
        shutdown_url = self.create_hook_url(name, namespace, api_endpoint)
        data = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(shutdown_url),
        }
        return self.secret(secret_name, data, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' shutdown URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application shutdown url
        """
        api_url = URL(api_endpoint)
        return str(
            api_url.with_path(
                f"/kubernetes/namespaces/{namespace}/applications/{name}/shutdown"
            )
        )

    def env_vars(self, secret_name):
        """Create shutdown hooks environment variables sub-resources.

        Creates shutdown hook environment variables to store the Krake
        authentication token and the shutdown hook URL for given applications.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of shutdown hook environment variables sub-resources
        """
        sub_resources = []
        env_token = V1EnvVar(
            name=self.env_token,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(
                            name=secret_name, key=self.env_token.lower()
                        )
                    )
                )
            ),
        )
        env_url = V1EnvVar(
            name=self.env_url,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(name=secret_name, key=self.env_url.lower())
                    )
                )
            ),
        )
        # Iterate the pair directly, consistent with the complete hook's
        # env_vars (no intermediate accumulator list needed).
        for env in (env_token, env_url):
            sub_resources.append(
                SubResource(
                    group="env",
                    name=env.name,
                    body=self.attribute_map(env),
                    path=(
                        ("spec", "template", "spec", "containers"),
                        ("spec", "containers"),  # kind: Pod
                    ),
                )
            )
        return sub_resources
|
krake/krake/controller/kubernetes/hooks.py
|
codereval_python_data_65
|
Generate the default observer schema for each Kubernetes resource present in
``spec.manifest`` for which a custom observer schema hasn't been specified.
Args:
app (krake.data.kubernetes.Application): The application for which to generate a
default observer schema
def generate_default_observer_schema(app):
    """Generate the default observer schema for each Kubernetes resource present
    in ``spec.manifest`` for which a custom observer schema hasn't been
    specified.

    Args:
        app (krake.data.kubernetes.Application): The application for which to
            generate a default observer schema
    """
    mangled = deepcopy(app.spec.observer_schema)
    app.status.mangled_observer_schema = mangled
    for resource_manifest in app.spec.manifest:
        try:
            get_kubernetes_resource_idx(mangled, resource_manifest)
        except IndexError:
            # No custom observer schema covers this resource: fall back to the
            # generated default one.
            mangled.append(
                generate_default_observer_schema_dict(
                    resource_manifest, first_level=True
                )
            )
"""This module defines the Hook Dispatcher and listeners for registering and
executing hooks. Hook Dispatcher emits hooks based on :class:`Hook` attributes which
define when the hook will be executed.
"""
import asyncio
import logging
import random
from base64 import b64encode
from collections import defaultdict
from contextlib import suppress
from copy import deepcopy
from datetime import datetime
from functools import reduce
from operator import getitem
from enum import Enum, auto
from inspect import iscoroutinefunction
from OpenSSL import crypto
from typing import NamedTuple
import yarl
from aiohttp import ClientConnectorError
from krake.controller import Observer
from krake.controller.kubernetes.client import KubernetesClient, InvalidManifestError
from krake.utils import camel_to_snake_case, get_kubernetes_resource_idx
from kubernetes_asyncio.client.rest import ApiException
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio import client
from krake.data.kubernetes import ClusterState, Application, Cluster
from yarl import URL
from secrets import token_urlsafe
from kubernetes_asyncio.client import (
Configuration,
V1Secret,
V1EnvVar,
V1VolumeMount,
V1Volume,
V1SecretKeySelector,
V1EnvVarSource,
)
from kubernetes_asyncio.config.kube_config import KubeConfigLoader
logger = logging.getLogger(__name__)
class HookType(Enum):
    """Events a :class:`HookDispatcher` can emit.

    Handlers registered for a hook type run at the corresponding point of a
    resource's life cycle (see the ``@listen.on(...)`` decorations below).
    """

    # Per-Kubernetes-resource life cycle events
    ResourcePreCreate = auto()
    ResourcePostCreate = auto()
    ResourcePreUpdate = auto()
    ResourcePostUpdate = auto()
    ResourcePreDelete = auto()
    ResourcePostDelete = auto()
    # Application-level life cycle events
    ApplicationMangling = auto()
    ApplicationPreMigrate = auto()
    ApplicationPostMigrate = auto()
    ApplicationPreReconcile = auto()
    ApplicationPostReconcile = auto()
    ApplicationPreDelete = auto()
    ApplicationPostDelete = auto()
    # Cluster-level life cycle events
    ClusterCreation = auto()
    ClusterDeletion = auto()
class HookDispatcher(object):
    """Simple wrapper around a registry of handlers associated to :class:`Hook`
    attributes. Each :class:`Hook` attribute defines when the handler will be
    executed.

    Listeners for certain hooks can be registered via :meth:`on`. Registered
    listeners are executed via :meth:`hook`.

    Example:
        .. code:: python

            listen = HookDispatcher()

            @listen.on(HookType.PreApply)
            def to_perform_before_app_creation(app, cluster, resource, controller):
                # Do Stuff

            @listen.on(HookType.PostApply)
            def another_to_perform_after_app_creation(app, cluster, resource, resp):
                # Do Stuff

            @listen.on(HookType.PostDelete)
            def to_perform_after_app_deletion(app, cluster, resource, resp):
                # Do Stuff

    """

    def __init__(self):
        # Maps a HookType attribute to the list of registered handlers, in
        # registration order.
        self.registry = defaultdict(list)

    def on(self, hook):
        """Decorator function to add a new handler to the registry.

        Args:
            hook (HookType): Hook attribute for which to register the handler.

        Returns:
            callable: Decorator for registering listeners for the specified
            hook.

        """

        def decorator(handler):
            self.registry[hook].append(handler)
            return handler

        return decorator

    async def hook(self, hook, **kwargs):
        """Execute the list of handlers associated to the provided :class:`Hook`
        attribute.

        Args:
            hook (HookType): The hook attribute for which to execute handlers.

        """
        # ``registry`` is a defaultdict, so indexing can never raise KeyError
        # (the previous ``try/except KeyError`` was unreachable) and would
        # insert an empty list as a side effect; use ``get`` instead.
        for handler in self.registry.get(hook, []):
            # Handlers may be plain functions or coroutine functions.
            if iscoroutinefunction(handler):
                await handler(**kwargs)
            else:
                handler(**kwargs)
# Module-level dispatcher on which all hook handlers below are registered.
listen = HookDispatcher()
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
async def register_service(app, cluster, resource, response):
    """Register endpoint of Kubernetes Service object on creation and update.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        cluster (krake.data.kubernetes.Cluster): The cluster on which the
            application is running
        resource (dict): Kubernetes object description as specified in the
            specification of the application.
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    """
    # Only Service resources expose endpoints; ignore everything else.
    if resource["kind"] != "Service":
        return
    service_name = resource["metadata"]["name"]
    if response.spec and response.spec.type == "LoadBalancer":
        # For a "LoadBalancer" type of Service, an external IP is given in the cluster
        # by a load balancer controller to the service. In this case, the "port"
        # specified in the spec is reachable from the outside.
        if (
            not response.status.load_balancer
            or not response.status.load_balancer.ingress
        ):
            # When a "LoadBalancer" type of service is created, the IP is given by an
            # additional controller (e.g. a controller that requests a floating IP to an
            # OpenStack infrastructure). This process can take some time, but the
            # Service itself already exist before the IP is assigned. In the case of an
            # error with the controller, the IP is also not given. This "<pending>" IP
            # just expresses that the Service exists, but the IP is not ready yet.
            external_ip = "<pending>"
        else:
            # Only the first ingress entry / first port is registered.
            external_ip = response.status.load_balancer.ingress[0].ip
        if not response.spec.ports:
            external_port = "<pending>"
        else:
            external_port = response.spec.ports[0].port
        app.status.services[service_name] = f"{external_ip}:{external_port}"
        return
    # Non-LoadBalancer Services are registered through their node port.
    node_port = None
    # Ensure that ports are specified
    if response.spec and response.spec.ports:
        node_port = response.spec.ports[0].node_port
    # If the service does not have a node port, remove a potential reference
    # and return.
    if node_port is None:
        try:
            del app.status.services[service_name]
        except KeyError:
            pass
        return
    # Determine URL of Kubernetes cluster API
    loader = KubeConfigLoader(cluster.spec.kubeconfig)
    config = Configuration()
    await loader.load_and_set(config)
    cluster_url = yarl.URL(config.host)
    # The node port is reachable on the cluster API host.
    app.status.services[service_name] = f"{cluster_url.host}:{node_port}"
@listen.on(HookType.ResourcePostDelete)
async def unregister_service(app, resource, **kwargs):
    """Unregister endpoint of Kubernetes Service object on deletion.

    Args:
        app (krake.data.kubernetes.Application): Application the service
            belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    if resource["kind"] == "Service":
        name = resource["metadata"]["name"]
        # Removing an endpoint that was never registered is not an error.
        app.status.services.pop(name, None)
@listen.on(HookType.ResourcePostDelete)
async def remove_resource_from_last_observed_manifest(app, resource, **kwargs):
    """Remove a given resource from the last_observed_manifest after its
    deletion.

    Args:
        app (krake.data.kubernetes.Application): Application the service
            belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    manifest = app.status.last_observed_manifest
    try:
        idx = get_kubernetes_resource_idx(manifest, resource)
    except IndexError:
        # The resource was never observed: nothing to clean up.
        return
    del manifest[idx]
def update_last_applied_manifest_dict_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_list_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response.

    Walks all observed fields and initializes their value in
    ``last_applied_manifest`` if they are not yet present.

    Args:
        last_applied_manifest (dict): partial ``last_applied_manifest`` being
            updated
        observer_schema (dict): partial ``observer_schema``
        response (dict): partial response from the Kubernetes API.

    Raises:
        KeyError: If an observed field is not present in the Kubernetes
            response

    """
    for key, schema_value in observer_schema.items():
        # Manifest keys are camelCase, while the deserialized Kubernetes
        # response uses snake_case attribute names.
        response_key = camel_to_snake_case(key)
        if response_key not in response:
            # An observed key should always be present in the k8s response.
            raise KeyError(
                f"Observed key {response_key} is not present in response {response}"
            )
        if isinstance(schema_value, dict):
            # Observed nested dict: create it if missing, then recurse.
            branch = last_applied_manifest.setdefault(key, {})
            update_last_applied_manifest_dict_from_resp(
                branch, schema_value, response[response_key]
            )
        elif isinstance(schema_value, list):
            # Observed nested list: create it if missing, then recurse.
            branch = last_applied_manifest.setdefault(key, [])
            update_last_applied_manifest_list_from_resp(
                branch, schema_value, response[response_key]
            )
        elif key not in last_applied_manifest:
            # Scalar observed field not yet initialized: take the value from
            # the response. Already-initialized values are left untouched.
            last_applied_manifest[key] = response[response_key]
def update_last_applied_manifest_list_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_dict_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response.

    Walks all observed elements and initializes their value in
    ``last_applied_manifest`` if they are not yet present.

    Args:
        last_applied_manifest (list): partial ``last_applied_manifest`` being
            updated
        observer_schema (list): partial ``observer_schema``
        response (list): partial response from the Kubernetes API.

    """
    # Loop over the observed elements, skipping the last element which is the
    # special list-length control dictionary.
    for idx, val in enumerate(observer_schema[:-1]):
        if idx >= len(response):
            # Element is observed but not present in k8s response, so following
            # elements will also not exist.
            #
            # This doesn't raise an Exception as observing the element of a
            # list doesn't ensure its presence. The list length is controlled
            # by the special control dictionary.
            return
        if isinstance(val, dict):
            if idx >= len(last_applied_manifest):
                # The dict is observed, but not present in last_applied_manifest
                last_applied_manifest.append({})
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest[idx], observer_schema[idx], response[idx]
            )
        elif isinstance(val, list):
            # BUGFIX: dispatch on the schema element ``val`` (as the dict
            # branch above and the *_from_spec counterparts do), not on
            # ``response[idx]``: a scalar schema entry (None) paired with a
            # list in the response must be copied verbatim below instead of
            # recursing with a non-list schema (which crashed on ``None[:-1]``).
            if idx >= len(last_applied_manifest):
                # The list is observed, but not present in last_applied_manifest
                last_applied_manifest.append([])
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest[idx], observer_schema[idx], response[idx]
            )
        elif idx >= len(last_applied_manifest):
            # Element is not yet present in last_applied_manifest. Adding it.
            last_applied_manifest.append(response[idx])
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_applied_manifest_from_resp(app, response, **kwargs):
    """Hook run after the creation or update of an application in order to
    update the ``status.last_applied_manifest`` using the k8s response.

    All fields already initialized (either from the mangling of
    ``spec.manifest``, or by a previous call to this function) are left
    untouched; only observed fields which are not present yet are initialized.

    Args:
        app (krake.data.kubernetes.Application): Application the service
            belongs to
        response (kubernetes_asyncio.client.V1Status): Response of the
            Kubernetes API

    """
    # ``response`` is already a plain dict when the Kubernetes client could
    # not deserialize the answer into a model object.
    resp = response if isinstance(response, dict) else response.to_dict()
    idx_applied = get_kubernetes_resource_idx(app.status.last_applied_manifest, resp)
    idx_observed = get_kubernetes_resource_idx(
        app.status.mangled_observer_schema, resp
    )
    update_last_applied_manifest_dict_from_resp(
        app.status.last_applied_manifest[idx_applied],
        app.status.mangled_observer_schema[idx_observed],
        resp,
    )
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_observed_manifest_from_resp(app, response, **kwargs):
    """Handler to run after the creation or update of a Kubernetes resource to
    update the last_observed_manifest from the response of the Kubernetes API.

    The target last_observed_manifest holds the value of all observed fields
    plus the special control dictionaries for the list length.

    Args:
        app (krake.data.kubernetes.Application): Application the service
            belongs to
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    """
    # ``response`` is already a plain dict when the Kubernetes client could
    # not deserialize the answer into a model object.
    resp = response if isinstance(response, dict) else response.to_dict()
    # All created resources should be observed, so an IndexError raised by an
    # unknown resource is deliberately propagated to the caller.
    idx_observed = get_kubernetes_resource_idx(
        app.status.mangled_observer_schema, resp
    )
    last_observed = app.status.last_observed_manifest
    try:
        idx_last = get_kubernetes_resource_idx(last_observed, resp)
    except IndexError:
        # First observation of this resource: reserve a slot for it.
        idx_last = len(last_observed)
        last_observed.append({})
    # Overwrite the last_observed_manifest entry for this resource.
    last_observed[idx_last] = update_last_observed_manifest_dict(
        app.status.mangled_observer_schema[idx_observed], resp
    )
def update_last_observed_manifest_dict(observed_resource, response):
    """Together with :func:``update_last_observed_manifest_list``, recursively
    crafts the ``last_observed_manifest`` from the Kubernetes
    :attr:``response``, collecting the value of every observed field.

    Args:
        observed_resource (dict): The schema to observe for the partial given
            resource
        response (dict): The partial Kubernetes response for this resource.

    Returns:
        dict: The dictionary of observed keys and their value

    Raises:
        KeyError: If an observed key is not present in the Kubernetes response

    """
    observed = {}
    for key, schema_value in observed_resource.items():
        # Manifest keys are camelCase, while the deserialized Kubernetes
        # response uses snake_case attribute names.
        response_key = camel_to_snake_case(key)
        if response_key not in response:
            raise KeyError(
                f"Observed key {response_key} is not present in response {response}"
            )
        field = response[response_key]
        if isinstance(schema_value, dict):
            observed[key] = update_last_observed_manifest_dict(schema_value, field)
        elif isinstance(schema_value, list):
            observed[key] = update_last_observed_manifest_list(schema_value, field)
        else:
            observed[key] = field
    return observed
def update_last_observed_manifest_list(observed_resource, response):
    """Together with :func:``update_last_observed_manifest_dict``, recursively
    crafts the ``last_observed_manifest`` from the Kubernetes
    :attr:``response``, collecting the value of every observed element.

    Args:
        observed_resource (list): the schema to observe for the partial given
            resource
        response (list): the partial Kubernetes response for this resource.

    Returns:
        list: The list of observed elements, plus the special list length
        control dictionary

    """
    if not response:
        return [{"observer_schema_list_current_length": 0}]
    res = []
    # Loop over the observed elements, skipping the last element which is the
    # special control dictionary.
    for idx, val in enumerate(observed_resource[:-1]):
        if idx >= len(response):
            # Element is not present in the Kubernetes response, nothing more
            # to do.
            break
        # Use isinstance instead of ``type(...) ==``: equivalent for plain
        # dict/list and also correct for their subclasses.
        if isinstance(response[idx], dict):
            res.append(update_last_observed_manifest_dict(val, response[idx]))
        elif isinstance(response[idx], list):
            res.append(update_last_observed_manifest_list(val, response[idx]))
        else:
            res.append(response[idx])
    # Append the special control dictionary holding the actual list length.
    res.append({"observer_schema_list_current_length": len(response)})
    return res
def update_last_applied_manifest_dict_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Together with :func:``update_last_applied_manifest_list_from_spec``, this
    function is called recursively to update a partial
    ``last_applied_manifest``.

    Args:
        resource_status_new (dict): partial ``last_applied_manifest`` being
            updated
        resource_status_old (dict): partial of the current
            ``last_applied_manifest``
        resource_observed (dict): partial observer_schema for the manifest file
            being updated

    """
    for key, schema in resource_observed.items():
        if key not in resource_status_old:
            # The current last_applied_manifest knows nothing about this
            # observed field: nothing to carry over.
            continue
        present = key in resource_status_new
        if isinstance(schema, dict):
            if not present:
                # Field observed but absent from spec.manifest: initialize it.
                resource_status_new[key] = {}
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[key], resource_status_old[key], schema
            )
        elif isinstance(schema, list):
            if not present:
                resource_status_new[key] = []
            update_last_applied_manifest_list_from_spec(
                resource_status_new[key], resource_status_old[key], schema
            )
        elif not present:
            # Scalar observed field only known from the previous
            # last_applied_manifest: keep its current value.
            resource_status_new[key] = resource_status_old[key]
def update_last_applied_manifest_list_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Together with :func:``update_last_applied_manifest_dict_from_spec``, this
    function is called recursively to update a partial
    ``last_applied_manifest``.

    Args:
        resource_status_new (list): partial ``last_applied_manifest`` being
            updated
        resource_status_old (list): partial of the current
            ``last_applied_manifest``
        resource_observed (list): partial observer_schema for the manifest file
            being updated

    """
    # Loop over the observed elements, skipping the last element which is the
    # special list-length control dictionary.
    for idx, schema in enumerate(resource_observed[:-1]):
        if idx >= len(resource_status_old):
            # Neither this element nor any later one exists in the current
            # last_applied_manifest.
            break
        known = idx < len(resource_status_new)
        if isinstance(schema, dict):
            if not known:
                # Element observed but absent from spec.manifest: initialize.
                resource_status_new.append({})
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[idx], resource_status_old[idx], schema
            )
        elif isinstance(schema, list):
            if not known:
                resource_status_new.append([])
            update_last_applied_manifest_list_from_spec(
                resource_status_new[idx], resource_status_old[idx], schema
            )
        elif not known:
            # Scalar element only known from the previous last_applied_manifest.
            resource_status_new.append(resource_status_old[idx])
def update_last_applied_manifest_from_spec(app):
    """Update the status.last_applied_manifest of an application from
    spec.manifests.

    Called on application creation and updates. The last_applied_manifest is
    initialized as a copy of spec.manifest and augmented with every known
    observed field not yet initialized (i.e. observed fields or resources
    present in the current last_applied_manifest but not in spec.manifest).

    Args:
        app (krake.data.kubernetes.Application): Application to update

    """
    current = app.status.last_applied_manifest
    # Start from a fresh copy of the spec and graft in observed fields that
    # are only known from the current last_applied_manifest.
    updated = deepcopy(app.spec.manifest)
    for resource_observed in app.status.mangled_observer_schema:
        try:
            idx_old = get_kubernetes_resource_idx(current, resource_observed)
        except IndexError:
            # The resource is unknown to the current last_applied_manifest:
            # whether spec.manifest defines it or not, there is nothing to
            # carry over.
            continue
        try:
            # Check whether the observed resource is present in spec.manifest.
            idx_new = get_kubernetes_resource_idx(updated, resource_observed)
        except IndexError:
            # Observed but absent from spec.manifest: start from an empty
            # resource and let the merge below fill in the observed fields.
            updated.append({})
            idx_new = len(updated) - 1
        update_last_applied_manifest_dict_from_spec(
            updated[idx_new], current[idx_old], resource_observed
        )
    app.status.last_applied_manifest = updated
class KubernetesApplicationObserver(Observer):
    """Observer specific for Kubernetes Applications. One observer is created
    for each Application managed by the Controller, but not one per Kubernetes
    resource (Deployment, Service...). If several resources are defined by an
    Application, they are all monitored by the same observer.

    The observer gets the actual status of the resources on the cluster using
    the Kubernetes API, and compare it to the status stored in the API.

    The observer is:

    * started at initial Krake resource creation;
    * deleted when a resource needs to be updated, then started again when it
      is done;
    * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster on which the
            observed Application is created.
        resource (krake.data.kubernetes.Application): the application that will
            be observed.
        on_res_update (coroutine): a coroutine called when a resource's actual
            status differs from the status sent by the database. Its signature
            is: ``(resource) -> updated_resource``. ``updated_resource`` is the
            instance of the resource that is up-to-date with the API. The
            Observer internal instance of the resource to observe will be
            updated. If the API cannot be contacted, ``None`` can be returned.
            In this case the internal instance of the Observer will not be
            updated.
        time_step (int, optional): how frequently the Observer should watch the
            actual status of the resources.

    """

    def __init__(self, cluster, resource, on_res_update, time_step=2):
        super().__init__(resource, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Application monitored by the
        Observer.

        Returns:
            krake.data.core.Status: the status object created using information
            from the real world Applications resource.

        """
        app = self.resource
        status = deepcopy(app.status)
        # Rebuilt from scratch below, one entry per reachable resource.
        status.last_observed_manifest = []
        # For each observed kubernetes resource of the Application,
        # get its current status on the cluster.
        for desired_resource in app.status.last_applied_manifest:
            kube = KubernetesClient(self.cluster.spec.kubeconfig)
            idx_observed = get_kubernetes_resource_idx(
                app.status.mangled_observer_schema, desired_resource
            )
            observed_resource = app.status.mangled_observer_schema[idx_observed]
            async with kube:
                try:
                    group, version, kind, name, namespace = kube.get_immutables(
                        desired_resource
                    )
                    resource_api = await kube.get_resource_api(group, version, kind)
                    resp = await resource_api.read(kind, name, namespace)
                except ApiException as err:
                    if err.status == 404:
                        # Resource does not exist
                        continue
                    # BUGFIX: log unexpected errors and skip the resource.
                    # Falling through (as before) referenced the unbound
                    # ``resp`` and raised UnboundLocalError.
                    logger.error(err)
                    continue
                observed_manifest = update_last_observed_manifest_dict(
                    observed_resource, resp.to_dict()
                )
                status.last_observed_manifest.append(observed_manifest)
        return status
class KubernetesClusterObserver(Observer):
    """Observer specific for Kubernetes Clusters. One observer is created for
    each Cluster managed by the Controller.

    The observer gets the actual status of the cluster using the Kubernetes
    API, and compare it to the status stored in the API.

    The observer is:

    * started at initial Krake resource creation;
    * deleted when a resource needs to be updated, then started again when it
      is done;
    * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster which will be
            observed.
        on_res_update (coroutine): a coroutine called when a resource's actual
            status differs from the status sent by the database. Its signature
            is: ``(resource) -> updated_resource``. ``updated_resource`` is the
            instance of the resource that is up-to-date with the API. The
            Observer internal instance of the resource to observe will be
            updated. If the API cannot be contacted, ``None`` can be returned.
            In this case the internal instance of the Observer will not be
            updated.
        time_step (int, optional): how frequently the Observer should watch the
            actual status of the resources.

    """

    def __init__(self, cluster, on_res_update, time_step=2):
        super().__init__(cluster, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Cluster monitored by the Observer.

        Returns:
            krake.data.core.Status: the status object created using information
            from the real world Cluster.

        """
        status = deepcopy(self.cluster.status)
        # For each observed kubernetes cluster registered in Krake,
        # get its current node status.
        loader = KubeConfigLoader(self.cluster.spec.kubeconfig)
        config = Configuration()
        await loader.load_and_set(config)
        kube = ApiClient(config)
        async with kube as api:
            v1 = client.CoreV1Api(api)
            try:
                response = await v1.list_node()
            except ClientConnectorError as err:
                status.state = ClusterState.OFFLINE
                self.cluster.status.state = ClusterState.OFFLINE
                # Log the error
                logger.debug(err)
                return status
        condition_dict = {
            "MemoryPressure": [],
            "DiskPressure": [],
            "PIDPressure": [],
            "Ready": [],
        }
        for item in response.items:
            for condition in item.status.conditions:
                # BUGFIX: nodes may report additional condition types (e.g.
                # "NetworkUnavailable"); only collect the ones evaluated below
                # instead of crashing with a KeyError on unknown types.
                if condition.type in condition_dict:
                    condition_dict[condition.type].append(condition.status)
        # NOTE(review): the ``== ["True"]`` comparisons only match a
        # single-node cluster; a multi-node cluster yields longer lists and
        # always falls through to NOTREADY — confirm whether ``all(...)``
        # semantics were intended.
        if (
            condition_dict["MemoryPressure"] == ["True"]
            or condition_dict["DiskPressure"] == ["True"]
            or condition_dict["PIDPressure"] == ["True"]
        ):
            status.state = ClusterState.UNHEALTHY
            self.cluster.status.state = ClusterState.UNHEALTHY
            return status
        elif (
            condition_dict["Ready"] == ["True"]
            and status.state is ClusterState.OFFLINE
        ):
            status.state = ClusterState.CONNECTING
            self.cluster.status.state = ClusterState.CONNECTING
            return status
        elif condition_dict["Ready"] == ["True"]:
            status.state = ClusterState.ONLINE
            self.cluster.status.state = ClusterState.ONLINE
            return status
        else:
            status.state = ClusterState.NOTREADY
            self.cluster.status.state = ClusterState.NOTREADY
            return status
@listen.on(HookType.ApplicationPostReconcile)
@listen.on(HookType.ApplicationPostMigrate)
@listen.on(HookType.ClusterCreation)
async def register_observer(controller, resource, start=True, **kwargs):
    """Create an observer for the given Application or Cluster, and start it as a
    background task if wanted.

    If an observer already existed for this Application or Cluster, it is stopped
    and deleted.

    Args:
        controller (KubernetesController): the controller for which the observer
            will be added in the list of working observers.
        resource (krake.data.kubernetes.Application): the Application to observe or
        resource (krake.data.kubernetes.Cluster): the Cluster to observe.
        start (bool, optional): if False, does not start the observer as background
            task.
    """
    if resource.kind == Application.kind:
        # An Application is observed on the cluster it is currently running on.
        cluster = await controller.kubernetes_api.read_cluster(
            namespace=resource.status.running_on.namespace,
            name=resource.status.running_on.name,
        )
        observer = KubernetesApplicationObserver(
            cluster,
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    elif resource.kind == Cluster.kind:
        observer = KubernetesClusterObserver(
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    else:
        # The message needs a "%r" placeholder: passing `resource` as a lazy
        # formatting argument without one makes the logging module raise a
        # formatting error instead of logging the message.
        logger.debug("Unknown resource kind %r. No observer was registered.", resource)
        return

    logger.debug("Start observer for %s %r", resource.kind, resource.metadata.name)
    task = None
    if start:
        task = controller.loop.create_task(observer.run())
    controller.observers[resource.metadata.uid] = (observer, task)
@listen.on(HookType.ApplicationPreReconcile)
@listen.on(HookType.ApplicationPreMigrate)
@listen.on(HookType.ApplicationPreDelete)
@listen.on(HookType.ClusterDeletion)
async def unregister_observer(controller, resource, **kwargs):
    """Stop and delete the observer for the given Application or Cluster. If no
    observer is started, do nothing.

    Args:
        controller (KubernetesController): the controller for which the observer
            will be removed from the list of working observers.
        resource (krake.data.kubernetes.Application): the Application whose observer
            will be stopped or
        resource (krake.data.kubernetes.Cluster): the Cluster whose observer will be
            stopped.
    """
    uid = resource.metadata.uid
    if uid not in controller.observers:
        return

    logger.debug(f"Stop observer for {resource.kind} %r", resource.metadata.name)
    _observer, task = controller.observers.pop(uid)
    # register_observer() stores ``None`` as task when called with start=False;
    # guard against it instead of crashing on ``None.cancel()``.
    if task is not None:
        task.cancel()
        with suppress(asyncio.CancelledError):
            await task
def utc_difference():
    """Get the difference in seconds between the current local time and the
    current UTC time.

    Returns:
        int: the time difference in seconds, negative when the local timezone is
        behind UTC.
    """
    delta = datetime.now() - datetime.utcnow()
    # ``timedelta.seconds`` is always in [0, 86400) and ignores the ``days``
    # field, so it is wrong for negative offsets — and even on a UTC host it
    # yields ~86399, because the two "now" calls are microseconds apart and the
    # delta is a tiny negative value. ``total_seconds`` keeps the sign; rounding
    # removes the call-time jitter.
    return round(delta.total_seconds())
def generate_certificate(config):
    """Create and sign a new certificate using the one defined in the complete hook
    configuration as intermediate certificate.

    Args:
        config (krake.data.config.CompleteHookConfiguration): the configuration of
            the complete hook.

    Returns:
        CertificatePair: the content of the certificate created and its
        corresponding key.
    """
    with open(config.intermediate_src, "rb") as f:
        intermediate_src = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
    with open(config.intermediate_key_src, "rb") as f:
        intermediate_key_src = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())

    client_cert = crypto.X509()

    # Set general information. The X.509 version field is zero-based: the value 2
    # corresponds to a v3 certificate. Setting 3 would advertise the nonexistent
    # version "v4", which strict TLS stacks reject.
    client_cert.set_version(2)
    client_cert.set_serial_number(random.randint(50000000000000, 100000000000000))

    # If not set before, TLS will not accept to use this certificate in UTC cases,
    # as the server time may be earlier.
    time_offset = utc_difference() * -1
    client_cert.gmtime_adj_notBefore(time_offset)
    client_cert.gmtime_adj_notAfter(1 * 365 * 24 * 60 * 60)  # valid for one year

    # Set issuer and subject
    intermediate_subject = intermediate_src.get_subject()
    client_cert.set_issuer(intermediate_subject)
    client_subj = crypto.X509Name(intermediate_subject)
    client_subj.CN = config.hook_user
    client_cert.set_subject(client_subj)

    # Create and set the private key, then sign with the intermediate's key
    client_key = crypto.PKey()
    client_key.generate_key(crypto.TYPE_RSA, 2048)
    client_cert.set_pubkey(client_key)
    client_cert.sign(intermediate_key_src, "sha256")

    cert_dump = crypto.dump_certificate(crypto.FILETYPE_PEM, client_cert).decode()
    key_dump = crypto.dump_privatekey(crypto.FILETYPE_PEM, client_key).decode()
    return CertificatePair(cert=cert_dump, key=key_dump)
def generate_default_observer_schema(app):
    """Generate the default observer schema for each Kubernetes resource present in
    ``spec.manifest`` for which a custom observer schema hasn't been specified.

    Args:
        app (krake.data.kubernetes.Application): The application for which to
            generate a default observer schema
    """
    app.status.mangled_observer_schema = deepcopy(app.spec.observer_schema)

    for resource_manifest in app.spec.manifest:
        try:
            # An existing entry means the user already provided a custom schema
            # for this resource.
            get_kubernetes_resource_idx(
                app.status.mangled_observer_schema, resource_manifest
            )
        except IndexError:
            default_schema = generate_default_observer_schema_dict(
                resource_manifest, first_level=True
            )
            app.status.mangled_observer_schema.append(default_schema)
def generate_default_observer_schema_dict(manifest_dict, first_level=False):
    """Together with :func:``generate_default_observer_schema_list``, this function
    is called recursively to generate part of a default ``observer_schema`` from
    part of a Kubernetes resource, defined respectively by ``manifest_dict`` or
    ``manifest_list``.

    This function creates a new dictionary from ``manifest_dict`` and replaces all
    non-list and non-dict values by ``None``.

    In case of ``first_level`` dictionary (i.e. complete ``observer_schema`` for a
    resource), the values of the identifying fields are copied from the manifest
    file.

    Args:
        manifest_dict (dict): Partial Kubernetes resources
        first_level (bool, optional): If True, indicates that the dictionary
            represents the whole observer schema of a Kubernetes resource

    Returns:
        dict: Generated partial observer_schema
    """
    schema = {}
    for field, content in manifest_dict.items():
        if isinstance(content, dict):
            schema[field] = generate_default_observer_schema_dict(content)
        elif isinstance(content, list):
            schema[field] = generate_default_observer_schema_list(content)
        else:
            schema[field] = None

    if first_level:
        # The identifying fields keep their literal values, so the observer can
        # match the schema against the right resource.
        schema["apiVersion"] = manifest_dict["apiVersion"]
        schema["kind"] = manifest_dict["kind"]
        schema["metadata"]["name"] = manifest_dict["metadata"]["name"]

        # LoadBalancer resources additionally get their ingress observed.
        if (
            "spec" in manifest_dict
            and "type" in manifest_dict["spec"]
            and manifest_dict["spec"]["type"] == "LoadBalancer"
        ):
            schema["status"] = {"load_balancer": {"ingress": None}}

    return schema


def generate_default_observer_schema_list(manifest_list):
    """Together with :func:``generate_default_observer_schema_dict``, this function
    is called recursively to generate part of a default ``observer_schema`` from
    part of a Kubernetes resource, defined respectively by ``manifest_list`` or
    ``manifest_dict``.

    This function creates a new list from ``manifest_list`` and replaces all
    non-list and non-dict elements by ``None``. Additionally, it generates the
    default list control dictionary, using the current length of the list as
    default minimum and maximum values.

    Args:
        manifest_list (list): Partial Kubernetes resources

    Returns:
        list: Generated partial observer_schema
    """
    schema = [
        generate_default_observer_schema_dict(entry)
        if isinstance(entry, dict)
        else generate_default_observer_schema_list(entry)
        if isinstance(entry, list)
        else None
        for entry in manifest_list
    ]

    # Control dictionary: by default the list is expected to keep its length.
    length = len(manifest_list)
    schema.append(
        {
            "observer_schema_list_min_length": length,
            "observer_schema_list_max_length": length,
        }
    )
    return schema
@listen.on(HookType.ApplicationMangling)
async def complete(app, api_endpoint, ssl_context, config):
    """Execute the application complete hook defined by :class:`Complete`.

    The hook mangles the given application and injects the complete hook
    variables. The hook is disabled by default and is enabled through the
    --hook-complete argument of the rok CLI.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        config (krake.data.config.HooksConfiguration): Complete hook
            configuration.
    """
    if "complete" not in app.spec.hooks:
        return

    # The external endpoint, when configured, takes precedence over the API one.
    endpoint = config.complete.external_endpoint or api_endpoint

    # Reuse the token of a previous run, otherwise create a fresh one.
    app.status.complete_token = app.status.complete_token or token_urlsafe()

    # The certificate and key are generated only once per Application.
    generated_cert = CertificatePair(
        cert=app.status.complete_cert, key=app.status.complete_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(config.complete)
        app.status.complete_cert = generated_cert.cert
        app.status.complete_key = generated_cert.key

    hook = Complete(
        endpoint,
        ssl_context,
        hook_user=config.complete.hook_user,
        cert_dest=config.complete.cert_dest,
        env_token=config.complete.env_token,
        env_url=config.complete.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.complete_token,
        app.status.last_applied_manifest,
        config.complete.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "complete",
    )
@listen.on(HookType.ApplicationMangling)
async def shutdown(app, api_endpoint, ssl_context, config):
    """Execute the application shutdown hook defined by :class:`Shutdown`.

    The hook mangles the given application and injects the shutdown hook
    variables. The hook is disabled by default and is enabled through the
    --hook-shutdown argument of the rok CLI.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        config (krake.data.config.HooksConfiguration): Shutdown hook
            configuration.
    """
    if "shutdown" not in app.spec.hooks:
        return

    # The external endpoint, when configured, takes precedence over the API one.
    endpoint = config.shutdown.external_endpoint or api_endpoint

    # Reuse the token of a previous run, otherwise create a fresh one.
    app.status.shutdown_token = app.status.shutdown_token or token_urlsafe()

    # The certificate and key are generated only once per Application.
    generated_cert = CertificatePair(
        cert=app.status.shutdown_cert, key=app.status.shutdown_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(config.shutdown)
        app.status.shutdown_cert = generated_cert.cert
        app.status.shutdown_key = generated_cert.key

    hook = Shutdown(
        endpoint,
        ssl_context,
        hook_user=config.shutdown.hook_user,
        cert_dest=config.shutdown.cert_dest,
        env_token=config.shutdown.env_token,
        env_url=config.shutdown.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.shutdown_token,
        app.status.last_applied_manifest,
        config.shutdown.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "shutdown",
    )
@listen.on(HookType.ResourcePreDelete)
async def pre_shutdown(controller, app, **kwargs):
    """Check whether the shutdown hook applies to an Application before deletion.

    This hook is currently a placeholder: it only bails out early when the
    Application did not request the "shutdown" hook and performs no further
    action. (The original body ended in two consecutive ``return`` statements;
    the redundant one has been removed.)

    Args:
        controller (KubernetesController): the controller processing the deletion.
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
    """
    if "shutdown" not in app.spec.hooks:
        return
class SubResource(NamedTuple):
    """A single sub-resource that a hook injects into a Kubernetes resource."""

    # Name of the list in the parent resource that receives ``body``
    # (e.g. "env", "volumes", "volumeMounts").
    group: str
    # Value of the "name" key inside ``body``; used to detect an already
    # injected entry with the same name.
    name: str
    # The sub-resource content itself, as a serialized Kubernetes object.
    body: dict
    # Candidate key paths (tuples of keys) under which ``group`` may be found in
    # the manifest; the first path that exists in the resource is used.
    path: tuple
class CertificatePair(NamedTuple):
    """Tuple which contains a certificate and its corresponding key.

    Attributes:
        cert (str): content of a certificate.
        key (str): content of the key that corresponds to the certificate.
    """

    # Both fields hold PEM-encoded text; a pair of ``None`` values means no
    # certificate has been generated yet (the hooks compare against
    # ``(None, None)`` before generating one).
    cert: str
    key: str
class Hook(object):
    """Base class for the application hooks (see :class:`Complete` and
    :class:`Shutdown`).

    A hook mangles an Application's Kubernetes resources: it adds Secrets holding
    a Krake token and the hook URL (and, when TLS is enabled, the certificates),
    and injects the matching environment variables and volumes into the resources
    listed in :attr:`hook_resources`.

    Args:
        api_endpoint (str): the given API endpoint.
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint.
        hook_user (str): name of the user set as CN of the generated certificates.
        cert_dest (str): path of the directory where the CA, client certificate
            and key to the Krake API will be stored.
        env_token (str): name of the environment variable which stores the Krake
            authentication token.
        env_url (str): name of the environment variable which stores the hook URL.
    """

    # Kubernetes resource kinds the hook is allowed to modify; overridden by
    # the concrete hooks.
    hook_resources = ()

    # File names under which the TLS material is stored in the mounted Secret.
    ca_name = "ca-bundle.pem"
    cert_name = "cert.pem"
    key_name = "key.pem"

    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        self.api_endpoint = api_endpoint
        self.ssl_context = ssl_context
        self.hook_user = hook_user
        self.cert_dest = cert_dest
        self.env_token = env_token
        self.env_url = env_url

    def mangle_app(
        self,
        name,
        namespace,
        token,
        last_applied_manifest,
        intermediate_src,
        generated_cert,
        mangled_observer_schema,
        hook_type="",
    ):
        """Mangle a given application and inject complete hook resources and
        sub-resources into the :attr:`last_applied_manifest` object by
        :meth:`mangle`. Also mangle the observer_schema as new resources and
        sub-resources should be observed.

        :attr:`last_applied_manifest` is created as a deep copy of the desired
        application resources, as defined by user. It can be updated by custom
        hook resources or modified by custom hook sub-resources. It is used as a
        desired state for the Krake deployment process.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            token (str): Complete hook authentication token
            last_applied_manifest (list): Application resources
            intermediate_src (str): content of the certificate that is used to
                sign new certificates for the complete hook.
            generated_cert (CertificatePair): tuple that contains the content of
                the new signed certificate for the Application, and the content
                of its corresponding key.
            mangled_observer_schema (list): Observed fields
            hook_type (str, optional): Name of the hook the app should be
                mangled for
        """
        secret_certs_name = "-".join([name, "krake", hook_type, "secret", "certs"])
        secret_token_name = "-".join([name, "krake", hook_type, "secret", "token"])
        volume_name = "-".join([name, "krake", hook_type, "volume"])

        ca_certs = (
            self.ssl_context.get_ca_certs(binary_form=True)
            if self.ssl_context
            else None
        )

        # Extract all different namespaces
        # FIXME: too many assumptions here: do we create one ConfigMap for each
        # namespace?
        resource_namespaces = {
            resource["metadata"].get("namespace", "default")
            for resource in last_applied_manifest
        }

        hook_resources = []
        hook_sub_resources = []

        if ca_certs:
            # TLS is enabled: ship the CA bundle, certificate and key in one
            # Secret per namespace, and mount them into the containers.
            hook_resources.extend(
                [
                    self.secret_certs(
                        secret_certs_name,
                        resource_namespace,
                        intermediate_src=intermediate_src,
                        generated_cert=generated_cert,
                        ca_certs=ca_certs,
                    )
                    for resource_namespace in resource_namespaces
                ]
            )
            hook_sub_resources.extend(
                [*self.volumes(secret_certs_name, volume_name, self.cert_dest)]
            )

        hook_resources.extend(
            [
                self.secret_token(
                    secret_token_name,
                    name,
                    namespace,
                    resource_namespace,
                    self.api_endpoint,
                    token,
                )
                for resource_namespace in resource_namespaces
            ]
        )
        hook_sub_resources.extend(
            [
                *self.env_vars(secret_token_name),
            ]
        )

        self.mangle(
            hook_resources,
            last_applied_manifest,
            mangled_observer_schema,
        )
        self.mangle(
            hook_sub_resources,
            last_applied_manifest,
            mangled_observer_schema,
            is_sub_resource=True,
        )

    def mangle(
        self,
        items,
        last_applied_manifest,
        mangled_observer_schema,
        is_sub_resource=False,
    ):
        """Mangle applications desired state with custom hook resources or
        sub-resources.

        Example:
            .. code:: python

                last_applied_manifest = [
                    {
                        'apiVersion': 'v1',
                        'kind': 'Pod',
                        'metadata': {'name': 'test', 'namespace': 'default'},
                        'spec': {'containers': [{'name': 'test'}]}
                    }
                ]
                mangled_observer_schema = [
                    {
                        'apiVersion': 'v1',
                        'kind': 'Pod',
                        'metadata': {'name': 'test', 'namespace': 'default'},
                        'spec': {
                            'containers': [
                                {'name': None},
                                {
                                    'observer_schema_list_max_length': 1,
                                    'observer_schema_list_min_length': 1,
                                },
                            ]
                        },
                    }
                ]
                hook_resources = [
                    {
                        'apiVersion': 'v1',
                        'kind': 'Secret',
                        'metadata': {'name': 'sct', 'namespace': 'default'}
                    }
                ]
                hook_sub_resources = [
                    SubResource(
                        group='env', name='env',
                        body={'name': 'test', 'value': 'test'},
                        path=(('spec', 'containers'),)
                    )
                ]

                mangle(
                    hook_resources,
                    last_applied_manifest,
                    mangled_observer_schema,
                )
                mangle(
                    hook_sub_resources,
                    last_applied_manifest,
                    mangled_observer_schema,
                    is_sub_resource=True
                )

                assert last_applied_manifest == [
                    {
                        "apiVersion": "v1",
                        "kind": "Pod",
                        "metadata": {"name": "test", 'namespace': 'default'},
                        "spec": {
                            "containers": [
                                {
                                    "name": "test",
                                    "env": [{"name": "test", "value": "test"}]
                                }
                            ]
                        },
                    },
                    {
                        "apiVersion": "v1",
                        "kind": "Secret",
                        "metadata": {"name": "sct"}
                    },
                ]
                assert mangled_observer_schema == [
                    {
                        "apiVersion": "v1",
                        "kind": "Pod",
                        "metadata": {"name": "test", "namespace": None},
                        "spec": {
                            "containers": [
                                {
                                    "name": None,
                                    "env": [
                                        {"name": None, "value": None},
                                        {
                                            "observer_schema_list_max_length": 1,
                                            "observer_schema_list_min_length": 1,
                                        },
                                    ],
                                },
                                {
                                    "observer_schema_list_max_length": 1,
                                    "observer_schema_list_min_length": 1,
                                },
                            ]
                        },
                    },
                    {
                        "apiVersion": "v1",
                        "kind": "Secret",
                        "metadata": {"name": "sct", "namespace": None},
                    },
                ]

        Args:
            items (list[SubResource]): Custom hook resources or sub-resources
            last_applied_manifest (list): Application resources
            mangled_observer_schema (list): Observed resources
            is_sub_resource (bool, optional): if False, the function only extend
                the list of Kubernetes resources defined in
                :attr:`last_applied_manifest` with new hook resources. Otherwise,
                the function injects each new hook sub-resource into the
                :attr:`last_applied_manifest` object sub-resources. Defaults to
                False.
        """
        if not items:
            return

        if not is_sub_resource:
            last_applied_manifest.extend(items)
            for sub_resource in items:
                # Generate the default observer schema for each new hook resource
                mangled_observer_schema.append(
                    generate_default_observer_schema_dict(
                        sub_resource,
                        first_level=True,
                    )
                )
            return

        def inject(sub_resource, sub_resource_to_mangle, observed_resource_to_mangle):
            """Inject a hooks defined sub-resource into a Kubernetes
            sub-resource.

            Args:
                sub_resource (SubResource): Hook sub-resource that needs to be
                    injected into :attr:`last_applied_manifest`
                sub_resource_to_mangle (object): Kubernetes sub-resources from
                    :attr:`last_applied_manifest` which need to be processed
                observed_resource_to_mangle (dict): partial
                    mangled_observer_schema corresponding to the Kubernetes
                    sub-resource.
            """
            # Create sub-resource group if not present in the Kubernetes
            # sub-resource
            if sub_resource.group not in sub_resource_to_mangle:
                # FIXME: This assumes the subresource group contains a list
                sub_resource_to_mangle.update({sub_resource.group: []})

            # Create sub-resource group if not present in the observed fields
            if sub_resource.group not in observed_resource_to_mangle:
                observed_resource_to_mangle.update(
                    {
                        sub_resource.group: [
                            {
                                "observer_schema_list_min_length": 0,
                                "observer_schema_list_max_length": 0,
                            }
                        ]
                    }
                )

            # Inject the sub-resource: overwrite the entry carrying the same
            # name if one exists, append it otherwise. (The previous code
            # compared ``item["name"]`` against nothing and treated the dict
            # entries as SubResource tuples, so existing entries were never
            # updated.)
            if sub_resource.name in [
                g["name"] for g in sub_resource_to_mangle[sub_resource.group]
            ]:
                # FIXME: Assuming we are dealing with a list
                for idx, item in enumerate(
                    sub_resource_to_mangle[sub_resource.group]
                ):
                    if item["name"] == sub_resource.name:
                        sub_resource_to_mangle[sub_resource.group][idx] = (
                            sub_resource.body
                        )
            else:
                sub_resource_to_mangle[sub_resource.group].append(sub_resource.body)

            # Make sure the value is observed
            if sub_resource.name not in [
                g["name"]
                for g in observed_resource_to_mangle[sub_resource.group][:-1]
            ]:
                observed_resource_to_mangle[sub_resource.group].insert(
                    -1, generate_default_observer_schema_dict(sub_resource.body)
                )
                observed_resource_to_mangle[sub_resource.group][-1][
                    "observer_schema_list_min_length"
                ] += 1
                observed_resource_to_mangle[sub_resource.group][-1][
                    "observer_schema_list_max_length"
                ] += 1

        for resource in last_applied_manifest:
            # The hook is applied only on the Kubernetes resource kinds the
            # concrete hook declares in ``hook_resources``.
            if resource["kind"] not in self.hook_resources:
                continue

            for sub_resource in items:
                sub_resources_to_mangle = None
                idx_observed = get_kubernetes_resource_idx(
                    mangled_observer_schema, resource
                )

                # Use the first candidate path that exists in the resource.
                for keys in sub_resource.path:
                    try:
                        sub_resources_to_mangle = reduce(getitem, keys, resource)
                    except KeyError:
                        continue
                    break

                # Create the path to the observed sub-resource, if it doesn't
                # yet exist
                try:
                    observed_sub_resources = reduce(
                        getitem, keys, mangled_observer_schema[idx_observed]
                    )
                except KeyError:
                    # Dispatch through ``self`` instead of naming the
                    # ``Complete`` subclass: the base class must not depend on
                    # one specific subclass, and this lets subclasses override
                    # ``create_path``.
                    self.create_path(
                        mangled_observer_schema[idx_observed], list(keys)
                    )
                    observed_sub_resources = reduce(
                        getitem, keys, mangled_observer_schema[idx_observed]
                    )

                if isinstance(sub_resources_to_mangle, list):
                    for idx, sub_resource_to_mangle in enumerate(
                        sub_resources_to_mangle
                    ):
                        # Ensure that each element of the list is observed.
                        idx_observed = idx
                        if idx >= len(observed_sub_resources[:-1]):
                            idx_observed = len(observed_sub_resources[:-1])
                            # FIXME: Assuming each element of the list contains
                            # a dictionary, therefore initializing new elements
                            # with an empty dict
                            observed_sub_resources.insert(-1, {})
                        observed_sub_resource = observed_sub_resources[idx_observed]

                        # FIXME: This is assuming a list always contains dict
                        inject(
                            sub_resource, sub_resource_to_mangle, observed_sub_resource
                        )
                elif isinstance(sub_resources_to_mangle, dict):
                    inject(
                        sub_resource, sub_resources_to_mangle, observed_sub_resources
                    )
                else:
                    # Note: a space was missing between the two joined string
                    # parts of this message ("...has aninvalid type...").
                    message = (
                        f"The sub-resource to mangle {sub_resources_to_mangle!r} has"
                        " an invalid type, should be in '[dict, list]'"
                    )
                    raise InvalidManifestError(message)

    @staticmethod
    def attribute_map(obj):
        """Convert a Kubernetes object to dict based on its attribute mapping

        Example:
            .. code:: python

                from kubernetes_asyncio.client import V1VolumeMount

                d = attribute_map(
                    V1VolumeMount(name="name", mount_path="path")
                )
                assert d == {'mountPath': 'path', 'name': 'name'}

        Args:
            obj (object): Kubernetes object

        Returns:
            dict: Converted Kubernetes object
        """
        return {
            obj.attribute_map[attr]: getattr(obj, attr)
            for attr, _ in obj.to_dict().items()
            if getattr(obj, attr) is not None
        }

    @staticmethod
    def create_path(mangled_observer_schema, keys):
        """Create the path to the observed field in the observer schema.

        When a sub-resource is mangled, it should be observed. This function
        creates the path to the subresource to observe.

        Args:
            mangled_observer_schema (dict): Partial observer schema of a resource
            keys (list): list of keys forming the path to the sub-resource to
                observe

        FIXME: This assumes we are only adding keys to dict. We don't consider
        lists
        """
        # Unpack the first key first, as it contains the base directory
        key = keys.pop(0)

        # If the key is the last of the list, we reached the end of the path.
        if len(keys) == 0:
            mangled_observer_schema[key] = None
            return

        if key not in mangled_observer_schema:
            mangled_observer_schema[key] = {}
        Hook.create_path(mangled_observer_schema[key], keys)

    def secret_certs(
        self,
        secret_name,
        namespace,
        ca_certs=None,
        intermediate_src=None,
        generated_cert=None,
    ):
        """Create a complete hooks secret resource.

        Complete hook secret stores Krake CAs and client certificates to
        communicate with the Krake API.

        Args:
            secret_name (str): Secret name
            namespace (str): Kubernetes namespace where the Secret will be
                created.
            ca_certs (list): Krake CA list
            intermediate_src (str): content of the certificate that is used to
                sign new certificates for the complete hook.
            generated_cert (CertificatePair): tuple that contains the content of
                the new signed certificate for the Application, and the content
                of its corresponding key.

        Returns:
            dict: complete hook secret resource
        """
        ca_certs_pem = ""
        for ca_cert in ca_certs:
            # get_ca_certs(binary_form=True) returns DER; re-encode as PEM.
            x509 = crypto.load_certificate(crypto.FILETYPE_ASN1, ca_cert)
            ca_certs_pem += crypto.dump_certificate(crypto.FILETYPE_PEM, x509).decode()

        # Add the intermediate certificate into the chain
        with open(intermediate_src, "r") as f:
            intermediate_src_content = f.read()
        ca_certs_pem += intermediate_src_content

        data = {
            self.ca_name: self._encode_to_64(ca_certs_pem),
            self.cert_name: self._encode_to_64(generated_cert.cert),
            self.key_name: self._encode_to_64(generated_cert.key),
        }
        return self.secret(secret_name, data, namespace)

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create a hooks secret resource.

        The hook secret stores Krake authentication token and hook URL for given
        application. Must be implemented by the concrete hooks.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Complete hook authentication token

        Returns:
            dict: complete hook secret resource
        """
        pass

    def volumes(self, secret_name, volume_name, mount_path):
        """Create complete hooks volume and volume mount sub-resources

        Complete hook volume gives access to hook's secret, which stores Krake
        CAs and client certificates to communicate with the Krake API. Complete
        hook volume mount puts the volume into the application

        Args:
            secret_name (str): Secret name
            volume_name (str): Volume name
            mount_path (list): Volume mount path

        Returns:
            list: List of complete hook volume and volume mount sub-resources
        """
        volume = V1Volume(name=volume_name, secret={"secretName": secret_name})
        volume_mount = V1VolumeMount(name=volume_name, mount_path=mount_path)
        return [
            SubResource(
                group="volumes",
                name=volume.name,
                body=self.attribute_map(volume),
                path=(("spec", "template", "spec"), ("spec",)),
            ),
            SubResource(
                group="volumeMounts",
                name=volume_mount.name,
                body=self.attribute_map(volume_mount),
                path=(
                    ("spec", "template", "spec", "containers"),
                    ("spec", "containers"),  # kind: Pod
                ),
            ),
        ]

    @staticmethod
    def _encode_to_64(string):
        """Compute the base 64 encoding of a string.

        Args:
            string (str): the string to encode.

        Returns:
            str: the result of the encoding.
        """
        return b64encode(string.encode()).decode()

    def secret(self, secret_name, secret_data, namespace, _type="Opaque"):
        """Create a secret resource.

        Args:
            secret_name (str): Secret name
            secret_data (dict): Secret data
            namespace (str): Kubernetes namespace where the Secret will be
                created.
            _type (str, optional): Secret type. Defaults to Opaque.

        Returns:
            dict: secret resource
        """
        return self.attribute_map(
            V1Secret(
                api_version="v1",
                kind="Secret",
                data=secret_data,
                metadata={"name": secret_name, "namespace": namespace},
                type=_type,
            )
        )

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' hook URL.

        Must be implemented by the concrete hooks.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application shutdown url
        """
        pass

    def env_vars(self, secret_name):
        """Create the hooks' environment variables sub-resources.

        Must be implemented by the concrete hooks. Creates hook environment
        variables to store Krake authentication token and a hook URL for the
        given applications.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of shutdown hook environment variables sub-resources
        """
        pass
class Complete(Hook):
    """Mangle given application and inject complete hooks variables into it.

    Hook injects a Kubernetes secret, which stores Krake authentication token
    and the Krake complete hook URL for the given application. The variables
    from Kubernetes secret are imported as environment variables into the
    application resource definition. Only resources defined in
    :args:`hook_resources` can be modified.

    Names of environment variables are defined in the application controller
    configuration file.

    If TLS is enabled on the Krake API, the complete hook injects a Kubernetes
    secret, and it's corresponding volume and volume mount definitions for the
    Krake CA, the client certificate with the right CN, and its key. The
    directory where the secret is mounted is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        hook_user (str): name of the user set as CN of the certificates
            generated for the hook.
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which
            stores Krake authentication token.
        env_url (str, optional): Name of the environment variable,
            which stores Krake complete hook URL.
    """

    hook_resources = ("Pod", "Deployment", "ReplicationController")

    # No ``__init__`` override: ``Hook.__init__`` already stores every
    # constructor argument, including ``env_url``.

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create complete hooks secret resource.

        Complete hook secret stores Krake authentication token and complete hook
        URL for given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Complete hook authentication token

        Returns:
            dict: complete hook secret resource
        """
        complete_url = self.create_hook_url(name, namespace, api_endpoint)
        data = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(complete_url),
        }
        return self.secret(secret_name, data, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' complete URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application complete url
        """
        api_url = URL(api_endpoint)
        return str(
            api_url.with_path(
                f"/kubernetes/namespaces/{namespace}/applications/{name}/complete"
            )
        )

    def env_vars(self, secret_name):
        """Create complete hooks environment variables sub-resources

        Create complete hook environment variables store Krake authentication
        token and complete hook URL for given application.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of complete hook environment variables sub-resources
        """
        env_token = V1EnvVar(
            name=self.env_token,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(
                            name=secret_name, key=self.env_token.lower()
                        )
                    )
                )
            ),
        )
        env_url = V1EnvVar(
            name=self.env_url,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(name=secret_name, key=self.env_url.lower())
                    )
                )
            ),
        )

        # Inject into the containers of a Deployment-like template, or of a
        # plain Pod.
        sub_resources = []
        for env in (env_token, env_url):
            sub_resources.append(
                SubResource(
                    group="env",
                    name=env.name,
                    body=self.attribute_map(env),
                    path=(
                        ("spec", "template", "spec", "containers"),
                        ("spec", "containers"),  # kind: Pod
                    ),
                )
            )
        return sub_resources
class Shutdown(Hook):
    """Mangle given application and inject shutdown hooks variables into it.

    Hook injects a Kubernetes secret, which stores Krake authentication token
    and the Krake complete hook URL for the given application. The variables
    from the Kubernetes secret are imported as environment variables into the
    application resource definition. Only resources defined in
    :args:`hook_resources` can be modified.

    Names of environment variables are defined in the application controller
    configuration file.

    If TLS is enabled on the Krake API, the shutdown hook injects a Kubernetes
    secret, and it's corresponding volume and volume mount definitions for the
    Krake CA, the client certificate with the right CN, and its key. The
    directory where the secret is mounted is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        hook_user (str): name of the user set as CN of the certificates
            generated for the hook.
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which
            stores Krake authentication token.
        env_url (str, optional): Name of the environment variable,
            which stores Krake complete hook URL.
    """

    hook_resources = ("Pod", "Deployment", "ReplicationController")

    # No ``__init__`` override: ``Hook.__init__`` already stores every
    # constructor argument, including ``env_url``.

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create shutdown hooks secret resource.

        Shutdown hook secret stores Krake authentication token and shutdown hook
        URL for given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Shutdown hook authentication token

        Returns:
            dict: shutdown hook secret resource
        """
        shutdown_url = self.create_hook_url(name, namespace, api_endpoint)
        data = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(shutdown_url),
        }
        return self.secret(secret_name, data, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' shutdown URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application shutdown url
        """
        api_url = URL(api_endpoint)
        return str(
            api_url.with_path(
                f"/kubernetes/namespaces/{namespace}/applications/{name}/shutdown"
            )
        )

    def env_vars(self, secret_name):
        """Create shutdown hooks environment variables sub-resources.

        Creates shutdown hook environment variables to store Krake
        authentication token and a shutdown hook URL for given applications.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of shutdown hook environment variables sub-resources
        """
        env_token = V1EnvVar(
            name=self.env_token,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(
                            name=secret_name, key=self.env_token.lower()
                        )
                    )
                )
            ),
        )
        env_url = V1EnvVar(
            name=self.env_url,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(name=secret_name, key=self.env_url.lower())
                    )
                )
            ),
        )

        # Same injection paths as for the complete hook: the containers of a
        # Deployment-like template, or of a plain Pod. (The intermediate
        # ``env_resources`` list of the previous version added nothing.)
        sub_resources = []
        for env in (env_token, env_url):
            sub_resources.append(
                SubResource(
                    group="env",
                    name=env.name,
                    body=self.attribute_map(env),
                    path=(
                        ("spec", "template", "spec", "containers"),
                        ("spec", "containers"),  # kind: Pod
                    ),
                )
            )
        return sub_resources
|
krake/krake/controller/kubernetes/hooks.py
|
codereval_python_data_66
|
Convert the SQL query to use the out-style parameters instead of
the in-style parameters.
*sql* (:class:`str` or :class:`bytes`) is the SQL query.
*params* (:class:`~collections.abc.Mapping` or :class:`~collections.abc.Sequence`)
contains the set of in-style parameters. It maps each parameter
(:class:`str` or :class:`int`) to value. If :attr:`.SQLParams.in_style`
is a named parameter style, then *params* must be a :class:`~collections.abc.Mapping`.
If :attr:`.SQLParams.in_style` is an ordinal parameter style, then
*params* must be a :class:`~collections.abc.Sequence`.
Returns a :class:`tuple` containing:
- The formatted SQL query (:class:`str` or :class:`bytes`).
- The set of converted out-style parameters (:class:`dict` or
:class:`list`).
def format(
    self,
    sql: AnyStr,
    params: Union[Dict[Union[str, int], Any], Sequence[Any]],
) -> Tuple[AnyStr, Union[Dict[Union[str, int], Any], Sequence[Any]]]:
    """
    Convert the SQL query to use the out-style parameters instead of
    the in-style parameters.

    *sql* (:class:`str` or :class:`bytes`) is the SQL query.

    *params* (:class:`~collections.abc.Mapping` or :class:`~collections.abc.Sequence`)
    contains the set of in-style parameters. It maps each parameter
    (:class:`str` or :class:`int`) to value. If :attr:`.SQLParams.in_style`
    is a named parameter style, then *params* must be a :class:`~collections.abc.Mapping`.
    If :attr:`.SQLParams.in_style` is an ordinal parameter style, then
    *params* must be a :class:`~collections.abc.Sequence`.

    Returns a :class:`tuple` containing:

    - The formatted SQL query (:class:`str` or :class:`bytes`).

    - The set of converted out-style parameters (:class:`dict` or
      :class:`list`).
    """
    # Work on a unicode version of the query; remember which string type
    # must be restored on the way out.
    if isinstance(sql, bytes):
        decoded_sql = sql.decode(_BYTES_ENCODING)
        restore_bytes = True
    elif isinstance(sql, str):
        decoded_sql = sql
        restore_bytes = False
    else:
        raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql))

    # Delegate the actual parameter-style conversion.
    converted_sql, out_params = self._converter.convert(decoded_sql, params)

    # Re-encode if the caller passed a byte string.
    if restore_bytes:
        return converted_sql.encode(_BYTES_ENCODING), out_params
    return converted_sql, out_params
"""
:mod:`sqlparams` is a utility package for converting between various SQL
parameter styles.
"""
import re
from typing import (
Any,
AnyStr,
Dict,
Iterable,
List,
Optional,
Pattern,
Sequence,
Tuple,
Type,
Union)
from . import _converting
from . import _styles
from ._util import _is_iterable
from ._meta import (
__author__,
__copyright__,
__credits__,
__license__,
__version__,
)
_BYTES_ENCODING = 'latin1'
"""
The encoding to use when parsing a byte query string.
"""
_STYLES = {}
"""
Maps parameter style by name.
"""
class SQLParams(object):
    """
    The :class:`.SQLParams` class is used to support named parameters in
    SQL queries where they are not otherwise supported (e.g., pyodbc).
    This is done by converting from one parameter style query to another
    parameter style query.

    By default, when converting to a numeric or ordinal style any
    :class:`tuple` parameter will be expanded into "(?,?,...)" to support
    the widely used "IN {tuple}" SQL expression without leaking any
    unescaped values.
    """

    def __init__(
        self,
        in_style: str,
        out_style: str,
        escape_char: Union[str, bool, None] = None,
        expand_tuples: Optional[bool] = None,
    ) -> None:
        """
        Instantiates the :class:`.SQLParams` instance.

        *in_style* (:class:`str`) is the parameter style that will be used
        in an SQL query before being parsed and converted to :attr:`.SQLParams.out_style`.

        *out_style* (:class:`str`) is the parameter style that the SQL query
        will be converted to.

        *escape_char* (:class:`str`, :class:`bool`, or :data:`None`) is the
        escape character used to prevent matching an in-style parameter. If
        :data:`True`, use the default escape character (repeat the initial
        character to escape it; e.g., "%%"). If :data:`False`, do not use an
        escape character. Default is :data:`None` for :data:`False`.

        *expand_tuples* (:class:`bool` or :data:`None`) is whether to
        expand tuples into a sequence of parameters. Default is :data:`None`
        to let it be determined by *out_style* (to maintain backward
        compatibility). If *out_style* is a numeric or ordinal style, expand
        tuples by default (:data:`True`). If *out_style* is a named style,
        do not expand tuples by default (:data:`False`).

        The following parameter styles are supported by both *in_style* and
        *out_style*:

        - For all named styles the parameter keys must be valid
          `Python identifiers`_. They cannot start with a digit. This is to
          help prevent incorrectly matching common strings such as datetimes.

          Named styles:

          - "named" indicates parameters will use the named style::

                ... WHERE name = :name

          - "named_dollar" indicates parameters will use the named dollar
            sign style (not defined by `PEP 249`_)::

                ... WHERE name = $name

          - "pyformat" indicates parameters will use the named Python
            extended format style. Strictly speaking, `PEP 249`_ only
            specifies "%(name)s" so only that form (without any other
            conversions or flags) is supported::

                ... WHERE name = %(name)s

        - All numeric styles start at :data:`1`. When using a
          :class:`~collections.abc.Sequence` for the parameters, the 1st
          parameter (e.g., ":1") will correspond to the 1st element of the
          sequence (i.e., index :data:`0`). When using a
          :class:`~collections.abc.Mapping` for the parameters, the 1st
          parameter (e.g., ":1") will correspond to the matching key (i.e.,
          :data:`1` or :data:`"1"`).

          Numeric styles:

          - "numeric" indicates parameters will use the numeric style::

                ... WHERE name = :1

          - "numeric_dollar" indicates parameters will use the numeric
            dollar sign style (not defined by `PEP 249`_)::

                ... WHERE name = $1

        - Ordinal styles:

          - "format" indicates parameters will use the ordinal Python format
            style. Strictly speaking, `PEP 249`_ only specifies "%s" so only
            that form (without any other conversions or flags) is supported::

                ... WHERE name = %s

          - "qmark" indicates parameters will use the ordinal question mark
            style::

                ... WHERE name = ?

        .. _`PEP 249`: http://www.python.org/dev/peps/pep-0249/

        .. _`Python identifiers`: https://docs.python.org/3/reference/lexical_analysis.html#identifiers
        """
        self._converter: _converting._Converter = None
        """
        *_converter* (:class:`._converting._Converter`) is the parameter
        converter to use.
        """

        self._escape_char: Optional[str] = None
        """
        *_escape_char* (:class:`str` or :data:`None`) is the escape
        character used to prevent matching an in-style parameter.
        """

        self._expand_tuples: bool = None
        """
        *_expand_tuples* (:class:`bool`) is whether to convert tuples into a
        sequence of parameters.
        """

        self._in_obj: _styles._Style = None
        """
        *_in_obj* (:class:`._styles._Style`) is the in-style parameter object.
        """

        self._in_regex: Pattern = None
        """
        *_in_regex* (:class:`re.Pattern`) is the regular expression used to
        extract the in-style parameters.
        """

        self._in_style: str = None
        """
        *_in_style* (:class:`str`) is the parameter style that will be used
        in an SQL query before being parsed and converted to :attr:`.SQLParams.out_style`.
        """

        self._out_obj: _styles._Style = None
        """
        *_out_obj* (:class:`._styles._Style`) is the out-style parameter object.
        """

        self._out_style: str = None
        """
        *_out_style* (:class:`str`) is the parameter style that the SQL query
        will be converted to.
        """

        if not isinstance(in_style, str):
            raise TypeError("in_style:{!r} is not a string.".format(in_style))
        if not isinstance(out_style, str):
            raise TypeError("out_style:{!r} is not a string.".format(out_style))

        self._in_style = in_style
        self._out_style = out_style
        # Raises KeyError for an unrecognized style name.
        self._in_obj = _styles._STYLES[self._in_style]
        self._out_obj = _styles._STYLES[self._out_style]

        if escape_char is True:
            # Use the style's default escape character.
            use_char = self._in_obj.escape_char
        elif not escape_char:
            use_char = None
        elif isinstance(escape_char, str):
            use_char = escape_char
        else:
            # BUG FIX: the message template was previously raised without
            # being formatted, so the error showed a literal "{!r}".
            raise TypeError(
                "escape_char:{!r} is not a string or bool.".format(escape_char)
            )

        if expand_tuples is None:
            # Preserve backward compatibility: only named out-styles keep
            # tuples intact by default.
            expand_tuples = not isinstance(self._out_obj, _styles._NamedStyle)

        self._escape_char = use_char
        self._expand_tuples = bool(expand_tuples)
        self._in_regex = self._create_in_regex()
        self._converter = self._create_converter()

    def __repr__(self) -> str:
        """
        Returns the canonical string representation (:class:`str`) of this
        instance.
        """
        return "{}.{}({!r}, {!r})".format(self.__class__.__module__, self.__class__.__name__, self._in_style, self._out_style)

    def _create_converter(self) -> _converting._Converter:
        """
        Create the parameter style converter.

        Returns the parameter style converter (:class:`._converting._Converter`).
        """
        assert self._in_regex is not None, self._in_regex
        assert self._out_obj is not None, self._out_obj

        # Determine converter class from the (in-style, out-style) pair.
        converter_class: Type[_converting._Converter]
        if isinstance(self._in_obj, _styles._NamedStyle):
            if isinstance(self._out_obj, _styles._NamedStyle):
                converter_class = _converting._NamedToNamedConverter
            elif isinstance(self._out_obj, _styles._NumericStyle):
                converter_class = _converting._NamedToNumericConverter
            elif isinstance(self._out_obj, _styles._OrdinalStyle):
                converter_class = _converting._NamedToOrdinalConverter
            else:
                raise TypeError("out_style:{!r} maps to an unexpected type: {!r}".format(self._out_style, self._out_obj))
        elif isinstance(self._in_obj, _styles._NumericStyle):
            if isinstance(self._out_obj, _styles._NamedStyle):
                converter_class = _converting._NumericToNamedConverter
            elif isinstance(self._out_obj, _styles._NumericStyle):
                converter_class = _converting._NumericToNumericConverter
            elif isinstance(self._out_obj, _styles._OrdinalStyle):
                converter_class = _converting._NumericToOrdinalConverter
            else:
                raise TypeError("out_style:{!r} maps to an unexpected type: {!r}".format(self._out_style, self._out_obj))
        elif isinstance(self._in_obj, _styles._OrdinalStyle):
            if isinstance(self._out_obj, _styles._NamedStyle):
                converter_class = _converting._OrdinalToNamedConverter
            elif isinstance(self._out_obj, _styles._NumericStyle):
                converter_class = _converting._OrdinalToNumericConverter
            elif isinstance(self._out_obj, _styles._OrdinalStyle):
                converter_class = _converting._OrdinalToOrdinalConverter
            else:
                raise TypeError("out_style:{!r} maps to an unexpected type: {!r}".format(self._out_style, self._out_obj))
        else:
            raise TypeError("in_style:{!r} maps to an unexpected type: {!r}".format(self._in_style, self._in_obj))

        # Create converter.
        converter = converter_class(
            escape_char=self._escape_char,
            expand_tuples=self._expand_tuples,
            in_regex=self._in_regex,
            in_style=self._in_obj,
            out_style=self._out_obj,
        )
        return converter

    def _create_in_regex(self) -> Pattern:
        """
        Create the in-style parameter regular expression.

        Returns the in-style parameter regular expression (:class:`re.Pattern`).
        """
        regex_parts = []

        if self._in_obj.escape_char != "%" and self._out_obj.escape_char == "%":
            # Escape out-style percent signs that are not in-style escapes.
            regex_parts.append("(?P<out_percent>%)")

        if self._escape_char:
            # Escaping is enabled.
            escape = self._in_obj.escape_regex.format(char=re.escape(self._escape_char))
            regex_parts.append(escape)

        regex_parts.append(self._in_obj.param_regex)

        return re.compile("|".join(regex_parts))

    @property
    def escape_char(self) -> Optional[str]:
        """
        *escape_char* (:class:`str` or :data:`None`) is the escape character
        used to prevent matching an in-style parameter.
        """
        return self._escape_char

    @property
    def expand_tuples(self) -> bool:
        """
        *expand_tuples* (:class:`bool`) is whether to convert tuples into a
        sequence of parameters.
        """
        return self._expand_tuples

    def format(
        self,
        sql: AnyStr,
        params: Union[Dict[Union[str, int], Any], Sequence[Any]],
    ) -> Tuple[AnyStr, Union[Dict[Union[str, int], Any], Sequence[Any]]]:
        """
        Convert the SQL query to use the out-style parameters instead of
        the in-style parameters.

        *sql* (:class:`str` or :class:`bytes`) is the SQL query.

        *params* (:class:`~collections.abc.Mapping` or :class:`~collections.abc.Sequence`)
        contains the set of in-style parameters. It maps each parameter
        (:class:`str` or :class:`int`) to value. If :attr:`.SQLParams.in_style`
        is a named parameter style, then *params* must be a :class:`~collections.abc.Mapping`.
        If :attr:`.SQLParams.in_style` is an ordinal parameter style, then
        *params* must be a :class:`~collections.abc.Sequence`.

        Returns a :class:`tuple` containing:

        - The formatted SQL query (:class:`str` or :class:`bytes`).

        - The set of converted out-style parameters (:class:`dict` or
          :class:`list`).
        """
        # Normalize query encoding to simplify processing.
        if isinstance(sql, str):
            use_sql = sql
            string_type = str
        elif isinstance(sql, bytes):
            use_sql = sql.decode(_BYTES_ENCODING)
            string_type = bytes
        else:
            raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql))

        # Replace in-style with out-style parameters.
        use_sql, out_params = self._converter.convert(use_sql, params)

        # Make sure the query is returned as the proper string type.
        if string_type is bytes:
            out_sql = use_sql.encode(_BYTES_ENCODING)
        else:
            out_sql = use_sql

        # Return converted SQL and out-parameters.
        return out_sql, out_params

    def formatmany(
        self,
        sql: AnyStr,
        many_params: Union[Iterable[Dict[Union[str, int], Any]], Iterable[Sequence[Any]]],
    ) -> Tuple[AnyStr, Union[List[Dict[Union[str, int], Any]], List[Sequence[Any]]]]:
        """
        Convert the SQL query to use the out-style parameters instead of the
        in-style parameters.

        *sql* (:class:`str` or :class:`bytes`) is the SQL query.

        *many_params* (:class:`~collections.abc.Iterable`) contains each set
        of in-style parameters (*params*).

        - *params* (:class:`~collections.abc.Mapping` or :class:`~collections.abc.Sequence`)
          contains the set of in-style parameters. It maps each parameter
          (:class:`str` or :class:`int`) to value. If :attr:`.SQLParams.in_style`
          is a named parameter style, then *params* must be a :class:`~collections.abc.Mapping`.
          If :attr:`.SQLParams.in_style` is an ordinal parameter style, then
          *params* must be a :class:`~collections.abc.Sequence`.

        Returns a :class:`tuple` containing:

        - The formatted SQL query (:class:`str` or :class:`bytes`).

        - A :class:`list` containing each set of converted out-style
          parameters (:class:`dict` or :class:`list`).
        """
        # Normalize query encoding to simplify processing.
        if isinstance(sql, str):
            use_sql = sql
            string_type = str
        elif isinstance(sql, bytes):
            use_sql = sql.decode(_BYTES_ENCODING)
            string_type = bytes
        else:
            raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql))

        if not _is_iterable(many_params):
            raise TypeError("many_params:{!r} is not iterable.".format(many_params))

        # Replace in-style with out-style parameters.
        use_sql, many_out_params = self._converter.convert_many(use_sql, many_params)

        # Make sure the query is returned as the proper string type.
        if string_type is bytes:
            out_sql = use_sql.encode(_BYTES_ENCODING)
        else:
            out_sql = use_sql

        # Return converted SQL and out-parameters.
        return out_sql, many_out_params

    @property
    def in_style(self) -> str:
        """
        *in_style* (:class:`str`) is the parameter style to expect in an SQL
        query when being parsed.
        """
        return self._in_style

    @property
    def out_style(self) -> str:
        """
        *out_style* (:class:`str`) is the parameter style that the SQL query
        will be converted to.
        """
        return self._out_style
|
sqlparams/__init__.py
|
codereval_python_data_67
|
Convert the SQL query to use the out-style parameters instead of the
in-style parameters.
*sql* (:class:`str` or :class:`bytes`) is the SQL query.
*many_params* (:class:`~collections.abc.Iterable`) contains each set
of in-style parameters (*params*).
- *params* (:class:`~collections.abc.Mapping` or :class:`~collections.abc.Sequence`)
contains the set of in-style parameters. It maps each parameter
(:class:`str` or :class:`int`) to value. If :attr:`.SQLParams.in_style`
is a named parameter style, then *params* must be a :class:`~collections.abc.Mapping`.
If :attr:`.SQLParams.in_style` is an ordinal parameter style, then
*params* must be a :class:`~collections.abc.Sequence`.
Returns a :class:`tuple` containing:
- The formatted SQL query (:class:`str` or :class:`bytes`).
- A :class:`list` containing each set of converted out-style
parameters (:class:`dict` or :class:`list`).
def formatmany(
    self,
    sql: AnyStr,
    many_params: Union[Iterable[Dict[Union[str, int], Any]], Iterable[Sequence[Any]]],
) -> Tuple[AnyStr, Union[List[Dict[Union[str, int], Any]], List[Sequence[Any]]]]:
    """
    Convert the SQL query to use the out-style parameters instead of the
    in-style parameters.

    *sql* (:class:`str` or :class:`bytes`) is the SQL query.

    *many_params* (:class:`~collections.abc.Iterable`) contains each set
    of in-style parameters (*params*).

    - *params* (:class:`~collections.abc.Mapping` or :class:`~collections.abc.Sequence`)
      contains the set of in-style parameters. It maps each parameter
      (:class:`str` or :class:`int`) to value. If :attr:`.SQLParams.in_style`
      is a named parameter style, then *params* must be a :class:`~collections.abc.Mapping`.
      If :attr:`.SQLParams.in_style` is an ordinal parameter style, then
      *params* must be a :class:`~collections.abc.Sequence`.

    Returns a :class:`tuple` containing:

    - The formatted SQL query (:class:`str` or :class:`bytes`).

    - A :class:`list` containing each set of converted out-style
      parameters (:class:`dict` or :class:`list`).
    """
    # Decode byte queries so all processing happens on unicode text, and
    # remember which string type must be restored on the way out.
    if isinstance(sql, bytes):
        working_sql = sql.decode(_BYTES_ENCODING)
        restore_bytes = True
    elif isinstance(sql, str):
        working_sql = sql
        restore_bytes = False
    else:
        raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql))

    if not _is_iterable(many_params):
        raise TypeError("many_params:{!r} is not iterable.".format(many_params))

    # Delegate the conversion of the query and every parameter set.
    converted_sql, many_out_params = self._converter.convert_many(
        working_sql, many_params
    )

    # Re-encode if the caller passed a byte string.
    if restore_bytes:
        return converted_sql.encode(_BYTES_ENCODING), many_out_params
    return converted_sql, many_out_params
"""
:mod:`sqlparams` is a utility package for converting between various SQL
parameter styles.
"""
import re
from typing import (
Any,
AnyStr,
Dict,
Iterable,
List,
Optional,
Pattern,
Sequence,
Tuple,
Type,
Union)
from . import _converting
from . import _styles
from ._util import _is_iterable
from ._meta import (
__author__,
__copyright__,
__credits__,
__license__,
__version__,
)
_BYTES_ENCODING = 'latin1'
"""
The encoding to use when parsing a byte query string.
"""
_STYLES = {}
"""
Maps parameter style by name.
"""
class SQLParams(object):
"""
The :class:`.SQLParams` class is used to support named parameters in
SQL queries where they are not otherwise supported (e.g., pyodbc).
This is done by converting from one parameter style query to another
parameter style query.
By default, when converting to a numeric or ordinal style any
:class:`tuple` parameter will be expanded into "(?,?,...)" to support
the widely used "IN {tuple}" SQL expression without leaking any
unescaped values.
"""
def __init__(
self,
in_style: str,
out_style: str,
escape_char: Union[str, bool, None] = None,
expand_tuples: Optional[bool] = None,
) -> None:
"""
Instantiates the :class:`.SQLParams` instance.
*in_style* (:class:`str`) is the parameter style that will be used
in an SQL query before being parsed and converted to :attr:`.SQLParams.out_style`.
*out_style* (:class:`str`) is the parameter style that the SQL query
will be converted to.
*escape_char* (:class:`str`, :class:`bool`, or :data:`None`) is the
escape character used to prevent matching a in-style parameter. If
:data:`True`, use the default escape character (repeat the initial
character to escape it; e.g., "%%"). If :data:`False`, do not use an
escape character. Default is :data:`None` for :data:`False`.
*expand_tuples* (:class:`bool` or :data:`None`) is whether to
expand tuples into a sequence of parameters. Default is :data:`None`
to let it be determined by *out_style* (to maintain backward
compatibility). If *out_style* is a numeric or ordinal style, expand
tuples by default (:data:`True`). If *out_style* is a named style,
do not expand tuples by default (:data:`False`).
The following parameter styles are supported by both *in_style* and
*out_style*:
- For all named styles the parameter keys must be valid `Python identifiers`_.
They cannot start with a digit. This is to help prevent
incorrectly matching common strings such as datetimes.
Named styles:
- "named" indicates parameters will use the named style::
... WHERE name = :name
- "named_dollar" indicates parameters will use the named dollar
sign style::
... WHERE name = $name
.. NOTE:: This is not defined by `PEP 249`_.
- "pyformat" indicates parameters will use the named Python
extended format style::
... WHERE name = %(name)s
.. NOTE:: Strictly speaking, `PEP 249`_ only specifies
"%(name)s" for the "pyformat" parameter style so only that
form (without any other conversions or flags) is supported.
- All numeric styles start at :data:`1`. When using a
:class:`~collections.abc.Sequence` for the parameters, the 1st
parameter (e.g., ":1") will correspond to the 1st element of the
sequence (i.e., index :data:`0`). When using a :class:`~collections.abc.Mapping`
for the parameters, the 1st parameter (e.g., ":1") will correspond
to the matching key (i.e., :data:`1` or :data:`"1"`).
Numeric styles:
- "numeric" indicates parameters will use the numeric style::
... WHERE name = :1
- "numeric_dollar" indicates parameters will use the numeric
dollar sign style (starts at :data:`1`)::
... WHERE name = $1
.. NOTE:: This is not defined by `PEP 249`_.
- Ordinal styles:
- "format" indicates parameters will use the ordinal Python format
style::
... WHERE name = %s
.. NOTE:: Strictly speaking, `PEP 249`_ only specifies "%s" for
the "format" parameter styles so only that form (without any
other conversions or flags) is supported.
- "qmark" indicates parameters will use the ordinal question mark
style::
... WHERE name = ?
.. _`PEP 249`: http://www.python.org/dev/peps/pep-0249/
.. _`Python identifiers`: https://docs.python.org/3/reference/lexical_analysis.html#identifiers
"""
self._converter: _converting._Converter = None
"""
*_converter* (:class:`._converting._Converter`) is the parameter
converter to use.
"""
self._escape_char: Optional[str] = None
"""
*_escape_char* (:class:`str` or :data:`None`) is the escape
character used to prevent matching a in-style parameter.
"""
self._expand_tuples: bool = None
"""
*_expand_tuples* (:class:`bool`) is whether to convert tuples into a
sequence of parameters.
"""
self._in_obj: _styles._Style = None
"""
*_in_obj* (:class:`._styles._Style`) is the in-style parameter object.
"""
self._in_regex: Pattern = None
"""
*_in_regex* (:class:`re.Pattern`) is the regular expression used to
extract the in-style parameters.
"""
self._in_style: str = None
"""
*_in_style* (:class:`str`) is the parameter style that will be used
in an SQL query before being parsed and converted to :attr:`.SQLParams.out_style`.
"""
self._out_obj: _styles._Style = None
"""
*_out_obj* (:class:`._styles._Style`) is the out-style parameter object.
"""
self._out_style: str = None
"""
*_out_style* (:class:`str`) is the parameter style that the SQL query
will be converted to.
"""
if not isinstance(in_style, str):
raise TypeError("in_style:{!r} is not a string.".format(in_style))
if not isinstance(out_style, str):
raise TypeError("out_style:{!r} is not a string.".format(out_style))
self._in_style = in_style
self._out_style = out_style
self._in_obj = _styles._STYLES[self._in_style]
self._out_obj = _styles._STYLES[self._out_style]
if escape_char is True:
use_char = self._in_obj.escape_char
elif not escape_char:
use_char = None
elif isinstance(escape_char, str):
use_char = escape_char
else:
raise TypeError("escape_char:{!r} is not a string or bool.")
if expand_tuples is None:
expand_tuples = not isinstance(self._out_obj, _styles._NamedStyle)
self._escape_char = use_char
self._expand_tuples = bool(expand_tuples)
self._in_regex = self._create_in_regex()
self._converter = self._create_converter()
def __repr__(self) -> str:
"""
Returns the canonical string representation (:class:`str`) of this
instance.
"""
return "{}.{}({!r}, {!r})".format(self.__class__.__module__, self.__class__.__name__, self._in_style, self._out_style)
def _create_converter(self) -> _converting._Converter:
"""
Create the parameter style converter.
Returns the parameter style converter (:class:`._converting._Converter`).
"""
assert self._in_regex is not None, self._in_regex
assert self._out_obj is not None, self._out_obj
# Determine converter class.
converter_class: Type[_converting._Converter]
if isinstance(self._in_obj, _styles._NamedStyle):
if isinstance(self._out_obj, _styles._NamedStyle):
converter_class = _converting._NamedToNamedConverter
elif isinstance(self._out_obj, _styles._NumericStyle):
converter_class = _converting._NamedToNumericConverter
elif isinstance(self._out_obj, _styles._OrdinalStyle):
converter_class = _converting._NamedToOrdinalConverter
else:
raise TypeError("out_style:{!r} maps to an unexpected type: {!r}".format(self._out_style, self._out_obj))
elif isinstance(self._in_obj, _styles._NumericStyle):
if isinstance(self._out_obj, _styles._NamedStyle):
converter_class = _converting._NumericToNamedConverter
elif isinstance(self._out_obj, _styles._NumericStyle):
converter_class = _converting._NumericToNumericConverter
elif isinstance(self._out_obj, _styles._OrdinalStyle):
converter_class = _converting._NumericToOrdinalConverter
else:
raise TypeError("out_style:{!r} maps to an unexpected type: {!r}".format(self._out_style, self._out_obj))
elif isinstance(self._in_obj, _styles._OrdinalStyle):
if isinstance(self._out_obj, _styles._NamedStyle):
converter_class = _converting._OrdinalToNamedConverter
elif isinstance(self._out_obj, _styles._NumericStyle):
converter_class = _converting._OrdinalToNumericConverter
elif isinstance(self._out_obj, _styles._OrdinalStyle):
converter_class = _converting._OrdinalToOrdinalConverter
else:
raise TypeError("out_style:{!r} maps to an unexpected type: {!r}".format(self._out_style, self._out_obj))
else:
raise TypeError("in_style:{!r} maps to an unexpected type: {!r}".format(self._in_style, self._in_obj))
# Create converter.
converter = converter_class(
escape_char=self._escape_char,
expand_tuples=self._expand_tuples,
in_regex=self._in_regex,
in_style=self._in_obj,
out_style=self._out_obj,
)
return converter
def _create_in_regex(self) -> Pattern:
"""
Create the in-style parameter regular expression.
Returns the in-style parameter regular expression (:class:`re.Pattern`).
"""
regex_parts = []
if self._in_obj.escape_char != "%" and self._out_obj.escape_char == "%":
regex_parts.append("(?P<out_percent>%)")
if self._escape_char:
# Escaping is enabled.
escape = self._in_obj.escape_regex.format(char=re.escape(self._escape_char))
regex_parts.append(escape)
regex_parts.append(self._in_obj.param_regex)
return re.compile("|".join(regex_parts))
@property
def escape_char(self) -> Optional[str]:
"""
*escape_char* (:class:`str` or :data:`None`) is the escape character
used to prevent matching a in-style parameter.
"""
return self._escape_char
@property
def expand_tuples(self) -> bool:
"""
*expand_tuples* (:class:`bool`) is whether to convert tuples into a
sequence of parameters.
"""
return self._expand_tuples
def format(
self,
sql: AnyStr,
params: Union[Dict[Union[str, int], Any], Sequence[Any]],
) -> Tuple[AnyStr, Union[Dict[Union[str, int], Any], Sequence[Any]]]:
"""
Convert the SQL query to use the out-style parameters instead of
the in-style parameters.
*sql* (:class:`str` or :class:`bytes`) is the SQL query.
*params* (:class:`~collections.abc.Mapping` or :class:`~collections.abc.Sequence`)
contains the set of in-style parameters. It maps each parameter
(:class:`str` or :class:`int`) to value. If :attr:`.SQLParams.in_style`
is a named parameter style. then *params* must be a :class:`~collections.abc.Mapping`.
If :attr:`.SQLParams.in_style` is an ordinal parameter style, then
*params* must be a :class:`~collections.abc.Sequence`.
Returns a :class:`tuple` containing:
- The formatted SQL query (:class:`str` or :class:`bytes`).
- The set of converted out-style parameters (:class:`dict` or
:class:`list`).
"""
# Normalize query encoding to simplify processing.
if isinstance(sql, str):
use_sql = sql
string_type = str
elif isinstance(sql, bytes):
use_sql = sql.decode(_BYTES_ENCODING)
string_type = bytes
else:
raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql))
# Replace in-style with out-style parameters.
use_sql, out_params = self._converter.convert(use_sql, params)
# Make sure the query is returned as the proper string type.
if string_type is bytes:
out_sql = use_sql.encode(_BYTES_ENCODING)
else:
out_sql = use_sql
# Return converted SQL and out-parameters.
return out_sql, out_params
def formatmany(
    self,
    sql: AnyStr,
    many_params: Union[Iterable[Dict[Union[str, int], Any]], Iterable[Sequence[Any]]],
) -> Tuple[AnyStr, Union[List[Dict[Union[str, int], Any]], List[Sequence[Any]]]]:
    """
    Convert the SQL query to use the out-style parameters instead of the
    in-style parameters, once per set of parameters.

    *sql* (:class:`str` or :class:`bytes`) is the SQL query.

    *many_params* (:class:`~collections.abc.Iterable`) contains each set
    of in-style parameters (*params*).

    - *params* (:class:`~collections.abc.Mapping` or :class:`~collections.abc.Sequence`)
      contains the set of in-style parameters. It maps each parameter
      (:class:`str` or :class:`int`) to value. If :attr:`.SQLParams.in_style`
      is a named parameter style, then *params* must be a :class:`~collections.abc.Mapping`.
      If :attr:`.SQLParams.in_style` is an ordinal parameter style, then
      *params* must be a :class:`~collections.abc.Sequence`.

    Returns a :class:`tuple` containing:

    - The formatted SQL query (:class:`str` or :class:`bytes`).
    - A :class:`list` containing each set of converted out-style
      parameters (:class:`dict` or :class:`list`).
    """
    # Work internally on text; remember whether to re-encode on the way out.
    if isinstance(sql, bytes):
        query = sql.decode(_BYTES_ENCODING)
        was_bytes = True
    elif isinstance(sql, str):
        query = sql
        was_bytes = False
    else:
        raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql))

    # Validate the parameter sets before conversion (same check order as
    # the single-set format()).
    if not _is_iterable(many_params):
        raise TypeError("many_params:{!r} is not iterable.".format(many_params))

    # Replace in-style with out-style parameters for every set.
    query, many_out_params = self._converter.convert_many(query, many_params)

    # Return the query in the caller's original string type.
    if was_bytes:
        return query.encode(_BYTES_ENCODING), many_out_params
    return query, many_out_params
@property
def in_style(self) -> str:
    """
    Parameter style (:class:`str`) expected in an SQL query when it is
    being parsed.
    """
    style = self._in_style
    return style
@property
def out_style(self) -> str:
    """
    Parameter style (:class:`str`) that the SQL query will be converted
    to.
    """
    style = self._out_style
    return style
|
sqlparams/__init__.py
|
codereval_python_data_68
|
Validate OCFL object at path or pyfs root.
Returns True if valid (warnings permitted), False otherwise.
def validate(self, path):
    """Validate OCFL object at path or pyfs root.

    path -- filesystem path (str) or an already-open pyfs filesystem object.

    Returns True if valid (warnings permitted), False otherwise.
    """
    # Reset all per-object state before starting a new validation run
    self.initialize()
    try:
        if isinstance(path, str):
            self.obj_fs = open_fs(path)
        else:
            self.obj_fs = path
            # Use the filesystem's description as the path for error reporting
            path = self.obj_fs.desc('')
    except fs.errors.CreateFailed:
        self.log.error('E003e', path=path)
        return False
    # Object declaration, set spec version number. If there are multiple declarations,
    # look for the latest object version then report any others as errors
    namastes = find_namastes(0, pyfs=self.obj_fs)
    if len(namastes) == 0:
        self.log.error('E003a', assumed_version=self.spec_version)
    else:
        spec_version = None
        for namaste in namastes:
            # Extract and check spec version number
            this_file_version = None
            for version in ('1.1', '1.0'):
                if namaste.filename == '0=ocfl_object_' + version:
                    this_file_version = version
                    break
            if this_file_version is None:
                self.log.error('E006', filename=namaste.filename)
            elif spec_version is None or this_file_version > spec_version:
                # String comparison works here because only '1.0'/'1.1' occur
                spec_version = this_file_version
            if not namaste.content_ok(pyfs=self.obj_fs):
                self.log.error('E007', filename=namaste.filename)
        if spec_version is None:
            self.log.error('E003c', assumed_version=self.spec_version)
        else:
            self.spec_version = spec_version
        if len(namastes) > 1:
            self.log.error('E003b', files=len(namastes), using_version=self.spec_version)
    # Object root inventory file
    inv_file = 'inventory.json'
    if not self.obj_fs.exists(inv_file):
        self.log.error('E063')
        return False
    try:
        inventory, inv_validator = self.validate_inventory(inv_file)
        # Remember validity now: content checks below only run on a valid inventory
        inventory_is_valid = self.log.num_errors == 0
        self.root_inv_validator = inv_validator
        all_versions = inv_validator.all_versions
        self.id = inv_validator.id
        self.content_directory = inv_validator.content_directory
        self.digest_algorithm = inv_validator.digest_algorithm
        self.validate_inventory_digest(inv_file, self.digest_algorithm)
        # Object root
        self.validate_object_root(all_versions, already_checked=[namaste.filename for namaste in namastes])
        # Version inventory files
        (prior_manifest_digests, prior_fixity_digests) = self.validate_version_inventories(all_versions)
        if inventory_is_valid:
            # Object content
            self.validate_content(inventory, all_versions, prior_manifest_digests, prior_fixity_digests)
    except ValidatorAbortException:
        # Abort signal: errors have already been logged, fall through to summary
        pass
    return self.log.num_errors == 0
"""OCFL Validator.
Philosophy of this code is to keep it separate from the implementations
of Store, Object and Version used to build and manipulate OCFL data, but
to leverage lower level functions such as digest creation etc.. Code style
is plain/verbose with detailed and specific validation errors that might
help someone debug an implementation.
This code uses PyFilesystem (import fs) exclusively for access to files. This
should enable application beyond the operating system filesystem.
"""
import json
import re
import fs
from .digest import file_digest, normalized_digest
from .inventory_validator import InventoryValidator
from .namaste import find_namastes
from .pyfs import open_fs, ocfl_walk, ocfl_files_identical
from .validation_logger import ValidationLogger
class ValidatorAbortException(Exception):
    """Raised internally to abandon a validation run when no further checks make sense."""
class Validator():
    """Class for OCFL Validator.

    Drives validation of a single OCFL object: declaration files, root and
    version inventories, sidecar digests, and content files. All findings are
    reported through a ValidationLogger rather than by raising (except for the
    internal ValidatorAbortException used to bail out early).
    """

    def __init__(self, log=None, show_warnings=False, show_errors=True, check_digests=True, lax_digests=False, lang='en'):
        """Initialize OCFL validator.

        log -- ValidationLogger to report into; a default one is created if None
        check_digests -- if False, skip recomputing content/inventory digests
        lax_digests -- passed through to InventoryValidator
        """
        self.log = log
        self.check_digests = check_digests
        self.lax_digests = lax_digests
        if self.log is None:
            self.log = ValidationLogger(show_warnings=show_warnings, show_errors=show_errors, lang=lang)
        # Extensions registered at https://ocfl.github.io/extensions/ ; anything
        # else in an object's extensions dir draws a W013 warning
        self.registered_extensions = [
            '0001-digest-algorithms', '0002-flat-direct-storage-layout',
            '0003-hash-and-id-n-tuple-storage-layout', '0004-hashed-n-tuple-storage-layout',
            '0005-mutable-head'
        ]
        # The following actually initialized in initialize() method
        self.id = None
        self.spec_version = None
        self.digest_algorithm = None
        self.content_directory = None
        self.inventory_digest_files = None
        self.root_inv_validator = None
        self.obj_fs = None
        self.initialize()

    def initialize(self):
        """Initialize object state.

        Must be called between attempts to validate objects.
        """
        self.id = None
        self.spec_version = '1.0'  # assumed spec version until a declaration is found
        self.digest_algorithm = 'sha512'
        self.content_directory = 'content'
        self.inventory_digest_files = {}  # index by version_dir, algorithms may differ
        self.root_inv_validator = None
        self.obj_fs = None

    def status_str(self, prefix=''):
        """Return string representation of validation log, with optional prefix."""
        return self.log.status_str(prefix=prefix)

    def __str__(self):
        """Return string representation of validation log."""
        return self.status_str()

    def validate(self, path):
        """Validate OCFL object at path or pyfs root.

        Returns True if valid (warnings permitted), False otherwise.
        """
        self.initialize()
        try:
            if isinstance(path, str):
                self.obj_fs = open_fs(path)
            else:
                self.obj_fs = path
                # Use the filesystem's description as the path for error reporting
                path = self.obj_fs.desc('')
        except fs.errors.CreateFailed:
            self.log.error('E003e', path=path)
            return False
        # Object declaration, set spec version number. If there are multiple declarations,
        # look for the latest object version then report any others as errors
        namastes = find_namastes(0, pyfs=self.obj_fs)
        if len(namastes) == 0:
            self.log.error('E003a', assumed_version=self.spec_version)
        else:
            spec_version = None
            for namaste in namastes:
                # Extract and check spec version number
                this_file_version = None
                for version in ('1.1', '1.0'):
                    if namaste.filename == '0=ocfl_object_' + version:
                        this_file_version = version
                        break
                if this_file_version is None:
                    self.log.error('E006', filename=namaste.filename)
                elif spec_version is None or this_file_version > spec_version:
                    spec_version = this_file_version
                if not namaste.content_ok(pyfs=self.obj_fs):
                    self.log.error('E007', filename=namaste.filename)
            if spec_version is None:
                self.log.error('E003c', assumed_version=self.spec_version)
            else:
                self.spec_version = spec_version
            if len(namastes) > 1:
                self.log.error('E003b', files=len(namastes), using_version=self.spec_version)
        # Object root inventory file
        inv_file = 'inventory.json'
        if not self.obj_fs.exists(inv_file):
            self.log.error('E063')
            return False
        try:
            inventory, inv_validator = self.validate_inventory(inv_file)
            # Remember validity now: content checks below only run on a valid inventory
            inventory_is_valid = self.log.num_errors == 0
            self.root_inv_validator = inv_validator
            all_versions = inv_validator.all_versions
            self.id = inv_validator.id
            self.content_directory = inv_validator.content_directory
            self.digest_algorithm = inv_validator.digest_algorithm
            self.validate_inventory_digest(inv_file, self.digest_algorithm)
            # Object root
            self.validate_object_root(all_versions, already_checked=[namaste.filename for namaste in namastes])
            # Version inventory files
            (prior_manifest_digests, prior_fixity_digests) = self.validate_version_inventories(all_versions)
            if inventory_is_valid:
                # Object content
                self.validate_content(inventory, all_versions, prior_manifest_digests, prior_fixity_digests)
        except ValidatorAbortException:
            pass
        return self.log.num_errors == 0

    def validate_inventory(self, inv_file, where='root', extract_spec_version=False):
        """Validate a given inventory file, record errors with self.log.error().

        Returns inventory object for use in later validation
        of object content. Does not look at anything else in the
        object itself.

        where - used for reporting messages of where inventory is in object
        extract_spec_version - if set True will attempt to take spec_version from the
            inventory itself instead of using the spec_version provided
        """
        try:
            with self.obj_fs.openbin(inv_file, 'r') as fh:
                inventory = json.load(fh)
        except json.decoder.JSONDecodeError as e:
            self.log.error('E033', where=where, explanation=str(e))
            # Nothing further can be checked without a parseable inventory
            raise ValidatorAbortException
        inv_validator = InventoryValidator(log=self.log, where=where,
                                           lax_digests=self.lax_digests,
                                           spec_version=self.spec_version)
        inv_validator.validate(inventory, extract_spec_version=extract_spec_version)
        return inventory, inv_validator

    def validate_inventory_digest(self, inv_file, digest_algorithm, where="root"):
        """Validate the appropriate inventory digest file in path."""
        inv_digest_file = inv_file + '.' + digest_algorithm
        if not self.obj_fs.exists(inv_digest_file):
            self.log.error('E058a', where=where, path=inv_digest_file)
        else:
            self.validate_inventory_digest_match(inv_file, inv_digest_file)

    def validate_inventory_digest_match(self, inv_file, inv_digest_file):
        """Validate a given inventory digest for a given inventory file.

        Logs E060 on mismatch, E061 on failure to read or compute, E058b if
        the sidecar filename does not end with a digest algorithm suffix.
        """
        if not self.check_digests:
            return
        # Algorithm is taken from the sidecar filename's extension
        m = re.match(r'''.*\.(\w+)$''', inv_digest_file)
        if m:
            digest_algorithm = m.group(1)
            try:
                digest_recorded = self.read_inventory_digest(inv_digest_file)
                digest_actual = file_digest(inv_file, digest_algorithm, pyfs=self.obj_fs)
                if digest_actual != digest_recorded:
                    self.log.error("E060", inv_file=inv_file, actual=digest_actual, recorded=digest_recorded, inv_digest_file=inv_digest_file)
            except Exception as e:  # pylint: disable=broad-except
                self.log.error("E061", description=str(e))
        else:
            self.log.error("E058b", inv_digest_file=inv_digest_file)

    def validate_object_root(self, version_dirs, already_checked):
        """Validate object root.

        All expected_files must be present and no other files.
        All expected_dirs must be present and no other dirs.
        """
        expected_files = ['0=ocfl_object_' + self.spec_version, 'inventory.json',
                          'inventory.json.' + self.digest_algorithm]
        for entry in self.obj_fs.scandir(''):
            if entry.is_file:
                if entry.name not in expected_files and entry.name not in already_checked:
                    self.log.error('E001a', file=entry.name)
            elif entry.is_dir:
                if entry.name in version_dirs:
                    pass
                elif entry.name == 'extensions':
                    self.validate_extensions_dir()
                elif re.match(r'''v\d+$''', entry.name):
                    # Looks like a version directory so give more specific error
                    self.log.error('E046b', dir=entry.name)
                else:
                    # Simply an unexpected directory
                    self.log.error('E001b', dir=entry.name)
            else:
                # Neither file nor directory (e.g. special file)
                self.log.error('E001c', entry=entry.name)

    def validate_extensions_dir(self):
        """Validate content of extensions directory inside object root.

        Validate the extensions directory by checking that there aren't any
        entries in the extensions directory that aren't directories themselves.
        Where there are extension directories they SHOULD be registered and
        this code relies upon the registered_extensions property to list known
        extensions.
        """
        for entry in self.obj_fs.scandir('extensions'):
            if entry.is_dir:
                if entry.name not in self.registered_extensions:
                    self.log.warning('W013', entry=entry.name)
            else:
                self.log.error('E067', entry=entry.name)

    def validate_version_inventories(self, version_dirs):
        """Each version SHOULD have an inventory up to that point.

        Also keep a record of any content digests different from those in the root inventory
        so that we can also check them when validating the content.

        version_dirs is an array of version directory names and is assumed to be in
        version sequence (1, 2, 3...).
        """
        prior_manifest_digests = {}  # file -> algorithm -> digest -> [versions]
        prior_fixity_digests = {}  # file -> algorithm -> digest -> [versions]
        if len(version_dirs) == 0:
            return prior_manifest_digests, prior_fixity_digests
        last_version = version_dirs[-1]
        prev_version_dir = "NONE"  # will be set for first directory with inventory
        prev_spec_version = '1.0'  # lowest version
        for version_dir in version_dirs:
            inv_file = fs.path.join(version_dir, 'inventory.json')
            if not self.obj_fs.exists(inv_file):
                self.log.warning('W010', where=version_dir)
                continue
            # There is an inventory file for this version directory, check it
            if version_dir == last_version:
                # Don't validate in this case. Per the spec the inventory in the last version
                # MUST be identical to the copy in the object root, just check that
                root_inv_file = 'inventory.json'
                if not ocfl_files_identical(self.obj_fs, inv_file, root_inv_file):
                    self.log.error('E064', root_inv_file=root_inv_file, inv_file=inv_file)
                else:
                    # We could also just compare digest files but this gives a more helpful error for
                    # which file has the incorrect digest if they don't match
                    self.validate_inventory_digest(inv_file, self.digest_algorithm, where=version_dir)
                    self.inventory_digest_files[version_dir] = 'inventory.json.' + self.digest_algorithm
                this_spec_version = self.spec_version
            else:
                # Note that inventories in prior versions may use different digest algorithms
                # from the current inventory, and may accord with the same or earlier
                # versions of the specification
                version_inventory, inv_validator = self.validate_inventory(inv_file, where=version_dir, extract_spec_version=True)
                this_spec_version = inv_validator.spec_version
                digest_algorithm = inv_validator.digest_algorithm
                self.validate_inventory_digest(inv_file, digest_algorithm, where=version_dir)
                self.inventory_digest_files[version_dir] = 'inventory.json.' + digest_algorithm
                if self.id and 'id' in version_inventory:
                    if version_inventory['id'] != self.id:
                        self.log.error('E037b', where=version_dir, root_id=self.id, version_id=version_inventory['id'])
                if 'manifest' in version_inventory:
                    # Check that all files listed in prior inventories are in manifest
                    not_seen = set(prior_manifest_digests.keys())
                    for digest in version_inventory['manifest']:
                        for filepath in version_inventory['manifest'][digest]:
                            # We rely on the validation to check that anything present is OK
                            if filepath in not_seen:
                                not_seen.remove(filepath)
                    if len(not_seen) > 0:
                        self.log.error('E023b', where=version_dir, missing_filepaths=', '.join(sorted(not_seen)))
                    # Record all prior digests
                    for unnormalized_digest in version_inventory['manifest']:
                        digest = normalized_digest(unnormalized_digest, digest_type=digest_algorithm)
                        for filepath in version_inventory['manifest'][unnormalized_digest]:
                            if filepath not in prior_manifest_digests:
                                prior_manifest_digests[filepath] = {}
                            if digest_algorithm not in prior_manifest_digests[filepath]:
                                prior_manifest_digests[filepath][digest_algorithm] = {}
                            if digest not in prior_manifest_digests[filepath][digest_algorithm]:
                                prior_manifest_digests[filepath][digest_algorithm][digest] = []
                            prior_manifest_digests[filepath][digest_algorithm][digest].append(version_dir)
                # Is this inventory an appropriate prior version of the object root inventory?
                if self.root_inv_validator is not None:
                    self.root_inv_validator.validate_as_prior_version(inv_validator)
                # Fixity blocks are independent in each version. Record all values and the versions
                # they occur in for later checks against content
                if 'fixity' in version_inventory:
                    for digest_algorithm in version_inventory['fixity']:
                        for unnormalized_digest in version_inventory['fixity'][digest_algorithm]:
                            digest = normalized_digest(unnormalized_digest, digest_type=digest_algorithm)
                            for filepath in version_inventory['fixity'][digest_algorithm][unnormalized_digest]:
                                if filepath not in prior_fixity_digests:
                                    prior_fixity_digests[filepath] = {}
                                if digest_algorithm not in prior_fixity_digests[filepath]:
                                    prior_fixity_digests[filepath][digest_algorithm] = {}
                                if digest not in prior_fixity_digests[filepath][digest_algorithm]:
                                    prior_fixity_digests[filepath][digest_algorithm][digest] = []
                                prior_fixity_digests[filepath][digest_algorithm][digest].append(version_dir)
            # We are validating the inventories in sequence and each new version must
            # follow the same or later spec version to previous inventories
            if prev_spec_version > this_spec_version:
                self.log.error('E103', where=version_dir, this_spec_version=this_spec_version,
                               prev_version_dir=prev_version_dir, prev_spec_version=prev_spec_version)
            prev_version_dir = version_dir
            prev_spec_version = this_spec_version
        return prior_manifest_digests, prior_fixity_digests

    def validate_content(self, inventory, version_dirs, prior_manifest_digests, prior_fixity_digests):
        """Validate file presence and content against inventory.

        The root inventory in `inventory` is assumed to be valid and safe to use
        for construction of file paths etc..
        """
        files_seen = set()
        # Check files in each version directory
        for version_dir in version_dirs:
            try:
                # Check contents of version directory except content_directory
                for entry in self.obj_fs.listdir(version_dir):
                    if ((entry == 'inventory.json')
                            or (version_dir in self.inventory_digest_files and entry == self.inventory_digest_files[version_dir])):
                        pass
                    elif entry == self.content_directory:
                        # Check content_directory
                        content_path = fs.path.join(version_dir, self.content_directory)
                        num_content_files_in_version = 0
                        for dirpath, dirs, files in ocfl_walk(self.obj_fs, content_path):
                            # An empty directory under content (other than content itself) is an error
                            if dirpath != '/' + content_path and (len(dirs) + len(files)) == 0:
                                self.log.error("E024", where=version_dir, path=dirpath)
                            for file in files:
                                files_seen.add(fs.path.join(dirpath, file).lstrip('/'))
                                num_content_files_in_version += 1
                        if num_content_files_in_version == 0:
                            self.log.warning("W003", where=version_dir)
                    elif self.obj_fs.isdir(fs.path.join(version_dir, entry)):
                        self.log.warning("W002", where=version_dir, entry=entry)
                    else:
                        self.log.error("E015", where=version_dir, entry=entry)
            except (fs.errors.ResourceNotFound, fs.errors.DirectoryExpected):
                self.log.error('E046a', version_dir=version_dir)
        # Extract any digests in fixity and organize by filepath
        fixity_digests = {}
        if 'fixity' in inventory:
            for digest_algorithm in inventory['fixity']:
                for digest in inventory['fixity'][digest_algorithm]:
                    for filepath in inventory['fixity'][digest_algorithm][digest]:
                        if filepath in files_seen:
                            if filepath not in fixity_digests:
                                fixity_digests[filepath] = {}
                            if digest_algorithm not in fixity_digests[filepath]:
                                fixity_digests[filepath][digest_algorithm] = {}
                            if digest not in fixity_digests[filepath][digest_algorithm]:
                                fixity_digests[filepath][digest_algorithm][digest] = ['root']
                        else:
                            # Fixity entry for a file that does not exist on disk
                            self.log.error('E093b', where='root', digest_algorithm=digest_algorithm, digest=digest, content_path=filepath)
        # Check all files in root manifest
        if 'manifest' in inventory:
            for digest in inventory['manifest']:
                for filepath in inventory['manifest'][digest]:
                    if filepath not in files_seen:
                        self.log.error('E092b', where='root', content_path=filepath)
                    else:
                        if self.check_digests:
                            content_digest = file_digest(filepath, digest_type=self.digest_algorithm, pyfs=self.obj_fs)
                            if content_digest != normalized_digest(digest, digest_type=self.digest_algorithm):
                                self.log.error('E092a', where='root', digest_algorithm=self.digest_algorithm, digest=digest, content_path=filepath, content_digest=content_digest)
                            # Cache computed digests so they aren't recomputed per check below
                            known_digests = {self.digest_algorithm: content_digest}
                            # Are there digest values in the fixity block?
                            self.check_additional_digests(filepath, known_digests, fixity_digests, 'E093a')
                            # Are there other digests for this same file from other inventories?
                            self.check_additional_digests(filepath, known_digests, prior_manifest_digests, 'E092a')
                            self.check_additional_digests(filepath, known_digests, prior_fixity_digests, 'E093a')
                        files_seen.discard(filepath)
        # Anything left in files_seen is not mentioned in the inventory
        if len(files_seen) > 0:
            self.log.error('E023a', where='root', extra_files=', '.join(sorted(files_seen)))

    def check_additional_digests(self, filepath, known_digests, additional_digests, error_code):
        """Check all the additional digests for filepath.

        This method is intended to be used both for manifest digests in prior versions and
        for fixity digests. The known_digests dict is used to store any values calculated
        so that we don't recalculate digests that might appear multiple times. It is added to
        with any additional values calculated.

        Parameters:
            filepath - path of file in object (`v1/content/something` etc.)
            known_digests - dict of algorithm->digest that we have calculated
            additional_digests - dict: filepath -> algorithm -> digest -> [versions appears in]
            error_code - error code to log on mismatch (E092a for manifest, E093a for fixity)
        """
        if filepath in additional_digests:
            for digest_algorithm in additional_digests[filepath]:
                if digest_algorithm in known_digests:
                    # Don't recompute anything, just use it if we've seen it before
                    content_digest = known_digests[digest_algorithm]
                else:
                    content_digest = file_digest(filepath, digest_type=digest_algorithm, pyfs=self.obj_fs)
                    known_digests[digest_algorithm] = content_digest
                for digest in additional_digests[filepath][digest_algorithm]:
                    if content_digest != normalized_digest(digest, digest_type=digest_algorithm):
                        where = ','.join(additional_digests[filepath][digest_algorithm][digest])
                        self.log.error(error_code, where=where, digest_algorithm=digest_algorithm, digest=digest, content_path=filepath, content_digest=content_digest)

    def read_inventory_digest(self, inv_digest_file):
        """Read inventory digest from sidecar file.

        Raise exception if there is an error, else return digest.
        """
        with self.obj_fs.open(inv_digest_file, 'r') as fh:
            line = fh.readline()
            # we ignore any following lines, could raise exception
        m = re.match(r'''(\w+)\s+(\S+)\s*$''', line)
        if not m:
            raise Exception("Bad inventory digest file %s, wrong format" % (inv_digest_file))
        if m.group(2) != 'inventory.json':
            raise Exception("Bad inventory name in inventory digest file %s" % (inv_digest_file))
        return m.group(1)
|
ocfl/validator.py
|
codereval_python_data_69
|
Return string of validator status, with optional prefix.
def status_str(self, prefix=''):
    """Return all logged messages sorted, one per line, each with *prefix* prepended."""
    return '\n'.join(prefix + message for message in sorted(self.messages))
"""OCFL Validation Logger.
Handle logging of validation errors and warnings.
"""
import json
import os
import os.path
import re
class ValidationLogger():
    """Class for OCFL ValidationLogger.

    Collects validation errors and warnings keyed by code, expanding each
    code into a human-readable message from the validation-errors catalog.
    """

    # Shared catalog of codes loaded once per class unless overridden per-instance
    validation_codes = None

    def __init__(self, show_warnings=False, show_errors=True,
                 lang='en', validation_codes=None):
        """Initialize OCFL validation logger.

        show_warnings / show_errors -- control which messages are kept in self.messages
        lang -- preferred language for message descriptions
        validation_codes -- optional catalog dict; loaded from data file if absent
        """
        self.show_warnings = show_warnings
        self.show_errors = show_errors
        self.lang = lang
        self.codes = {}      # code -> last message logged for that code
        self.messages = []   # full list of shown messages, in log order
        self.num_errors = 0
        self.num_warnings = 0
        self.info = 0
        self.spec = 'https://ocfl.io/1.0/spec/'
        if validation_codes is not None:
            self.validation_codes = validation_codes
        elif self.validation_codes is None:
            with open(os.path.join(os.path.dirname(__file__), 'data/validation-errors.json'), 'r', encoding="utf-8") as fh:
                self.validation_codes = json.load(fh)

    def error_or_warning(self, code, severity='error', **args):
        """Add error or warning to self.codes.

        Looks up *code* in the catalog, picks the description in self.lang
        (falling back to English, then to the first language alphabetically),
        and interpolates any declared parameters from **args.
        """
        if code in self.validation_codes and 'description' in self.validation_codes[code]:
            desc = self.validation_codes[code]['description']
            lang_desc = None
            if self.lang in desc:
                lang_desc = desc[self.lang]
            elif 'en' in desc:
                lang_desc = desc['en']
            elif len(desc) > 0:
                # first key alphabetically
                lang_desc = desc[sorted(list(desc.keys()))[0]]
            else:
                lang_desc = "Unknown " + severity + " without a description"
            # Add in any parameters
            if 'params' in self.validation_codes[code]:
                params = []
                for param in self.validation_codes[code]['params']:
                    params.append(str(args[param]) if param in args else '???')
                try:
                    lang_desc = lang_desc % tuple(params)
                except TypeError:
                    # Description/params mismatch: append raw args rather than fail
                    lang_desc += ' ' + str(args)
            message = '[' + code + '] ' + lang_desc
        else:
            message = "Unknown " + severity + ": %s - params (%s)" % (code, str(args))
        # Add link to spec
        m = re.match(r'''([EW](\d\d\d))''', code)
        if m and int(m.group(2)) < 200:
            message += ' (see ' + self.spec + '#' + m.group(1) + ')'
        # Store set of codes with last message for that code, and _full_ list of messages
        self.codes[code] = message
        if (severity == 'error' and self.show_errors) or (severity != 'error' and self.show_warnings):
            self.messages.append(message)

    def error(self, code, **args):
        """Add error code to self.codes."""
        self.error_or_warning(code, severity='error', **args)
        self.num_errors += 1

    def warning(self, code, **args):
        """Add warning code to self.codes."""
        self.error_or_warning(code, severity='warning', **args)
        self.num_warnings += 1

    def status_str(self, prefix=''):
        """Return string of validator status, with optional prefix."""
        s = ''
        for message in sorted(self.messages):
            s += prefix + message + '\n'
        return s[:-1]

    def __str__(self):
        """Return status string."""
        return self.status_str()
|
ocfl/validation_logger.py
|
codereval_python_data_70
|
Return string representation of validation log, with optional prefix.
def status_str(self, prefix=''):
    """Return the validation log's status report, each line prefixed with *prefix*."""
    log = self.log
    return log.status_str(prefix=prefix)
"""OCFL Validator.
Philosophy of this code is to keep it separate from the implementations
of Store, Object and Version used to build and manipulate OCFL data, but
to leverage lower level functions such as digest creation etc.. Code style
is plain/verbose with detailed and specific validation errors that might
help someone debug an implementation.
This code uses PyFilesystem (import fs) exclusively for access to files. This
should enable application beyond the operating system filesystem.
"""
import json
import re
import fs
from .digest import file_digest, normalized_digest
from .inventory_validator import InventoryValidator
from .namaste import find_namastes
from .pyfs import open_fs, ocfl_walk, ocfl_files_identical
from .validation_logger import ValidationLogger
class ValidatorAbortException(Exception):
    """Internal signal used to bail out of a validation run early."""
class Validator():
"""Class for OCFL Validator."""
def __init__(self, log=None, show_warnings=False, show_errors=True, check_digests=True, lax_digests=False, lang='en'):
    """Set up an OCFL validator, creating a default ValidationLogger when none is given."""
    self.log = log
    self.check_digests = check_digests
    self.lax_digests = lax_digests
    if self.log is None:
        self.log = ValidationLogger(show_warnings=show_warnings, show_errors=show_errors, lang=lang)
    # Extensions we recognize; unknown extension dirs draw a W013 warning
    self.registered_extensions = [
        '0001-digest-algorithms', '0002-flat-direct-storage-layout',
        '0003-hash-and-id-n-tuple-storage-layout', '0004-hashed-n-tuple-storage-layout',
        '0005-mutable-head'
    ]
    # Placeholders only -- real values are assigned by initialize() below
    self.id = None
    self.spec_version = None
    self.digest_algorithm = None
    self.content_directory = None
    self.inventory_digest_files = None
    self.root_inv_validator = None
    self.obj_fs = None
    self.initialize()
def initialize(self):
    """Reset per-object validation state.

    Must be called between attempts to validate objects.
    """
    self.id = None
    self.root_inv_validator = None
    self.obj_fs = None
    self.spec_version = '1.0'  # assumed until an object declaration says otherwise
    self.digest_algorithm = 'sha512'
    self.content_directory = 'content'
    self.inventory_digest_files = {}  # version_dir -> sidecar name; algorithms may differ
def status_str(self, prefix=''):
    """Return string representation of the validation log, with optional prefix."""
    report = self.log.status_str(prefix=prefix)
    return report
def __str__(self):
    """Stringify as the validation log's status report."""
    report = self.status_str()
    return report
def validate(self, path):
    """Validate OCFL object at path or pyfs root.

    path -- filesystem path (str) or an already-open pyfs filesystem object.

    Returns True if valid (warnings permitted), False otherwise.
    """
    # Reset all per-object state before starting a new validation run
    self.initialize()
    try:
        if isinstance(path, str):
            self.obj_fs = open_fs(path)
        else:
            self.obj_fs = path
            # Use the filesystem's description as the path for error reporting
            path = self.obj_fs.desc('')
    except fs.errors.CreateFailed:
        self.log.error('E003e', path=path)
        return False
    # Object declaration, set spec version number. If there are multiple declarations,
    # look for the latest object version then report any others as errors
    namastes = find_namastes(0, pyfs=self.obj_fs)
    if len(namastes) == 0:
        self.log.error('E003a', assumed_version=self.spec_version)
    else:
        spec_version = None
        for namaste in namastes:
            # Extract and check spec version number
            this_file_version = None
            for version in ('1.1', '1.0'):
                if namaste.filename == '0=ocfl_object_' + version:
                    this_file_version = version
                    break
            if this_file_version is None:
                self.log.error('E006', filename=namaste.filename)
            elif spec_version is None or this_file_version > spec_version:
                # String comparison works here because only '1.0'/'1.1' occur
                spec_version = this_file_version
            if not namaste.content_ok(pyfs=self.obj_fs):
                self.log.error('E007', filename=namaste.filename)
        if spec_version is None:
            self.log.error('E003c', assumed_version=self.spec_version)
        else:
            self.spec_version = spec_version
        if len(namastes) > 1:
            self.log.error('E003b', files=len(namastes), using_version=self.spec_version)
    # Object root inventory file
    inv_file = 'inventory.json'
    if not self.obj_fs.exists(inv_file):
        self.log.error('E063')
        return False
    try:
        inventory, inv_validator = self.validate_inventory(inv_file)
        # Remember validity now: content checks below only run on a valid inventory
        inventory_is_valid = self.log.num_errors == 0
        self.root_inv_validator = inv_validator
        all_versions = inv_validator.all_versions
        self.id = inv_validator.id
        self.content_directory = inv_validator.content_directory
        self.digest_algorithm = inv_validator.digest_algorithm
        self.validate_inventory_digest(inv_file, self.digest_algorithm)
        # Object root
        self.validate_object_root(all_versions, already_checked=[namaste.filename for namaste in namastes])
        # Version inventory files
        (prior_manifest_digests, prior_fixity_digests) = self.validate_version_inventories(all_versions)
        if inventory_is_valid:
            # Object content
            self.validate_content(inventory, all_versions, prior_manifest_digests, prior_fixity_digests)
    except ValidatorAbortException:
        # Abort signal: errors have already been logged, fall through to summary
        pass
    return self.log.num_errors == 0
def validate_inventory(self, inv_file, where='root', extract_spec_version=False):
    """Validate a given inventory file, record errors with self.log.error().

    Returns inventory object for use in later validation
    of object content. Does not look at anything else in the
    object itself.

    where - used for reporting messages of where inventory is in object
    extract_spec_version - if set True will attempt to take spec_version from the
        inventory itself instead of using the spec_version provided
    """
    try:
        with self.obj_fs.openbin(inv_file, 'r') as fh:
            inventory = json.load(fh)
    except json.decoder.JSONDecodeError as e:
        self.log.error('E033', where=where, explanation=str(e))
        # Nothing further can be checked without a parseable inventory
        raise ValidatorAbortException
    inv_validator = InventoryValidator(log=self.log, where=where,
                                       lax_digests=self.lax_digests,
                                       spec_version=self.spec_version)
    inv_validator.validate(inventory, extract_spec_version=extract_spec_version)
    return inventory, inv_validator
def validate_inventory_digest(self, inv_file, digest_algorithm, where="root"):
"""Validate the appropriate inventory digest file in path."""
inv_digest_file = inv_file + '.' + digest_algorithm
if not self.obj_fs.exists(inv_digest_file):
self.log.error('E058a', where=where, path=inv_digest_file)
else:
self.validate_inventory_digest_match(inv_file, inv_digest_file)
def validate_inventory_digest_match(self, inv_file, inv_digest_file):
"""Validate a given inventory digest for a given inventory file.
On error throws exception with debugging string intended to
be presented to a user.
"""
if not self.check_digests:
return
m = re.match(r'''.*\.(\w+)$''', inv_digest_file)
if m:
digest_algorithm = m.group(1)
try:
digest_recorded = self.read_inventory_digest(inv_digest_file)
digest_actual = file_digest(inv_file, digest_algorithm, pyfs=self.obj_fs)
if digest_actual != digest_recorded:
self.log.error("E060", inv_file=inv_file, actual=digest_actual, recorded=digest_recorded, inv_digest_file=inv_digest_file)
except Exception as e: # pylint: disable=broad-except
self.log.error("E061", description=str(e))
else:
self.log.error("E058b", inv_digest_file=inv_digest_file)
def validate_object_root(self, version_dirs, already_checked):
"""Validate object root.
All expected_files must be present and no other files.
All expected_dirs must be present and no other dirs.
"""
expected_files = ['0=ocfl_object_' + self.spec_version, 'inventory.json',
'inventory.json.' + self.digest_algorithm]
for entry in self.obj_fs.scandir(''):
if entry.is_file:
if entry.name not in expected_files and entry.name not in already_checked:
self.log.error('E001a', file=entry.name)
elif entry.is_dir:
if entry.name in version_dirs:
pass
elif entry.name == 'extensions':
self.validate_extensions_dir()
elif re.match(r'''v\d+$''', entry.name):
# Looks like a version directory so give more specific error
self.log.error('E046b', dir=entry.name)
else:
# Simply an unexpected directory
self.log.error('E001b', dir=entry.name)
else:
self.log.error('E001c', entry=entry.name)
def validate_extensions_dir(self):
"""Validate content of extensions directory inside object root.
Validate the extensions directory by checking that there aren't any
entries in the extensions directory that aren't directories themselves.
Where there are extension directories they SHOULD be registered and
this code relies up the registered_extensions property to list known
extensions.
"""
for entry in self.obj_fs.scandir('extensions'):
if entry.is_dir:
if entry.name not in self.registered_extensions:
self.log.warning('W013', entry=entry.name)
else:
self.log.error('E067', entry=entry.name)
    def validate_version_inventories(self, version_dirs):
        """Each version SHOULD have an inventory up to that point.

        Also keep a record of any content digests different from those in the
        root inventory so that we can also check them when validating the
        content.

        version_dirs is an array of version directory names and is assumed to
        be in version sequence (1, 2, 3...).

        Returns a tuple (prior_manifest_digests, prior_fixity_digests), each
        a dict of file -> algorithm -> digest -> [versions the value appears in].
        """
        prior_manifest_digests = {}  # file -> algorithm -> digest -> [versions]
        prior_fixity_digests = {}  # file -> algorithm -> digest -> [versions]
        if len(version_dirs) == 0:
            return prior_manifest_digests, prior_fixity_digests
        last_version = version_dirs[-1]
        prev_version_dir = "NONE"  # will be set for first directory with inventory
        prev_spec_version = '1.0'  # lowest spec version, seeds the E103 sequence check
        for version_dir in version_dirs:
            inv_file = fs.path.join(version_dir, 'inventory.json')
            if not self.obj_fs.exists(inv_file):
                # Version inventories are a SHOULD, not a MUST -- warn only
                self.log.warning('W010', where=version_dir)
                continue
            # There is an inventory file for this version directory, check it
            if version_dir == last_version:
                # Don't validate in this case. Per the spec the inventory in the last version
                # MUST be identical to the copy in the object root, just check that
                root_inv_file = 'inventory.json'
                if not ocfl_files_identical(self.obj_fs, inv_file, root_inv_file):
                    self.log.error('E064', root_inv_file=root_inv_file, inv_file=inv_file)
                else:
                    # We could also just compare digest files but this gives a more helpful error for
                    # which file has the incorrect digest if they don't match
                    self.validate_inventory_digest(inv_file, self.digest_algorithm, where=version_dir)
                self.inventory_digest_files[version_dir] = 'inventory.json.' + self.digest_algorithm
                this_spec_version = self.spec_version
            else:
                # Note that inventories in prior versions may use different digest
                # algorithms from the current inventory, and may conform to the same
                # or an earlier version of the specification
                version_inventory, inv_validator = self.validate_inventory(inv_file, where=version_dir, extract_spec_version=True)
                this_spec_version = inv_validator.spec_version
                digest_algorithm = inv_validator.digest_algorithm
                self.validate_inventory_digest(inv_file, digest_algorithm, where=version_dir)
                self.inventory_digest_files[version_dir] = 'inventory.json.' + digest_algorithm
                if self.id and 'id' in version_inventory:
                    if version_inventory['id'] != self.id:
                        self.log.error('E037b', where=version_dir, root_id=self.id, version_id=version_inventory['id'])
                if 'manifest' in version_inventory:
                    # Check that all files listed in prior inventories are in manifest
                    not_seen = set(prior_manifest_digests.keys())
                    for digest in version_inventory['manifest']:
                        for filepath in version_inventory['manifest'][digest]:
                            # We rely on the validation to check that anything present is OK
                            if filepath in not_seen:
                                not_seen.remove(filepath)
                    if len(not_seen) > 0:
                        self.log.error('E023b', where=version_dir, missing_filepaths=', '.join(sorted(not_seen)))
                    # Record all prior digests
                    for unnormalized_digest in version_inventory['manifest']:
                        digest = normalized_digest(unnormalized_digest, digest_type=digest_algorithm)
                        for filepath in version_inventory['manifest'][unnormalized_digest]:
                            if filepath not in prior_manifest_digests:
                                prior_manifest_digests[filepath] = {}
                            if digest_algorithm not in prior_manifest_digests[filepath]:
                                prior_manifest_digests[filepath][digest_algorithm] = {}
                            if digest not in prior_manifest_digests[filepath][digest_algorithm]:
                                prior_manifest_digests[filepath][digest_algorithm][digest] = []
                            prior_manifest_digests[filepath][digest_algorithm][digest].append(version_dir)
                # Is this inventory an appropriate prior version of the object root inventory?
                if self.root_inv_validator is not None:
                    self.root_inv_validator.validate_as_prior_version(inv_validator)
                # Fixity blocks are independent in each version. Record all values and the versions
                # they occur in for later checks against content
                if 'fixity' in version_inventory:
                    for digest_algorithm in version_inventory['fixity']:
                        for unnormalized_digest in version_inventory['fixity'][digest_algorithm]:
                            digest = normalized_digest(unnormalized_digest, digest_type=digest_algorithm)
                            for filepath in version_inventory['fixity'][digest_algorithm][unnormalized_digest]:
                                if filepath not in prior_fixity_digests:
                                    prior_fixity_digests[filepath] = {}
                                if digest_algorithm not in prior_fixity_digests[filepath]:
                                    prior_fixity_digests[filepath][digest_algorithm] = {}
                                if digest not in prior_fixity_digests[filepath][digest_algorithm]:
                                    prior_fixity_digests[filepath][digest_algorithm][digest] = []
                                prior_fixity_digests[filepath][digest_algorithm][digest].append(version_dir)
            # We are validating the inventories in sequence: each new version must
            # use the same or a later spec version than previous inventories
            if prev_spec_version > this_spec_version:
                self.log.error('E103', where=version_dir, this_spec_version=this_spec_version,
                               prev_version_dir=prev_version_dir, prev_spec_version=prev_spec_version)
            prev_version_dir = version_dir
            prev_spec_version = this_spec_version
        return prior_manifest_digests, prior_fixity_digests
    def validate_content(self, inventory, version_dirs, prior_manifest_digests, prior_fixity_digests):
        """Validate file presence and content against inventory.

        The root inventory in `inventory` is assumed to be valid and safe to
        use for construction of file paths etc..

        Walks every version directory collecting the content files actually
        present, then checks them against the root inventory's fixity and
        manifest blocks, and against the digests gathered from prior-version
        inventories.
        """
        files_seen = set()
        # Check files in each version directory
        for version_dir in version_dirs:
            try:
                # Check contents of version directory except content_directory
                for entry in self.obj_fs.listdir(version_dir):
                    if ((entry == 'inventory.json')
                            or (version_dir in self.inventory_digest_files and entry == self.inventory_digest_files[version_dir])):
                        pass
                    elif entry == self.content_directory:
                        # Check content_directory, accumulating every file found
                        content_path = fs.path.join(version_dir, self.content_directory)
                        num_content_files_in_version = 0
                        for dirpath, dirs, files in ocfl_walk(self.obj_fs, content_path):
                            # An empty sub-directory inside content is an error
                            if dirpath != '/' + content_path and (len(dirs) + len(files)) == 0:
                                self.log.error("E024", where=version_dir, path=dirpath)
                            for file in files:
                                files_seen.add(fs.path.join(dirpath, file).lstrip('/'))
                                num_content_files_in_version += 1
                        if num_content_files_in_version == 0:
                            self.log.warning("W003", where=version_dir)
                    elif self.obj_fs.isdir(fs.path.join(version_dir, entry)):
                        self.log.warning("W002", where=version_dir, entry=entry)
                    else:
                        self.log.error("E015", where=version_dir, entry=entry)
            except (fs.errors.ResourceNotFound, fs.errors.DirectoryExpected):
                self.log.error('E046a', version_dir=version_dir)
        # Extract any digests in fixity and organize by filepath
        fixity_digests = {}
        if 'fixity' in inventory:
            for digest_algorithm in inventory['fixity']:
                for digest in inventory['fixity'][digest_algorithm]:
                    for filepath in inventory['fixity'][digest_algorithm][digest]:
                        if filepath in files_seen:
                            if filepath not in fixity_digests:
                                fixity_digests[filepath] = {}
                            if digest_algorithm not in fixity_digests[filepath]:
                                fixity_digests[filepath][digest_algorithm] = {}
                            if digest not in fixity_digests[filepath][digest_algorithm]:
                                fixity_digests[filepath][digest_algorithm][digest] = ['root']
                        else:
                            # Fixity references a file that does not exist
                            self.log.error('E093b', where='root', digest_algorithm=digest_algorithm, digest=digest, content_path=filepath)
        # Check all files in root manifest
        if 'manifest' in inventory:
            for digest in inventory['manifest']:
                for filepath in inventory['manifest'][digest]:
                    if filepath not in files_seen:
                        self.log.error('E092b', where='root', content_path=filepath)
                    else:
                        if self.check_digests:
                            content_digest = file_digest(filepath, digest_type=self.digest_algorithm, pyfs=self.obj_fs)
                            if content_digest != normalized_digest(digest, digest_type=self.digest_algorithm):
                                self.log.error('E092a', where='root', digest_algorithm=self.digest_algorithm, digest=digest, content_path=filepath, content_digest=content_digest)
                            # Cache computed digests so additional checks don't recompute
                            known_digests = {self.digest_algorithm: content_digest}
                            # Are there digest values in the fixity block?
                            self.check_additional_digests(filepath, known_digests, fixity_digests, 'E093a')
                            # Are there other digests for this same file from other inventories?
                            self.check_additional_digests(filepath, known_digests, prior_manifest_digests, 'E092a')
                            self.check_additional_digests(filepath, known_digests, prior_fixity_digests, 'E093a')
                        files_seen.discard(filepath)
        # Anything left in files_seen is not mentioned in the inventory
        if len(files_seen) > 0:
            self.log.error('E023a', where='root', extra_files=', '.join(sorted(files_seen)))
def check_additional_digests(self, filepath, known_digests, additional_digests, error_code):
"""Check all the additional digests for filepath.
This method is intended to be used both for manifest digests in prior versions and
for fixity digests. The digests_seen dict is used to store any values calculated
so that we don't recalculate digests that might appear multiple times. It is added to
with any additional values calculated.
Parameters:
filepath - path of file in object (`v1/content/something` etc.)
known_digests - dict of algorithm->digest that we have calculated
additional_digests - dict: filepath -> algorithm -> digest -> [versions appears in]
error_code - error code to log on mismatch (E092a for manifest, E093a for fixity)
"""
if filepath in additional_digests:
for digest_algorithm in additional_digests[filepath]:
if digest_algorithm in known_digests:
# Don't recompute anything, just use it if we've seen it before
content_digest = known_digests[digest_algorithm]
else:
content_digest = file_digest(filepath, digest_type=digest_algorithm, pyfs=self.obj_fs)
known_digests[digest_algorithm] = content_digest
for digest in additional_digests[filepath][digest_algorithm]:
if content_digest != normalized_digest(digest, digest_type=digest_algorithm):
where = ','.join(additional_digests[filepath][digest_algorithm][digest])
self.log.error(error_code, where=where, digest_algorithm=digest_algorithm, digest=digest, content_path=filepath, content_digest=content_digest)
def read_inventory_digest(self, inv_digest_file):
"""Read inventory digest from sidecar file.
Raise exception if there is an error, else return digest.
"""
with self.obj_fs.open(inv_digest_file, 'r') as fh:
line = fh.readline()
# we ignore any following lines, could raise exception
m = re.match(r'''(\w+)\s+(\S+)\s*$''', line)
if not m:
raise Exception("Bad inventory digest file %s, wrong format" % (inv_digest_file))
if m.group(2) != 'inventory.json':
raise Exception("Bad inventory name in inventory digest file %s" % (inv_digest_file))
return m.group(1)
|
ocfl/validator.py
|
codereval_python_data_71
|
Return True if identifier is valid, always True in this base implementation.
    def is_valid(self, identifier): # pylint: disable=unused-argument
        """Return True if identifier is valid, always True in this base implementation.

        Subclasses may override to impose real constraints on identifiers.
        """
        # Base implementation performs no validation at all
        return True
"""Base class for Dispositor objects."""
import os
import os.path
from urllib.parse import quote_plus, unquote_plus
class Dispositor:
    """Base class for disposition handlers -- let's call them Dispositors.

    A Dispositor maps between object identifiers and storage paths. This
    base class provides percent-encoding helpers and root/path handling;
    subclasses implement the identifier <-> path mapping.
    """

    def strip_root(self, path, root):
        """Remove root from path, throw exception on failure.

        Returns the remainder of path relative to root.
        """
        root = root.rstrip(os.sep)  # ditch any trailing path separator
        if os.path.commonprefix((path, root)) == root:
            return os.path.relpath(path, start=root)
        raise Exception("Path %s is not in root %s" % (path, root))

    def is_valid(self, identifier):  # pylint: disable=unused-argument
        """Return True if identifier is valid, always True in this base implementation."""
        return True

    def encode(self, identifier):
        """Encode identifier to get rid of unsafe chars."""
        return quote_plus(identifier)

    def decode(self, identifier):
        """Decode identifier to put back unsafe chars."""
        return unquote_plus(identifier)

    def identifier_to_path(self, identifier):
        """Convert identifier to path relative to some root.

        Must be implemented by subclasses. (Fixed typo in message and use
        NotImplementedError -- still an Exception subclass for callers.)
        """
        raise NotImplementedError("Not yet implemented")

    def relative_path_to_identifier(self, path):
        """Convert relative path to identifier.

        Must be implemented by subclasses.
        """
        raise NotImplementedError("Not yet implemented")

    def path_to_identifier(self, path, root=None):
        """Convert path relative to root to identifier."""
        if root is not None:
            path = self.strip_root(path, root)
        return self.relative_path_to_identifier(path)
|
ocfl/dispositor.py
|
codereval_python_data_72
|
Validate a given inventory.
If extract_spec_version is True then will look at the type value to determine
the specification version. In the case that there is no type value or it isn't
valid, then other tests will be based on the version given in self.spec_version.
    def validate(self, inventory, extract_spec_version=False):
        """Validate a given inventory.

        If extract_spec_version is True then will look at the type value to determine
        the specification version. In the case that there is no type value or it isn't
        valid, then other tests will be based on the version given in self.spec_version.
        """
        # Basic structure
        self.inventory = inventory
        # -- id
        if 'id' in inventory:
            iid = inventory['id']
            if not isinstance(iid, str) or iid == '':
                self.error("E037a")
            else:
                # URI syntax https://www.rfc-editor.org/rfc/rfc3986.html#section-3.1 :
                # scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
                if not re.match(r'''[a-z][a-z\d\+\-\.]*:.+''', iid, re.IGNORECASE):
                    self.warning("W005", id=iid)
                self.id = iid
        else:
            self.error("E036a")
        # -- type (and possibly spec version extraction)
        if 'type' not in inventory:
            self.error("E036b")
        elif not isinstance(inventory['type'], str):
            self.error("E999")
        elif extract_spec_version:
            m = re.match(r'''https://ocfl.io/(\d+.\d)/spec/#inventory''', inventory['type'])
            if not m:
                self.error('E038b', got=inventory['type'], assumed_spec_version=self.spec_version)
            elif m.group(1) in self.spec_versions_supported:
                self.spec_version = m.group(1)
            else:
                self.error("E038c", got=m.group(1), assumed_spec_version=self.spec_version)
        elif inventory['type'] != 'https://ocfl.io/' + self.spec_version + '/spec/#inventory':
            self.error("E038a", expected='https://ocfl.io/' + self.spec_version + '/spec/#inventory', got=inventory['type'])
        # -- digestAlgorithm (sha512 default, sha256 warned, others per lax_digests)
        if 'digestAlgorithm' not in inventory:
            self.error("E036c")
        elif inventory['digestAlgorithm'] == 'sha512':
            pass
        elif self.lax_digests:
            self.digest_algorithm = inventory['digestAlgorithm']
        elif inventory['digestAlgorithm'] == 'sha256':
            self.warning("W004")
            self.digest_algorithm = inventory['digestAlgorithm']
        else:
            self.error("E039", digest_algorithm=inventory['digestAlgorithm'])
        # -- contentDirectory
        if 'contentDirectory' in inventory:
            # Careful only to set self.content_directory if value is safe
            cd = inventory['contentDirectory']
            if not isinstance(cd, str) or '/' in cd:
                self.error("E017")
            elif cd in ('.', '..'):
                self.error("E018")
            else:
                self.content_directory = cd
        # -- manifest
        manifest_files_correct_format = None
        if 'manifest' not in inventory:
            self.error("E041a")
        else:
            (self.manifest_files, manifest_files_correct_format, self.unnormalized_digests) = self.validate_manifest(inventory['manifest'])
        # -- versions
        digests_used = []
        if 'versions' not in inventory:
            self.error("E041b")
        else:
            self.all_versions = self.validate_version_sequence(inventory['versions'])
            digests_used = self.validate_versions(inventory['versions'], self.all_versions, self.unnormalized_digests)
        # -- head (must name the last version in sequence)
        if 'head' not in inventory:
            self.error("E036d")
        elif len(self.all_versions) > 0:
            self.head = self.all_versions[-1]
            if inventory['head'] != self.head:
                self.error("E040", got=inventory['head'], expected=self.head)
        if len(self.all_versions) == 0:
            # Abort tests if we don't have a valid version sequence, otherwise
            # there will likely be spurious subsequent error reports
            return
        # -- cross checks between manifest and versions
        if len(self.all_versions) > 0:
            if manifest_files_correct_format is not None:
                self.check_content_paths_map_to_versions(manifest_files_correct_format, self.all_versions)
            if self.manifest_files is not None:
                self.check_digests_present_and_used(self.manifest_files, digests_used)
        # -- fixity (optional)
        if 'fixity' in inventory:
            self.validate_fixity(inventory['fixity'], self.manifest_files)
"""OCFL Inventory Validator.
Code to validate the Python representation of an OCFL Inventory
as read with json.load(). Does not examine anything in storage.
"""
import re
from .digest import digest_regex, normalized_digest
from .validation_logger import ValidationLogger
from .w3c_datetime import str_to_datetime
def get_logical_path_map(inventory, version):
    """Get a map of logical paths in state to files on disk for version in inventory.

    Returns a dictionary: logical_path_in_state -> set(content_files)

    State digests with no corresponding manifest entry are silently skipped.
    The set of content_files may include references to duplicate files in
    later versions than the version being described.
    """
    manifest = inventory['manifest']
    state = inventory['versions'][version]['state']
    return {
        logical_path: set(manifest[digest])
        for digest, logical_paths in state.items()
        if digest in manifest
        for logical_path in logical_paths
    }
class InventoryValidator():
"""Class for OCFL Inventory Validator."""
    def __init__(self, log=None, where='???',
                 lax_digests=False, spec_version='1.0'):
        """Initialize OCFL Inventory Validator.

        Arguments:
            log: ValidationLogger used to record errors and warnings;
                a new one is created if None
            where: label used in logged messages to locate this inventory
            lax_digests: if True, accept any digest algorithm name
            spec_version: OCFL specification version to validate against
        """
        self.log = ValidationLogger() if log is None else log
        self.where = where
        self.spec_version = spec_version
        # Object state
        self.inventory = None  # parsed inventory once validate() is called
        self.id = None  # object id from the inventory, once seen
        self.digest_algorithm = 'sha512'  # spec default content digest algorithm
        self.content_directory = 'content'  # spec default contentDirectory
        self.all_versions = []  # in-sequence version directory names
        self.manifest_files = None  # file -> normalized digest from manifest
        self.unnormalized_digests = None  # manifest digests exactly as written
        self.head = 'UNKNOWN'  # expected head version name
        # Validation control
        self.lax_digests = lax_digests
        # Configuration
        self.spec_versions_supported = ('1.0', '1.1')
def error(self, code, **args):
"""Error with added context."""
self.log.error(code, where=self.where, **args)
def warning(self, code, **args):
"""Warning with added context."""
self.log.warning(code, where=self.where, **args)
    def validate(self, inventory, extract_spec_version=False):
        """Validate a given inventory.

        If extract_spec_version is True then will look at the type value to determine
        the specification version. In the case that there is no type value or it isn't
        valid, then other tests will be based on the version given in self.spec_version.
        """
        # Basic structure
        self.inventory = inventory
        # -- id
        if 'id' in inventory:
            iid = inventory['id']
            if not isinstance(iid, str) or iid == '':
                self.error("E037a")
            else:
                # URI syntax https://www.rfc-editor.org/rfc/rfc3986.html#section-3.1 :
                # scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
                if not re.match(r'''[a-z][a-z\d\+\-\.]*:.+''', iid, re.IGNORECASE):
                    self.warning("W005", id=iid)
                self.id = iid
        else:
            self.error("E036a")
        # -- type (and possibly spec version extraction)
        if 'type' not in inventory:
            self.error("E036b")
        elif not isinstance(inventory['type'], str):
            self.error("E999")
        elif extract_spec_version:
            m = re.match(r'''https://ocfl.io/(\d+.\d)/spec/#inventory''', inventory['type'])
            if not m:
                self.error('E038b', got=inventory['type'], assumed_spec_version=self.spec_version)
            elif m.group(1) in self.spec_versions_supported:
                self.spec_version = m.group(1)
            else:
                self.error("E038c", got=m.group(1), assumed_spec_version=self.spec_version)
        elif inventory['type'] != 'https://ocfl.io/' + self.spec_version + '/spec/#inventory':
            self.error("E038a", expected='https://ocfl.io/' + self.spec_version + '/spec/#inventory', got=inventory['type'])
        # -- digestAlgorithm (sha512 default, sha256 warned, others per lax_digests)
        if 'digestAlgorithm' not in inventory:
            self.error("E036c")
        elif inventory['digestAlgorithm'] == 'sha512':
            pass
        elif self.lax_digests:
            self.digest_algorithm = inventory['digestAlgorithm']
        elif inventory['digestAlgorithm'] == 'sha256':
            self.warning("W004")
            self.digest_algorithm = inventory['digestAlgorithm']
        else:
            self.error("E039", digest_algorithm=inventory['digestAlgorithm'])
        # -- contentDirectory
        if 'contentDirectory' in inventory:
            # Careful only to set self.content_directory if value is safe
            cd = inventory['contentDirectory']
            if not isinstance(cd, str) or '/' in cd:
                self.error("E017")
            elif cd in ('.', '..'):
                self.error("E018")
            else:
                self.content_directory = cd
        # -- manifest
        manifest_files_correct_format = None
        if 'manifest' not in inventory:
            self.error("E041a")
        else:
            (self.manifest_files, manifest_files_correct_format, self.unnormalized_digests) = self.validate_manifest(inventory['manifest'])
        # -- versions
        digests_used = []
        if 'versions' not in inventory:
            self.error("E041b")
        else:
            self.all_versions = self.validate_version_sequence(inventory['versions'])
            digests_used = self.validate_versions(inventory['versions'], self.all_versions, self.unnormalized_digests)
        # -- head (must name the last version in sequence)
        if 'head' not in inventory:
            self.error("E036d")
        elif len(self.all_versions) > 0:
            self.head = self.all_versions[-1]
            if inventory['head'] != self.head:
                self.error("E040", got=inventory['head'], expected=self.head)
        if len(self.all_versions) == 0:
            # Abort tests if we don't have a valid version sequence, otherwise
            # there will likely be spurious subsequent error reports
            return
        # -- cross checks between manifest and versions
        if len(self.all_versions) > 0:
            if manifest_files_correct_format is not None:
                self.check_content_paths_map_to_versions(manifest_files_correct_format, self.all_versions)
            if self.manifest_files is not None:
                self.check_digests_present_and_used(self.manifest_files, digests_used)
        # -- fixity (optional)
        if 'fixity' in inventory:
            self.validate_fixity(inventory['fixity'], self.manifest_files)
    def validate_manifest(self, manifest):
        """Validate manifest block in inventory.

        Returns:
          * manifest_files - a mapping from file to digest for each file in
            the manifest
          * manifest_files_correct_format - a simple list of the manifest file
            paths that passed initial checks. They need to be checked for valid
            version directories later, when we know what version directories
            are valid
          * unnormalized_digests - a set of the original digests in unnormalized
            form that MUST match exactly the values used in state blocks
        """
        manifest_files = {}
        manifest_files_correct_format = []
        unnormalized_digests = set()
        manifest_digests = set()
        if not isinstance(manifest, dict):
            self.error('E041c')
        else:
            content_paths = set()
            content_directories = set()
            for digest in manifest:
                m = re.match(self.digest_regex(), digest)
                if not m:
                    self.error('E025a', digest=digest, algorithm=self.digest_algorithm)  # wrong form of digest
                elif not isinstance(manifest[digest], list):
                    self.error('E092', digest=digest)  # must have path list value
                else:
                    unnormalized_digests.add(digest)
                    norm_digest = normalized_digest(digest, self.digest_algorithm)
                    if norm_digest in manifest_digests:
                        # We have already seen this in different un-normalized form!
                        self.error("E096", digest=norm_digest)
                    else:
                        manifest_digests.add(norm_digest)
                    for file in manifest[digest]:
                        manifest_files[file] = norm_digest
                        if self.check_content_path(file, content_paths, content_directories):
                            manifest_files_correct_format.append(file)
            # Check for conflicting content paths (a path that is also a directory)
            for path in content_directories:
                if path in content_paths:
                    self.error("E101b", path=path)
        return manifest_files, manifest_files_correct_format, unnormalized_digests
def validate_fixity(self, fixity, manifest_files):
"""Validate fixity block in inventory.
Check the structure of the fixity block and makes sure that only files
listed in the manifest are referenced.
"""
if not isinstance(fixity, dict):
# The value of fixity must be a JSON object. In v1.0 I catch not an object
# as part of E056 but this was clarified as E111 in v1.1. The value may
# be an empty object in either case
self.error('E056a' if self.spec_version == '1.0' else 'E111')
else:
for digest_algorithm in fixity:
known_digest = True
try:
regex = digest_regex(digest_algorithm)
except ValueError:
if not self.lax_digests:
self.error('E056b', algorithm=self.digest_algorithm)
continue
# Match anything
regex = r'''^.*$'''
known_digest = False
fixity_algoritm_block = fixity[digest_algorithm]
if not isinstance(fixity_algoritm_block, dict):
self.error('E057a', algorithm=self.digest_algorithm)
else:
digests_seen = set()
for digest in fixity_algoritm_block:
m = re.match(regex, digest)
if not m:
self.error('E057b', digest=digest, algorithm=digest_algorithm) # wrong form of digest
elif not isinstance(fixity_algoritm_block[digest], list):
self.error('E057c', digest=digest, algorithm=digest_algorithm) # must have path list value
else:
if known_digest:
norm_digest = normalized_digest(digest, digest_algorithm)
else:
norm_digest = digest
if norm_digest in digests_seen:
# We have already seen this in different un-normalized form!
self.error("E097", digest=norm_digest, algorithm=digest_algorithm)
else:
digests_seen.add(norm_digest)
for file in fixity_algoritm_block[digest]:
if file not in manifest_files:
self.error("E057d", digest=norm_digest, algorithm=digest_algorithm, path=file)
    def validate_version_sequence(self, versions):
        """Validate sequence of version names in versions block in inventory.

        Returns an array of in-sequence version directories that are part
        of a valid sequence. May exclude other version directory names that
        are not part of the valid sequence if an error is thrown.
        """
        all_versions = []
        if not isinstance(versions, dict):
            self.error("E044")
            return all_versions
        if len(versions) == 0:
            self.error("E008")
            return all_versions
        # Validate version sequence
        # https://ocfl.io/draft/spec/#version-directories
        zero_padded = None
        max_version_num = 999999  # Excessive limit, tightened below for zero-padded names
        if 'v1' in versions:
            fmt = 'v%d'
            zero_padded = False
            all_versions.append('v1')
        else:  # Find padding size
            for n in range(2, 11):
                fmt = 'v%0' + str(n) + 'd'
                vkey = fmt % 1
                if vkey in versions:
                    all_versions.append(vkey)
                    zero_padded = n
                    # Highest number representable without breaking the padding
                    max_version_num = (10 ** (n - 1)) - 1
                    break
            if not zero_padded:
                # Neither v1 nor any zero-padded first version was found
                self.error("E009")
                return all_versions
        if zero_padded:
            self.warning("W001")
        # Have v1 and know format, work through to check sequence
        for n in range(2, max_version_num + 1):
            v = (fmt % n)
            if v in versions:
                all_versions.append(v)
            else:
                if len(versions) != (n - 1):
                    self.error("E010")  # Extra version dirs outside sequence
                return all_versions
        # We have now included all possible versions up to the zero padding
        # size, if there are more versions than this number then we must
        # have extra that violate the zero-padding rule or are out of
        # sequence
        if len(versions) > max_version_num:
            self.error("E011")
        return all_versions
    def validate_versions(self, versions, all_versions, unnormalized_digests):
        """Validate versions blocks in inventory.

        Requires as input two things which are assumed to be structurally correct
        from prior basic validation:
          * versions - which is the JSON object (dict) from the inventory
          * all_versions - an ordered list of the versions to look at in versions
            (all other keys in versions will be ignored)

        Returns a list of digests_used which can then be checked against the
        manifest.
        """
        digests_used = []
        for v in all_versions:
            version = versions[v]
            # -- created timestamp
            if 'created' not in version:
                self.error('E048', version=v)  # No created
            elif not isinstance(versions[v]['created'], str):
                self.error('E049d', version=v)  # Bad created
            else:
                created = versions[v]['created']
                try:
                    str_to_datetime(created)  # catch ValueError if fails
                    if not re.search(r'''(Z|[+-]\d\d:\d\d)$''', created):  # FIXME - kludge
                        self.error('E049a', version=v)
                    if not re.search(r'''T\d\d:\d\d:\d\d''', created):  # FIXME - kludge
                        self.error('E049b', version=v)
                except ValueError as e:
                    self.error('E049c', version=v, description=str(e))
            # -- state block (required)
            if 'state' in version:
                digests_used += self.validate_state_block(version['state'], version=v, unnormalized_digests=unnormalized_digests)
            else:
                self.error('E048c', version=v)
            # -- message (recommended)
            if 'message' not in version:
                self.warning('W007a', version=v)
            elif not isinstance(version['message'], str):
                self.error('E094', version=v)
            # -- user (recommended; name required, address recommended)
            if 'user' not in version:
                self.warning('W007b', version=v)
            else:
                user = version['user']
                if not isinstance(user, dict):
                    self.error('E054a', version=v)
                else:
                    if 'name' not in user or not isinstance(user['name'], str):
                        self.error('E054b', version=v)
                    if 'address' not in user:
                        self.warning('W008', version=v)
                    elif not isinstance(user['address'], str):
                        self.error('E054c', version=v)
                    elif not re.match(r'''\w{3,6}:''', user['address']):
                        self.warning('W009', version=v)
        return digests_used
    def validate_state_block(self, state, version, unnormalized_digests):
        """Validate state block in a version in an inventory.

        The version is used only for error reporting.

        Returns a list of content digests referenced in the state block.
        """
        digests = []
        logical_paths = set()
        logical_directories = set()
        if not isinstance(state, dict):
            self.error('E050c', version=version)
        else:
            # Compile once; the same pattern is tested against every digest key
            digest_re = re.compile(self.digest_regex())
            for digest in state:
                if not digest_re.match(digest):
                    self.error('E050d', version=version, digest=digest)
                elif not isinstance(state[digest], list):
                    self.error('E050e', version=version, digest=digest)
                else:
                    for path in state[digest]:
                        if path in logical_paths:
                            # Same logical path used more than once in this state
                            self.error("E095a", version=version, path=path)
                        else:
                            self.check_logical_path(path, version, logical_paths, logical_directories)
                    if digest not in unnormalized_digests:
                        # Exact string value must match, not just normalized
                        self.error("E050f", version=version, digest=digest)
                    norm_digest = normalized_digest(digest, self.digest_algorithm)
                    digests.append(norm_digest)
            # Check for conflicting logical paths (a path that is also a directory)
            for path in logical_directories:
                if path in logical_paths:
                    self.error("E095b", version=version, path=path)
        return digests
def check_content_paths_map_to_versions(self, manifest_files, all_versions):
"""Check that every content path starts with a valid version.
The content directory component has already been checked in
check_content_path(). We have already tested all paths enough
to know that they can be split into at least 2 components.
"""
for path in manifest_files:
version_dir, dummy_rest = path.split('/', 1)
if version_dir not in all_versions:
self.error('E042b', path=path)
def check_digests_present_and_used(self, manifest_files, digests_used):
"""Check all digests in manifest that are needed are present and used."""
in_manifest = set(manifest_files.values())
in_state = set(digests_used)
not_in_manifest = in_state.difference(in_manifest)
if len(not_in_manifest) > 0:
self.error("E050a", digests=", ".join(sorted(not_in_manifest)))
not_in_state = in_manifest.difference(in_state)
if len(not_in_state) > 0:
self.error("E107", digests=", ".join(sorted(not_in_state)))
    def digest_regex(self):
        """Return regex for validating un-normalized digest format.

        Looks up the pattern for self.digest_algorithm via the module-level
        digest_regex() helper (the method deliberately shares its name; the
        bare call below resolves to the imported function). If the algorithm
        is unknown, report E026a unless lax_digests is set, and fall back to
        a match-anything pattern either way.
        """
        try:
            return digest_regex(self.digest_algorithm)
        except ValueError:
            if not self.lax_digests:
                # NOTE(review): kwarg is named 'digest' but carries the
                # algorithm name — matches the E026a message template
                self.error('E026a', digest=self.digest_algorithm)
        # Match anything
        return r'''^.*$'''
def check_logical_path(self, path, version, logical_paths, logical_directories):
"""Check logical path and accumulate paths/directories for E095b check.
logical_paths and logical_directories are expected to be sets.
Only adds good paths to the accumulated paths/directories.
"""
if path.startswith('/') or path.endswith('/'):
self.error("E053", version=version, path=path)
else:
elements = path.split('/')
for element in elements:
if element in ['.', '..', '']:
self.error("E052", version=version, path=path)
return
# Accumulate paths and directories
logical_paths.add(path)
logical_directories.add('/'.join(elements[0:-1]))
def check_content_path(self, path, content_paths, content_directories):
"""Check logical path and accumulate paths/directories for E101 check.
Returns True if valid, else False. Only adds good paths to the
accumulated paths/directories. We don't yet know the set of valid
version directories so the check here is just for 'v' + digits.
"""
if path.startswith('/') or path.endswith('/'):
self.error("E100", path=path)
return False
m = re.match(r'''^(v\d+/''' + self.content_directory + r''')/(.+)''', path)
if not m:
self.error("E042a", path=path)
return False
elements = m.group(2).split('/')
for element in elements:
if element in ('', '.', '..'):
self.error("E099", path=path)
return False
# Accumulate paths and directories if not seen before
if path in content_paths:
self.error("E101a", path=path)
return False
content_paths.add(path)
content_directories.add('/'.join([m.group(1)] + elements[0:-1]))
return True
    def validate_as_prior_version(self, prior):
        """Check that prior is a valid prior version of the current inventory object.

        The input variable prior is also expected to be an InventoryValidator object
        and both self and prior inventories are assumed to have been checked for
        internal consistency.
        """
        # Must have a subset of versions which also checks zero padding format etc.
        if not set(prior.all_versions) < set(self.all_versions):
            self.error('E066a', prior_head=prior.head)
        else:
            # Check references to files but realize that there might be different
            # digest algorithms between versions
            version = 'no-version'
            for version in prior.all_versions:
                # If the digest algorithm is the same then we can make a
                # direct check on whether the state blocks match
                if prior.digest_algorithm == self.digest_algorithm:
                    self.compare_states_for_version(prior, version)
                # Now check the mappings from state to logical path, which must
                # be consistent even if the digestAlgorithm is different between
                # versions. Get maps from logical paths to files on disk:
                prior_map = get_logical_path_map(prior.inventory, version)
                self_map = get_logical_path_map(self.inventory, version)
                # Look first for differences in logical paths listed
                only_in_prior = prior_map.keys() - self_map.keys()
                only_in_self = self_map.keys() - prior_map.keys()
                if only_in_prior or only_in_self:
                    if only_in_prior:
                        self.error('E066b', version=version, prior_head=prior.head, only_in=prior.head, logical_paths=','.join(only_in_prior))
                    if only_in_self:
                        self.error('E066b', version=version, prior_head=prior.head, only_in=self.where, logical_paths=','.join(only_in_self))
                else:
                    # Same logical paths on both sides: the content files each
                    # path maps to in prior must all still be mapped in self
                    for logical_path, this_map in prior_map.items():
                        if not this_map.issubset(self_map[logical_path]):
                            self.error('E066c', version=version, prior_head=prior.head,
                                       logical_path=logical_path, prior_content=','.join(this_map),
                                       current_content=','.join(self_map[logical_path]))
                # Check version metadata agrees between the two descriptions
                prior_version = prior.inventory['versions'][version]
                self_version = self.inventory['versions'][version]
                for key in ('created', 'message', 'user'):
                    if prior_version.get(key) != self_version.get(key):
                        self.warning('W011', version=version, prior_head=prior.head, key=key)
def compare_states_for_version(self, prior, version):
"""Compare state blocks for version between self and prior.
Assumes the same digest algorithm in both, do not call otherwise!
Looks only for digests that appear in one but not in the other, the code
in validate_as_prior_version(..) does a check for whether the same sets
of logical files appear and we don't want to duplicate an error message
about that.
While the mapping checks in validate_as_prior_version(..) do all that is
necessary to detect an error, the additional errors that may be generated
here provide more detailed diagnostics in the case that the digest
algorithm is the same across versions being compared.
"""
self_state = self.inventory['versions'][version]['state']
prior_state = prior.inventory['versions'][version]['state']
for digest in set(self_state.keys()).union(prior_state.keys()):
if digest not in prior_state:
self.error('E066d', version=version, prior_head=prior.head,
digest=digest, logical_files=', '.join(self_state[digest]))
elif digest not in self_state:
self.error('E066e', version=version, prior_head=prior.head,
digest=digest, logical_files=', '.join(prior_state[digest]))
|
ocfl/inventory_validator.py
|
codereval_python_data_73
|
Check all digests in manifest that are needed are present and used.
def check_digests_present_and_used(self, manifest_files, digests_used):
"""Check all digests in manifest that are needed are present and used."""
in_manifest = set(manifest_files.values())
in_state = set(digests_used)
not_in_manifest = in_state.difference(in_manifest)
if len(not_in_manifest) > 0:
self.error("E050a", digests=", ".join(sorted(not_in_manifest)))
not_in_state = in_manifest.difference(in_state)
if len(not_in_state) > 0:
self.error("E107", digests=", ".join(sorted(not_in_state)))
"""OCFL Inventory Validator.
Code to validate the Python representation of an OCFL Inventory
as read with json.load(). Does not examine anything in storage.
"""
import re
from .digest import digest_regex, normalized_digest
from .validation_logger import ValidationLogger
from .w3c_datetime import str_to_datetime
def get_logical_path_map(inventory, version):
    """Get a map of logical paths in state to files on disk for version in inventory.

    Returns a dictionary: logical_path_in_state -> set(content_files)

    The set of content_files may include references to duplicate files in
    later versions than the version being described.
    """
    manifest = inventory['manifest']
    state = inventory['versions'][version]['state']
    # Digests in state that are missing from the manifest are skipped here;
    # they are reported separately by the validator
    return {
        logical_path: set(manifest[digest])
        for digest, logical_paths in state.items()
        if digest in manifest
        for logical_path in logical_paths
    }
class InventoryValidator():
    """Class for OCFL Inventory Validator."""

    def __init__(self, log=None, where='???',
                 lax_digests=False, spec_version='1.0'):
        """Initialize OCFL Inventory Validator.

        Arguments:
            log: ValidationLogger used to record errors/warnings (a fresh
                logger is created if None)
            where: location string included as context in every message
            lax_digests: if True, accept digest algorithms beyond the spec set
            spec_version: OCFL specification version to validate against
        """
        self.log = ValidationLogger() if log is None else log
        self.where = where
        self.spec_version = spec_version
        # Object state (filled in by validate())
        self.inventory = None
        self.id = None
        self.digest_algorithm = 'sha512'
        self.content_directory = 'content'
        self.all_versions = []
        self.manifest_files = None
        self.unnormalized_digests = None
        self.head = 'UNKNOWN'
        # Validation control
        self.lax_digests = lax_digests
        # Configuration
        self.spec_versions_supported = ('1.0', '1.1')
    def error(self, code, **args):
        """Log an error with this validator's location context added."""
        self.log.error(code, where=self.where, **args)
    def warning(self, code, **args):
        """Log a warning with this validator's location context added."""
        self.log.warning(code, where=self.where, **args)
    def validate(self, inventory, extract_spec_version=False):
        """Validate a given inventory.

        If extract_spec_version is True then will look at the type value to determine
        the specification version. In the case that there is no type value or it isn't
        valid, then other tests will be based on the version given in self.spec_version.
        """
        # Basic structure
        self.inventory = inventory
        if 'id' in inventory:
            iid = inventory['id']
            if not isinstance(iid, str) or iid == '':
                self.error("E037a")
            else:
                # URI syntax https://www.rfc-editor.org/rfc/rfc3986.html#section-3.1 :
                # scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
                if not re.match(r'''[a-z][a-z\d\+\-\.]*:.+''', iid, re.IGNORECASE):
                    self.warning("W005", id=iid)
                self.id = iid
        else:
            self.error("E036a")
        if 'type' not in inventory:
            self.error("E036b")
        elif not isinstance(inventory['type'], str):
            self.error("E999")
        elif extract_spec_version:
            m = re.match(r'''https://ocfl.io/(\d+.\d)/spec/#inventory''', inventory['type'])
            if not m:
                self.error('E038b', got=inventory['type'], assumed_spec_version=self.spec_version)
            elif m.group(1) in self.spec_versions_supported:
                self.spec_version = m.group(1)
            else:
                self.error("E038c", got=m.group(1), assumed_spec_version=self.spec_version)
        elif inventory['type'] != 'https://ocfl.io/' + self.spec_version + '/spec/#inventory':
            self.error("E038a", expected='https://ocfl.io/' + self.spec_version + '/spec/#inventory', got=inventory['type'])
        if 'digestAlgorithm' not in inventory:
            self.error("E036c")
        elif inventory['digestAlgorithm'] == 'sha512':
            pass
        elif self.lax_digests:
            self.digest_algorithm = inventory['digestAlgorithm']
        elif inventory['digestAlgorithm'] == 'sha256':
            self.warning("W004")
            self.digest_algorithm = inventory['digestAlgorithm']
        else:
            self.error("E039", digest_algorithm=inventory['digestAlgorithm'])
        if 'contentDirectory' in inventory:
            # Careful only to set self.content_directory if value is safe
            cd = inventory['contentDirectory']
            if not isinstance(cd, str) or '/' in cd:
                self.error("E017")
            elif cd in ('.', '..'):
                self.error("E018")
            else:
                self.content_directory = cd
        manifest_files_correct_format = None
        if 'manifest' not in inventory:
            self.error("E041a")
        else:
            (self.manifest_files, manifest_files_correct_format, self.unnormalized_digests) = self.validate_manifest(inventory['manifest'])
        digests_used = []
        if 'versions' not in inventory:
            self.error("E041b")
        else:
            self.all_versions = self.validate_version_sequence(inventory['versions'])
            digests_used = self.validate_versions(inventory['versions'], self.all_versions, self.unnormalized_digests)
        if 'head' not in inventory:
            self.error("E036d")
        elif len(self.all_versions) > 0:
            self.head = self.all_versions[-1]
            if inventory['head'] != self.head:
                self.error("E040", got=inventory['head'], expected=self.head)
        if len(self.all_versions) == 0:
            # Abort tests if we don't have a valid version sequence, otherwise
            # there will likely be spurious subsequent error reports
            return
        if len(self.all_versions) > 0:
            if manifest_files_correct_format is not None:
                self.check_content_paths_map_to_versions(manifest_files_correct_format, self.all_versions)
            if self.manifest_files is not None:
                self.check_digests_present_and_used(self.manifest_files, digests_used)
            if 'fixity' in inventory:
                self.validate_fixity(inventory['fixity'], self.manifest_files)
    def validate_manifest(self, manifest):
        """Validate manifest block in inventory.

        Returns:
          * manifest_files - a mapping from file to digest for each file in
            the manifest
          * manifest_files_correct_format - a simple list of the manifest file
            paths that passed initial checks. They need to be checked for valid
            version directories later, when we know what version directories
            are valid
          * unnormalized_digests - a set of the original digests in unnormalized
            form that MUST match exactly the values used in state blocks
        """
        manifest_files = {}
        manifest_files_correct_format = []
        unnormalized_digests = set()
        manifest_digests = set()
        if not isinstance(manifest, dict):
            self.error('E041c')
        else:
            content_paths = set()
            content_directories = set()
            for digest in manifest:
                m = re.match(self.digest_regex(), digest)
                if not m:
                    self.error('E025a', digest=digest, algorithm=self.digest_algorithm)  # wrong form of digest
                elif not isinstance(manifest[digest], list):
                    self.error('E092', digest=digest)  # must have path list value
                else:
                    unnormalized_digests.add(digest)
                    norm_digest = normalized_digest(digest, self.digest_algorithm)
                    if norm_digest in manifest_digests:
                        # We have already seen this in different un-normalized form!
                        self.error("E096", digest=norm_digest)
                    else:
                        manifest_digests.add(norm_digest)
                    # Files are still recorded even for a duplicated digest
                    for file in manifest[digest]:
                        manifest_files[file] = norm_digest
                        if self.check_content_path(file, content_paths, content_directories):
                            manifest_files_correct_format.append(file)
            # Check for conflicting content paths
            for path in content_directories:
                if path in content_paths:
                    self.error("E101b", path=path)
        return manifest_files, manifest_files_correct_format, unnormalized_digests
def validate_fixity(self, fixity, manifest_files):
"""Validate fixity block in inventory.
Check the structure of the fixity block and makes sure that only files
listed in the manifest are referenced.
"""
if not isinstance(fixity, dict):
# The value of fixity must be a JSON object. In v1.0 I catch not an object
# as part of E056 but this was clarified as E111 in v1.1. The value may
# be an empty object in either case
self.error('E056a' if self.spec_version == '1.0' else 'E111')
else:
for digest_algorithm in fixity:
known_digest = True
try:
regex = digest_regex(digest_algorithm)
except ValueError:
if not self.lax_digests:
self.error('E056b', algorithm=self.digest_algorithm)
continue
# Match anything
regex = r'''^.*$'''
known_digest = False
fixity_algoritm_block = fixity[digest_algorithm]
if not isinstance(fixity_algoritm_block, dict):
self.error('E057a', algorithm=self.digest_algorithm)
else:
digests_seen = set()
for digest in fixity_algoritm_block:
m = re.match(regex, digest)
if not m:
self.error('E057b', digest=digest, algorithm=digest_algorithm) # wrong form of digest
elif not isinstance(fixity_algoritm_block[digest], list):
self.error('E057c', digest=digest, algorithm=digest_algorithm) # must have path list value
else:
if known_digest:
norm_digest = normalized_digest(digest, digest_algorithm)
else:
norm_digest = digest
if norm_digest in digests_seen:
# We have already seen this in different un-normalized form!
self.error("E097", digest=norm_digest, algorithm=digest_algorithm)
else:
digests_seen.add(norm_digest)
for file in fixity_algoritm_block[digest]:
if file not in manifest_files:
self.error("E057d", digest=norm_digest, algorithm=digest_algorithm, path=file)
    def validate_version_sequence(self, versions):
        """Validate sequence of version names in versions block in inventory.

        Returns an array of in-sequence version directories that are part
        of a valid sequence. May exclude other version directory names that are
        not part of the valid sequence if an error is thrown.
        """
        all_versions = []
        if not isinstance(versions, dict):
            self.error("E044")
            return all_versions
        if len(versions) == 0:
            self.error("E008")
            return all_versions
        # Validate version sequence
        # https://ocfl.io/draft/spec/#version-directories
        zero_padded = None
        max_version_num = 999999  # Excessive limit
        if 'v1' in versions:
            fmt = 'v%d'
            zero_padded = False
            all_versions.append('v1')
        else:  # Find padding size
            for n in range(2, 11):
                fmt = 'v%0' + str(n) + 'd'
                vkey = fmt % 1
                if vkey in versions:
                    all_versions.append(vkey)
                    zero_padded = n
                    # Padding of n digits (incl. the 'v') caps the sequence
                    max_version_num = (10 ** (n - 1)) - 1
                    break
            if not zero_padded:
                self.error("E009")
                return all_versions
            if zero_padded:
                self.warning("W001")
        # Have v1 and know format, work through to check sequence
        for n in range(2, max_version_num + 1):
            v = (fmt % n)
            if v in versions:
                all_versions.append(v)
            else:
                if len(versions) != (n - 1):
                    self.error("E010")  # Extra version dirs outside sequence
                return all_versions
        # We have now included all possible versions up to the zero padding
        # size, if there are more versions than this number then we must
        # have extra that violate the zero-padding rule or are out of
        # sequence
        if len(versions) > max_version_num:
            self.error("E011")
        return all_versions
    def validate_versions(self, versions, all_versions, unnormalized_digests):
        """Validate versions blocks in inventory.

        Requires as input two things which are assumed to be structurally correct
        from prior basic validation:

          * versions - which is the JSON object (dict) from the inventory
          * all_versions - an ordered list of the versions to look at in versions
            (all other keys in versions will be ignored)

        Returns a list of digests_used which can then be checked against the
        manifest.
        """
        digests_used = []
        for v in all_versions:
            version = versions[v]
            if 'created' not in version:
                self.error('E048', version=v)  # No created
            elif not isinstance(versions[v]['created'], str):
                self.error('E049d', version=v)  # Bad created
            else:
                created = versions[v]['created']
                try:
                    str_to_datetime(created)  # catch ValueError if fails
                    if not re.search(r'''(Z|[+-]\d\d:\d\d)$''', created):  # FIXME - kludge
                        self.error('E049a', version=v)
                    if not re.search(r'''T\d\d:\d\d:\d\d''', created):  # FIXME - kludge
                        self.error('E049b', version=v)
                except ValueError as e:
                    self.error('E049c', version=v, description=str(e))
            if 'state' in version:
                digests_used += self.validate_state_block(version['state'], version=v, unnormalized_digests=unnormalized_digests)
            else:
                self.error('E048c', version=v)
            if 'message' not in version:
                self.warning('W007a', version=v)
            elif not isinstance(version['message'], str):
                self.error('E094', version=v)
            if 'user' not in version:
                self.warning('W007b', version=v)
            else:
                user = version['user']
                if not isinstance(user, dict):
                    self.error('E054a', version=v)
                else:
                    if 'name' not in user or not isinstance(user['name'], str):
                        self.error('E054b', version=v)
                    if 'address' not in user:
                        self.warning('W008', version=v)
                    elif not isinstance(user['address'], str):
                        self.error('E054c', version=v)
                    elif not re.match(r'''\w{3,6}:''', user['address']):
                        self.warning('W009', version=v)
        return digests_used
    def validate_state_block(self, state, version, unnormalized_digests):
        """Validate state block in a version in an inventory.

        Arguments:
            state: the state block (expected to be a dict mapping a digest
                string to a list of logical paths)
            version: version directory name, used only for error reporting
            unnormalized_digests: set of digest strings exactly as they appear
                in the manifest; state keys must match these byte-for-byte

        Returns a list of content digests referenced in the state block,
        normalized for self.digest_algorithm.
        """
        digests = []
        logical_paths = set()
        logical_directories = set()
        if not isinstance(state, dict):
            self.error('E050c', version=version)
        else:
            digest_re = re.compile(self.digest_regex())
            for digest in state:
                if not digest_re.match(digest):
                    self.error('E050d', version=version, digest=digest)
                elif not isinstance(state[digest], list):
                    self.error('E050e', version=version, digest=digest)
                else:
                    for path in state[digest]:
                        if path in logical_paths:
                            # Same logical path listed twice within one version
                            self.error("E095a", version=version, path=path)
                        else:
                            self.check_logical_path(path, version, logical_paths, logical_directories)
                    if digest not in unnormalized_digests:
                        # Exact string value must match, not just normalized
                        self.error("E050f", version=version, digest=digest)
                    norm_digest = normalized_digest(digest, self.digest_algorithm)
                    digests.append(norm_digest)
            # Check for conflicting logical paths: a path may not also be
            # used as a directory component of another path
            for path in logical_directories:
                if path in logical_paths:
                    self.error("E095b", version=version, path=path)
        return digests
def check_content_paths_map_to_versions(self, manifest_files, all_versions):
"""Check that every content path starts with a valid version.
The content directory component has already been checked in
check_content_path(). We have already tested all paths enough
to know that they can be split into at least 2 components.
"""
for path in manifest_files:
version_dir, dummy_rest = path.split('/', 1)
if version_dir not in all_versions:
self.error('E042b', path=path)
def check_digests_present_and_used(self, manifest_files, digests_used):
"""Check all digests in manifest that are needed are present and used."""
in_manifest = set(manifest_files.values())
in_state = set(digests_used)
not_in_manifest = in_state.difference(in_manifest)
if len(not_in_manifest) > 0:
self.error("E050a", digests=", ".join(sorted(not_in_manifest)))
not_in_state = in_manifest.difference(in_state)
if len(not_in_state) > 0:
self.error("E107", digests=", ".join(sorted(not_in_state)))
    def digest_regex(self):
        """Return regex for validating un-normalized digest format.

        Looks up the pattern for self.digest_algorithm via the module-level
        digest_regex() helper (the method deliberately shares its name; the
        bare call below resolves to the imported function). If the algorithm
        is unknown, report E026a unless lax_digests is set, and fall back to
        a match-anything pattern either way.
        """
        try:
            return digest_regex(self.digest_algorithm)
        except ValueError:
            if not self.lax_digests:
                # NOTE(review): kwarg is named 'digest' but carries the
                # algorithm name — matches the E026a message template
                self.error('E026a', digest=self.digest_algorithm)
        # Match anything
        return r'''^.*$'''
def check_logical_path(self, path, version, logical_paths, logical_directories):
"""Check logical path and accumulate paths/directories for E095b check.
logical_paths and logical_directories are expected to be sets.
Only adds good paths to the accumulated paths/directories.
"""
if path.startswith('/') or path.endswith('/'):
self.error("E053", version=version, path=path)
else:
elements = path.split('/')
for element in elements:
if element in ['.', '..', '']:
self.error("E052", version=version, path=path)
return
# Accumulate paths and directories
logical_paths.add(path)
logical_directories.add('/'.join(elements[0:-1]))
def check_content_path(self, path, content_paths, content_directories):
"""Check logical path and accumulate paths/directories for E101 check.
Returns True if valid, else False. Only adds good paths to the
accumulated paths/directories. We don't yet know the set of valid
version directories so the check here is just for 'v' + digits.
"""
if path.startswith('/') or path.endswith('/'):
self.error("E100", path=path)
return False
m = re.match(r'''^(v\d+/''' + self.content_directory + r''')/(.+)''', path)
if not m:
self.error("E042a", path=path)
return False
elements = m.group(2).split('/')
for element in elements:
if element in ('', '.', '..'):
self.error("E099", path=path)
return False
# Accumulate paths and directories if not seen before
if path in content_paths:
self.error("E101a", path=path)
return False
content_paths.add(path)
content_directories.add('/'.join([m.group(1)] + elements[0:-1]))
return True
    def validate_as_prior_version(self, prior):
        """Check that prior is a valid prior version of the current inventory object.

        The input variable prior is also expected to be an InventoryValidator object
        and both self and prior inventories are assumed to have been checked for
        internal consistency.
        """
        # Must have a subset of versions which also checks zero padding format etc.
        if not set(prior.all_versions) < set(self.all_versions):
            self.error('E066a', prior_head=prior.head)
        else:
            # Check references to files but realize that there might be different
            # digest algorithms between versions
            version = 'no-version'
            for version in prior.all_versions:
                # If the digest algorithm is the same then we can make a
                # direct check on whether the state blocks match
                if prior.digest_algorithm == self.digest_algorithm:
                    self.compare_states_for_version(prior, version)
                # Now check the mappings from state to logical path, which must
                # be consistent even if the digestAlgorithm is different between
                # versions. Get maps from logical paths to files on disk:
                prior_map = get_logical_path_map(prior.inventory, version)
                self_map = get_logical_path_map(self.inventory, version)
                # Look first for differences in logical paths listed
                only_in_prior = prior_map.keys() - self_map.keys()
                only_in_self = self_map.keys() - prior_map.keys()
                if only_in_prior or only_in_self:
                    if only_in_prior:
                        self.error('E066b', version=version, prior_head=prior.head, only_in=prior.head, logical_paths=','.join(only_in_prior))
                    if only_in_self:
                        self.error('E066b', version=version, prior_head=prior.head, only_in=self.where, logical_paths=','.join(only_in_self))
                else:
                    # Same logical paths on both sides: the content files each
                    # path maps to in prior must all still be mapped in self
                    for logical_path, this_map in prior_map.items():
                        if not this_map.issubset(self_map[logical_path]):
                            self.error('E066c', version=version, prior_head=prior.head,
                                       logical_path=logical_path, prior_content=','.join(this_map),
                                       current_content=','.join(self_map[logical_path]))
                # Check version metadata agrees between the two descriptions
                prior_version = prior.inventory['versions'][version]
                self_version = self.inventory['versions'][version]
                for key in ('created', 'message', 'user'):
                    if prior_version.get(key) != self_version.get(key):
                        self.warning('W011', version=version, prior_head=prior.head, key=key)
def compare_states_for_version(self, prior, version):
"""Compare state blocks for version between self and prior.
Assumes the same digest algorithm in both, do not call otherwise!
Looks only for digests that appear in one but not in the other, the code
in validate_as_prior_version(..) does a check for whether the same sets
of logical files appear and we don't want to duplicate an error message
about that.
While the mapping checks in validate_as_prior_version(..) do all that is
necessary to detect an error, the additional errors that may be generated
here provide more detailed diagnostics in the case that the digest
algorithm is the same across versions being compared.
"""
self_state = self.inventory['versions'][version]['state']
prior_state = prior.inventory['versions'][version]['state']
for digest in set(self_state.keys()).union(prior_state.keys()):
if digest not in prior_state:
self.error('E066d', version=version, prior_head=prior.head,
digest=digest, logical_files=', '.join(self_state[digest]))
elif digest not in self_state:
self.error('E066e', version=version, prior_head=prior.head,
digest=digest, logical_files=', '.join(prior_state[digest]))
|
ocfl/inventory_validator.py
|
codereval_python_data_74
|
Check that prior is a valid prior version of the current inventory object.
The input variable prior is also expected to be an InventoryValidator object
and both self and prior inventories are assumed to have been checked for
internal consistency.
    def validate_as_prior_version(self, prior):
        """Check that prior is a valid prior version of the current inventory object.

        The input variable prior is also expected to be an InventoryValidator object
        and both self and prior inventories are assumed to have been checked for
        internal consistency.
        """
        # Must have a subset of versions which also checks zero padding format etc.
        if not set(prior.all_versions) < set(self.all_versions):
            self.error('E066a', prior_head=prior.head)
        else:
            # Check references to files but realize that there might be different
            # digest algorithms between versions
            version = 'no-version'
            for version in prior.all_versions:
                # If the digest algorithm is the same then we can make a
                # direct check on whether the state blocks match
                if prior.digest_algorithm == self.digest_algorithm:
                    self.compare_states_for_version(prior, version)
                # Now check the mappings from state to logical path, which must
                # be consistent even if the digestAlgorithm is different between
                # versions. Get maps from logical paths to files on disk:
                prior_map = get_logical_path_map(prior.inventory, version)
                self_map = get_logical_path_map(self.inventory, version)
                # Look first for differences in logical paths listed
                only_in_prior = prior_map.keys() - self_map.keys()
                only_in_self = self_map.keys() - prior_map.keys()
                if only_in_prior or only_in_self:
                    if only_in_prior:
                        self.error('E066b', version=version, prior_head=prior.head, only_in=prior.head, logical_paths=','.join(only_in_prior))
                    if only_in_self:
                        self.error('E066b', version=version, prior_head=prior.head, only_in=self.where, logical_paths=','.join(only_in_self))
                else:
                    # Same logical paths on both sides: the content files each
                    # path maps to in prior must all still be mapped in self
                    for logical_path, this_map in prior_map.items():
                        if not this_map.issubset(self_map[logical_path]):
                            self.error('E066c', version=version, prior_head=prior.head,
                                       logical_path=logical_path, prior_content=','.join(this_map),
                                       current_content=','.join(self_map[logical_path]))
                # Check version metadata agrees between the two descriptions
                prior_version = prior.inventory['versions'][version]
                self_version = self.inventory['versions'][version]
                for key in ('created', 'message', 'user'):
                    if prior_version.get(key) != self_version.get(key):
                        self.warning('W011', version=version, prior_head=prior.head, key=key)
"""OCFL Inventory Validator.
Code to validate the Python representation of an OCFL Inventory
as read with json.load(). Does not examine anything in storage.
"""
import re
from .digest import digest_regex, normalized_digest
from .validation_logger import ValidationLogger
from .w3c_datetime import str_to_datetime
def get_logical_path_map(inventory, version):
    """Get a map of logical paths in state to files on disk for version in inventory.

    Returns a dictionary: logical_path_in_state -> set(content_files)

    The set of content_files may include references to duplicate files in
    later versions than the version being described.
    """
    manifest = inventory['manifest']
    state = inventory['versions'][version]['state']
    # Digests in state that are missing from the manifest are skipped here;
    # they are reported separately by the validator
    return {
        logical_path: set(manifest[digest])
        for digest, logical_paths in state.items()
        if digest in manifest
        for logical_path in logical_paths
    }
class InventoryValidator():
    """Class for OCFL Inventory Validator."""

    def __init__(self, log=None, where='???',
                 lax_digests=False, spec_version='1.0'):
        """Initialize OCFL Inventory Validator.

        Arguments:
            log: ValidationLogger used to record errors/warnings (a fresh
                logger is created if None)
            where: location string included as context in every message
            lax_digests: if True, accept digest algorithms beyond the spec set
            spec_version: OCFL specification version to validate against
        """
        self.log = ValidationLogger() if log is None else log
        self.where = where
        self.spec_version = spec_version
        # Object state (filled in by validate())
        self.inventory = None
        self.id = None
        self.digest_algorithm = 'sha512'
        self.content_directory = 'content'
        self.all_versions = []
        self.manifest_files = None
        self.unnormalized_digests = None
        self.head = 'UNKNOWN'
        # Validation control
        self.lax_digests = lax_digests
        # Configuration
        self.spec_versions_supported = ('1.0', '1.1')
    def error(self, code, **args):
        """Log an error with this validator's location context added."""
        self.log.error(code, where=self.where, **args)
    def warning(self, code, **args):
        """Log a warning with this validator's location context added."""
        self.log.warning(code, where=self.where, **args)
    def validate(self, inventory, extract_spec_version=False):
        """Validate a given inventory.

        If extract_spec_version is True then will look at the type value to determine
        the specification version. In the case that there is no type value or it isn't
        valid, then other tests will be based on the version given in self.spec_version.
        """
        # Basic structure
        self.inventory = inventory
        if 'id' in inventory:
            iid = inventory['id']
            if not isinstance(iid, str) or iid == '':
                self.error("E037a")
            else:
                # URI syntax https://www.rfc-editor.org/rfc/rfc3986.html#section-3.1 :
                # scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
                if not re.match(r'''[a-z][a-z\d\+\-\.]*:.+''', iid, re.IGNORECASE):
                    self.warning("W005", id=iid)
                self.id = iid
        else:
            self.error("E036a")
        if 'type' not in inventory:
            self.error("E036b")
        elif not isinstance(inventory['type'], str):
            self.error("E999")
        elif extract_spec_version:
            m = re.match(r'''https://ocfl.io/(\d+.\d)/spec/#inventory''', inventory['type'])
            if not m:
                self.error('E038b', got=inventory['type'], assumed_spec_version=self.spec_version)
            elif m.group(1) in self.spec_versions_supported:
                self.spec_version = m.group(1)
            else:
                self.error("E038c", got=m.group(1), assumed_spec_version=self.spec_version)
        elif inventory['type'] != 'https://ocfl.io/' + self.spec_version + '/spec/#inventory':
            self.error("E038a", expected='https://ocfl.io/' + self.spec_version + '/spec/#inventory', got=inventory['type'])
        if 'digestAlgorithm' not in inventory:
            self.error("E036c")
        elif inventory['digestAlgorithm'] == 'sha512':
            pass
        elif self.lax_digests:
            self.digest_algorithm = inventory['digestAlgorithm']
        elif inventory['digestAlgorithm'] == 'sha256':
            self.warning("W004")
            self.digest_algorithm = inventory['digestAlgorithm']
        else:
            self.error("E039", digest_algorithm=inventory['digestAlgorithm'])
        if 'contentDirectory' in inventory:
            # Careful only to set self.content_directory if value is safe
            cd = inventory['contentDirectory']
            if not isinstance(cd, str) or '/' in cd:
                self.error("E017")
            elif cd in ('.', '..'):
                self.error("E018")
            else:
                self.content_directory = cd
        manifest_files_correct_format = None
        if 'manifest' not in inventory:
            self.error("E041a")
        else:
            (self.manifest_files, manifest_files_correct_format, self.unnormalized_digests) = self.validate_manifest(inventory['manifest'])
        digests_used = []
        if 'versions' not in inventory:
            self.error("E041b")
        else:
            self.all_versions = self.validate_version_sequence(inventory['versions'])
            digests_used = self.validate_versions(inventory['versions'], self.all_versions, self.unnormalized_digests)
        if 'head' not in inventory:
            self.error("E036d")
        elif len(self.all_versions) > 0:
            self.head = self.all_versions[-1]
            if inventory['head'] != self.head:
                self.error("E040", got=inventory['head'], expected=self.head)
        if len(self.all_versions) == 0:
            # Abort tests if we don't have a valid version sequence, otherwise
            # there will likely be spurious subsequent error reports
            return
        if len(self.all_versions) > 0:
            if manifest_files_correct_format is not None:
                self.check_content_paths_map_to_versions(manifest_files_correct_format, self.all_versions)
            if self.manifest_files is not None:
                self.check_digests_present_and_used(self.manifest_files, digests_used)
            if 'fixity' in inventory:
                self.validate_fixity(inventory['fixity'], self.manifest_files)
def validate_manifest(self, manifest):
    """Validate manifest block in inventory.

    Returns:
        * manifest_files - a mapping from file to digest for each file in
          the manifest
        * manifest_files_correct_format - a simple list of the manifest file
          paths that passed initial checks. They need to be checked for valid
          version directories later, when we know what version directories
          are valid
        * unnormalized_digests - a set of the original digests in unnormalized
          form that MUST match exactly the values used in state blocks
    """
    manifest_files = {}
    manifest_files_correct_format = []
    unnormalized_digests = set()
    manifest_digests = set()  # normalized digests seen so far, for duplicate detection
    if not isinstance(manifest, dict):
        self.error('E041c')
    else:
        # Accumulators for the content path / content directory conflict check
        content_paths = set()
        content_directories = set()
        for digest in manifest:
            m = re.match(self.digest_regex(), digest)
            if not m:
                self.error('E025a', digest=digest, algorithm=self.digest_algorithm)  # wrong form of digest
            elif not isinstance(manifest[digest], list):
                self.error('E092', digest=digest)  # must have path list value
            else:
                unnormalized_digests.add(digest)
                norm_digest = normalized_digest(digest, self.digest_algorithm)
                if norm_digest in manifest_digests:
                    # We have already seen this in different un-normalized form!
                    self.error("E096", digest=norm_digest)
                else:
                    manifest_digests.add(norm_digest)
                for file in manifest[digest]:
                    manifest_files[file] = norm_digest
                    if self.check_content_path(file, content_paths, content_directories):
                        manifest_files_correct_format.append(file)
        # Check for conflicting content paths: a name used as a directory
        # must not also be used as a file path
        for path in content_directories:
            if path in content_paths:
                self.error("E101b", path=path)
    return manifest_files, manifest_files_correct_format, unnormalized_digests
def validate_fixity(self, fixity, manifest_files):
    """Validate fixity block in inventory.

    Check the structure of the fixity block and makes sure that only files
    listed in the manifest are referenced.

    Arguments:
        fixity: the fixity block parsed from the inventory JSON
        manifest_files: dict mapping content path -> normalized digest for
            all files in the manifest, used to check that fixity entries
            reference only manifest files
    """
    if not isinstance(fixity, dict):
        # The value of fixity must be a JSON object. In v1.0 I catch not an object
        # as part of E056 but this was clarified as E111 in v1.1. The value may
        # be an empty object in either case
        self.error('E056a' if self.spec_version == '1.0' else 'E111')
        return
    for digest_algorithm in fixity:
        known_digest = True
        try:
            regex = digest_regex(digest_algorithm)
        except ValueError:
            if not self.lax_digests:
                # FIX: report the offending fixity algorithm; previously this
                # reported self.digest_algorithm (the inventory's main
                # digestAlgorithm), which is misleading in the log
                self.error('E056b', algorithm=digest_algorithm)
                continue
            # Unknown algorithm is allowed under lax_digests: match anything
            regex = r'''^.*$'''
            known_digest = False
        fixity_algorithm_block = fixity[digest_algorithm]
        if not isinstance(fixity_algorithm_block, dict):
            # FIX: report the fixity algorithm whose block is malformed,
            # not the inventory's main digestAlgorithm
            self.error('E057a', algorithm=digest_algorithm)
            continue
        digests_seen = set()
        for digest in fixity_algorithm_block:
            m = re.match(regex, digest)
            if not m:
                self.error('E057b', digest=digest, algorithm=digest_algorithm)  # wrong form of digest
            elif not isinstance(fixity_algorithm_block[digest], list):
                self.error('E057c', digest=digest, algorithm=digest_algorithm)  # must have path list value
            else:
                if known_digest:
                    norm_digest = normalized_digest(digest, digest_algorithm)
                else:
                    norm_digest = digest
                if norm_digest in digests_seen:
                    # We have already seen this in different un-normalized form!
                    self.error("E097", digest=norm_digest, algorithm=digest_algorithm)
                else:
                    digests_seen.add(norm_digest)
                for file in fixity_algorithm_block[digest]:
                    if file not in manifest_files:
                        self.error("E057d", digest=norm_digest, algorithm=digest_algorithm, path=file)
def validate_version_sequence(self, versions):
    """Validate sequence of version names in versions block in inventory.

    Returns an array of in-sequence version directories that are part
    of a valid sequence. May exclude other version directory names that
    are not part of the valid sequence if an error is thrown.
    """
    seq = []
    # Guard clauses: versions must be a non-empty JSON object
    if not isinstance(versions, dict):
        self.error("E044")
        return seq
    if len(versions) == 0:
        self.error("E008")
        return seq
    # Determine naming style from the first version directory.
    # See https://ocfl.io/draft/spec/#version-directories
    pad = None          # zero-padding width, or False for unpadded 'v1' style
    highest = 999999    # deliberately excessive upper bound on version number
    if 'v1' in versions:
        pad = False
        fmt = 'v%d'
        seq.append('v1')
    else:
        # Probe padded forms v01, v001, ... to discover the padding width
        for width in range(2, 11):
            fmt = 'v%0{}d'.format(width)
            first = fmt % 1
            if first in versions:
                seq.append(first)
                pad = width
                highest = 10 ** (width - 1) - 1
                break
        if not pad:
            self.error("E009")
            return seq
    if pad:
        self.warning("W001")
    # Have v1 and know the format; walk v2, v3, ... until the sequence breaks
    for num in range(2, highest + 1):
        name = fmt % num
        if name not in versions:
            if len(versions) != (num - 1):
                self.error("E010")  # extra version dirs outside sequence
            return seq
        seq.append(name)
    # All numbers possible with this padding width are present; anything more
    # must violate the zero-padding rule or be out of sequence
    if len(versions) > highest:
        self.error("E011")
    return seq
def validate_versions(self, versions, all_versions, unnormalized_digests):
    """Validate versions blocks in inventory.

    Requires as input two things which are assumed to be structurally correct
    from prior basic validation:

    * versions - which is the JSON object (dict) from the inventory
    * all_versions - an ordered list of the versions to look at in versions
      (all other keys in versions will be ignored)

    Returns a list of digests_used which can then be checked against the
    manifest.
    """
    digests_used = []
    for v in all_versions:
        version = versions[v]
        if 'created' not in version:
            self.error('E048', version=v)  # No created
        elif not isinstance(versions[v]['created'], str):
            self.error('E049d', version=v)  # Bad created
        else:
            created = versions[v]['created']
            try:
                str_to_datetime(created)  # catch ValueError if fails
                # Approximate checks for RFC3339 requirements not enforced
                # by str_to_datetime: a timezone designator and seconds
                if not re.search(r'''(Z|[+-]\d\d:\d\d)$''', created):  # FIXME - kludge
                    self.error('E049a', version=v)
                if not re.search(r'''T\d\d:\d\d:\d\d''', created):  # FIXME - kludge
                    self.error('E049b', version=v)
            except ValueError as e:
                self.error('E049c', version=v, description=str(e))
        if 'state' in version:
            digests_used += self.validate_state_block(version['state'], version=v, unnormalized_digests=unnormalized_digests)
        else:
            self.error('E048c', version=v)
        if 'message' not in version:
            self.warning('W007a', version=v)
        elif not isinstance(version['message'], str):
            self.error('E094', version=v)
        if 'user' not in version:
            self.warning('W007b', version=v)
        else:
            user = version['user']
            if not isinstance(user, dict):
                self.error('E054a', version=v)
            else:
                if 'name' not in user or not isinstance(user['name'], str):
                    self.error('E054b', version=v)
                if 'address' not in user:
                    self.warning('W008', version=v)
                elif not isinstance(user['address'], str):
                    self.error('E054c', version=v)
                elif not re.match(r'''\w{3,6}:''', user['address']):
                    # Address present but doesn't look like a URI (no scheme)
                    self.warning('W009', version=v)
    return digests_used
def validate_state_block(self, state, version, unnormalized_digests):
    """Validate state block in a version in an inventory.

    The version is used only for error reporting.

    Returns a list of content digests referenced in the state block.
    """
    digests = []
    logical_paths = set()
    logical_directories = set()
    if not isinstance(state, dict):
        self.error('E050c', version=version)
    else:
        # Compile once; the same digest pattern is tested for every key
        digest_re = re.compile(self.digest_regex())
        for digest in state:
            if not digest_re.match(digest):
                self.error('E050d', version=version, digest=digest)
            elif not isinstance(state[digest], list):
                self.error('E050e', version=version, digest=digest)
            else:
                for path in state[digest]:
                    if path in logical_paths:
                        self.error("E095a", version=version, path=path)  # duplicate logical path
                    else:
                        self.check_logical_path(path, version, logical_paths, logical_directories)
                if digest not in unnormalized_digests:
                    # Exact string value must match, not just normalized
                    self.error("E050f", version=version, digest=digest)
                norm_digest = normalized_digest(digest, self.digest_algorithm)
                digests.append(norm_digest)
        # Check for conflicting logical paths: a name used as a directory
        # must not also be used as a logical file path
        for path in logical_directories:
            if path in logical_paths:
                self.error("E095b", version=version, path=path)
    return digests
def check_content_paths_map_to_versions(self, manifest_files, all_versions):
    """Check that every content path starts with a valid version directory.

    The content directory component has already been checked in
    check_content_path(). All paths given here are known to split into at
    least two components.
    """
    for content_path in manifest_files:
        first_segment = content_path.split('/', 1)[0]
        if first_segment not in all_versions:
            self.error('E042b', path=content_path)
def check_digests_present_and_used(self, manifest_files, digests_used):
    """Check all digests in manifest that are needed are present and used."""
    manifest_digests = set(manifest_files.values())
    state_digests = set(digests_used)
    # Digests referenced from version state but absent from the manifest
    missing = state_digests - manifest_digests
    if missing:
        self.error("E050a", digests=", ".join(sorted(missing)))
    # Manifest digests never referenced from any version state
    unused = manifest_digests - state_digests
    if unused:
        self.error("E107", digests=", ".join(sorted(unused)))
def digest_regex(self):
    """Return regex for validating un-normalized digest format."""
    try:
        # Calls the module-level digest_regex() function, which this method
        # name deliberately shadows as a class attribute
        return digest_regex(self.digest_algorithm)
    except ValueError:
        if not self.lax_digests:
            self.error('E026a', digest=self.digest_algorithm)
    # Unknown algorithm (possibly under lax_digests): accept anything
    return r'''^.*$'''
def check_logical_path(self, path, version, logical_paths, logical_directories):
    """Check one logical path and accumulate paths/directories for the E095b check.

    logical_paths and logical_directories are expected to be sets.
    Only good paths are added to the accumulated sets; version is used
    only for error reporting.
    """
    if path.startswith('/') or path.endswith('/'):
        self.error("E053", version=version, path=path)
        return
    segments = path.split('/')
    if any(seg in ('.', '..', '') for seg in segments):
        self.error("E052", version=version, path=path)
        return
    # Good path: record it and its implied parent directory
    logical_paths.add(path)
    logical_directories.add('/'.join(segments[:-1]))
def check_content_path(self, path, content_paths, content_directories):
    """Check one content path and accumulate paths/directories for the E101 check.

    Returns True if valid, else False. Only adds good paths to the
    accumulated paths/directories. We don't yet know the set of valid
    version directories so the check here is just for 'v' + digits.

    Arguments:
        path: content path from the manifest
        content_paths: set of good content paths seen so far (mutated)
        content_directories: set of implied directories seen so far (mutated)
    """
    if path.startswith('/') or path.endswith('/'):
        self.error("E100", path=path)
        return False
    # FIX: escape content_directory before regex interpolation — a custom
    # contentDirectory value may contain regex metacharacters (earlier
    # checks only forbid '/', '.' and '..') which would corrupt the match
    m = re.match(r'''^(v\d+/''' + re.escape(self.content_directory) + r''')/(.+)''', path)
    if not m:
        self.error("E042a", path=path)
        return False
    elements = m.group(2).split('/')
    for element in elements:
        if element in ('', '.', '..'):
            self.error("E099", path=path)
            return False
    # Accumulate paths and directories if not seen before
    if path in content_paths:
        self.error("E101a", path=path)
        return False
    content_paths.add(path)
    content_directories.add('/'.join([m.group(1)] + elements[0:-1]))
    return True
def validate_as_prior_version(self, prior):
    """Check that prior is a valid prior version of the current inventory object.

    The input variable prior is also expected to be an InventoryValidator object
    and both self and prior inventories are assumed to have been checked for
    internal consistency.
    """
    # Must have a subset of versions which also checks zero padding format etc.
    if not set(prior.all_versions) < set(self.all_versions):
        self.error('E066a', prior_head=prior.head)
    else:
        # Check references to files but realize that there might be different
        # digest algorithms between versions
        version = 'no-version'
        for version in prior.all_versions:
            # If the digest algorithm is the same then we can make a
            # direct check on whether the state blocks match
            if prior.digest_algorithm == self.digest_algorithm:
                self.compare_states_for_version(prior, version)
            # Now check the mappings from state to logical path, which must
            # be consistent even if the digestAlgorithm is different between
            # versions. Get maps from logical paths to files on disk:
            prior_map = get_logical_path_map(prior.inventory, version)
            self_map = get_logical_path_map(self.inventory, version)
            # Look first for differences in logical paths listed
            only_in_prior = prior_map.keys() - self_map.keys()
            only_in_self = self_map.keys() - prior_map.keys()
            if only_in_prior or only_in_self:
                if only_in_prior:
                    self.error('E066b', version=version, prior_head=prior.head, only_in=prior.head, logical_paths=','.join(only_in_prior))
                if only_in_self:
                    self.error('E066b', version=version, prior_head=prior.head, only_in=self.where, logical_paths=','.join(only_in_self))
            else:
                # Check them all in detail - digests must match. this_map may
                # legitimately be a subset because later versions can add
                # duplicate content files for the same digest
                for logical_path, this_map in prior_map.items():
                    if not this_map.issubset(self_map[logical_path]):
                        self.error('E066c', version=version, prior_head=prior.head,
                                   logical_path=logical_path, prior_content=','.join(this_map),
                                   current_content=','.join(self_map[logical_path]))
            # Check metadata: created/message/user should not change between
            # the prior inventory and the current one (warning only)
            prior_version = prior.inventory['versions'][version]
            self_version = self.inventory['versions'][version]
            for key in ('created', 'message', 'user'):
                if prior_version.get(key) != self_version.get(key):
                    self.warning('W011', version=version, prior_head=prior.head, key=key)
def compare_states_for_version(self, prior, version):
    """Compare state blocks for version between self and prior.

    Assumes the same digest algorithm in both, do not call otherwise!
    Reports only digests that appear in one state block but not the other;
    validate_as_prior_version(..) separately checks that the same logical
    files appear, so we avoid duplicating that error. The extra errors
    produced here give more detailed diagnostics when the digest algorithm
    is the same across the versions being compared.
    """
    current_state = self.inventory['versions'][version]['state']
    previous_state = prior.inventory['versions'][version]['state']
    for digest in set(current_state) | set(previous_state):
        if digest not in previous_state:
            self.error('E066d', version=version, prior_head=prior.head,
                       digest=digest, logical_files=', '.join(current_state[digest]))
        elif digest not in current_state:
            self.error('E066e', version=version, prior_head=prior.head,
                       digest=digest, logical_files=', '.join(previous_state[digest]))
|
ocfl/inventory_validator.py
|
codereval_python_data_75
|
Get a map of logical paths in state to files on disk for version in inventory.
Returns a dictionary: logical_path_in_state -> set(content_files)
The set of content_files may includes references to duplicate files in
later versions than the version being described.
def get_logical_path_map(inventory, version):
    """Get a map of logical paths in state to files on disk for version in inventory.

    Returns a dictionary: logical_path_in_state -> set(content_files)

    The set of content_files may include references to duplicate files in
    later versions than the version being described.
    """
    manifest = inventory['manifest']
    state = inventory['versions'][version]['state']
    # Digests missing from the manifest are skipped (reported elsewhere)
    return {
        logical: set(manifest[digest])
        for digest, logicals in state.items()
        if digest in manifest
        for logical in logicals
    }
"""OCFL Inventory Validator.
Code to validate the Python representation of an OCFL Inventory
as read with json.load(). Does not examine anything in storage.
"""
import re
from .digest import digest_regex, normalized_digest
from .validation_logger import ValidationLogger
from .w3c_datetime import str_to_datetime
def get_logical_path_map(inventory, version):
    """Get a map of logical paths in state to files on disk for version in inventory.

    Returns a dictionary: logical_path_in_state -> set(content_files)

    The set of content_files may include references to duplicate files in
    later versions than the version being described.
    """
    mapping = {}
    manifest = inventory['manifest']
    version_state = inventory['versions'][version]['state']
    for digest, logical_paths in version_state.items():
        if digest not in manifest:
            # Digest missing from the manifest: skipped here, reported elsewhere
            continue
        for logical_path in logical_paths:
            mapping[logical_path] = set(manifest[digest])
    return mapping
class InventoryValidator():
    """Class for OCFL Inventory Validator.

    Validates the Python representation of an OCFL inventory as read with
    json.load(); does not examine anything in storage. Errors and warnings
    are reported through the attached ValidationLogger using the E/W codes
    from the OCFL specification.
    """

    def __init__(self, log=None, where='???',
                 lax_digests=False, spec_version='1.0'):
        """Initialize OCFL Inventory Validator."""
        self.log = ValidationLogger() if log is None else log
        self.where = where  # context string included in every error/warning
        self.spec_version = spec_version
        # Object state (filled in by validate())
        self.inventory = None
        self.id = None
        self.digest_algorithm = 'sha512'  # spec default
        self.content_directory = 'content'  # spec default
        self.all_versions = []
        self.manifest_files = None
        self.unnormalized_digests = None
        self.head = 'UNKNOWN'
        # Validation control
        self.lax_digests = lax_digests
        # Configuration
        self.spec_versions_supported = ('1.0', '1.1')

    def error(self, code, **args):
        """Error with added context."""
        self.log.error(code, where=self.where, **args)

    def warning(self, code, **args):
        """Warning with added context."""
        self.log.warning(code, where=self.where, **args)

    def validate(self, inventory, extract_spec_version=False):
        """Validate a given inventory.

        If extract_spec_version is True then will look at the type value to determine
        the specification version. In the case that there is no type value or it isn't
        valid, then other tests will be based on the version given in self.spec_version.
        """
        # Basic structure
        self.inventory = inventory
        if 'id' in inventory:
            iid = inventory['id']
            if not isinstance(iid, str) or iid == '':
                self.error("E037a")
            else:
                # URI syntax https://www.rfc-editor.org/rfc/rfc3986.html#section-3.1 :
                # scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
                if not re.match(r'''[a-z][a-z\d\+\-\.]*:.+''', iid, re.IGNORECASE):
                    self.warning("W005", id=iid)
                self.id = iid
        else:
            self.error("E036a")
        if 'type' not in inventory:
            self.error("E036b")
        elif not isinstance(inventory['type'], str):
            self.error("E999")
        elif extract_spec_version:
            m = re.match(r'''https://ocfl.io/(\d+.\d)/spec/#inventory''', inventory['type'])
            if not m:
                self.error('E038b', got=inventory['type'], assumed_spec_version=self.spec_version)
            elif m.group(1) in self.spec_versions_supported:
                self.spec_version = m.group(1)
            else:
                self.error("E038c", got=m.group(1), assumed_spec_version=self.spec_version)
        elif inventory['type'] != 'https://ocfl.io/' + self.spec_version + '/spec/#inventory':
            self.error("E038a", expected='https://ocfl.io/' + self.spec_version + '/spec/#inventory', got=inventory['type'])
        if 'digestAlgorithm' not in inventory:
            self.error("E036c")
        elif inventory['digestAlgorithm'] == 'sha512':
            pass  # spec default, nothing to record
        elif self.lax_digests:
            self.digest_algorithm = inventory['digestAlgorithm']
        elif inventory['digestAlgorithm'] == 'sha256':
            self.warning("W004")
            self.digest_algorithm = inventory['digestAlgorithm']
        else:
            self.error("E039", digest_algorithm=inventory['digestAlgorithm'])
        if 'contentDirectory' in inventory:
            # Careful only to set self.content_directory if value is safe
            cd = inventory['contentDirectory']
            if not isinstance(cd, str) or '/' in cd:
                self.error("E017")
            elif cd in ('.', '..'):
                self.error("E018")
            else:
                self.content_directory = cd
        manifest_files_correct_format = None
        if 'manifest' not in inventory:
            self.error("E041a")
        else:
            (self.manifest_files, manifest_files_correct_format, self.unnormalized_digests) = self.validate_manifest(inventory['manifest'])
        digests_used = []
        if 'versions' not in inventory:
            self.error("E041b")
        else:
            self.all_versions = self.validate_version_sequence(inventory['versions'])
            digests_used = self.validate_versions(inventory['versions'], self.all_versions, self.unnormalized_digests)
        if 'head' not in inventory:
            self.error("E036d")
        elif len(self.all_versions) > 0:
            self.head = self.all_versions[-1]
            if inventory['head'] != self.head:
                self.error("E040", got=inventory['head'], expected=self.head)
        if len(self.all_versions) == 0:
            # Abort tests if we don't have a valid version sequence, otherwise
            # there will likely be spurious subsequent error reports
            return
        if len(self.all_versions) > 0:
            if manifest_files_correct_format is not None:
                self.check_content_paths_map_to_versions(manifest_files_correct_format, self.all_versions)
        if self.manifest_files is not None:
            self.check_digests_present_and_used(self.manifest_files, digests_used)
        if 'fixity' in inventory:
            self.validate_fixity(inventory['fixity'], self.manifest_files)

    def validate_manifest(self, manifest):
        """Validate manifest block in inventory.

        Returns:
            * manifest_files - a mapping from file to digest for each file in
              the manifest
            * manifest_files_correct_format - a simple list of the manifest file
              paths that passed initial checks. They need to be checked for valid
              version directories later, when we know what version directories
              are valid
            * unnormalized_digests - a set of the original digests in unnormalized
              form that MUST match exactly the values used in state blocks
        """
        manifest_files = {}
        manifest_files_correct_format = []
        unnormalized_digests = set()
        manifest_digests = set()  # normalized digests, for duplicate detection
        if not isinstance(manifest, dict):
            self.error('E041c')
        else:
            content_paths = set()
            content_directories = set()
            for digest in manifest:
                m = re.match(self.digest_regex(), digest)
                if not m:
                    self.error('E025a', digest=digest, algorithm=self.digest_algorithm)  # wrong form of digest
                elif not isinstance(manifest[digest], list):
                    self.error('E092', digest=digest)  # must have path list value
                else:
                    unnormalized_digests.add(digest)
                    norm_digest = normalized_digest(digest, self.digest_algorithm)
                    if norm_digest in manifest_digests:
                        # We have already seen this in different un-normalized form!
                        self.error("E096", digest=norm_digest)
                    else:
                        manifest_digests.add(norm_digest)
                    for file in manifest[digest]:
                        manifest_files[file] = norm_digest
                        if self.check_content_path(file, content_paths, content_directories):
                            manifest_files_correct_format.append(file)
            # Check for conflicting content paths
            for path in content_directories:
                if path in content_paths:
                    self.error("E101b", path=path)
        return manifest_files, manifest_files_correct_format, unnormalized_digests

    def validate_fixity(self, fixity, manifest_files):
        """Validate fixity block in inventory.

        Check the structure of the fixity block and makes sure that only files
        listed in the manifest are referenced.
        """
        if not isinstance(fixity, dict):
            # The value of fixity must be a JSON object. In v1.0 I catch not an object
            # as part of E056 but this was clarified as E111 in v1.1. The value may
            # be an empty object in either case
            self.error('E056a' if self.spec_version == '1.0' else 'E111')
        else:
            for digest_algorithm in fixity:
                known_digest = True
                try:
                    regex = digest_regex(digest_algorithm)
                except ValueError:
                    if not self.lax_digests:
                        # NOTE(review): this reports the inventory's main
                        # digestAlgorithm rather than the offending fixity
                        # block algorithm — looks wrong; confirm intent
                        self.error('E056b', algorithm=self.digest_algorithm)
                        continue
                    # Match anything
                    regex = r'''^.*$'''
                    known_digest = False
                fixity_algoritm_block = fixity[digest_algorithm]
                if not isinstance(fixity_algoritm_block, dict):
                    # NOTE(review): same concern as E056b above about which
                    # algorithm is reported here
                    self.error('E057a', algorithm=self.digest_algorithm)
                else:
                    digests_seen = set()
                    for digest in fixity_algoritm_block:
                        m = re.match(regex, digest)
                        if not m:
                            self.error('E057b', digest=digest, algorithm=digest_algorithm)  # wrong form of digest
                        elif not isinstance(fixity_algoritm_block[digest], list):
                            self.error('E057c', digest=digest, algorithm=digest_algorithm)  # must have path list value
                        else:
                            if known_digest:
                                norm_digest = normalized_digest(digest, digest_algorithm)
                            else:
                                norm_digest = digest
                            if norm_digest in digests_seen:
                                # We have already seen this in different un-normalized form!
                                self.error("E097", digest=norm_digest, algorithm=digest_algorithm)
                            else:
                                digests_seen.add(norm_digest)
                            for file in fixity_algoritm_block[digest]:
                                if file not in manifest_files:
                                    self.error("E057d", digest=norm_digest, algorithm=digest_algorithm, path=file)

    def validate_version_sequence(self, versions):
        """Validate sequence of version names in versions block in inventory.

        Returns an array of in-sequence version directories that are part
        of a valid sequence. May exclude other version directory names that are
        not part of the valid sequence if an error is thrown.
        """
        all_versions = []
        if not isinstance(versions, dict):
            self.error("E044")
            return all_versions
        if len(versions) == 0:
            self.error("E008")
            return all_versions
        # Validate version sequence
        # https://ocfl.io/draft/spec/#version-directories
        zero_padded = None  # padding width, or False for unpadded 'v1' style
        max_version_num = 999999  # Excessive limit
        if 'v1' in versions:
            fmt = 'v%d'
            zero_padded = False
            all_versions.append('v1')
        else:  # Find padding size
            for n in range(2, 11):
                fmt = 'v%0' + str(n) + 'd'
                vkey = fmt % 1
                if vkey in versions:
                    all_versions.append(vkey)
                    zero_padded = n
                    max_version_num = (10 ** (n - 1)) - 1
                    break
            if not zero_padded:
                self.error("E009")
                return all_versions
        if zero_padded:
            self.warning("W001")
        # Have v1 and know format, work through to check sequence
        for n in range(2, max_version_num + 1):
            v = (fmt % n)
            if v in versions:
                all_versions.append(v)
            else:
                if len(versions) != (n - 1):
                    self.error("E010")  # Extra version dirs outside sequence
                return all_versions
        # We have now included all possible versions up to the zero padding
        # size, if there are more versions than this number then we must
        # have extra that violate the zero-padding rule or are out of
        # sequence
        if len(versions) > max_version_num:
            self.error("E011")
        return all_versions

    def validate_versions(self, versions, all_versions, unnormalized_digests):
        """Validate versions blocks in inventory.

        Requires as input two things which are assumed to be structurally correct
        from prior basic validation:

        * versions - which is the JSON object (dict) from the inventory
        * all_versions - an ordered list of the versions to look at in versions
          (all other keys in versions will be ignored)

        Returns a list of digests_used which can then be checked against the
        manifest.
        """
        digests_used = []
        for v in all_versions:
            version = versions[v]
            if 'created' not in version:
                self.error('E048', version=v)  # No created
            elif not isinstance(versions[v]['created'], str):
                self.error('E049d', version=v)  # Bad created
            else:
                created = versions[v]['created']
                try:
                    str_to_datetime(created)  # catch ValueError if fails
                    if not re.search(r'''(Z|[+-]\d\d:\d\d)$''', created):  # FIXME - kludge
                        self.error('E049a', version=v)
                    if not re.search(r'''T\d\d:\d\d:\d\d''', created):  # FIXME - kludge
                        self.error('E049b', version=v)
                except ValueError as e:
                    self.error('E049c', version=v, description=str(e))
            if 'state' in version:
                digests_used += self.validate_state_block(version['state'], version=v, unnormalized_digests=unnormalized_digests)
            else:
                self.error('E048c', version=v)
            if 'message' not in version:
                self.warning('W007a', version=v)
            elif not isinstance(version['message'], str):
                self.error('E094', version=v)
            if 'user' not in version:
                self.warning('W007b', version=v)
            else:
                user = version['user']
                if not isinstance(user, dict):
                    self.error('E054a', version=v)
                else:
                    if 'name' not in user or not isinstance(user['name'], str):
                        self.error('E054b', version=v)
                    if 'address' not in user:
                        self.warning('W008', version=v)
                    elif not isinstance(user['address'], str):
                        self.error('E054c', version=v)
                    elif not re.match(r'''\w{3,6}:''', user['address']):
                        # Address present but doesn't look like a URI (no scheme)
                        self.warning('W009', version=v)
        return digests_used

    def validate_state_block(self, state, version, unnormalized_digests):
        """Validate state block in a version in an inventory.

        The version is used only for error reporting.

        Returns a list of content digests referenced in the state block.
        """
        digests = []
        logical_paths = set()
        logical_directories = set()
        if not isinstance(state, dict):
            self.error('E050c', version=version)
        else:
            # Compile once; the same digest pattern is tested for every key
            digest_re = re.compile(self.digest_regex())
            for digest in state:
                if not digest_re.match(digest):
                    self.error('E050d', version=version, digest=digest)
                elif not isinstance(state[digest], list):
                    self.error('E050e', version=version, digest=digest)
                else:
                    for path in state[digest]:
                        if path in logical_paths:
                            self.error("E095a", version=version, path=path)
                        else:
                            self.check_logical_path(path, version, logical_paths, logical_directories)
                    if digest not in unnormalized_digests:
                        # Exact string value must match, not just normalized
                        self.error("E050f", version=version, digest=digest)
                    norm_digest = normalized_digest(digest, self.digest_algorithm)
                    digests.append(norm_digest)
            # Check for conflicting logical paths
            for path in logical_directories:
                if path in logical_paths:
                    self.error("E095b", version=version, path=path)
        return digests

    def check_content_paths_map_to_versions(self, manifest_files, all_versions):
        """Check that every content path starts with a valid version.

        The content directory component has already been checked in
        check_content_path(). We have already tested all paths enough
        to know that they can be split into at least 2 components.
        """
        for path in manifest_files:
            version_dir, dummy_rest = path.split('/', 1)
            if version_dir not in all_versions:
                self.error('E042b', path=path)

    def check_digests_present_and_used(self, manifest_files, digests_used):
        """Check all digests in manifest that are needed are present and used."""
        in_manifest = set(manifest_files.values())
        in_state = set(digests_used)
        not_in_manifest = in_state.difference(in_manifest)
        if len(not_in_manifest) > 0:
            self.error("E050a", digests=", ".join(sorted(not_in_manifest)))
        not_in_state = in_manifest.difference(in_state)
        if len(not_in_state) > 0:
            self.error("E107", digests=", ".join(sorted(not_in_state)))

    def digest_regex(self):
        """Return regex for validating un-normalized digest format."""
        try:
            # Calls the module-level digest_regex() function, which this
            # method name deliberately shadows as a class attribute
            return digest_regex(self.digest_algorithm)
        except ValueError:
            if not self.lax_digests:
                self.error('E026a', digest=self.digest_algorithm)
        # Match anything
        return r'''^.*$'''

    def check_logical_path(self, path, version, logical_paths, logical_directories):
        """Check logical path and accumulate paths/directories for E095b check.

        logical_paths and logical_directories are expected to be sets.
        Only adds good paths to the accumulated paths/directories.
        """
        if path.startswith('/') or path.endswith('/'):
            self.error("E053", version=version, path=path)
        else:
            elements = path.split('/')
            for element in elements:
                if element in ['.', '..', '']:
                    self.error("E052", version=version, path=path)
                    return
            # Accumulate paths and directories
            logical_paths.add(path)
            logical_directories.add('/'.join(elements[0:-1]))

    def check_content_path(self, path, content_paths, content_directories):
        """Check content path and accumulate paths/directories for E101 check.

        Returns True if valid, else False. Only adds good paths to the
        accumulated paths/directories. We don't yet know the set of valid
        version directories so the check here is just for 'v' + digits.
        """
        if path.startswith('/') or path.endswith('/'):
            self.error("E100", path=path)
            return False
        # NOTE(review): self.content_directory is interpolated into the
        # regex unescaped — confirm it can never contain metacharacters
        m = re.match(r'''^(v\d+/''' + self.content_directory + r''')/(.+)''', path)
        if not m:
            self.error("E042a", path=path)
            return False
        elements = m.group(2).split('/')
        for element in elements:
            if element in ('', '.', '..'):
                self.error("E099", path=path)
                return False
        # Accumulate paths and directories if not seen before
        if path in content_paths:
            self.error("E101a", path=path)
            return False
        content_paths.add(path)
        content_directories.add('/'.join([m.group(1)] + elements[0:-1]))
        return True

    def validate_as_prior_version(self, prior):
        """Check that prior is a valid prior version of the current inventory object.

        The input variable prior is also expected to be an InventoryValidator object
        and both self and prior inventories are assumed to have been checked for
        internal consistency.
        """
        # Must have a subset of versions which also checks zero padding format etc.
        if not set(prior.all_versions) < set(self.all_versions):
            self.error('E066a', prior_head=prior.head)
        else:
            # Check references to files but realize that there might be different
            # digest algorithms between versions
            version = 'no-version'
            for version in prior.all_versions:
                # If the digest algorithm is the same then we can make a
                # direct check on whether the state blocks match
                if prior.digest_algorithm == self.digest_algorithm:
                    self.compare_states_for_version(prior, version)
                # Now check the mappings from state to logical path, which must
                # be consistent even if the digestAlgorithm is different between
                # versions. Get maps from logical paths to files on disk:
                prior_map = get_logical_path_map(prior.inventory, version)
                self_map = get_logical_path_map(self.inventory, version)
                # Look first for differences in logical paths listed
                only_in_prior = prior_map.keys() - self_map.keys()
                only_in_self = self_map.keys() - prior_map.keys()
                if only_in_prior or only_in_self:
                    if only_in_prior:
                        self.error('E066b', version=version, prior_head=prior.head, only_in=prior.head, logical_paths=','.join(only_in_prior))
                    if only_in_self:
                        self.error('E066b', version=version, prior_head=prior.head, only_in=self.where, logical_paths=','.join(only_in_self))
                else:
                    # Check them all in details - digests must match
                    for logical_path, this_map in prior_map.items():
                        if not this_map.issubset(self_map[logical_path]):
                            self.error('E066c', version=version, prior_head=prior.head,
                                       logical_path=logical_path, prior_content=','.join(this_map),
                                       current_content=','.join(self_map[logical_path]))
                # Check metadata
                prior_version = prior.inventory['versions'][version]
                self_version = self.inventory['versions'][version]
                for key in ('created', 'message', 'user'):
                    if prior_version.get(key) != self_version.get(key):
                        self.warning('W011', version=version, prior_head=prior.head, key=key)

    def compare_states_for_version(self, prior, version):
        """Compare state blocks for version between self and prior.

        Assumes the same digest algorithm in both, do not call otherwise!

        Looks only for digests that appear in one but not in the other, the code
        in validate_as_prior_version(..) does a check for whether the same sets
        of logical files appear and we don't want to duplicate an error message
        about that.

        While the mapping checks in validate_as_prior_version(..) do all that is
        necessary to detect an error, the additional errors that may be generated
        here provide more detailed diagnostics in the case that the digest
        algorithm is the same across versions being compared.
        """
        self_state = self.inventory['versions'][version]['state']
        prior_state = prior.inventory['versions'][version]['state']
        for digest in set(self_state.keys()).union(prior_state.keys()):
            if digest not in prior_state:
                self.error('E066d', version=version, prior_head=prior.head,
                           digest=digest, logical_files=', '.join(self_state[digest]))
            elif digest not in self_state:
                self.error('E066e', version=version, prior_head=prior.head,
                           digest=digest, logical_files=', '.join(prior_state[digest]))
|
ocfl/inventory_validator.py
|
codereval_python_data_76
|
Validate fixity block in inventory.
Check the structure of the fixity block and makes sure that only files
listed in the manifest are referenced.
def validate_fixity(self, fixity, manifest_files):
"""Validate fixity block in inventory.
Check the structure of the fixity block and makes sure that only files
listed in the manifest are referenced.
"""
if not isinstance(fixity, dict):
# The value of fixity must be a JSON object. In v1.0 I catch not an object
# as part of E056 but this was clarified as E111 in v1.1. The value may
# be an empty object in either case
self.error('E056a' if self.spec_version == '1.0' else 'E111')
else:
for digest_algorithm in fixity:
known_digest = True
try:
regex = digest_regex(digest_algorithm)
except ValueError:
if not self.lax_digests:
self.error('E056b', algorithm=self.digest_algorithm)
continue
# Match anything
regex = r'''^.*$'''
known_digest = False
fixity_algoritm_block = fixity[digest_algorithm]
if not isinstance(fixity_algoritm_block, dict):
self.error('E057a', algorithm=self.digest_algorithm)
else:
digests_seen = set()
for digest in fixity_algoritm_block:
m = re.match(regex, digest)
if not m:
self.error('E057b', digest=digest, algorithm=digest_algorithm) # wrong form of digest
elif not isinstance(fixity_algoritm_block[digest], list):
self.error('E057c', digest=digest, algorithm=digest_algorithm) # must have path list value
else:
if known_digest:
norm_digest = normalized_digest(digest, digest_algorithm)
else:
norm_digest = digest
if norm_digest in digests_seen:
# We have already seen this in different un-normalized form!
self.error("E097", digest=norm_digest, algorithm=digest_algorithm)
else:
digests_seen.add(norm_digest)
for file in fixity_algoritm_block[digest]:
if file not in manifest_files:
self.error("E057d", digest=norm_digest, algorithm=digest_algorithm, path=file)
"""OCFL Inventory Validator.
Code to validate the Python representation of an OCFL Inventory
as read with json.load(). Does not examine anything in storage.
"""
import re
from .digest import digest_regex, normalized_digest
from .validation_logger import ValidationLogger
from .w3c_datetime import str_to_datetime
def get_logical_path_map(inventory, version):
    """Map logical paths in a version's state to sets of content files.

    Returns a dictionary: logical_path_in_state -> set(content_files).
    The set of content files may include references to duplicate files in
    versions later than the one being described. Digests present in the
    state but absent from the manifest are ignored.
    """
    manifest = inventory['manifest']
    version_state = inventory['versions'][version]['state']
    return {
        logical_path: set(manifest[digest])
        for digest, logical_paths in version_state.items()
        if digest in manifest
        for logical_path in logical_paths
    }
class InventoryValidator():
    """Class for OCFL Inventory Validator.

    Validates the Python representation of an OCFL inventory as read with
    json.load(); does not examine anything in storage.
    """

    def __init__(self, log=None, where='???',
                 lax_digests=False, spec_version='1.0'):
        """Initialize OCFL Inventory Validator.

        Arguments:
            log: ValidationLogger used to record errors and warnings
                (a fresh one is created when None)
            where: short context string included in every logged message
            lax_digests: True to accept digest algorithms beyond those the
                validator knows about
            spec_version: OCFL specification version to validate against
        """
        self.log = ValidationLogger() if log is None else log
        self.where = where
        self.spec_version = spec_version
        # Object state -- populated by validate()
        self.inventory = None
        self.id = None
        self.digest_algorithm = 'sha512'  # default until inventory overrides
        self.content_directory = 'content'  # default until inventory overrides
        self.all_versions = []
        self.manifest_files = None
        self.unnormalized_digests = None
        self.head = 'UNKNOWN'
        # Validation control
        self.lax_digests = lax_digests
        # Configuration
        self.spec_versions_supported = ('1.0', '1.1')

    def error(self, code, **args):
        """Log an error with added context (self.where)."""
        self.log.error(code, where=self.where, **args)

    def warning(self, code, **args):
        """Log a warning with added context (self.where)."""
        self.log.warning(code, where=self.where, **args)
def validate(self, inventory, extract_spec_version=False):
    """Validate a given inventory.

    If extract_spec_version is True then will look at the type value to determine
    the specification version. In the case that there is no type value or it isn't
    valid, then other tests will be based on the version given in self.spec_version.
    """
    # Basic structure
    self.inventory = inventory
    if 'id' in inventory:
        iid = inventory['id']
        if not isinstance(iid, str) or iid == '':
            self.error("E037a")
        else:
            # URI syntax https://www.rfc-editor.org/rfc/rfc3986.html#section-3.1 :
            # scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
            if not re.match(r'''[a-z][a-z\d\+\-\.]*:.+''', iid, re.IGNORECASE):
                self.warning("W005", id=iid)
            self.id = iid
    else:
        self.error("E036a")
    if 'type' not in inventory:
        self.error("E036b")
    elif not isinstance(inventory['type'], str):
        self.error("E999")
    elif extract_spec_version:
        # Try to read the spec version out of the type URI
        m = re.match(r'''https://ocfl.io/(\d+.\d)/spec/#inventory''', inventory['type'])
        if not m:
            self.error('E038b', got=inventory['type'], assumed_spec_version=self.spec_version)
        elif m.group(1) in self.spec_versions_supported:
            self.spec_version = m.group(1)
        else:
            self.error("E038c", got=m.group(1), assumed_spec_version=self.spec_version)
    elif inventory['type'] != 'https://ocfl.io/' + self.spec_version + '/spec/#inventory':
        self.error("E038a", expected='https://ocfl.io/' + self.spec_version + '/spec/#inventory', got=inventory['type'])
    if 'digestAlgorithm' not in inventory:
        self.error("E036c")
    elif inventory['digestAlgorithm'] == 'sha512':
        pass  # preferred algorithm, nothing to record
    elif self.lax_digests:
        # NOTE: lax mode accepts any algorithm, so sha256 gets no W004 here
        self.digest_algorithm = inventory['digestAlgorithm']
    elif inventory['digestAlgorithm'] == 'sha256':
        self.warning("W004")
        self.digest_algorithm = inventory['digestAlgorithm']
    else:
        self.error("E039", digest_algorithm=inventory['digestAlgorithm'])
    if 'contentDirectory' in inventory:
        # Careful only to set self.content_directory if value is safe
        cd = inventory['contentDirectory']
        if not isinstance(cd, str) or '/' in cd:
            self.error("E017")
        elif cd in ('.', '..'):
            self.error("E018")
        else:
            self.content_directory = cd
    manifest_files_correct_format = None
    if 'manifest' not in inventory:
        self.error("E041a")
    else:
        (self.manifest_files, manifest_files_correct_format, self.unnormalized_digests) = self.validate_manifest(inventory['manifest'])
    digests_used = []
    if 'versions' not in inventory:
        self.error("E041b")
    else:
        self.all_versions = self.validate_version_sequence(inventory['versions'])
        digests_used = self.validate_versions(inventory['versions'], self.all_versions, self.unnormalized_digests)
    if 'head' not in inventory:
        self.error("E036d")
    elif len(self.all_versions) > 0:
        # head must name the latest version in the sequence
        self.head = self.all_versions[-1]
        if inventory['head'] != self.head:
            self.error("E040", got=inventory['head'], expected=self.head)
    if len(self.all_versions) == 0:
        # Abort tests is we don't have a valid version sequence, otherwise
        # there will likely be spurious subsequent error reports
        return
    if len(self.all_versions) > 0:  # NOTE(review): always true after the early return above
        if manifest_files_correct_format is not None:
            self.check_content_paths_map_to_versions(manifest_files_correct_format, self.all_versions)
        if self.manifest_files is not None:
            self.check_digests_present_and_used(self.manifest_files, digests_used)
    if 'fixity' in inventory:
        self.validate_fixity(inventory['fixity'], self.manifest_files)
def validate_manifest(self, manifest):
    """Validate manifest block in inventory.

    Returns:
        * manifest_files - a mapping from file to digest for each file in
          the manifest
        * manifest_files_correct_format - a simple list of the manifest file
          paths that passed initial checks. They need to be checked for valid
          version directories later, when we know what version directories
          are valid
        * unnormalized_digests - a set of the original digests in unnormalized
          form that MUST match exactly the values used in state blocks
    """
    manifest_files = {}
    manifest_files_correct_format = []
    unnormalized_digests = set()
    manifest_digests = set()
    if not isinstance(manifest, dict):
        self.error('E041c')
    else:
        content_paths = set()
        content_directories = set()
        for digest in manifest:
            m = re.match(self.digest_regex(), digest)
            if not m:
                self.error('E025a', digest=digest, algorithm=self.digest_algorithm)  # wrong form of digest
            elif not isinstance(manifest[digest], list):
                self.error('E092', digest=digest)  # must have path list value
            else:
                unnormalized_digests.add(digest)
                norm_digest = normalized_digest(digest, self.digest_algorithm)
                if norm_digest in manifest_digests:
                    # We have already seen this in different un-normalized form!
                    self.error("E096", digest=norm_digest)
                else:
                    manifest_digests.add(norm_digest)
                # Record files even for duplicated digests so later checks see them
                for file in manifest[digest]:
                    manifest_files[file] = norm_digest
                    if self.check_content_path(file, content_paths, content_directories):
                        manifest_files_correct_format.append(file)
        # Check for conflicting content paths (a path that is also a directory)
        for path in content_directories:
            if path in content_paths:
                self.error("E101b", path=path)
    return manifest_files, manifest_files_correct_format, unnormalized_digests
def validate_fixity(self, fixity, manifest_files):
    """Validate fixity block in inventory.

    Check the structure of the fixity block and make sure that only files
    listed in the manifest are referenced.

    Arguments:
        fixity: the parsed value of the inventory's `fixity` key
        manifest_files: mapping (or set) of content paths present in the
            manifest; any fixity path outside it is an error
    """
    if not isinstance(fixity, dict):
        # The value of fixity must be a JSON object. In v1.0 this is caught
        # as part of E056 but was clarified as E111 in v1.1. The value may
        # be an empty object in either case
        self.error('E056a' if self.spec_version == '1.0' else 'E111')
        return
    for digest_algorithm in fixity:
        known_digest = True
        try:
            regex = digest_regex(digest_algorithm)
        except ValueError:
            if not self.lax_digests:
                # BUG FIX: report the offending fixity algorithm, not the
                # inventory's digestAlgorithm (self.digest_algorithm)
                self.error('E056b', algorithm=digest_algorithm)
                continue
            # Unknown algorithm allowed in lax mode: match anything
            regex = r'''^.*$'''
            known_digest = False
        fixity_block = fixity[digest_algorithm]
        if not isinstance(fixity_block, dict):
            # BUG FIX: as above, report the fixity block's algorithm
            self.error('E057a', algorithm=digest_algorithm)
            continue
        digests_seen = set()
        for digest in fixity_block:
            if not re.match(regex, digest):
                self.error('E057b', digest=digest, algorithm=digest_algorithm)  # wrong form of digest
                continue
            if not isinstance(fixity_block[digest], list):
                self.error('E057c', digest=digest, algorithm=digest_algorithm)  # must have path list value
                continue
            norm_digest = normalized_digest(digest, digest_algorithm) if known_digest else digest
            if norm_digest in digests_seen:
                # We have already seen this in a different un-normalized form!
                self.error("E097", digest=norm_digest, algorithm=digest_algorithm)
            else:
                digests_seen.add(norm_digest)
            # Paths are checked even when the digest was a duplicate
            for file in fixity_block[digest]:
                if file not in manifest_files:
                    self.error("E057d", digest=norm_digest, algorithm=digest_algorithm, path=file)
def validate_version_sequence(self, versions):
    """Validate sequence of version names in versions block in inventory.

    Returns an array of in-sequence version directories that are part
    of a valid sequence. May exclude other version directory names that are
    not part of the valid sequence if an error is thrown.
    """
    all_versions = []
    if not isinstance(versions, dict):
        self.error("E044")
        return all_versions
    if len(versions) == 0:
        self.error("E008")
        return all_versions
    # Validate version sequence
    # https://ocfl.io/draft/spec/#version-directories
    zero_padded = None
    max_version_num = 999999  # Excessive limit for the unpadded case
    if 'v1' in versions:
        fmt = 'v%d'
        zero_padded = False
        all_versions.append('v1')
    else:  # Find padding size by looking for the zero-padded first version
        for n in range(2, 11):
            fmt = 'v%0' + str(n) + 'd'
            vkey = fmt % 1
            if vkey in versions:
                all_versions.append(vkey)
                zero_padded = n
                # Padding of width n allows at most 10^(n-1) - 1 versions
                max_version_num = (10 ** (n - 1)) - 1
                break
        if not zero_padded:
            # No first version found in any accepted form
            self.error("E009")
            return all_versions
    if zero_padded:
        self.warning("W001")
    # Have v1 and know format, work through to check sequence
    for n in range(2, max_version_num + 1):
        v = (fmt % n)
        if v in versions:
            all_versions.append(v)
        else:
            if len(versions) != (n - 1):
                self.error("E010")  # Extra version dirs outside sequence
            return all_versions
    # We have now included all possible versions up to the zero padding
    # size, if there are more versions than this number then we must
    # have extra that violate the zero-padding rule or are out of
    # sequence
    if len(versions) > max_version_num:
        self.error("E011")
    return all_versions
def validate_versions(self, versions, all_versions, unnormalized_digests):
    """Validate versions blocks in inventory.

    Requires as input two things which are assumed to be structurally correct
    from prior basic validation:

        * versions - which is the JSON object (dict) from the inventory
        * all_versions - an ordered list of the versions to look at in versions
          (all other keys in versions will be ignored)

    Returns a list of digests_used which can then be checked against the
    manifest.
    """
    digests_used = []
    for v in all_versions:
        version = versions[v]
        if 'created' not in version:
            self.error('E048', version=v)  # No created
        elif not isinstance(versions[v]['created'], str):
            self.error('E049d', version=v)  # Bad created
        else:
            created = versions[v]['created']
            try:
                str_to_datetime(created)  # catch ValueError if fails
                # Timezone designator and seconds are required by the spec
                if not re.search(r'''(Z|[+-]\d\d:\d\d)$''', created):  # FIXME - kludge
                    self.error('E049a', version=v)
                if not re.search(r'''T\d\d:\d\d:\d\d''', created):  # FIXME - kludge
                    self.error('E049b', version=v)
            except ValueError as e:
                self.error('E049c', version=v, description=str(e))
        if 'state' in version:
            digests_used += self.validate_state_block(version['state'], version=v, unnormalized_digests=unnormalized_digests)
        else:
            self.error('E048c', version=v)
        if 'message' not in version:
            self.warning('W007a', version=v)
        elif not isinstance(version['message'], str):
            self.error('E094', version=v)
        if 'user' not in version:
            self.warning('W007b', version=v)
        else:
            user = version['user']
            if not isinstance(user, dict):
                self.error('E054a', version=v)
            else:
                # name is required; address is recommended and should be a URI
                if 'name' not in user or not isinstance(user['name'], str):
                    self.error('E054b', version=v)
                if 'address' not in user:
                    self.warning('W008', version=v)
                elif not isinstance(user['address'], str):
                    self.error('E054c', version=v)
                elif not re.match(r'''\w{3,6}:''', user['address']):
                    self.warning('W009', version=v)  # address not URI-like
    return digests_used
def validate_state_block(self, state, version, unnormalized_digests):
    """Validate state block in a version in an inventory.

    The version is used only for error reporting.

    Returns a list of content digests referenced in the state block.
    """
    digests = []
    logical_paths = set()
    logical_directories = set()
    if not isinstance(state, dict):
        self.error('E050c', version=version)
    else:
        # Compile once; the same digest form applies to every key in state
        digest_re = re.compile(self.digest_regex())
        for digest in state:
            if not digest_re.match(digest):
                self.error('E050d', version=version, digest=digest)
            elif not isinstance(state[digest], list):
                self.error('E050e', version=version, digest=digest)
            else:
                for path in state[digest]:
                    if path in logical_paths:
                        # Same logical path mapped from more than one digest
                        self.error("E095a", version=version, path=path)
                    else:
                        self.check_logical_path(path, version, logical_paths, logical_directories)
                if digest not in unnormalized_digests:
                    # Exact string value must match, not just normalized
                    self.error("E050f", version=version, digest=digest)
                norm_digest = normalized_digest(digest, self.digest_algorithm)
                digests.append(norm_digest)
        # Check for conflicting logical paths (a path that is also a directory)
        for path in logical_directories:
            if path in logical_paths:
                self.error("E095b", version=version, path=path)
    return digests
def check_content_paths_map_to_versions(self, manifest_files, all_versions):
"""Check that every content path starts with a valid version.
The content directory component has already been checked in
check_content_path(). We have already tested all paths enough
to know that they can be split into at least 2 components.
"""
for path in manifest_files:
version_dir, dummy_rest = path.split('/', 1)
if version_dir not in all_versions:
self.error('E042b', path=path)
def check_digests_present_and_used(self, manifest_files, digests_used):
"""Check all digests in manifest that are needed are present and used."""
in_manifest = set(manifest_files.values())
in_state = set(digests_used)
not_in_manifest = in_state.difference(in_manifest)
if len(not_in_manifest) > 0:
self.error("E050a", digests=", ".join(sorted(not_in_manifest)))
not_in_state = in_manifest.difference(in_state)
if len(not_in_state) > 0:
self.error("E107", digests=", ".join(sorted(not_in_state)))
def digest_regex(self):
    """Return regex for validating un-normalized digest format.

    NOTE(review): this method shadows the module-level digest_regex()
    imported from .digest, which is what the call below resolves to.
    Falls back to a match-anything pattern for unknown algorithms,
    reporting E026a unless lax digests are allowed.
    """
    try:
        return digest_regex(self.digest_algorithm)
    except ValueError:
        if not self.lax_digests:
            self.error('E026a', digest=self.digest_algorithm)
        # Match anything
        return r'''^.*$'''
def check_logical_path(self, path, version, logical_paths, logical_directories):
"""Check logical path and accumulate paths/directories for E095b check.
logical_paths and logical_directories are expected to be sets.
Only adds good paths to the accumulated paths/directories.
"""
if path.startswith('/') or path.endswith('/'):
self.error("E053", version=version, path=path)
else:
elements = path.split('/')
for element in elements:
if element in ['.', '..', '']:
self.error("E052", version=version, path=path)
return
# Accumulate paths and directories
logical_paths.add(path)
logical_directories.add('/'.join(elements[0:-1]))
def check_content_path(self, path, content_paths, content_directories):
    """Check content path and accumulate paths/directories for E101 check.

    Returns True if valid, else False. Only adds good paths to the
    accumulated paths/directories. We don't yet know the set of valid
    version directories so the check here is just for 'v' + digits.
    """
    if path.startswith('/') or path.endswith('/'):
        self.error("E100", path=path)
        return False
    # Must be <version dir>/<content dir>/<rest>
    m = re.match(r'''^(v\d+/''' + self.content_directory + r''')/(.+)''', path)
    if not m:
        self.error("E042a", path=path)
        return False
    elements = m.group(2).split('/')
    for element in elements:
        if element in ('', '.', '..'):
            # Empty or relative path segments are forbidden
            self.error("E099", path=path)
            return False
    # Accumulate paths and directories if not seen before
    if path in content_paths:
        self.error("E101a", path=path)
        return False
    content_paths.add(path)
    content_directories.add('/'.join([m.group(1)] + elements[0:-1]))
    return True
def validate_as_prior_version(self, prior):
    """Check that prior is a valid prior version of the current inventory object.

    The input variable prior is also expected to be an InventoryValidator object
    and both self and prior inventories are assumed to have been checked for
    internal consistency.
    """
    # Must have a subset of versions which also checks zero padding format etc.
    if not set(prior.all_versions) < set(self.all_versions):
        self.error('E066a', prior_head=prior.head)
    else:
        # Check references to files but realize that there might be different
        # digest algorithms between versions
        version = 'no-version'  # defensive init for the metadata lookup below
        for version in prior.all_versions:
            # If the digest algorithm is the same then we can make a
            # direct check on whether the state blocks match
            if prior.digest_algorithm == self.digest_algorithm:
                self.compare_states_for_version(prior, version)
            # Now check the mappings from state to logical path, which must
            # be consistent even if the digestAlgorithm is different between
            # versions. Get maps from logical paths to files on disk:
            prior_map = get_logical_path_map(prior.inventory, version)
            self_map = get_logical_path_map(self.inventory, version)
            # Look first for differences in logical paths listed
            only_in_prior = prior_map.keys() - self_map.keys()
            only_in_self = self_map.keys() - prior_map.keys()
            if only_in_prior or only_in_self:
                if only_in_prior:
                    self.error('E066b', version=version, prior_head=prior.head, only_in=prior.head, logical_paths=','.join(only_in_prior))
                if only_in_self:
                    self.error('E066b', version=version, prior_head=prior.head, only_in=self.where, logical_paths=','.join(only_in_self))
            else:
                # Check them all in details - digests must match
                for logical_path, this_map in prior_map.items():
                    if not this_map.issubset(self_map[logical_path]):
                        self.error('E066c', version=version, prior_head=prior.head,
                                   logical_path=logical_path, prior_content=','.join(this_map),
                                   current_content=','.join(self_map[logical_path]))
            # Check metadata agrees between the two inventories for this version
            prior_version = prior.inventory['versions'][version]
            self_version = self.inventory['versions'][version]
            for key in ('created', 'message', 'user'):
                if prior_version.get(key) != self_version.get(key):
                    self.warning('W011', version=version, prior_head=prior.head, key=key)
def compare_states_for_version(self, prior, version):
"""Compare state blocks for version between self and prior.
Assumes the same digest algorithm in both, do not call otherwise!
Looks only for digests that appear in one but not in the other, the code
in validate_as_prior_version(..) does a check for whether the same sets
of logical files appear and we don't want to duplicate an error message
about that.
While the mapping checks in validate_as_prior_version(..) do all that is
necessary to detect an error, the additional errors that may be generated
here provide more detailed diagnostics in the case that the digest
algorithm is the same across versions being compared.
"""
self_state = self.inventory['versions'][version]['state']
prior_state = prior.inventory['versions'][version]['state']
for digest in set(self_state.keys()).union(prior_state.keys()):
if digest not in prior_state:
self.error('E066d', version=version, prior_head=prior.head,
digest=digest, logical_files=', '.join(self_state[digest]))
elif digest not in self_state:
self.error('E066e', version=version, prior_head=prior.head,
digest=digest, logical_files=', '.join(prior_state[digest]))
|
ocfl/inventory_validator.py
|
codereval_python_data_77
|
Return the files in `path`
def files_list(path):
"""
Return the files in `path`
"""
return os.listdir(path)
import os
import logging
import re
import shutil
import tempfile
from zipfile import ZipFile, ZIP_DEFLATED
logger = logging.getLogger(__name__)
def is_folder(source):
    """Return True when `source` is an existing directory."""
    return os.path.isdir(source)


def is_zipfile(source):
    """Return True when `source` is an existing file whose name ends in '.zip'.

    Note: only the extension is checked, not the file's actual contents.
    """
    if not os.path.isfile(source):
        return False
    return source.endswith(".zip")


def xml_files_list(path):
    """
    Return the XML files found in `path` as a lazy generator of names.
    """
    return (name for name in os.listdir(path) if name.endswith(".xml"))
def files_list(path):
    """
    Return the names of all entries in the directory `path`.
    """
    return os.listdir(path)


def read_file(path, encoding="utf-8", mode="r"):
    """Read and return the full contents of the file at `path`."""
    with open(path, mode=mode, encoding=encoding) as fp:
        return fp.read()


def read_from_zipfile(zip_path, filename):
    """Return the bytes of member `filename` inside the zip at `zip_path`."""
    with ZipFile(zip_path) as archive:
        return archive.read(filename)
def xml_files_list_from_zipfile(zip_path):
    """Return the member names of `zip_path` whose extension is exactly '.xml'."""
    with ZipFile(zip_path) as archive:
        return [
            member
            for member in archive.namelist()
            if os.path.splitext(member)[-1] == ".xml"
        ]


def files_list_from_zipfile(zip_path):
    """
    Return the files in `zip_path`

    Example:

    ```
    [
        '2318-0889-tinf-33-0421/2318-0889-tinf-33-e200069.pdf',
        '2318-0889-tinf-33-0421/2318-0889-tinf-33-e200069.xml',
        '2318-0889-tinf-33-0421/2318-0889-tinf-33-e200071.pdf',
        '2318-0889-tinf-33-0421/2318-0889-tinf-33-e200071.xml',
        '2318-0889-tinf-33-0421/2318-0889-tinf-33-e200071-gf01.tif',
    ]
    ```
    """
    with ZipFile(zip_path) as archive:
        return archive.namelist()
def write_file(path, source, mode="w"):
    """Write `source` to the file at `path`, creating parent directories.

    Text is written UTF-8 encoded unless a binary mode is given, in which
    case `source` must be bytes.
    """
    dirname = os.path.dirname(path)
    if dirname:
        # BUG FIX: previously `os.makedirs(dirname)` was called after an
        # isdir() check; for a bare filename dirname is '' and makedirs('')
        # raises FileNotFoundError. exist_ok also removes the
        # check-then-create race.
        os.makedirs(dirname, exist_ok=True)
    logger.debug("Gravando arquivo: %s", path)
    if "b" in mode:
        with open(path, mode) as f:
            f.write(source)
        return
    with open(path, mode, encoding="utf-8") as f:
        f.write(source)
def create_zip_file(files, zip_name, zip_folder=None):
    """Create a deflate-compressed zip named `zip_name` containing `files`.

    Each file is stored under its basename (directories are flattened).
    When `zip_folder` is not given a fresh temporary directory is used.
    Returns the path of the zip created.
    """
    zip_folder = zip_folder or tempfile.mkdtemp()
    zip_path = os.path.join(zip_folder, zip_name)
    with ZipFile(zip_path, 'w', ZIP_DEFLATED) as archive:
        for file_path in files:
            archive.write(file_path, os.path.basename(file_path))
    return zip_path


def delete_folder(path):
    """Remove the directory tree `path`, ignoring filesystem errors.

    BUG FIX: a bare `except: pass` previously swallowed every exception,
    including KeyboardInterrupt/SystemExit; ignore_errors only suppresses
    OS-level removal errors (e.g. the folder not existing).
    """
    shutil.rmtree(path, ignore_errors=True)


def create_temp_file(filename, content=None, mode='w'):
    """Create `filename` with `content` inside a fresh temporary directory.

    Returns the full path of the created file.
    """
    file_path = os.path.join(tempfile.mkdtemp(), filename)
    write_file(file_path, content or '', mode)
    return file_path
def copy_file(source, target):
    """Copy `source` into a fresh temporary directory under the name `target`.

    Returns the destination path.
    """
    tmpdir = tempfile.mkdtemp()
    fullpath_target = os.path.join(tmpdir, target)
    # Lazy %-style args: the message is only built if INFO is enabled
    # (the original used an f-string, which always formats).
    logger.info('Copying file %s to %s', source, fullpath_target)
    return shutil.copyfile(source, fullpath_target)


def size(file_path):
    """Return the size of `file_path` in bytes."""
    return os.path.getsize(file_path)
def get_prefix_by_xml_filename(xml_filename):
    """
    Return the prefix associated with an XML file.

    Parameters
    ----------
    xml_filename : str
        XML file name

    Returns
    -------
    str
        The file name with its extension removed
    """
    prefix, _extension = os.path.splitext(xml_filename)
    return prefix
def get_file_role(file_path, prefix, pdf_langs):
    """
    Classify a package file as 'xml', 'renditions' or 'assets'.

    Parameters
    ----------
    file_path : str
        File name
    prefix : str
        Prefix associated with the document's XML file
    pdf_langs : list
        Languages of the document's PDFs

    Returns
    -------
    str
        The file's role ('xml', 'renditions' or 'assets') within the document
    """
    stem, extension = os.path.splitext(file_path)
    if extension == '.xml':
        return 'xml'
    if extension == '.pdf':
        # The main PDF (stem == prefix) and its per-language variants
        # (prefix-<lang>) are renditions
        if stem == prefix:
            return 'renditions'
        if any(stem == f'{prefix}-{lang}' for lang in pdf_langs):
            return 'renditions'
    # Everything else (figures, unexpected PDFs, ...) is an asset
    return 'assets'
def extract_issn_from_zip_uri(zip_uri):
    """
    Extract the ISSN code from a zip file URI.

    Parameters
    ----------
    zip_uri : str
        Address of a zip file

    Returns
    -------
    str or None
        The ISSN, or None when the URI does not match the expected pattern
    """
    # BUG FIX: the trailing '.zip' previously used an unescaped dot
    # (r'.*.zip'), so any character before 'zip' was accepted
    # (e.g. '.../pkgzip' matched).
    match = re.search(r'.*/ingress/packages/(\d{4}-\d{4})/.*\.zip', zip_uri)
    if match:
        return match.group(1)
    return None
def get_filename(file_path):
    """Return the final path component (basename) of `file_path`."""
    return os.path.basename(file_path)
|
packtools/file_utils.py
|
codereval_python_data_78
|
Group files by their XML basename
Groups files by their XML basename and returns data in dict format.
Parameters
----------
source : str
path of the folder or zip file that contains the files
xmls : iterable
XML file paths used to derive each package prefix
files : list
list of files in the folder or zipfile
Returns
-------
dict
key: name of the XML files
value: Package
def _group_files_by_xml_filename(source, xmls, files):
"""
Group files by their XML basename
Groups files by their XML basename and returns data in dict format.
Parameters
----------
xml_filename : str
XML filenames
files : list
list of files in the folder or zipfile
Returns
-------
dict
key: name of the XML files
value: Package
"""
docs = {}
for xml in xmls:
basename = os.path.basename(xml)
prefix, ext = os.path.splitext(basename)
docs.setdefault(prefix, Package(source, prefix))
# XML
docs[prefix].xml = xml
for file in select_filenames_by_prefix(prefix, files):
# avalia arquivo do pacote, se é asset ou rendition
component = _eval_file(prefix, file)
if not component:
continue
# resultado do avaliação do pacote
ftype = component.get("ftype")
file_path = component["file_path"]
comp_id = component["component_id"]
if ftype:
docs[prefix].add_asset(comp_id, file_path)
else:
docs[prefix].add_rendition(comp_id, file_path)
files.remove(file)
return docs
import logging
import os
from packtools import file_utils
from zipfile import ZipFile
logger = logging.getLogger(__name__)
class Package:
    """One document package: an XML file plus its assets and PDF renditions,
    stored either in a folder or inside a zip file."""

    def __init__(self, source, name):
        # source: folder path or zip file path holding the package files
        self._source = source
        self._xml = None
        self._assets = {}
        self._renditions = {}
        self._name = name
        # Path of the zip file, or False when `source` is not a zip
        self.zip_file_path = file_utils.is_zipfile(source) and source

    @property
    def assets(self):
        """Mapping of asset basename -> full file path."""
        return self._assets

    @property
    def name(self):
        """Package name (the XML filename prefix)."""
        return self._name

    def file_path(self, file_path):
        """Return the full path for `file_path`: joined to the source folder,
        or returned unchanged when the source is a zip file."""
        if file_utils.is_folder(self._source):
            return os.path.join(self._source, file_path)
        return file_path

    def add_asset(self, basename, file_path):
        """Register an asset keyed by basename, e.g.:

        {
            "artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
            "artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
            "artigo02-gf03.png": "/path/artigo02-gf03.png",
        }
        """
        self._assets[basename] = self.file_path(file_path)

    def get_asset(self, basename):
        # Returns None when the asset is unknown
        try:
            return self._assets[basename]
        except KeyError:
            return

    def add_rendition(self, lang, file_path):
        """Register a PDF rendition keyed by language, e.g.:

        {
            "original": "artigo02.pdf",
            "en": "artigo02-en.pdf",
        }
        """
        self._renditions[lang] = self.file_path(file_path)

    def get_rendition(self, lang):
        # Returns None when there is no rendition for `lang`
        try:
            return self._renditions[lang]
        except KeyError:
            return

    @property
    def source(self):
        return self._source

    @property
    def xml(self):
        # NOTE(review): if accessed before the setter is called while the
        # source is a folder, os.path.join(source, None) raises TypeError
        # -- confirm callers always set xml first
        return self.file_path(self._xml)

    @xml.setter
    def xml(self, value):
        self._xml = value

    @property
    def renditions(self):
        return self._renditions

    @property
    def xml_content(self):
        """Raw bytes of the package XML, read from the folder or the zip."""
        if file_utils.is_folder(self._source):
            with open(self.xml, "rb") as fp:
                return fp.read()
        with ZipFile(self._source) as zf:
            return zf.read(self.xml)
def select_filenames_by_prefix(prefix, files):
    """
    Get the files which belong to a document package.

    Returns the paths in `files` whose basenames start with `prefix`
    (as decided by match_file_by_prefix).

    Parameters
    ----------
    prefix : str
        Filename prefix
    files : str list
        Files paths

    Returns
    -------
    list
        file paths whose basenames match the prefix
    """
    return list(filter(lambda file_path: match_file_by_prefix(prefix, file_path), files))
def match_file_by_prefix(prefix, file_path):
    """
    Identify whether `file_path` belongs to the document package named `prefix`.

    A file belongs to the package when its basename is the prefix followed
    by '-' (assets/renditions) or '.' (the XML and main PDF).

    Parameters
    ----------
    prefix : str
        Filename prefix
    file_path : str
        File path

    Returns
    -------
    bool
        True - file belongs to the package
    """
    basename = os.path.basename(file_path)
    # str.startswith accepts a tuple of candidate prefixes
    return basename.startswith((prefix + "-", prefix + "."))
def explore_source(source):
    """Build the packages dict from `source` (a zip file or a folder).

    Tries the zip reader first, then the folder reader. Raises ValueError
    when neither yields any packages.
    """
    packages = _explore_zipfile(source) or _explore_folder(source)
    if not packages:
        raise ValueError("%s: Invalid value for `source`" % source)
    return packages
def _explore_folder(folder):
    """
    Get packages' data from folder

    Groups files by their XML basename and returns data in dict format.

    Parameters
    ----------
    folder : str
        Folder of the package

    Returns
    -------
    dict
        prefix -> Package; implicitly None when `folder` is not a directory
    """
    if file_utils.is_folder(folder):
        data = _group_files_by_xml_filename(
            folder,
            file_utils.xml_files_list(folder),
            file_utils.files_list(folder),
        )
        return data
def _explore_zipfile(zip_path):
    """
    Get packages' data from zip_path

    Groups files by their XML basename and returns data in dict format.

    Parameters
    ----------
    zip_path : str
        zip file path

    Returns
    -------
    dict
        prefix -> Package; implicitly None when `zip_path` is not a zip file
    """
    if file_utils.is_zipfile(zip_path):
        # Opening the zip without binding it acts as an early validity check;
        # the helpers below reopen it themselves
        with ZipFile(zip_path, 'r'):
            data = _group_files_by_xml_filename(
                zip_path,
                file_utils.xml_files_list_from_zipfile(zip_path),
                file_utils.files_list_from_zipfile(zip_path),
            )
        return data
def _group_files_by_xml_filename(source, xmls, files):
    """
    Group files by their XML basename

    Groups files by their XML basename and returns data in dict format.

    Parameters
    ----------
    source : str
        Folder path or zip file path that holds the files
    xmls : iterable
        XML file paths used to derive each package prefix
    files : list
        list of files in the folder or zipfile (mutated: grouped files
        are removed as they are assigned)

    Returns
    -------
    dict
        key: name of the XML files
        value: Package
    """
    docs = {}
    for xml in xmls:
        basename = os.path.basename(xml)
        prefix, ext = os.path.splitext(basename)
        docs.setdefault(prefix, Package(source, prefix))
        # XML
        docs[prefix].xml = xml
        for file in select_filenames_by_prefix(prefix, files):
            # Classify the package file as asset or rendition
            component = _eval_file(prefix, file)
            if not component:
                continue
            # Result of the file evaluation
            ftype = component.get("ftype")
            file_path = component["file_path"]
            comp_id = component["component_id"]
            if ftype:
                docs[prefix].add_asset(comp_id, file_path)
            else:
                # No ftype means the component is a PDF rendition
                docs[prefix].add_rendition(comp_id, file_path)
            # Safe: we iterate over a fresh list returned by
            # select_filenames_by_prefix, not over `files` itself
            files.remove(file)
    return docs
def _eval_file(prefix, file_path):
    """Classify a package file as ``asset`` or ``rendition``.

    Parameters
    ----------
    prefix : str
        XML filename without extension.
    file_path : str
        Path of the file to classify.

    Returns
    -------
    dict or None
        For a rendition: {"component_id": lang, "file_path": ...}.
        For an asset: {"component_id", "component_name", "ftype",
        "file_path"}.
        None when the file does not match ``prefix`` or is an XML file.
    """
    if not match_file_by_prefix(prefix, file_path):
        # ignore files whose name does not match the prefix
        return None
    if file_path.endswith(".xml"):
        # ignore XML files; they are handled separately
        return None

    filename = os.path.basename(file_path)
    fname, ext = os.path.splitext(filename)

    lang = None
    if ext == ".pdf":
        if fname == prefix:
            lang = "original"
        else:
            # BUGFIX: was fname.replace(prefix, ""), which removes EVERY
            # occurrence of the prefix and corrupts the suffix when the
            # prefix repeats inside the filename. A slice strips only the
            # leading prefix (match_file_by_prefix guarantees it is there).
            suffix = fname[len(prefix):]
            if len(suffix) == 3 and suffix[0] == "-":
                # it is a rendition, e.g. "-en" -> language "en"
                lang = suffix[1:]

    if lang:
        return dict(
            component_id=lang,
            file_path=file_path,
        )
    return dict(
        component_id=filename,
        component_name=fname,
        ftype=ext[1:],
        file_path=file_path,
    )
|
packtools/sps/models/packages.py
|
codereval_python_data_79
|
Identify if a `file_path` belongs to a document package by a given `prefix`
Retorna `True` para documentos pertencentes a um pacote.
Parameters
----------
prefix : str
Filename prefix
file_path : str
File path
Returns
-------
bool
True - file belongs to the package
def match_file_by_prefix(prefix, file_path):
    """Tell whether ``file_path`` belongs to the package named by ``prefix``.

    A file belongs to the package when its basename starts with the prefix
    followed by either "-" (e.g. "doc-en.pdf") or "." (e.g. "doc.xml").

    Parameters
    ----------
    prefix : str
        Filename prefix.
    file_path : str
        File path.

    Returns
    -------
    bool
        True when the file belongs to the package.
    """
    basename = os.path.basename(file_path)
    return basename.startswith((prefix + "-", prefix + "."))
import logging
import os
from packtools import file_utils
from zipfile import ZipFile
logger = logging.getLogger(__name__)
class Package:
    """Holds the pieces of one document package: XML, assets and renditions."""

    def __init__(self, source, name):
        self._source = source
        self._xml = None
        self._assets = {}
        self._renditions = {}
        self._name = name
        # Path of the zip file when `source` is one, otherwise False.
        self.zip_file_path = file_utils.is_zipfile(source) and source

    @property
    def assets(self):
        return self._assets

    @property
    def name(self):
        return self._name

    def file_path(self, file_path):
        """Return ``file_path``, joined to the source folder when applicable."""
        if not file_utils.is_folder(self._source):
            return file_path
        return os.path.join(self._source, file_path)

    def add_asset(self, basename, file_path):
        """Register an asset keyed by its basename, e.g.

        {
        "artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
        "artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
        }
        """
        self._assets[basename] = self.file_path(file_path)

    def get_asset(self, basename):
        """Return the asset path for ``basename``, or None when absent."""
        return self._assets.get(basename)

    def add_rendition(self, lang, file_path):
        """Register a rendition keyed by language, e.g.

        {"original": "artigo02.pdf", "en": "artigo02-en.pdf"}
        """
        self._renditions[lang] = self.file_path(file_path)

    def get_rendition(self, lang):
        """Return the rendition path for ``lang``, or None when absent."""
        return self._renditions.get(lang)

    @property
    def source(self):
        return self._source

    @property
    def xml(self):
        return self.file_path(self._xml)

    @xml.setter
    def xml(self, value):
        self._xml = value

    @property
    def renditions(self):
        return self._renditions

    @property
    def xml_content(self):
        """Raw bytes of the package's XML document."""
        if file_utils.is_folder(self._source):
            with open(self.xml, "rb") as fp:
                return fp.read()
        with ZipFile(self._source) as zf:
            return zf.read(self.xml)
def select_filenames_by_prefix(prefix, files):
    """Select the files that belong to a document package.

    Returns the entries of ``files`` whose basenames match ``prefix``.

    Parameters
    ----------
    prefix : str
        Filename prefix.
    files : list of str
        File paths.

    Returns
    -------
    list
        File paths whose basenames match ``prefix``.
    """
    selected = []
    for candidate in files:
        if match_file_by_prefix(prefix, candidate):
            selected.append(candidate)
    return selected
def match_file_by_prefix(prefix, file_path):
"""
Identify if a `file_path` belongs to a document package by a given `prefix`
Retorna `True` para documentos pertencentes a um pacote.
Parameters
----------
prefix : str
Filename prefix
file_path : str
File path
Returns
-------
bool
True - file belongs to the package
"""
basename = os.path.basename(file_path)
if basename.startswith(prefix + "-"):
return True
if basename.startswith(prefix + "."):
return True
return False
def explore_source(source):
packages = _explore_zipfile(source)
if not packages:
packages = _explore_folder(source)
if not packages:
raise ValueError("%s: Invalid value for `source`" % source)
return packages
def _explore_folder(folder):
"""
Get packages' data from folder
Groups files by their XML basename and returns data in dict format.
Parameters
----------
folder : str
Folder of the package
Returns
-------
dict
"""
if file_utils.is_folder(folder):
data = _group_files_by_xml_filename(
folder,
file_utils.xml_files_list(folder),
file_utils.files_list(folder),
)
return data
def _explore_zipfile(zip_path):
"""
Get packages' data from zip_path
Groups files by their XML basename and returns data in dict format.
Parameters
----------
zip_path : str
zip file path
Returns
-------
dict
"""
if file_utils.is_zipfile(zip_path):
with ZipFile(zip_path, 'r'):
data = _group_files_by_xml_filename(
zip_path,
file_utils.xml_files_list_from_zipfile(zip_path),
file_utils.files_list_from_zipfile(zip_path),
)
return data
def _group_files_by_xml_filename(source, xmls, files):
"""
Group files by their XML basename
Groups files by their XML basename and returns data in dict format.
Parameters
----------
xml_filename : str
XML filenames
files : list
list of files in the folder or zipfile
Returns
-------
dict
key: name of the XML files
value: Package
"""
docs = {}
for xml in xmls:
basename = os.path.basename(xml)
prefix, ext = os.path.splitext(basename)
docs.setdefault(prefix, Package(source, prefix))
# XML
docs[prefix].xml = xml
for file in select_filenames_by_prefix(prefix, files):
# avalia arquivo do pacote, se é asset ou rendition
component = _eval_file(prefix, file)
if not component:
continue
# resultado do avaliação do pacote
ftype = component.get("ftype")
file_path = component["file_path"]
comp_id = component["component_id"]
if ftype:
docs[prefix].add_asset(comp_id, file_path)
else:
docs[prefix].add_rendition(comp_id, file_path)
files.remove(file)
return docs
def _eval_file(prefix, file_path):
"""
Identifica o tipo de arquivo do pacote: `asset` ou `rendition`.
Identifica o tipo de arquivo do pacote e atualiza `packages` com o tipo e
o endereço do arquivo em análise.
Parameters
----------
prefix : str
nome do arquivo XML sem extensão
filename : str
filename
file_folder : str
file folder
Returns
-------
dict
"""
if not match_file_by_prefix(prefix, file_path):
# ignore files which name does not match
return
if file_path.endswith(".xml"):
# ignore XML files
return
# it matches
filename = os.path.basename(file_path)
fname, ext = os.path.splitext(filename)
lang = None
if ext == ".pdf":
suffix = fname.replace(prefix, "")
if fname == prefix:
lang = "original"
elif len(suffix) == 3 and suffix[0] == "-":
# it is a rendition
lang = suffix[1:]
if lang:
return dict(
component_id=lang,
file_path=file_path,
)
else:
return dict(
component_id=filename,
component_name=fname,
ftype=ext[1:],
file_path=file_path,
)
|
packtools/sps/models/packages.py
|
codereval_python_data_80
|
Get files which belongs to a document package.
Retorna os arquivos da lista `files` cujos nomes iniciam com `prefix`
Parameters
----------
prefix : str
Filename prefix
files : str list
Files paths
Returns
-------
list
files paths which basename files matches to prefix
def select_filenames_by_prefix(prefix, files):
"""
Get files which belongs to a document package.
Retorna os arquivos da lista `files` cujos nomes iniciam com `prefix`
Parameters
----------
prefix : str
Filename prefix
files : str list
Files paths
Returns
-------
list
files paths which basename files matches to prefix
"""
return [
item
for item in files
if match_file_by_prefix(prefix, item)
]
import logging
import os
from packtools import file_utils
from zipfile import ZipFile
logger = logging.getLogger(__name__)
class Package:
def __init__(self, source, name):
self._source = source
self._xml = None
self._assets = {}
self._renditions = {}
self._name = name
self.zip_file_path = file_utils.is_zipfile(source) and source
@property
def assets(self):
return self._assets
@property
def name(self):
return self._name
def file_path(self, file_path):
if file_utils.is_folder(self._source):
return os.path.join(self._source, file_path)
return file_path
def add_asset(self, basename, file_path):
"""
"{
"artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
"artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
"artigo02-gf03.png": "/path/artigo02-gf03.png",
}
"""
self._assets[basename] = self.file_path(file_path)
def get_asset(self, basename):
try:
return self._assets[basename]
except KeyError:
return
def add_rendition(self, lang, file_path):
"""
{
"original": "artigo02.pdf",
"en": "artigo02-en.pdf",
}
"""
self._renditions[lang] = self.file_path(file_path)
def get_rendition(self, lang):
try:
return self._renditions[lang]
except KeyError:
return
@property
def source(self):
return self._source
@property
def xml(self):
return self.file_path(self._xml)
@xml.setter
def xml(self, value):
self._xml = value
@property
def renditions(self):
return self._renditions
@property
def xml_content(self):
if file_utils.is_folder(self._source):
with open(self.xml, "rb") as fp:
return fp.read()
with ZipFile(self._source) as zf:
return zf.read(self.xml)
def select_filenames_by_prefix(prefix, files):
"""
Get files which belongs to a document package.
Retorna os arquivos da lista `files` cujos nomes iniciam com `prefix`
Parameters
----------
prefix : str
Filename prefix
files : str list
Files paths
Returns
-------
list
files paths which basename files matches to prefix
"""
return [
item
for item in files
if match_file_by_prefix(prefix, item)
]
def match_file_by_prefix(prefix, file_path):
"""
Identify if a `file_path` belongs to a document package by a given `prefix`
Retorna `True` para documentos pertencentes a um pacote.
Parameters
----------
prefix : str
Filename prefix
file_path : str
File path
Returns
-------
bool
True - file belongs to the package
"""
basename = os.path.basename(file_path)
if basename.startswith(prefix + "-"):
return True
if basename.startswith(prefix + "."):
return True
return False
def explore_source(source):
packages = _explore_zipfile(source)
if not packages:
packages = _explore_folder(source)
if not packages:
raise ValueError("%s: Invalid value for `source`" % source)
return packages
def _explore_folder(folder):
"""
Get packages' data from folder
Groups files by their XML basename and returns data in dict format.
Parameters
----------
folder : str
Folder of the package
Returns
-------
dict
"""
if file_utils.is_folder(folder):
data = _group_files_by_xml_filename(
folder,
file_utils.xml_files_list(folder),
file_utils.files_list(folder),
)
return data
def _explore_zipfile(zip_path):
"""
Get packages' data from zip_path
Groups files by their XML basename and returns data in dict format.
Parameters
----------
zip_path : str
zip file path
Returns
-------
dict
"""
if file_utils.is_zipfile(zip_path):
with ZipFile(zip_path, 'r'):
data = _group_files_by_xml_filename(
zip_path,
file_utils.xml_files_list_from_zipfile(zip_path),
file_utils.files_list_from_zipfile(zip_path),
)
return data
def _group_files_by_xml_filename(source, xmls, files):
"""
Group files by their XML basename
Groups files by their XML basename and returns data in dict format.
Parameters
----------
xml_filename : str
XML filenames
files : list
list of files in the folder or zipfile
Returns
-------
dict
key: name of the XML files
value: Package
"""
docs = {}
for xml in xmls:
basename = os.path.basename(xml)
prefix, ext = os.path.splitext(basename)
docs.setdefault(prefix, Package(source, prefix))
# XML
docs[prefix].xml = xml
for file in select_filenames_by_prefix(prefix, files):
# avalia arquivo do pacote, se é asset ou rendition
component = _eval_file(prefix, file)
if not component:
continue
# resultado do avaliação do pacote
ftype = component.get("ftype")
file_path = component["file_path"]
comp_id = component["component_id"]
if ftype:
docs[prefix].add_asset(comp_id, file_path)
else:
docs[prefix].add_rendition(comp_id, file_path)
files.remove(file)
return docs
def _eval_file(prefix, file_path):
"""
Identifica o tipo de arquivo do pacote: `asset` ou `rendition`.
Identifica o tipo de arquivo do pacote e atualiza `packages` com o tipo e
o endereço do arquivo em análise.
Parameters
----------
prefix : str
nome do arquivo XML sem extensão
filename : str
filename
file_folder : str
file folder
Returns
-------
dict
"""
if not match_file_by_prefix(prefix, file_path):
# ignore files which name does not match
return
if file_path.endswith(".xml"):
# ignore XML files
return
# it matches
filename = os.path.basename(file_path)
fname, ext = os.path.splitext(filename)
lang = None
if ext == ".pdf":
suffix = fname.replace(prefix, "")
if fname == prefix:
lang = "original"
elif len(suffix) == 3 and suffix[0] == "-":
# it is a rendition
lang = suffix[1:]
if lang:
return dict(
component_id=lang,
file_path=file_path,
)
else:
return dict(
component_id=filename,
component_name=fname,
ftype=ext[1:],
file_path=file_path,
)
|
packtools/sps/models/packages.py
|
codereval_python_data_81
|
Get packages' data from folder
Groups files by their XML basename and returns data in dict format.
Parameters
----------
folder : str
Folder of the package
Returns
-------
dict
def _explore_folder(folder):
"""
Get packages' data from folder
Groups files by their XML basename and returns data in dict format.
Parameters
----------
folder : str
Folder of the package
Returns
-------
dict
"""
if file_utils.is_folder(folder):
data = _group_files_by_xml_filename(
folder,
file_utils.xml_files_list(folder),
file_utils.files_list(folder),
)
return data
import logging
import os
from packtools import file_utils
from zipfile import ZipFile
logger = logging.getLogger(__name__)
class Package:
def __init__(self, source, name):
self._source = source
self._xml = None
self._assets = {}
self._renditions = {}
self._name = name
self.zip_file_path = file_utils.is_zipfile(source) and source
@property
def assets(self):
return self._assets
@property
def name(self):
return self._name
def file_path(self, file_path):
if file_utils.is_folder(self._source):
return os.path.join(self._source, file_path)
return file_path
def add_asset(self, basename, file_path):
"""
"{
"artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
"artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
"artigo02-gf03.png": "/path/artigo02-gf03.png",
}
"""
self._assets[basename] = self.file_path(file_path)
def get_asset(self, basename):
try:
return self._assets[basename]
except KeyError:
return
def add_rendition(self, lang, file_path):
"""
{
"original": "artigo02.pdf",
"en": "artigo02-en.pdf",
}
"""
self._renditions[lang] = self.file_path(file_path)
def get_rendition(self, lang):
try:
return self._renditions[lang]
except KeyError:
return
@property
def source(self):
return self._source
@property
def xml(self):
return self.file_path(self._xml)
@xml.setter
def xml(self, value):
self._xml = value
@property
def renditions(self):
return self._renditions
@property
def xml_content(self):
if file_utils.is_folder(self._source):
with open(self.xml, "rb") as fp:
return fp.read()
with ZipFile(self._source) as zf:
return zf.read(self.xml)
def select_filenames_by_prefix(prefix, files):
"""
Get files which belongs to a document package.
Retorna os arquivos da lista `files` cujos nomes iniciam com `prefix`
Parameters
----------
prefix : str
Filename prefix
files : str list
Files paths
Returns
-------
list
files paths which basename files matches to prefix
"""
return [
item
for item in files
if match_file_by_prefix(prefix, item)
]
def match_file_by_prefix(prefix, file_path):
"""
Identify if a `file_path` belongs to a document package by a given `prefix`
Retorna `True` para documentos pertencentes a um pacote.
Parameters
----------
prefix : str
Filename prefix
file_path : str
File path
Returns
-------
bool
True - file belongs to the package
"""
basename = os.path.basename(file_path)
if basename.startswith(prefix + "-"):
return True
if basename.startswith(prefix + "."):
return True
return False
def explore_source(source):
packages = _explore_zipfile(source)
if not packages:
packages = _explore_folder(source)
if not packages:
raise ValueError("%s: Invalid value for `source`" % source)
return packages
def _explore_folder(folder):
"""
Get packages' data from folder
Groups files by their XML basename and returns data in dict format.
Parameters
----------
folder : str
Folder of the package
Returns
-------
dict
"""
if file_utils.is_folder(folder):
data = _group_files_by_xml_filename(
folder,
file_utils.xml_files_list(folder),
file_utils.files_list(folder),
)
return data
def _explore_zipfile(zip_path):
"""
Get packages' data from zip_path
Groups files by their XML basename and returns data in dict format.
Parameters
----------
zip_path : str
zip file path
Returns
-------
dict
"""
if file_utils.is_zipfile(zip_path):
with ZipFile(zip_path, 'r'):
data = _group_files_by_xml_filename(
zip_path,
file_utils.xml_files_list_from_zipfile(zip_path),
file_utils.files_list_from_zipfile(zip_path),
)
return data
def _group_files_by_xml_filename(source, xmls, files):
"""
Group files by their XML basename
Groups files by their XML basename and returns data in dict format.
Parameters
----------
xml_filename : str
XML filenames
files : list
list of files in the folder or zipfile
Returns
-------
dict
key: name of the XML files
value: Package
"""
docs = {}
for xml in xmls:
basename = os.path.basename(xml)
prefix, ext = os.path.splitext(basename)
docs.setdefault(prefix, Package(source, prefix))
# XML
docs[prefix].xml = xml
for file in select_filenames_by_prefix(prefix, files):
# avalia arquivo do pacote, se é asset ou rendition
component = _eval_file(prefix, file)
if not component:
continue
# resultado do avaliação do pacote
ftype = component.get("ftype")
file_path = component["file_path"]
comp_id = component["component_id"]
if ftype:
docs[prefix].add_asset(comp_id, file_path)
else:
docs[prefix].add_rendition(comp_id, file_path)
files.remove(file)
return docs
def _eval_file(prefix, file_path):
"""
Identifica o tipo de arquivo do pacote: `asset` ou `rendition`.
Identifica o tipo de arquivo do pacote e atualiza `packages` com o tipo e
o endereço do arquivo em análise.
Parameters
----------
prefix : str
nome do arquivo XML sem extensão
filename : str
filename
file_folder : str
file folder
Returns
-------
dict
"""
if not match_file_by_prefix(prefix, file_path):
# ignore files which name does not match
return
if file_path.endswith(".xml"):
# ignore XML files
return
# it matches
filename = os.path.basename(file_path)
fname, ext = os.path.splitext(filename)
lang = None
if ext == ".pdf":
suffix = fname.replace(prefix, "")
if fname == prefix:
lang = "original"
elif len(suffix) == 3 and suffix[0] == "-":
# it is a rendition
lang = suffix[1:]
if lang:
return dict(
component_id=lang,
file_path=file_path,
)
else:
return dict(
component_id=filename,
component_name=fname,
ftype=ext[1:],
file_path=file_path,
)
|
packtools/sps/models/packages.py
|
codereval_python_data_82
|
Identifica o tipo de arquivo do pacote: `asset` ou `rendition`.
Identifica o tipo de arquivo do pacote e atualiza `packages` com o tipo e
o endereço do arquivo em análise.
Parameters
----------
prefix : str
nome do arquivo XML sem extensão
filename : str
filename
file_folder : str
file folder
Returns
-------
dict
def _eval_file(prefix, file_path):
"""
Identifica o tipo de arquivo do pacote: `asset` ou `rendition`.
Identifica o tipo de arquivo do pacote e atualiza `packages` com o tipo e
o endereço do arquivo em análise.
Parameters
----------
prefix : str
nome do arquivo XML sem extensão
filename : str
filename
file_folder : str
file folder
Returns
-------
dict
"""
if not match_file_by_prefix(prefix, file_path):
# ignore files which name does not match
return
if file_path.endswith(".xml"):
# ignore XML files
return
# it matches
filename = os.path.basename(file_path)
fname, ext = os.path.splitext(filename)
lang = None
if ext == ".pdf":
suffix = fname.replace(prefix, "")
if fname == prefix:
lang = "original"
elif len(suffix) == 3 and suffix[0] == "-":
# it is a rendition
lang = suffix[1:]
if lang:
return dict(
component_id=lang,
file_path=file_path,
)
else:
return dict(
component_id=filename,
component_name=fname,
ftype=ext[1:],
file_path=file_path,
)
import logging
import os
from packtools import file_utils
from zipfile import ZipFile
logger = logging.getLogger(__name__)
class Package:
def __init__(self, source, name):
self._source = source
self._xml = None
self._assets = {}
self._renditions = {}
self._name = name
self.zip_file_path = file_utils.is_zipfile(source) and source
@property
def assets(self):
return self._assets
@property
def name(self):
return self._name
def file_path(self, file_path):
if file_utils.is_folder(self._source):
return os.path.join(self._source, file_path)
return file_path
def add_asset(self, basename, file_path):
"""
"{
"artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
"artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
"artigo02-gf03.png": "/path/artigo02-gf03.png",
}
"""
self._assets[basename] = self.file_path(file_path)
def get_asset(self, basename):
try:
return self._assets[basename]
except KeyError:
return
def add_rendition(self, lang, file_path):
"""
{
"original": "artigo02.pdf",
"en": "artigo02-en.pdf",
}
"""
self._renditions[lang] = self.file_path(file_path)
def get_rendition(self, lang):
try:
return self._renditions[lang]
except KeyError:
return
@property
def source(self):
return self._source
@property
def xml(self):
return self.file_path(self._xml)
@xml.setter
def xml(self, value):
self._xml = value
@property
def renditions(self):
return self._renditions
@property
def xml_content(self):
if file_utils.is_folder(self._source):
with open(self.xml, "rb") as fp:
return fp.read()
with ZipFile(self._source) as zf:
return zf.read(self.xml)
def select_filenames_by_prefix(prefix, files):
"""
Get files which belongs to a document package.
Retorna os arquivos da lista `files` cujos nomes iniciam com `prefix`
Parameters
----------
prefix : str
Filename prefix
files : str list
Files paths
Returns
-------
list
files paths which basename files matches to prefix
"""
return [
item
for item in files
if match_file_by_prefix(prefix, item)
]
def match_file_by_prefix(prefix, file_path):
"""
Identify if a `file_path` belongs to a document package by a given `prefix`
Retorna `True` para documentos pertencentes a um pacote.
Parameters
----------
prefix : str
Filename prefix
file_path : str
File path
Returns
-------
bool
True - file belongs to the package
"""
basename = os.path.basename(file_path)
if basename.startswith(prefix + "-"):
return True
if basename.startswith(prefix + "."):
return True
return False
def explore_source(source):
packages = _explore_zipfile(source)
if not packages:
packages = _explore_folder(source)
if not packages:
raise ValueError("%s: Invalid value for `source`" % source)
return packages
def _explore_folder(folder):
"""
Get packages' data from folder
Groups files by their XML basename and returns data in dict format.
Parameters
----------
folder : str
Folder of the package
Returns
-------
dict
"""
if file_utils.is_folder(folder):
data = _group_files_by_xml_filename(
folder,
file_utils.xml_files_list(folder),
file_utils.files_list(folder),
)
return data
def _explore_zipfile(zip_path):
"""
Get packages' data from zip_path
Groups files by their XML basename and returns data in dict format.
Parameters
----------
zip_path : str
zip file path
Returns
-------
dict
"""
if file_utils.is_zipfile(zip_path):
with ZipFile(zip_path, 'r'):
data = _group_files_by_xml_filename(
zip_path,
file_utils.xml_files_list_from_zipfile(zip_path),
file_utils.files_list_from_zipfile(zip_path),
)
return data
def _group_files_by_xml_filename(source, xmls, files):
"""
Group files by their XML basename
Groups files by their XML basename and returns data in dict format.
Parameters
----------
xml_filename : str
XML filenames
files : list
list of files in the folder or zipfile
Returns
-------
dict
key: name of the XML files
value: Package
"""
docs = {}
for xml in xmls:
basename = os.path.basename(xml)
prefix, ext = os.path.splitext(basename)
docs.setdefault(prefix, Package(source, prefix))
# XML
docs[prefix].xml = xml
for file in select_filenames_by_prefix(prefix, files):
# avalia arquivo do pacote, se é asset ou rendition
component = _eval_file(prefix, file)
if not component:
continue
# resultado do avaliação do pacote
ftype = component.get("ftype")
file_path = component["file_path"]
comp_id = component["component_id"]
if ftype:
docs[prefix].add_asset(comp_id, file_path)
else:
docs[prefix].add_rendition(comp_id, file_path)
files.remove(file)
return docs
def _eval_file(prefix, file_path):
"""
Identifica o tipo de arquivo do pacote: `asset` ou `rendition`.
Identifica o tipo de arquivo do pacote e atualiza `packages` com o tipo e
o endereço do arquivo em análise.
Parameters
----------
prefix : str
nome do arquivo XML sem extensão
filename : str
filename
file_folder : str
file folder
Returns
-------
dict
"""
if not match_file_by_prefix(prefix, file_path):
# ignore files which name does not match
return
if file_path.endswith(".xml"):
# ignore XML files
return
# it matches
filename = os.path.basename(file_path)
fname, ext = os.path.splitext(filename)
lang = None
if ext == ".pdf":
suffix = fname.replace(prefix, "")
if fname == prefix:
lang = "original"
elif len(suffix) == 3 and suffix[0] == "-":
# it is a rendition
lang = suffix[1:]
if lang:
return dict(
component_id=lang,
file_path=file_path,
)
else:
return dict(
component_id=filename,
component_name=fname,
ftype=ext[1:],
file_path=file_path,
)
|
packtools/sps/models/packages.py
|
codereval_python_data_83
|
{
"original": "artigo02.pdf",
"en": "artigo02-en.pdf",
}
def add_rendition(self, lang, file_path):
"""
{
"original": "artigo02.pdf",
"en": "artigo02-en.pdf",
}
"""
self._renditions[lang] = self.file_path(file_path)
import logging
import os
from packtools import file_utils
from zipfile import ZipFile
logger = logging.getLogger(__name__)
class Package:
def __init__(self, source, name):
self._source = source
self._xml = None
self._assets = {}
self._renditions = {}
self._name = name
self.zip_file_path = file_utils.is_zipfile(source) and source
@property
def assets(self):
return self._assets
@property
def name(self):
return self._name
def file_path(self, file_path):
if file_utils.is_folder(self._source):
return os.path.join(self._source, file_path)
return file_path
def add_asset(self, basename, file_path):
"""
"{
"artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
"artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
"artigo02-gf03.png": "/path/artigo02-gf03.png",
}
"""
self._assets[basename] = self.file_path(file_path)
def get_asset(self, basename):
try:
return self._assets[basename]
except KeyError:
return
def add_rendition(self, lang, file_path):
"""
{
"original": "artigo02.pdf",
"en": "artigo02-en.pdf",
}
"""
self._renditions[lang] = self.file_path(file_path)
def get_rendition(self, lang):
try:
return self._renditions[lang]
except KeyError:
return
@property
def source(self):
return self._source
@property
def xml(self):
return self.file_path(self._xml)
@xml.setter
def xml(self, value):
self._xml = value
@property
def renditions(self):
return self._renditions
@property
def xml_content(self):
if file_utils.is_folder(self._source):
with open(self.xml, "rb") as fp:
return fp.read()
with ZipFile(self._source) as zf:
return zf.read(self.xml)
def select_filenames_by_prefix(prefix, files):
"""
Get files which belongs to a document package.
Retorna os arquivos da lista `files` cujos nomes iniciam com `prefix`
Parameters
----------
prefix : str
Filename prefix
files : str list
Files paths
Returns
-------
list
files paths which basename files matches to prefix
"""
return [
item
for item in files
if match_file_by_prefix(prefix, item)
]
def match_file_by_prefix(prefix, file_path):
"""
Identify if a `file_path` belongs to a document package by a given `prefix`
Retorna `True` para documentos pertencentes a um pacote.
Parameters
----------
prefix : str
Filename prefix
file_path : str
File path
Returns
-------
bool
True - file belongs to the package
"""
basename = os.path.basename(file_path)
if basename.startswith(prefix + "-"):
return True
if basename.startswith(prefix + "."):
return True
return False
def explore_source(source):
packages = _explore_zipfile(source)
if not packages:
packages = _explore_folder(source)
if not packages:
raise ValueError("%s: Invalid value for `source`" % source)
return packages
def _explore_folder(folder):
"""
Get packages' data from folder
Groups files by their XML basename and returns data in dict format.
Parameters
----------
folder : str
Folder of the package
Returns
-------
dict
"""
if file_utils.is_folder(folder):
data = _group_files_by_xml_filename(
folder,
file_utils.xml_files_list(folder),
file_utils.files_list(folder),
)
return data
def _explore_zipfile(zip_path):
"""
Get packages' data from zip_path
Groups files by their XML basename and returns data in dict format.
Parameters
----------
zip_path : str
zip file path
Returns
-------
dict
"""
if file_utils.is_zipfile(zip_path):
with ZipFile(zip_path, 'r'):
data = _group_files_by_xml_filename(
zip_path,
file_utils.xml_files_list_from_zipfile(zip_path),
file_utils.files_list_from_zipfile(zip_path),
)
return data
def _group_files_by_xml_filename(source, xmls, files):
"""
Group files by their XML basename
Groups files by their XML basename and returns data in dict format.
Parameters
----------
xml_filename : str
XML filenames
files : list
list of files in the folder or zipfile
Returns
-------
dict
key: name of the XML files
value: Package
"""
docs = {}
for xml in xmls:
basename = os.path.basename(xml)
prefix, ext = os.path.splitext(basename)
docs.setdefault(prefix, Package(source, prefix))
# XML
docs[prefix].xml = xml
for file in select_filenames_by_prefix(prefix, files):
# avalia arquivo do pacote, se é asset ou rendition
component = _eval_file(prefix, file)
if not component:
continue
# resultado do avaliação do pacote
ftype = component.get("ftype")
file_path = component["file_path"]
comp_id = component["component_id"]
if ftype:
docs[prefix].add_asset(comp_id, file_path)
else:
docs[prefix].add_rendition(comp_id, file_path)
files.remove(file)
return docs
def _eval_file(prefix, file_path):
"""
Identifica o tipo de arquivo do pacote: `asset` ou `rendition`.
Identifica o tipo de arquivo do pacote e atualiza `packages` com o tipo e
o endereço do arquivo em análise.
Parameters
----------
prefix : str
nome do arquivo XML sem extensão
filename : str
filename
file_folder : str
file folder
Returns
-------
dict
"""
if not match_file_by_prefix(prefix, file_path):
# ignore files which name does not match
return
if file_path.endswith(".xml"):
# ignore XML files
return
# it matches
filename = os.path.basename(file_path)
fname, ext = os.path.splitext(filename)
lang = None
if ext == ".pdf":
suffix = fname.replace(prefix, "")
if fname == prefix:
lang = "original"
elif len(suffix) == 3 and suffix[0] == "-":
# it is a rendition
lang = suffix[1:]
if lang:
return dict(
component_id=lang,
file_path=file_path,
)
else:
return dict(
component_id=filename,
component_name=fname,
ftype=ext[1:],
file_path=file_path,
)
|
packtools/sps/models/packages.py
|
codereval_python_data_84
|
"{
"artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
"artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
"artigo02-gf03.png": "/path/artigo02-gf03.png",
}
def add_asset(self, basename, file_path):
"""
"{
"artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
"artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
"artigo02-gf03.png": "/path/artigo02-gf03.png",
}
"""
self._assets[basename] = self.file_path(file_path)
import logging
import os
from packtools import file_utils
from zipfile import ZipFile
logger = logging.getLogger(__name__)
class Package:
    """One document package: an XML file plus its assets and renditions.

    `source` is either a package folder or a zip file path; stored paths
    are resolved through `file_path()`, which joins them to the folder
    when the source is a folder.
    """

    def __init__(self, source, name):
        # source: folder path or zip file path holding the package files
        self._source = source
        self._xml = None
        self._assets = {}
        self._renditions = {}
        self._name = name
        # the zip path itself when `source` is a zip file, otherwise False
        self.zip_file_path = file_utils.is_zipfile(source) and source

    @property
    def assets(self):
        # mapping: asset basename -> resolved file path
        return self._assets

    @property
    def name(self):
        # package name (the XML filename without extension)
        return self._name

    def file_path(self, file_path):
        """Resolve `file_path` against the source folder; zip members as-is."""
        if file_utils.is_folder(self._source):
            return os.path.join(self._source, file_path)
        return file_path

    def add_asset(self, basename, file_path):
        """Register an asset, e.g.:

        {
            "artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
            "artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
            "artigo02-gf03.png": "/path/artigo02-gf03.png",
        }
        """
        self._assets[basename] = self.file_path(file_path)

    def get_asset(self, basename):
        """Return the asset path for `basename`, or None when absent."""
        try:
            return self._assets[basename]
        except KeyError:
            return

    def add_rendition(self, lang, file_path):
        """Register a rendition, e.g.:

        {
            "original": "artigo02.pdf",
            "en": "artigo02-en.pdf",
        }
        """
        self._renditions[lang] = self.file_path(file_path)

    def get_rendition(self, lang):
        """Return the rendition path for `lang`, or None when absent."""
        try:
            return self._renditions[lang]
        except KeyError:
            return

    @property
    def source(self):
        # the folder or zip file the package was built from
        return self._source

    @property
    def xml(self):
        # resolved path of the package's XML file
        return self.file_path(self._xml)

    @xml.setter
    def xml(self, value):
        self._xml = value

    @property
    def renditions(self):
        # mapping: language -> resolved rendition path
        return self._renditions

    @property
    def xml_content(self):
        """Raw bytes of the XML, read from the folder or from the zip."""
        if file_utils.is_folder(self._source):
            with open(self.xml, "rb") as fp:
                return fp.read()
        with ZipFile(self._source) as zf:
            return zf.read(self.xml)
def select_filenames_by_prefix(prefix, files):
    """
    Select the files that belong to one document package.

    Parameters
    ----------
    prefix : str
        Filename prefix (the XML name without extension)
    files : list of str
        Candidate file paths

    Returns
    -------
    list
        Paths whose basename matches the prefix
    """
    selected = []
    for candidate in files:
        if match_file_by_prefix(prefix, candidate):
            selected.append(candidate)
    return selected
def match_file_by_prefix(prefix, file_path):
    """
    Tell whether `file_path` belongs to the document package named `prefix`.

    A file belongs to the package when its basename is the prefix followed
    by "-" (assets / translated renditions) or "." (same-name files).

    Parameters
    ----------
    prefix : str
        Filename prefix
    file_path : str
        File path

    Returns
    -------
    bool
        True when the file belongs to the package
    """
    basename = os.path.basename(file_path)
    return basename.startswith((prefix + "-", prefix + "."))
def explore_source(source):
    """Build packages from `source` (a zip file or a folder).

    Tries the zip reader first, then the folder reader.

    Raises
    ------
    ValueError
        When `source` is neither a zip file nor a folder.
    """
    packages = _explore_zipfile(source)
    if not packages:
        packages = _explore_folder(source)
    if not packages:
        raise ValueError("%s: Invalid value for `source`" % source)
    return packages
def _explore_folder(folder):
    """
    Get packages' data from folder.

    Groups files by their XML basename and returns data in dict format.

    Parameters
    ----------
    folder : str
        Folder of the package

    Returns
    -------
    dict
        prefix -> Package; None (implicitly) when `folder` is not a folder
    """
    if file_utils.is_folder(folder):
        data = _group_files_by_xml_filename(
            folder,
            file_utils.xml_files_list(folder),
            file_utils.files_list(folder),
        )
        return data
def _explore_zipfile(zip_path):
    """
    Get packages' data from `zip_path`.

    Groups files by their XML basename and returns data in dict format.

    Parameters
    ----------
    zip_path : str
        zip file path

    Returns
    -------
    dict
        prefix -> Package; None when `zip_path` is not a zip file
    """
    if not file_utils.is_zipfile(zip_path):
        return None
    # The previous version also opened the zip here (`with ZipFile(...)`)
    # without ever using the handle; both helpers below open it
    # themselves, so that extra open was redundant (invalid zips still
    # raise from the helpers).
    return _group_files_by_xml_filename(
        zip_path,
        file_utils.xml_files_list_from_zipfile(zip_path),
        file_utils.files_list_from_zipfile(zip_path),
    )
def _group_files_by_xml_filename(source, xmls, files):
    """
    Group package files by their XML basename.

    Parameters
    ----------
    source : str
        folder path or zip file path the packages come from
    xmls : list
        XML file paths
    files : list
        all file paths in the folder or zipfile; entries assigned to a
        package are removed from this list as a side effect

    Returns
    -------
    dict
        key: XML filename without extension
        value: Package
    """
    docs = {}
    for xml in xmls:
        basename = os.path.basename(xml)
        prefix, ext = os.path.splitext(basename)
        docs.setdefault(prefix, Package(source, prefix))
        # XML
        docs[prefix].xml = xml
        for file in select_filenames_by_prefix(prefix, files):
            # classify the package file: asset or rendition?
            component = _eval_file(prefix, file)
            if not component:
                continue
            # outcome of the classification
            ftype = component.get("ftype")
            file_path = component["file_path"]
            comp_id = component["component_id"]
            if ftype:
                docs[prefix].add_asset(comp_id, file_path)
            else:
                docs[prefix].add_rendition(comp_id, file_path)
            # already assigned to a package; do not consider it again
            files.remove(file)
    return docs
def _eval_file(prefix, file_path):
    """
    Classify a package file as an `asset` or a `rendition`.

    Parameters
    ----------
    prefix : str
        XML filename without extension
    file_path : str
        path of the file to classify

    Returns
    -------
    dict or None
        For a rendition: {"component_id": lang, "file_path": path}.
        For an asset: {"component_id", "component_name", "ftype", "file_path"}.
        None when the file does not belong to the package or is an XML file.
    """
    if not match_file_by_prefix(prefix, file_path):
        # ignore files whose name does not match the prefix
        return None
    if file_path.endswith(".xml"):
        # XML files are registered separately by the caller
        return None
    # it matches
    filename = os.path.basename(file_path)
    fname, ext = os.path.splitext(filename)
    lang = None
    if ext == ".pdf":
        # Strip the prefix positionally. The previous implementation used
        # `fname.replace(prefix, "")`, which removes *every* occurrence of
        # the prefix and corrupts the suffix when the prefix repeats in
        # the filename (e.g. prefix "a", fname "a-ab" -> "-b").
        suffix = fname[len(prefix):]
        if fname == prefix:
            lang = "original"
        elif len(suffix) == 3 and suffix[0] == "-":
            # it is a translated rendition, e.g. "<prefix>-en.pdf"
            lang = suffix[1:]
    if lang:
        return dict(
            component_id=lang,
            file_path=file_path,
        )
    return dict(
        component_id=filename,
        component_name=fname,
        ftype=ext[1:],
        file_path=file_path,
    )
|
packtools/sps/models/packages.py
|
codereval_python_data_85
|
Get packages' data from zip_path
Groups files by their XML basename and returns data in dict format.
Parameters
----------
zip_path : str
zip file path
Returns
-------
dict
def _explore_zipfile(zip_path):
    """
    Get packages' data from `zip_path`.

    Groups files by their XML basename and returns data in dict format.

    Parameters
    ----------
    zip_path : str
        zip file path

    Returns
    -------
    dict
        prefix -> Package; None when `zip_path` is not a zip file
    """
    if not file_utils.is_zipfile(zip_path):
        return None
    # The previous version also opened the zip here (`with ZipFile(...)`)
    # without ever using the handle; both helpers below open it
    # themselves, so that extra open was redundant (invalid zips still
    # raise from the helpers).
    return _group_files_by_xml_filename(
        zip_path,
        file_utils.xml_files_list_from_zipfile(zip_path),
        file_utils.files_list_from_zipfile(zip_path),
    )
import logging
import os
from packtools import file_utils
from zipfile import ZipFile
logger = logging.getLogger(__name__)
class Package:
    """One document package: an XML file plus its assets and renditions.

    `source` is either a package folder or a zip file path; stored paths
    are resolved through `file_path()`, which joins them to the folder
    when the source is a folder.
    """

    def __init__(self, source, name):
        # source: folder path or zip file path holding the package files
        self._source = source
        self._xml = None
        self._assets = {}
        self._renditions = {}
        self._name = name
        # the zip path itself when `source` is a zip file, otherwise False
        self.zip_file_path = file_utils.is_zipfile(source) and source

    @property
    def assets(self):
        # mapping: asset basename -> resolved file path
        return self._assets

    @property
    def name(self):
        # package name (the XML filename without extension)
        return self._name

    def file_path(self, file_path):
        """Resolve `file_path` against the source folder; zip members as-is."""
        if file_utils.is_folder(self._source):
            return os.path.join(self._source, file_path)
        return file_path

    def add_asset(self, basename, file_path):
        """Register an asset, e.g.:

        {
            "artigo02-gf03.tiff": "/path/artigo02-gf03.tiff",
            "artigo02-gf03.jpg": "/path/artigo02-gf03.jpg",
            "artigo02-gf03.png": "/path/artigo02-gf03.png",
        }
        """
        self._assets[basename] = self.file_path(file_path)

    def get_asset(self, basename):
        """Return the asset path for `basename`, or None when absent."""
        try:
            return self._assets[basename]
        except KeyError:
            return

    def add_rendition(self, lang, file_path):
        """Register a rendition, e.g.:

        {
            "original": "artigo02.pdf",
            "en": "artigo02-en.pdf",
        }
        """
        self._renditions[lang] = self.file_path(file_path)

    def get_rendition(self, lang):
        """Return the rendition path for `lang`, or None when absent."""
        try:
            return self._renditions[lang]
        except KeyError:
            return

    @property
    def source(self):
        # the folder or zip file the package was built from
        return self._source

    @property
    def xml(self):
        # resolved path of the package's XML file
        return self.file_path(self._xml)

    @xml.setter
    def xml(self, value):
        self._xml = value

    @property
    def renditions(self):
        # mapping: language -> resolved rendition path
        return self._renditions

    @property
    def xml_content(self):
        """Raw bytes of the XML, read from the folder or from the zip."""
        if file_utils.is_folder(self._source):
            with open(self.xml, "rb") as fp:
                return fp.read()
        with ZipFile(self._source) as zf:
            return zf.read(self.xml)
def select_filenames_by_prefix(prefix, files):
    """
    Select the files that belong to one document package.

    Parameters
    ----------
    prefix : str
        Filename prefix (the XML name without extension)
    files : list of str
        Candidate file paths

    Returns
    -------
    list
        Paths whose basename matches the prefix
    """
    selected = []
    for candidate in files:
        if match_file_by_prefix(prefix, candidate):
            selected.append(candidate)
    return selected
def match_file_by_prefix(prefix, file_path):
    """
    Tell whether `file_path` belongs to the document package named `prefix`.

    A file belongs to the package when its basename is the prefix followed
    by "-" (assets / translated renditions) or "." (same-name files).

    Parameters
    ----------
    prefix : str
        Filename prefix
    file_path : str
        File path

    Returns
    -------
    bool
        True when the file belongs to the package
    """
    basename = os.path.basename(file_path)
    return basename.startswith((prefix + "-", prefix + "."))
def explore_source(source):
    """Build packages from `source` (a zip file or a folder).

    Tries the zip reader first, then the folder reader.

    Raises
    ------
    ValueError
        When `source` is neither a zip file nor a folder.
    """
    packages = _explore_zipfile(source)
    if not packages:
        packages = _explore_folder(source)
    if not packages:
        raise ValueError("%s: Invalid value for `source`" % source)
    return packages
def _explore_folder(folder):
    """
    Get packages' data from folder.

    Groups files by their XML basename and returns data in dict format.

    Parameters
    ----------
    folder : str
        Folder of the package

    Returns
    -------
    dict
        prefix -> Package; None (implicitly) when `folder` is not a folder
    """
    if file_utils.is_folder(folder):
        data = _group_files_by_xml_filename(
            folder,
            file_utils.xml_files_list(folder),
            file_utils.files_list(folder),
        )
        return data
def _explore_zipfile(zip_path):
    """
    Get packages' data from `zip_path`.

    Groups files by their XML basename and returns data in dict format.

    Parameters
    ----------
    zip_path : str
        zip file path

    Returns
    -------
    dict
        prefix -> Package; None when `zip_path` is not a zip file
    """
    if not file_utils.is_zipfile(zip_path):
        return None
    # The previous version also opened the zip here (`with ZipFile(...)`)
    # without ever using the handle; both helpers below open it
    # themselves, so that extra open was redundant (invalid zips still
    # raise from the helpers).
    return _group_files_by_xml_filename(
        zip_path,
        file_utils.xml_files_list_from_zipfile(zip_path),
        file_utils.files_list_from_zipfile(zip_path),
    )
def _group_files_by_xml_filename(source, xmls, files):
    """
    Group package files by their XML basename.

    Parameters
    ----------
    source : str
        folder path or zip file path the packages come from
    xmls : list
        XML file paths
    files : list
        all file paths in the folder or zipfile; entries assigned to a
        package are removed from this list as a side effect

    Returns
    -------
    dict
        key: XML filename without extension
        value: Package
    """
    docs = {}
    for xml in xmls:
        basename = os.path.basename(xml)
        prefix, ext = os.path.splitext(basename)
        docs.setdefault(prefix, Package(source, prefix))
        # XML
        docs[prefix].xml = xml
        for file in select_filenames_by_prefix(prefix, files):
            # classify the package file: asset or rendition?
            component = _eval_file(prefix, file)
            if not component:
                continue
            # outcome of the classification
            ftype = component.get("ftype")
            file_path = component["file_path"]
            comp_id = component["component_id"]
            if ftype:
                docs[prefix].add_asset(comp_id, file_path)
            else:
                docs[prefix].add_rendition(comp_id, file_path)
            # already assigned to a package; do not consider it again
            files.remove(file)
    return docs
def _eval_file(prefix, file_path):
    """
    Classify a package file as an `asset` or a `rendition`.

    Parameters
    ----------
    prefix : str
        XML filename without extension
    file_path : str
        path of the file to classify

    Returns
    -------
    dict or None
        For a rendition: {"component_id": lang, "file_path": path}.
        For an asset: {"component_id", "component_name", "ftype", "file_path"}.
        None when the file does not belong to the package or is an XML file.
    """
    if not match_file_by_prefix(prefix, file_path):
        # ignore files whose name does not match the prefix
        return None
    if file_path.endswith(".xml"):
        # XML files are registered separately by the caller
        return None
    # it matches
    filename = os.path.basename(file_path)
    fname, ext = os.path.splitext(filename)
    lang = None
    if ext == ".pdf":
        # Strip the prefix positionally. The previous implementation used
        # `fname.replace(prefix, "")`, which removes *every* occurrence of
        # the prefix and corrupts the suffix when the prefix repeats in
        # the filename (e.g. prefix "a", fname "a-ab" -> "-b").
        suffix = fname[len(prefix):]
        if fname == prefix:
            lang = "original"
        elif len(suffix) == 3 and suffix[0] == "-":
            # it is a translated rendition, e.g. "<prefix>-en.pdf"
            lang = suffix[1:]
    if lang:
        return dict(
            component_id=lang,
            file_path=file_path,
        )
    return dict(
        component_id=filename,
        component_name=fname,
        ftype=ext[1:],
        file_path=file_path,
    )
|
packtools/sps/models/packages.py
|
codereval_python_data_86
|
Return the files in `zip_path`
Example:
```
[
'2318-0889-tinf-33-0421/2318-0889-tinf-33-e200069.pdf',
'2318-0889-tinf-33-0421/2318-0889-tinf-33-e200069.xml',
'2318-0889-tinf-33-0421/2318-0889-tinf-33-e200071.pdf',
'2318-0889-tinf-33-0421/2318-0889-tinf-33-e200071.xml',
'2318-0889-tinf-33-0421/2318-0889-tinf-33-e200071-gf01.tif',
'2318-0889-tinf-33-0421/2318-0889-tinf-33-e200071-gf02.tif',
'2318-0889-tinf-33-0421/2318-0889-tinf-33-e200071-gf03.tif',
'2318-0889-tinf-33-0421/2318-0889-tinf-33-e200071-gf04.tif',
]
```
def files_list_from_zipfile(zip_path):
    """
    List every member name stored in the zip file `zip_path`.

    Example:
    ```
    [
        '2318-0889-tinf-33-0421/2318-0889-tinf-33-e200069.pdf',
        '2318-0889-tinf-33-0421/2318-0889-tinf-33-e200069.xml',
    ]
    ```
    """
    with ZipFile(zip_path) as archive:
        return archive.namelist()
import os
import logging
import re
import shutil
import tempfile
from zipfile import ZipFile, ZIP_DEFLATED
logger = logging.getLogger(__name__)
def is_folder(source):
    """Return True when `source` is an existing directory."""
    return os.path.isdir(source)
def is_zipfile(source):
    """Return True when `source` is an existing file named "*.zip".

    NOTE(review): this checks the extension only, not the zip magic
    number (unlike ``zipfile.is_zipfile``).
    """
    return os.path.isfile(source) and source.endswith(".zip")
def xml_files_list(path):
    """
    Return the XML files found in `path`.

    Returns
    -------
    list
        Entry names ending in ".xml".

    Notes
    -----
    The previous version returned a one-shot generator, inconsistent
    with `files_list` (which returns a list) and silently empty on a
    second iteration; a list is reusable and matches the sibling helper.
    """
    return [f for f in os.listdir(path) if f.endswith(".xml")]
def files_list(path):
    """
    Return the names of the entries in directory `path`.
    """
    entries = os.listdir(path)
    return entries
def read_file(path, encoding="utf-8", mode="r"):
    """Read and return the whole content of `path`.

    Parameters mirror ``open``: text mode and UTF-8 by default.
    """
    with open(path, mode=mode, encoding=encoding) as handle:
        return handle.read()
def read_from_zipfile(zip_path, filename):
    """Return the raw bytes of `filename` stored inside `zip_path`."""
    with ZipFile(zip_path) as archive:
        return archive.read(filename)
def xml_files_list_from_zipfile(zip_path):
    """Return the names of the ".xml" members of the zip file `zip_path`."""
    with ZipFile(zip_path) as archive:
        return [
            member
            for member in archive.namelist()
            if os.path.splitext(member)[-1] == ".xml"
        ]
def files_list_from_zipfile(zip_path):
    """
    List every member name stored in the zip file `zip_path`.

    Example:
    ```
    [
        '2318-0889-tinf-33-0421/2318-0889-tinf-33-e200069.pdf',
        '2318-0889-tinf-33-0421/2318-0889-tinf-33-e200069.xml',
    ]
    ```
    """
    with ZipFile(zip_path) as archive:
        return archive.namelist()
def write_file(path, source, mode="w"):
    """
    Write `source` to `path`, creating parent folders as needed.

    Parameters
    ----------
    path : str
        destination path
    source : str or bytes
        content to write (bytes require a binary `mode`)
    mode : str
        file mode; when it contains "b" no encoding is applied

    Notes
    -----
    The previous version called ``os.makedirs(dirname)`` even when
    `path` had no directory part (``dirname == ""``), which raised
    ``FileNotFoundError``; it was also racy between the ``isdir`` check
    and the creation. ``exist_ok=True`` handles both.
    """
    dirname = os.path.dirname(path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    logging.getLogger(__name__).debug("Gravando arquivo: %s", path)
    if "b" in mode:
        with open(path, mode) as f:
            f.write(source)
        return
    with open(path, mode, encoding="utf-8") as f:
        f.write(source)
def create_zip_file(files, zip_name, zip_folder=None):
    """Zip `files` (flattened to their basenames) into `zip_folder`/`zip_name`.

    When `zip_folder` is falsy a fresh temporary folder is used.
    Returns the path of the created zip file.
    """
    target_dir = zip_folder if zip_folder else tempfile.mkdtemp()
    zip_path = os.path.join(target_dir, zip_name)
    with ZipFile(zip_path, 'w', ZIP_DEFLATED) as archive:
        for file_path in files:
            archive.write(file_path, os.path.basename(file_path))
    return zip_path
def delete_folder(path):
    """Remove the folder `path` and everything under it, best effort.

    Missing paths and filesystem errors are ignored. The previous bare
    ``except:`` also swallowed unrelated bugs (e.g. ``TypeError`` for a
    bad argument) and even ``KeyboardInterrupt``; only ``OSError`` is
    expected from ``shutil.rmtree``.
    """
    try:
        shutil.rmtree(path)
    except OSError:
        pass
def create_temp_file(filename, content=None, mode='w'):
    """Create `filename` with `content` inside a fresh temporary folder.

    Returns the full path of the created file.
    """
    tmp_dir = tempfile.mkdtemp()
    full_path = os.path.join(tmp_dir, filename)
    write_file(full_path, content or '', mode)
    return full_path
def copy_file(source, target):
    """Copy `source` into a fresh temporary folder under the name `target`.

    Returns the destination path (``shutil.copyfile``'s return value).

    Notes
    -----
    Uses lazy %-style logging arguments instead of an f-string, so the
    message is only formatted when INFO logging is enabled.
    """
    tmpdir = tempfile.mkdtemp()
    fullpath_target = os.path.join(tmpdir, target)
    logging.getLogger(__name__).info(
        'Copying file %s to %s', source, fullpath_target)
    return shutil.copyfile(source, fullpath_target)
def size(file_path):
    """Return the size of `file_path` in bytes."""
    return os.path.getsize(file_path)
def get_prefix_by_xml_filename(xml_filename):
    """
    Return the prefix associated with an XML file.

    Parameters
    ----------
    xml_filename : str
        XML file name

    Returns
    -------
    str
        The file name with its extension stripped
    """
    prefix, _ext = os.path.splitext(xml_filename)
    return prefix
def get_file_role(file_path, prefix, pdf_langs):
    """
    Classify a package file in the context of a document.

    Parameters
    ----------
    file_path : str
        File name
    prefix : str
        Prefix associated with the document
    pdf_langs : list
        Languages of the document's PDF renditions

    Returns
    -------
    str
        'xml', 'renditions' or 'assets'
    """
    stem, ext = os.path.splitext(file_path)
    if ext == '.xml':
        return 'xml'
    if ext == '.pdf':
        if stem == prefix:
            return 'renditions'
        if any(stem == f'{prefix}-{lang}' for lang in pdf_langs):
            return 'renditions'
    return 'assets'
def extract_issn_from_zip_uri(zip_uri):
    """
    Extract the ISSN code from the URI of a zip file.

    Parameters
    ----------
    zip_uri : str
        URI of a zip file, e.g.
        ".../ingress/packages/1234-5678/whatever.zip"

    Returns
    -------
    str or None
        The ISSN, or None when the URI does not match.
    """
    # The previous pattern ended in ".*.zip" with an unescaped dot, so
    # any character before "zip" matched (e.g. "...pkgxzip").
    match = re.search(r'.*/ingress/packages/(\d{4}-\d{4})/.*\.zip', zip_uri)
    if match:
        return match.group(1)
def get_filename(file_path):
    """Return the basename of `file_path`."""
    return os.path.basename(file_path)
|
packtools/file_utils.py
|
codereval_python_data_87
|
Convert text matching the pattern `w:st="` into `w-st="`
def fix_namespace_prefix_w(content):
    """
    Rewrite every attribute token matching `w:xxx="` as `w-xxx="`
    throughout `content`, so the undeclared `w:` prefix no longer
    appears as a namespace prefix.
    """
    log = logging.getLogger(__name__)
    pattern = r"\bw:[a-z]{1,}=\""
    found_items = re.findall(pattern, content)
    log.debug("Found %i namespace prefix w", len(found_items))
    for item in set(found_items):
        new_namespace = item.replace(":", "-")
        # lazy %-args: the previous version formatted eagerly with "%"
        # even when DEBUG logging was disabled
        log.debug("%s -> %s", item, new_namespace)
        content = content.replace(item, new_namespace)
    return content
import logging
import re
from copy import deepcopy
from lxml import etree
from packtools import validations
from packtools.sps import exceptions
from packtools import file_utils
logger = logging.getLogger(__name__)
def get_nodes_with_lang(xmltree, lang_xpath, node_xpath=None):
    """
    Collect the nodes selected by `lang_xpath` together with their xml:lang.

    Each returned dict has "node" (the match itself, or its `node_xpath`
    child when given) and "lang" (the xml:lang attribute value).
    """
    collected = []
    for match in xmltree.xpath(lang_xpath):
        entry = {
            "node": match.find(node_xpath) if node_xpath else match,
            "lang": match.get('{http://www.w3.org/XML/1998/namespace}lang'),
        }
        collected.append(entry)
    return collected
def node_text_without_xref(node):
    """
    Return the node text including subtags, except `xref` elements.

    The tails of removed `xref` elements must survive: a placeholder
    element is inserted after each tailed xref and stripped again at the
    end (strip_tags keeps the stripped element's text/tail in place).
    Returns None when `node` is None.
    """
    if node is None:
        return
    # work on a copy: the caller's tree must not be mutated
    node = deepcopy(node)
    for xref in node.findall(".//xref"):
        if xref.tail:
            _next = xref.getnext()
            if _next is None or _next.tag != "xref":
                # NOTE(review): relies on lxml's addnext() moving the
                # tail text onto the inserted placeholder — confirm
                e = etree.Element("EMPTYTAGTOKEEPXREFTAIL")
                xref.addnext(e)
    for xref in node.findall(".//xref"):
        parent = xref.getparent()
        parent.remove(xref)
    # remove the placeholder tags, keeping their tails
    etree.strip_tags(node, "EMPTYTAGTOKEEPXREFTAIL")
    return node_text(node)
def formatted_text(title_node):
    """Return the title text without xref subtags.

    FIXME: callers should migrate to `node_text_without_xref`, the more
    explicit name for the same operation.
    """
    return node_text_without_xref(title_node)
def fix_xml(xml_str):
    """Apply the known XML source fix-ups (currently only `w:` prefixes)."""
    return fix_namespace_prefix_w(xml_str)
def fix_namespace_prefix_w(content):
    """
    Rewrite every attribute token matching `w:xxx="` as `w-xxx="`
    throughout `content`, so the undeclared `w:` prefix no longer
    appears as a namespace prefix.
    """
    log = logging.getLogger(__name__)
    pattern = r"\bw:[a-z]{1,}=\""
    found_items = re.findall(pattern, content)
    log.debug("Found %i namespace prefix w", len(found_items))
    for item in set(found_items):
        new_namespace = item.replace(":", "-")
        # lazy %-args: the previous version formatted eagerly with "%"
        # even when DEBUG logging was disabled
        log.debug("%s -> %s", item, new_namespace)
        content = content.replace(item, new_namespace)
    return content
def _get_xml_content(xml):
    """
    Normalize `xml` into bytes ready for parsing.

    `xml` may be a file path, a raw XML string, or already bytes:
    - str: first treated as a path; when unreadable, the string itself
      is taken as the XML content. Either way the fix-ups are applied
      and the result is UTF-8 encoded.
    - anything else (e.g. bytes): returned unchanged.
    """
    if isinstance(xml, str):
        try:
            content = file_utils.read_file(xml)
        # NOTE(review): FileNotFoundError is a subclass of OSError,
        # so the first tuple member is redundant
        except (FileNotFoundError, OSError):
            content = xml
        content = fix_xml(content)
        return content.encode("utf-8")
    return xml
def get_xml_tree(content):
    """
    Parse `content` (path, string or bytes) into an lxml element tree.

    Raises
    ------
    exceptions.SPSLoadToXMLError
        When the content is not well-formed XML.
    """
    # no_network blocks external fetches during parsing; blank text is
    # dropped so pretty-printing works later
    parser = etree.XMLParser(remove_blank_text=True, no_network=True)
    try:
        content = _get_xml_content(content)
        xml_tree = etree.XML(content, parser)
    except etree.XMLSyntaxError as exc:
        # re-raise as a project error, suppressing the lxml context
        raise exceptions.SPSLoadToXMLError(str(exc)) from None
    else:
        return xml_tree
def tostring(node, doctype=None, pretty_print=False):
    """Serialize `node` to a UTF-8 XML string with an XML declaration."""
    serialized = etree.tostring(
        node,
        doctype=doctype,
        xml_declaration=True,
        method="xml",
        encoding="utf-8",
        pretty_print=pretty_print,
    )
    return serialized.decode("utf-8")
def node_text(node):
    """
    Return the node's text including its subtags.

    For <title>Text <bold>text</bold> Text</title>, returns
    Text <bold>text</bold> Text

    Notes
    -----
    Iterates the node directly instead of calling ``getchildren()``,
    which is deprecated (and removed from ``xml.etree`` in Python 3.9).
    """
    parts = [node.text or ""]
    for child in node:
        parts.append(
            etree.tostring(child, encoding="utf-8").decode("utf-8")
        )
    return "".join(parts)
def get_year_month_day(node):
    """
    Return the values of the "year", "month" and "day" child elements.

    Parameters
    ----------
    node : lxml.etree.Element
        A _date_-like element containing "year", "month", "day".

    Returns
    -------
    tuple of strings
        ("YYYY", "MM", "DD"); missing children become "00".
        None when node is None.
    """
    if node is None:
        return None
    parts = []
    for tag in ("year", "month", "day"):
        parts.append((node.findtext(tag) or "").zfill(2))
    return tuple(parts)
def create_alternatives(node, assets_data):
    """
    Set asset URIs on `node` (a <graphic>), or replace it by an
    <alternatives> element when several asset versions exist.

    `assets_data` maps file extensions (".tif", ".png", ...) to URIs.
    No-op when `node` is None, `assets_data` is empty, or the node has
    no parent.

    ```xml
    <alternatives>
        <graphic xlink:href="....tif"/>
        <graphic xlink:href="....png" specific-use="scielo-web"/>
        <graphic xlink:href="....jpg" specific-use="scielo-web"
                 content-type="scielo-267x140"/>
    </alternatives>
    ```
    """
    if node is None or not assets_data:
        return
    parent = node.getparent()
    if parent is None:
        return
    if len(assets_data) == 1:
        # single version: annotate the existing node in place
        for extension, uri in assets_data.items():
            node.set("{http://www.w3.org/1999/xlink}href", uri)
            if extension in [".tif", ".tiff"]:
                # the tiff version carries no extra attributes
                pass
            elif extension in [".png"]:
                node.set("specific-use", "scielo-web")
            else:
                # any other extension (e.g. .jpg)
                node.set("specific-use", "scielo-web")
                node.set("content-type", "scielo-267x140")
    else:
        # several versions: wrap one <graphic> per asset in <alternatives>
        alternative_node = etree.Element("alternatives")
        for extension, uri in assets_data.items():
            _node = etree.Element("graphic")
            _node.set("{http://www.w3.org/1999/xlink}href", uri)
            # append first; attribute mutation below still applies
            alternative_node.append(_node)
            if extension in [".tif", ".tiff"]:
                pass
            elif extension in [".png"]:
                _node.set("specific-use", "scielo-web")
            else:
                _node.set("specific-use", "scielo-web")
                _node.set("content-type", "scielo-267x140")
        parent.replace(node, alternative_node)
def parse_value(value):
    """Normalize one issue-label token.

    Digits are zero-padded to two chars; tokens containing "spe" map to
    "spe"; tokens containing "sup" map to "s"; anything else is returned
    lowercased.
    """
    lowered = value.lower()
    if lowered.isdigit():
        return lowered.zfill(2)
    if "spe" in lowered:
        return "spe"
    if "sup" in lowered:
        return "s"
    return lowered
def parse_issue(issue):
    """Normalize a raw issue label into its canonical dash-joined form.

    Tokens are normalized by `parse_value` and joined with "-"; the
    "spe-"/"s-" separators are collapsed, and a trailing bare "s"
    (supplement with no number) becomes "s0".
    """
    tokens = issue.split()
    normalized = "-".join(parse_value(token) for token in tokens)
    normalized = normalized.replace("spe-", "spe").replace("s-", "s")
    if normalized.endswith("s"):
        normalized += "0"
    return normalized
def is_allowed_to_update(xml_sps, attr_name, attr_new_value):
    """
    Decide whether `attr_name` of `xml_sps` may be updated to
    `attr_new_value`, using the validation function registered for the
    attribute (when any).

    Returns
    -------
    bool
        True when the update may proceed; False when the value is
        unchanged (update unnecessary).

    Raises
    ------
    exceptions.NotAllowedtoChangeAttributeValueError
        When the current value is already valid (must not be replaced).
    exceptions.InvalidAttributeValueError
        When the new value is invalid.
    """
    validate_function = validations.VALIDATE_FUNCTIONS.get(attr_name)
    if validate_function is None:
        # no validation registered, so the update is allowed
        return True
    curr_value = getattr(xml_sps, attr_name)
    if attr_new_value == curr_value:
        # nothing to change
        return False
    try:
        # validate the current value of the attribute
        validate_function(curr_value)
    except (ValueError, exceptions.InvalidValueForOrderError):
        # the current value is invalid,
        # so continue and check the "new" value
        pass
    else:
        # the current value is valid,
        # so refuse to overwrite it
        raise exceptions.NotAllowedtoChangeAttributeValueError(
            "Not allowed to update %s (%s) with %s, "
            "because current is valid" %
            (attr_name, curr_value, attr_new_value))
    try:
        # validate the new value for the attribute
        validate_function(attr_new_value)
    except (ValueError, exceptions.InvalidValueForOrderError):
        # the new value is invalid, so reject the update
        raise exceptions.InvalidAttributeValueError(
            "Not allowed to update %s (%s) with %s, "
            "because new value is invalid" %
            (attr_name, curr_value, attr_new_value))
    else:
        # the new value is valid, so allow the update
        return True
def match_pubdate(node, pubdate_xpaths):
    """
    Return the first pub-date element matching any of `pubdate_xpaths`.

    Returns None when no xpath matches.
    """
    for xpath in pubdate_xpaths:
        found = node.find(xpath)
        if found is not None:
            return found
|
packtools/sps/utils/xml_utils.py
|
codereval_python_data_88
|
Returns the first match in the pubdate_xpaths list
def match_pubdate(node, pubdate_xpaths):
    """
    Return the first pub-date element matching any of `pubdate_xpaths`.

    Returns None when no xpath matches.
    """
    for xpath in pubdate_xpaths:
        found = node.find(xpath)
        if found is not None:
            return found
import logging
import re
from copy import deepcopy
from lxml import etree
from packtools import validations
from packtools.sps import exceptions
from packtools import file_utils
logger = logging.getLogger(__name__)
def get_nodes_with_lang(xmltree, lang_xpath, node_xpath=None):
_items = []
for node in xmltree.xpath(lang_xpath):
_item = {}
if node_xpath:
_item["node"] = node.find(node_xpath)
else:
_item["node"] = node
_item["lang"] = node.get('{http://www.w3.org/XML/1998/namespace}lang')
_items.append(_item)
return _items
def node_text_without_xref(node):
"""
Retorna text com subtags, exceto `xref`
"""
if node is None:
return
node = deepcopy(node)
for xref in node.findall(".//xref"):
if xref.tail:
_next = xref.getnext()
if _next is None or _next.tag != "xref":
e = etree.Element("EMPTYTAGTOKEEPXREFTAIL")
xref.addnext(e)
for xref in node.findall(".//xref"):
parent = xref.getparent()
parent.remove(xref)
etree.strip_tags(node, "EMPTYTAGTOKEEPXREFTAIL")
return node_text(node)
def formatted_text(title_node):
# FIXME substituir `formatted_text` por `node_text_without_xref`
# por ser mais explícito
return node_text_without_xref(title_node)
def fix_xml(xml_str):
return fix_namespace_prefix_w(xml_str)
def fix_namespace_prefix_w(content):
    """
    Rewrite every attribute token matching `w:xxx="` as `w-xxx="`
    throughout `content`, so the undeclared `w:` prefix no longer
    appears as a namespace prefix.
    """
    log = logging.getLogger(__name__)
    pattern = r"\bw:[a-z]{1,}=\""
    found_items = re.findall(pattern, content)
    log.debug("Found %i namespace prefix w", len(found_items))
    for item in set(found_items):
        new_namespace = item.replace(":", "-")
        # lazy %-args: the previous version formatted eagerly with "%"
        # even when DEBUG logging was disabled
        log.debug("%s -> %s", item, new_namespace)
        content = content.replace(item, new_namespace)
    return content
def _get_xml_content(xml):
if isinstance(xml, str):
try:
content = file_utils.read_file(xml)
except (FileNotFoundError, OSError):
content = xml
content = fix_xml(content)
return content.encode("utf-8")
return xml
def get_xml_tree(content):
parser = etree.XMLParser(remove_blank_text=True, no_network=True)
try:
content = _get_xml_content(content)
xml_tree = etree.XML(content, parser)
except etree.XMLSyntaxError as exc:
raise exceptions.SPSLoadToXMLError(str(exc)) from None
else:
return xml_tree
def tostring(node, doctype=None, pretty_print=False):
return etree.tostring(
node,
doctype=doctype,
xml_declaration=True,
method="xml",
encoding="utf-8",
pretty_print=pretty_print,
).decode("utf-8")
def node_text(node):
    """
    Return the node's text including its subtags.

    For <title>Text <bold>text</bold> Text</title>, returns
    Text <bold>text</bold> Text

    Notes
    -----
    Iterates the node directly instead of calling ``getchildren()``,
    which is deprecated (and removed from ``xml.etree`` in Python 3.9).
    """
    parts = [node.text or ""]
    for child in node:
        parts.append(
            etree.tostring(child, encoding="utf-8").decode("utf-8")
        )
    return "".join(parts)
def get_year_month_day(node):
"""
Retorna os valores respectivos dos elementos "year", "month", "day".
Parameters
----------
node : lxml.etree.Element
Elemento do tipo _date_, que tem os elementos "year", "month", "day".
Returns
-------
tuple of strings
("YYYY", "MM", "DD")
None se node is None
"""
if node is not None:
return tuple(
[(node.findtext(item) or "").zfill(2)
for item in ["year", "month", "day"]]
)
def create_alternatives(node, assets_data):
"""
```xml
<alternatives>
<graphic
xlink:href="https://minio.scielo.br/documentstore/1678-2674/
rQRTPbt6jkrncZTsPdCyXsn/
6d6b2cfaa2dc5bd1fb84644218506cbfbc4dfb1e.tif"/>
<graphic
xlink:href="https://minio.scielo.br/documentstore/1678-2674/
rQRTPbt6jkrncZTsPdCyXsn/
b810735a45beb5f829d4eb07e4cf68842f57313f.png"
specific-use="scielo-web"/>
<graphic
xlink:href="https://minio.scielo.br/documentstore/1678-2674/
rQRTPbt6jkrncZTsPdCyXsn/
e9d0cd6430c85a125e7490629ce43f227d00ef5e.jpg"
specific-use="scielo-web"
content-type="scielo-267x140"/>
</alternatives>
```
"""
if node is None or not assets_data:
return
parent = node.getparent()
if parent is None:
return
if len(assets_data) == 1:
for extension, uri in assets_data.items():
node.set("{http://www.w3.org/1999/xlink}href", uri)
if extension in [".tif", ".tiff"]:
pass
elif extension in [".png"]:
node.set("specific-use", "scielo-web")
else:
node.set("specific-use", "scielo-web")
node.set("content-type", "scielo-267x140")
else:
alternative_node = etree.Element("alternatives")
for extension, uri in assets_data.items():
_node = etree.Element("graphic")
_node.set("{http://www.w3.org/1999/xlink}href", uri)
alternative_node.append(_node)
if extension in [".tif", ".tiff"]:
pass
elif extension in [".png"]:
_node.set("specific-use", "scielo-web")
else:
_node.set("specific-use", "scielo-web")
_node.set("content-type", "scielo-267x140")
parent.replace(node, alternative_node)
def parse_value(value):
    """Normalize one issue-label token.

    Digits are zero-padded to two chars; tokens containing "spe" map to
    "spe"; tokens containing "sup" map to "s"; anything else is returned
    lowercased.
    """
    lowered = value.lower()
    if lowered.isdigit():
        return lowered.zfill(2)
    if "spe" in lowered:
        return "spe"
    if "sup" in lowered:
        return "s"
    return lowered
def parse_issue(issue):
issue = " ".join([item for item in issue.split()])
parts = issue.split()
parts = [parse_value(item) for item in parts]
s = "-".join(parts)
s = s.replace("spe-", "spe")
s = s.replace("s-", "s")
if s.endswith("s"):
s += "0"
return s
def is_allowed_to_update(xml_sps, attr_name, attr_new_value):
"""
Se há uma função de validação associada com o atributo,
verificar se é permitido atualizar o atributo, dados seus valores
atual e/ou novo
"""
validate_function = validations.VALIDATE_FUNCTIONS.get(attr_name)
if validate_function is None:
# não há nenhuma validação, então é permitido fazer a atualização
return True
curr_value = getattr(xml_sps, attr_name)
if attr_new_value == curr_value:
# desnecessario atualizar
return False
try:
# valida o valor atual do atributo
validate_function(curr_value)
except (ValueError, exceptions.InvalidValueForOrderError):
# o valor atual do atributo é inválido,
# então continuar para verificar o valor "novo"
pass
else:
# o valor atual do atributo é válido,
# então não permitir atualização
raise exceptions.NotAllowedtoChangeAttributeValueError(
"Not allowed to update %s (%s) with %s, "
"because current is valid" %
(attr_name, curr_value, attr_new_value))
try:
# valida o valor novo para o atributo
validate_function(attr_new_value)
except (ValueError, exceptions.InvalidValueForOrderError):
# o valor novo é inválido, então não permitir atualização
raise exceptions.InvalidAttributeValueError(
"Not allowed to update %s (%s) with %s, "
"because new value is invalid" %
(attr_name, curr_value, attr_new_value))
else:
# o valor novo é válido, então não permitir atualização
return True
def match_pubdate(node, pubdate_xpaths):
    """
    Return the first pub-date element matching any of `pubdate_xpaths`.

    Returns None when no xpath matches.
    """
    for xpath in pubdate_xpaths:
        found = node.find(xpath)
        if found is not None:
            return found
|
packtools/sps/utils/xml_utils.py
|
codereval_python_data_89
|
Extract the possible values of number and suppl from the contents of issue.
def _extract_number_and_supplment_from_issue_element(issue):
"""
Extrai do conteúdo de <issue>xxxx</issue>, os valores number e suppl.
Valores possíveis
5 (suppl), 5 Suppl, 5 Suppl 1, 5 spe, 5 suppl, 5 suppl 1, 5 suppl. 1,
25 Suppl 1, 2-5 suppl 1, 2spe, Spe, Supl. 1, Suppl, Suppl 12,
s2, spe, spe 1, spe pr, spe2, spe.2, spepr, supp 1, supp5 1, suppl,
suppl 1, suppl 5 pr, suppl 12, suppl 1-2, suppl. 1
"""
if not issue:
return None, None
issue = issue.strip().replace(".", "")
splitted = [s for s in issue.split() if s]
splitted = ["spe"
if "spe" in s.lower() and s.isalpha() else s
for s in splitted
]
if len(splitted) == 1:
issue = splitted[0]
if issue.isdigit():
return issue, None
if "sup" in issue.lower():
# match como sup*
return None, "0"
if issue.startswith("s"):
if issue[1:].isdigit():
return None, issue[1:]
# match com spe, 2-5, 3B
return issue, None
if len(splitted) == 2:
if "sup" in splitted[0].lower():
return None, splitted[1]
if "sup" in splitted[1].lower():
return splitted[0], "0"
# match spe 4 -> spe4
return "".join(splitted), None
if len(splitted) == 3:
if "sup" in splitted[1].lower():
return splitted[0], splitted[2]
# match ????
return "".join(splitted), None
"""<article>
<front>
<article-meta>
<pub-date publication-format="electronic" date-type="collection">
<year>2003</year>
</pub-date>
<volume>4</volume>
<issue>1</issue>
<fpage>108</fpage>
<lpage>123</lpage>
</article-meta>
</front>
</article>
"""
from packtools.sps.models.dates import ArticleDates
def _extract_number_and_supplment_from_issue_element(issue):
"""
Extrai do conteúdo de <issue>xxxx</issue>, os valores number e suppl.
Valores possíveis
5 (suppl), 5 Suppl, 5 Suppl 1, 5 spe, 5 suppl, 5 suppl 1, 5 suppl. 1,
25 Suppl 1, 2-5 suppl 1, 2spe, Spe, Supl. 1, Suppl, Suppl 12,
s2, spe, spe 1, spe pr, spe2, spe.2, spepr, supp 1, supp5 1, suppl,
suppl 1, suppl 5 pr, suppl 12, suppl 1-2, suppl. 1
"""
if not issue:
return None, None
issue = issue.strip().replace(".", "")
splitted = [s for s in issue.split() if s]
splitted = ["spe"
if "spe" in s.lower() and s.isalpha() else s
for s in splitted
]
if len(splitted) == 1:
issue = splitted[0]
if issue.isdigit():
return issue, None
if "sup" in issue.lower():
# match como sup*
return None, "0"
if issue.startswith("s"):
if issue[1:].isdigit():
return None, issue[1:]
# match com spe, 2-5, 3B
return issue, None
if len(splitted) == 2:
if "sup" in splitted[0].lower():
return None, splitted[1]
if "sup" in splitted[1].lower():
return splitted[0], "0"
# match spe 4 -> spe4
return "".join(splitted), None
if len(splitted) == 3:
if "sup" in splitted[1].lower():
return splitted[0], splitted[2]
# match ????
return "".join(splitted), None
class ArticleMetaIssue:
    """
    Read-only accessors for issue-related fields of <front>/<article-meta>.

    Exposes volume, issue (split into number/suppl), pagination and
    elocation-id read from the given XML tree.
    """

    def __init__(self, xmltree):
        # parsed XML document; presumably an lxml tree — `fpage_seq` relies
        # on `.xpath`, which stdlib ElementTree does not provide
        self.xmltree = xmltree

    @property
    def data(self):
        """
        Dict of the non-empty issue attributes, plus `pub_year` when the
        collection date provides a year.
        """
        _data = {}
        for attr in (
            "volume", "number", "suppl",
            "fpage", "fpage_seq", "lpage",
            "elocation_id",
        ):
            try:
                value = getattr(self, attr)
            except AttributeError:
                continue
            if value:
                _data[attr] = value
        try:
            _data["pub_year"] = self.collection_date["year"]
        except (KeyError, TypeError):
            # no collection date, or it has no year
            pass
        return _data

    @property
    def collection_date(self):
        """Collection pub-date as provided by ArticleDates."""
        return ArticleDates(self.xmltree).collection_date

    @property
    def volume(self):
        return self.xmltree.findtext(".//front/article-meta/volume")

    @property
    def issue(self):
        return self.xmltree.findtext(".//front/article-meta/issue")

    @property
    def number(self):
        """Issue number parsed out of <issue>, or None."""
        if self.issue:
            number, _ = _extract_number_and_supplment_from_issue_element(self.issue)
            return number

    @property
    def suppl(self):
        """Supplement id: <supplement> text, else parsed from <issue>."""
        supplement = self.xmltree.findtext(".//front/article-meta/supplement")
        if supplement:
            return supplement
        if self.issue:
            _, suppl = _extract_number_and_supplment_from_issue_element(self.issue)
            return suppl

    @property
    def elocation_id(self):
        return self.xmltree.findtext(".//front/article-meta/elocation-id")

    @property
    def fpage(self):
        return self.xmltree.findtext(".//front/article-meta/fpage")

    @property
    def fpage_seq(self):
        """`seq` attribute of <fpage>, or None when <fpage> is absent."""
        nodes = self.xmltree.xpath(".//front/article-meta/fpage")
        if nodes:
            return nodes[0].get("seq")
        return None

    @property
    def lpage(self):
        return self.xmltree.findtext(".//front/article-meta/lpage")
|
packtools/sps/models/front_articlemeta_issue.py
|
codereval_python_data_90
|
Return a pretty formatted representation of self.
def pretty(self, indent=0, debug=False):
    """
    Return a pretty formatted representation of self.

    :param indent: number of leading spaces to prefix.
    :param debug: when True, include isliteral/iscanonical details.
    """
    details = (
        f"<isliteral={self.isliteral!r}, iscanonical={self.iscanonical!r}>"
        if debug
        else ""
    )
    if isinstance(self.obj, str):
        shown = f"'{self.obj}'"
    else:
        shown = repr(self.obj)
    return f"{' ' * indent}{self.__class__.__name__}({details}{shown})"
"""
Boolean expressions algebra.
This module defines a Boolean algebra over the set {TRUE, FALSE} with boolean
variables called Symbols and the boolean functions AND, OR, NOT.
Some basic logic comparison is supported: two expressions can be
compared for equivalence or containment. Furthermore you can simplify
an expression and obtain its normal form.
You can create expressions in Python using familiar boolean operators
or parse expressions from strings. The parsing can be extended with
your own tokenizer. You can also customize how expressions behave and
how they are presented.
For extensive documentation look either into the docs directory or view it
online, at https://booleanpy.readthedocs.org/en/latest/.
Copyright (c) Sebastian Kraemer, basti.kr@gmail.com and others
SPDX-License-Identifier: BSD-2-Clause
"""
import inspect
import itertools
from functools import reduce # NOQA
from operator import and_ as and_operator
from operator import or_ as or_operator
# Set to True to enable tracing for parsing
TRACE_PARSE = False
# Token types for standard operators and parens
TOKEN_AND = 1
TOKEN_OR = 2
TOKEN_NOT = 3
TOKEN_LPAR = 4
TOKEN_RPAR = 5
TOKEN_TRUE = 6
TOKEN_FALSE = 7
TOKEN_SYMBOL = 8
# Human-readable name for each token type (used for tracing/diagnostics)
TOKEN_TYPES = {
    TOKEN_AND: "AND",
    TOKEN_OR: "OR",
    TOKEN_NOT: "NOT",
    TOKEN_LPAR: "(",
    TOKEN_RPAR: ")",
    TOKEN_TRUE: "TRUE",
    TOKEN_FALSE: "FALSE",
    TOKEN_SYMBOL: "SYMBOL",
}
# parsing error code and messages
PARSE_UNKNOWN_TOKEN = 1
PARSE_UNBALANCED_CLOSING_PARENS = 2
PARSE_INVALID_EXPRESSION = 3
PARSE_INVALID_NESTING = 4
PARSE_INVALID_SYMBOL_SEQUENCE = 5
PARSE_INVALID_OPERATOR_SEQUENCE = 6
# Maps each PARSE_* error code to the message used by ParseError.__str__
PARSE_ERRORS = {
    PARSE_UNKNOWN_TOKEN: "Unknown token",
    PARSE_UNBALANCED_CLOSING_PARENS: "Unbalanced parenthesis",
    PARSE_INVALID_EXPRESSION: "Invalid expression",
    PARSE_INVALID_NESTING: "Invalid expression nesting such as (AND xx)",
    PARSE_INVALID_SYMBOL_SEQUENCE: "Invalid symbols sequence such as (A B)",
    PARSE_INVALID_OPERATOR_SEQUENCE: "Invalid operator sequence without symbols such as AND OR or OR OR",
}
class ParseError(Exception):
    """
    Raised when the parser or tokenizer encounters a syntax error.

    Instances carry `token_type`, `token_string`, `position` and
    `error_code` attributes describing the error; str() of the instance
    returns a formatted message.
    """

    def __init__(self, token_type=None, token_string="", position=-1, error_code=0):
        self.token_type = token_type
        self.token_string = token_string
        self.position = position
        self.error_code = error_code

    def __str__(self, *args, **kwargs):
        message = PARSE_ERRORS.get(self.error_code, "Unknown parsing error")
        token_part = ""
        if self.token_string:
            token_part = f' for token: "{self.token_string}"'
        position_part = ""
        if self.position > 0:
            position_part = f" at position: {self.position}"
        return f"{message}{token_part}{position_part}"
class BooleanAlgebra(object):
    """
    An algebra is defined by:
    - the types of its operations and Symbol.
    - the tokenizer used when parsing expressions from strings.
    This class also serves as a base class for all boolean expressions,
    including base elements, functions and variable symbols.
    """
    def __init__(
        self,
        TRUE_class=None,
        FALSE_class=None,
        Symbol_class=None,
        NOT_class=None,
        AND_class=None,
        OR_class=None,
        allowed_in_token=(".", ":", "_"),
    ):
        """
        The types for TRUE, FALSE, NOT, AND, OR and Symbol define the boolean
        algebra elements, operations and Symbol variable. They default to the
        standard classes if not provided.
        You can customize an algebra by providing alternative subclasses of the
        standard types.
        """
        # TRUE and FALSE base elements are algebra-level "singleton" instances
        self.TRUE = TRUE_class or _TRUE
        self.TRUE = self.TRUE()
        self.FALSE = FALSE_class or _FALSE
        self.FALSE = self.FALSE()
        # they cross-reference each other
        self.TRUE.dual = self.FALSE
        self.FALSE.dual = self.TRUE
        # boolean operation types, defaulting to the standard types
        self.NOT = NOT_class or NOT
        self.AND = AND_class or AND
        self.OR = OR_class or OR
        # class used for Symbols
        self.Symbol = Symbol_class or Symbol
        tf_nao = {
            "TRUE": self.TRUE,
            "FALSE": self.FALSE,
            "NOT": self.NOT,
            "AND": self.AND,
            "OR": self.OR,
            "Symbol": self.Symbol,
        }
        # setup cross references such that all algebra types and
        # objects hold a named attribute for every other types and
        # objects, including themselves.
        for obj in tf_nao.values():
            for name, value in tf_nao.items():
                setattr(obj, name, value)
        # Set the set of characters allowed in tokens
        self.allowed_in_token = allowed_in_token
    def definition(self):
        """
        Return a tuple of this algebra defined elements and types as:
        (TRUE, FALSE, NOT, AND, OR, Symbol)
        """
        return self.TRUE, self.FALSE, self.NOT, self.AND, self.OR, self.Symbol
    def symbols(self, *args):
        """
        Return a tuple of symbols building a new Symbol from each argument.
        """
        return tuple(map(self.Symbol, args))
    def parse(self, expr, simplify=False):
        """
        Return a boolean expression parsed from `expr` either a unicode string
        or tokens iterable.
        Optionally simplify the expression if `simplify` is True.
        Raise ParseError on errors.
        If `expr` is a string, the standard `tokenizer` is used for tokenization
        and the algebra configured Symbol type is used to create Symbol
        instances from Symbol tokens.
        If `expr` is an iterable, it should contain 3-tuples of: (token_type,
        token_string, token_position). In this case, the `token_type` can be
        a Symbol instance or one of the TOKEN_* constant types.
        See the `tokenize()` method for detailed specification.
        """
        # operator precedence: a lower value binds tighter (NOT > AND > OR)
        precedence = {self.NOT: 5, self.AND: 10, self.OR: 15, TOKEN_LPAR: 20}
        if isinstance(expr, str):
            tokenized = self.tokenize(expr)
        else:
            tokenized = iter(expr)
        if TRACE_PARSE:
            tokenized = list(tokenized)
            print("tokens:")
            for t in tokenized:
                print(t)
            tokenized = iter(tokenized)
        # the abstract syntax tree for this expression that will be build as we
        # process tokens
        # the first two items are None
        # symbol items are appended to this structure
        ast = [None, None]
        def is_sym(_t):
            return isinstance(_t, Symbol) or _t in (TOKEN_TRUE, TOKEN_FALSE, TOKEN_SYMBOL)
        def is_operator(_t):
            return _t in (TOKEN_AND, TOKEN_OR)
        prev_token = None
        for token_type, token_string, token_position in tokenized:
            if TRACE_PARSE:
                print(
                    "\nprocessing token_type:",
                    repr(token_type),
                    "token_string:",
                    repr(token_string),
                    "token_position:",
                    repr(token_position),
                )
            if prev_token:
                # reject adjacent symbols (A B) and adjacent operators (AND OR)
                prev_token_type, _prev_token_string, _prev_token_position = prev_token
                if TRACE_PARSE:
                    print("  prev_token:", repr(prev_token))
                if is_sym(prev_token_type) and (
                    is_sym(token_type)
                ):  # or token_type == TOKEN_LPAR) :
                    raise ParseError(
                        token_type, token_string, token_position, PARSE_INVALID_SYMBOL_SEQUENCE
                    )
                if is_operator(prev_token_type) and (
                    is_operator(token_type) or token_type == TOKEN_RPAR
                ):
                    raise ParseError(
                        token_type, token_string, token_position, PARSE_INVALID_OPERATOR_SEQUENCE
                    )
            else:
                # an expression cannot start with a binary operator
                if is_operator(token_type):
                    raise ParseError(
                        token_type, token_string, token_position, PARSE_INVALID_OPERATOR_SEQUENCE
                    )
            if token_type == TOKEN_SYMBOL:
                ast.append(self.Symbol(token_string))
                if TRACE_PARSE:
                    print("  ast: token_type is TOKEN_SYMBOL: append new symbol", repr(ast))
            elif isinstance(token_type, Symbol):
                ast.append(token_type)
                if TRACE_PARSE:
                    print("  ast: token_type is Symbol): append existing symbol", repr(ast))
            elif token_type == TOKEN_TRUE:
                ast.append(self.TRUE)
                if TRACE_PARSE:
                    print("  ast: token_type is TOKEN_TRUE:", repr(ast))
            elif token_type == TOKEN_FALSE:
                ast.append(self.FALSE)
                if TRACE_PARSE:
                    print("  ast: token_type is TOKEN_FALSE:", repr(ast))
            elif token_type == TOKEN_NOT:
                # open a nested sub-ast for the NOT argument
                ast = [ast, self.NOT]
                if TRACE_PARSE:
                    print("  ast: token_type is TOKEN_NOT:", repr(ast))
            elif token_type == TOKEN_AND:
                ast = self._start_operation(ast, self.AND, precedence)
                if TRACE_PARSE:
                    print("  ast:token_type is TOKEN_AND: start_operation", ast)
            elif token_type == TOKEN_OR:
                ast = self._start_operation(ast, self.OR, precedence)
                if TRACE_PARSE:
                    print("  ast:token_type is TOKEN_OR: start_operation", ast)
            elif token_type == TOKEN_LPAR:
                if prev_token:
                    # Check that an opening parens is preceded by a function
                    # or an opening parens
                    if prev_token_type not in (TOKEN_NOT, TOKEN_AND, TOKEN_OR, TOKEN_LPAR):
                        raise ParseError(
                            token_type, token_string, token_position, PARSE_INVALID_NESTING
                        )
                ast = [ast, TOKEN_LPAR]
            elif token_type == TOKEN_RPAR:
                # unwind nested sub-asts until the matching TOKEN_LPAR marker
                while True:
                    if ast[0] is None:
                        raise ParseError(
                            token_type,
                            token_string,
                            token_position,
                            PARSE_UNBALANCED_CLOSING_PARENS,
                        )
                    if ast[1] is TOKEN_LPAR:
                        ast[0].append(ast[2])
                        if TRACE_PARSE:
                            print("ast9:", repr(ast))
                        ast = ast[0]
                        if TRACE_PARSE:
                            print("ast10:", repr(ast))
                        break
                    if isinstance(ast[1], int):
                        raise ParseError(
                            token_type,
                            token_string,
                            token_position,
                            PARSE_UNBALANCED_CLOSING_PARENS,
                        )
                    # the parens are properly nested
                    # the top ast node should be a function subclass
                    if not (inspect.isclass(ast[1]) and issubclass(ast[1], Function)):
                        raise ParseError(
                            token_type, token_string, token_position, PARSE_INVALID_NESTING
                        )
                    subex = ast[1](*ast[2:])
                    ast[0].append(subex)
                    if TRACE_PARSE:
                        print("ast11:", repr(ast))
                    ast = ast[0]
                    if TRACE_PARSE:
                        print("ast12:", repr(ast))
            else:
                raise ParseError(token_type, token_string, token_position, PARSE_UNKNOWN_TOKEN)
            prev_token = (token_type, token_string, token_position)
        try:
            # finalize: collapse any remaining open sub-asts bottom-up
            while True:
                if ast[0] is None:
                    if TRACE_PARSE:
                        print("ast[0] is None:", repr(ast))
                    if ast[1] is None:
                        if TRACE_PARSE:
                            print("  ast[1] is None:", repr(ast))
                        if len(ast) != 3:
                            raise ParseError(error_code=PARSE_INVALID_EXPRESSION)
                        parsed = ast[2]
                        if TRACE_PARSE:
                            print("    parsed = ast[2]:", repr(parsed))
                    else:
                        # call the function in ast[1] with the rest of the ast as args
                        parsed = ast[1](*ast[2:])
                        if TRACE_PARSE:
                            print("    parsed = ast[1](*ast[2:]):", repr(parsed))
                    break
                else:
                    if TRACE_PARSE:
                        print("subex = ast[1](*ast[2:]):", repr(ast))
                    subex = ast[1](*ast[2:])
                    ast[0].append(subex)
                    if TRACE_PARSE:
                        print("  ast[0].append(subex):", repr(ast))
                    ast = ast[0]
                    if TRACE_PARSE:
                        print("  ast = ast[0]:", repr(ast))
        except TypeError:
            raise ParseError(error_code=PARSE_INVALID_EXPRESSION)
        if simplify:
            return parsed.simplify()
        if TRACE_PARSE:
            print("final parsed:", repr(parsed))
        return parsed
    def _start_operation(self, ast, operation, precedence):
        """
        Return an AST where all operations of lower precedence are finalized.
        """
        if TRACE_PARSE:
            print("  start_operation:", repr(operation), "AST:", ast)
        op_prec = precedence[operation]
        while True:
            if ast[1] is None:
                # [None, None, x]
                if TRACE_PARSE:
                    print("    start_op: ast[1] is None:", repr(ast))
                ast[1] = operation
                if TRACE_PARSE:
                    print("    --> start_op: ast[1] is None:", repr(ast))
                return ast
            prec = precedence[ast[1]]
            if prec > op_prec:  # op=&, [ast, |, x, y] -> [[ast, |, x], &, y]
                # the new operation binds tighter: steal the last argument
                if TRACE_PARSE:
                    print("    start_op: prec > op_prec:", repr(ast))
                ast = [ast, operation, ast.pop(-1)]
                if TRACE_PARSE:
                    print("    --> start_op: prec > op_prec:", repr(ast))
                return ast
            if prec == op_prec:  # op=&, [ast, &, x] -> [ast, &, x]
                if TRACE_PARSE:
                    print("    start_op: prec == op_prec:", repr(ast))
                return ast
            if not (inspect.isclass(ast[1]) and issubclass(ast[1], Function)):
                # the top ast node should be a function subclass at this stage
                raise ParseError(error_code=PARSE_INVALID_NESTING)
            if ast[0] is None:  # op=|, [None, &, x, y] -> [None, |, x&y]
                if TRACE_PARSE:
                    print("    start_op: ast[0] is None:", repr(ast))
                subexp = ast[1](*ast[2:])
                new_ast = [ast[0], operation, subexp]
                if TRACE_PARSE:
                    print("    --> start_op: ast[0] is None:", repr(new_ast))
                return new_ast
            else:  # op=|, [[ast, &, x], ~, y] -> [ast, &, x, ~y]
                if TRACE_PARSE:
                    print("    start_op: else:", repr(ast))
                ast[0].append(ast[1](*ast[2:]))
                ast = ast[0]
                if TRACE_PARSE:
                    print("    --> start_op: else:", repr(ast))
    def tokenize(self, expr):
        """
        Return an iterable of 3-tuple describing each token given an expression
        unicode string.
        This 3-tuple contains (token, token string, position):
        - token: either a Symbol instance or one of TOKEN_* token types.
        - token string: the original token unicode string.
        - position: some simple object describing the starting position of the
          original token string in the `expr` string. It can be an int for a
          character offset, or a tuple of starting (row/line, column).
        The token position is used only for error reporting and can be None or
        empty.
        Raise ParseError on errors. The ParseError.args is a tuple of:
        (token_string, position, error message)
        You can use this tokenizer as a base to create specialized tokenizers
        for your custom algebra by subclassing BooleanAlgebra. See also the
        tests for other examples of alternative tokenizers.
        This tokenizer has these characteristics:
        - The `expr` string can span multiple lines,
        - Whitespace is not significant.
        - The returned position is the starting character offset of a token.
        - A TOKEN_SYMBOL is returned for valid identifiers which is a string
          without spaces.
        - These are valid identifiers:
          - Python identifiers.
          - a string even if starting with digits
          - digits (except for 0 and 1).
          - dotted names : foo.bar consist of one token.
          - names with colons: foo:bar consist of one token.
        - These are not identifiers:
          - quoted strings.
          - any punctuation which is not an operation
        - Recognized operators are (in any upper/lower case combinations):
          - for and: '*', '&', 'and'
          - for or: '+', '|', 'or'
          - for not: '~', '!', 'not'
        - Recognized special symbols are (in any upper/lower case combinations):
          - True symbols: 1 and True
          - False symbols: 0, False and None
        """
        if not isinstance(expr, str):
            raise TypeError(f"expr must be string but it is {type(expr)}.")
        # mapping of lowercase token strings to a token type id for the standard
        # operators, parens and common true or false symbols, as used in the
        # default tokenizer implementation.
        TOKENS = {
            "*": TOKEN_AND,
            "&": TOKEN_AND,
            "and": TOKEN_AND,
            "+": TOKEN_OR,
            "|": TOKEN_OR,
            "or": TOKEN_OR,
            "~": TOKEN_NOT,
            "!": TOKEN_NOT,
            "not": TOKEN_NOT,
            "(": TOKEN_LPAR,
            ")": TOKEN_RPAR,
            "[": TOKEN_LPAR,
            "]": TOKEN_RPAR,
            "true": TOKEN_TRUE,
            "1": TOKEN_TRUE,
            "false": TOKEN_FALSE,
            "0": TOKEN_FALSE,
            "none": TOKEN_FALSE,
        }
        position = 0
        length = len(expr)
        while position < length:
            tok = expr[position]
            # accumulate a multi-character identifier token
            sym = tok.isalnum() or tok == "_"
            if sym:
                position += 1
                while position < length:
                    char = expr[position]
                    if char.isalnum() or char in self.allowed_in_token:
                        position += 1
                        tok += char
                    else:
                        break
                position -= 1
            try:
                yield TOKENS[tok.lower()], tok, position
            except KeyError:
                if sym:
                    yield TOKEN_SYMBOL, tok, position
                elif tok not in (" ", "\t", "\r", "\n"):
                    # any other non-whitespace character is an error
                    raise ParseError(
                        token_string=tok, position=position, error_code=PARSE_UNKNOWN_TOKEN
                    )
            position += 1
    def _recurse_distributive(self, expr, operation_inst):
        """
        Recursively flatten, simplify and apply the distributive laws to the
        `expr` expression. Distributivity is considered for the AND or OR
        `operation_inst` instance.
        """
        if expr.isliteral:
            return expr
        args = (self._recurse_distributive(arg, operation_inst) for arg in expr.args)
        args = tuple(arg.simplify() for arg in args)
        if len(args) == 1:
            return args[0]
        flattened_expr = expr.__class__(*args)
        dualoperation = operation_inst.dual
        if isinstance(flattened_expr, dualoperation):
            flattened_expr = flattened_expr.distributive()
        return flattened_expr
    def normalize(self, expr, operation):
        """
        Return a normalized expression transformed to its normal form in the
        given AND or OR operation.
        The new expression arguments will satisfy these conditions:
        - ``operation(*args) == expr`` (here mathematical equality is meant)
        - the operation does not occur in any of its arg.
        - NOT is only appearing in literals (aka. Negation normal form).
        The operation must be an AND or OR operation or a subclass.
        """
        # Ensure that the operation is not NOT
        assert operation in (
            self.AND,
            self.OR,
        )
        # Move NOT inwards.
        expr = expr.literalize()
        # Simplify first otherwise _recurse_distributive() may take forever.
        expr = expr.simplify()
        operation_example = operation(self.TRUE, self.FALSE)
        # For large dual operations build up from normalized subexpressions,
        # otherwise we can get exponential blowup midway through
        expr.args = tuple(self.normalize(a, operation) for a in expr.args)
        if len(expr.args) > 1 and (
            (operation == self.AND and isinstance(expr, self.OR))
            or (operation == self.OR and isinstance(expr, self.AND))
        ):
            # rebuild the dual operation pairwise so distribution happens
            # incrementally instead of all at once
            args = expr.args
            expr_class = expr.__class__
            expr = args[0]
            for arg in args[1:]:
                expr = expr_class(expr, arg)
                expr = self._recurse_distributive(expr, operation_example)
                # Canonicalize
                expr = expr.simplify()
        else:
            expr = self._recurse_distributive(expr, operation_example)
            # Canonicalize
            expr = expr.simplify()
        return expr
    def cnf(self, expr):
        """
        Return a conjunctive normal form of the `expr` expression.
        """
        return self.normalize(expr, self.AND)
    conjunctive_normal_form = cnf
    def dnf(self, expr):
        """
        Return a disjunctive normal form of the `expr` expression.
        """
        return self.normalize(expr, self.OR)
    disjunctive_normal_form = dnf
class Expression(object):
    """
    Abstract base class for all boolean expressions, including functions and
    variable symbols.
    """
    # these class attributes are configured when a new BooleanAlgebra is created
    TRUE = None
    FALSE = None
    NOT = None
    AND = None
    OR = None
    Symbol = None
    def __init__(self):
        # Defines sort and comparison order between expressions arguments
        self.sort_order = None
        # Store arguments aka. subterms of this expressions.
        # subterms are either literals or expressions.
        self.args = tuple()
        # True is this is a literal expression such as a Symbol, TRUE or FALSE
        self.isliteral = False
        # True if this expression has been simplified to in canonical form.
        self.iscanonical = False
    @property
    def objects(self):
        """
        Return a set of all associated objects with this expression symbols.
        Include recursively subexpressions objects.
        """
        return set(s.obj for s in self.symbols)
    def get_literals(self):
        """
        Return a list of all the literals contained in this expression.
        Include recursively subexpressions symbols.
        This includes duplicates.
        """
        if self.isliteral:
            return [self]
        if not self.args:
            return []
        return list(itertools.chain.from_iterable(arg.get_literals() for arg in self.args))
    @property
    def literals(self):
        """
        Return a set of all literals contained in this expression.
        Include recursively subexpressions literals.
        """
        return set(self.get_literals())
    def literalize(self):
        """
        Return an expression where NOTs are only occurring as literals.
        Applied recursively to subexpressions.
        """
        if self.isliteral:
            return self
        args = tuple(arg.literalize() for arg in self.args)
        # avoid building a new object when no subexpression changed
        if all(arg is self.args[i] for i, arg in enumerate(args)):
            return self
        return self.__class__(*args)
    def get_symbols(self):
        """
        Return a list of all the symbols contained in this expression.
        Include subexpressions symbols recursively.
        This includes duplicates.
        """
        return [s if isinstance(s, Symbol) else s.args[0] for s in self.get_literals()]
    @property
    def symbols(
        self,
    ):
        """
        Return a list of all the symbols contained in this expression.
        Include subexpressions symbols recursively.
        This includes duplicates.
        """
        return set(self.get_symbols())
    def subs(self, substitutions, default=None, simplify=False):
        """
        Return an expression where all subterms of this expression are
        by the new expression using a `substitutions` mapping of:
        {expr: replacement}
        Return the provided `default` value if this expression has no elements,
        e.g. is empty.
        Simplify the results if `simplify` is True.
        Return this expression unmodified if nothing could be substituted. Note
        that a possible usage of this function is to check for expression
        containment as the expression will be returned unmodified if if does not
        contain any of the provided substitutions.
        """
        # shortcut: check if we have our whole expression as a possible
        # subsitution source
        for expr, substitution in substitutions.items():
            if expr == self:
                return substitution
        # otherwise, do a proper substitution of subexpressions
        expr = self._subs(substitutions, default, simplify)
        return self if expr is None else expr
    def _subs(self, substitutions, default, simplify):
        """
        Return an expression where all subterms are substituted by the new
        expression using a `substitutions` mapping of: {expr: replacement}
        """
        # track the new list of unchanged args or replaced args through
        # a substitution
        new_arguments = []
        changed_something = False
        # shortcut for basic logic True or False
        if self is self.TRUE or self is self.FALSE:
            return self
        # if the expression has no elements, e.g. is empty, do not apply
        # substitutions
        if not self.args:
            return default
        # iterate the subexpressions: either plain symbols or a subexpressions
        for arg in self.args:
            # collect substitutions for exact matches
            # break as soon as we have a match
            for expr, substitution in substitutions.items():
                if arg == expr:
                    new_arguments.append(substitution)
                    changed_something = True
                    break
            # this will execute only if we did not break out of the
            # loop, e.g. if we did not change anything and did not
            # collect any substitutions
            else:
                # recursively call _subs on each arg to see if we get a
                # substituted arg
                new_arg = arg._subs(substitutions, default, simplify)
                if new_arg is None:
                    # if we did not collect a substitution for this arg,
                    # keep the arg as-is, it is not replaced by anything
                    new_arguments.append(arg)
                else:
                    # otherwise, we add the substitution for this arg instead
                    new_arguments.append(new_arg)
                    changed_something = True
        if not changed_something:
            return
        # here we did some substitution: we return a new expression
        # built from the new_arguments
        newexpr = self.__class__(*new_arguments)
        return newexpr.simplify() if simplify else newexpr
    def simplify(self):
        """
        Return a new simplified expression in canonical form built from this
        expression. The simplified expression may be exactly the same as this
        expression.
        Subclasses override this method to compute actual simplification.
        """
        return self
    def __hash__(self):
        """
        Expressions are immutable and hashable. The hash of Functions is
        computed by respecting the structure of the whole expression by mixing
        the class name hash and the recursive hash of a frozenset of arguments.
        Hash of elements is based on their boolean equivalent. Hash of symbols
        is based on their object.
        """
        if not self.args:
            arghash = id(self)
        else:
            arghash = hash(frozenset(map(hash, self.args)))
        return hash(self.__class__.__name__) ^ arghash
    def __eq__(self, other):
        """
        Test if other element is structurally the same as itself.
        This method does not make any simplification or transformation, so it
        will return False although the expression terms may be mathematically
        equal. Use simplify() before testing equality to check the mathematical
        equality.
        For literals, plain equality is used.
        For functions, equality uses the facts that operations are:
        - commutative: order does not matter and different orders are equal.
        - idempotent: so args can appear more often in one term than in the other.
        """
        if self is other:
            return True
        if isinstance(other, self.__class__):
            # frozenset comparison implements commutativity and idempotence
            return frozenset(self.args) == frozenset(other.args)
        return NotImplemented
    def __ne__(self, other):
        return not self == other
    def __lt__(self, other):
        # ordering is defined by sort_order; ties are left undecided here
        # (subclasses such as Symbol refine them)
        if self.sort_order is not None and other.sort_order is not None:
            if self.sort_order == other.sort_order:
                return NotImplemented
            return self.sort_order < other.sort_order
        return NotImplemented
    def __gt__(self, other):
        # delegate to the reflected __lt__; fall back to negating our own
        # __lt__ when the other side cannot decide
        lt = other.__lt__(self)
        if lt is NotImplemented:
            return not self.__lt__(other)
        return lt
    def __and__(self, other):
        return self.AND(self, other)
    __mul__ = __and__
    def __invert__(self):
        return self.NOT(self)
    def __or__(self, other):
        return self.OR(self, other)
    __add__ = __or__
    def __bool__(self):
        # expressions are symbolic; they have no plain truth value
        raise TypeError("Cannot evaluate expression as a Python Boolean.")
    __nonzero__ = __bool__
class BaseElement(Expression):
    """
    Abstract base class for the base elements TRUE and FALSE of the boolean
    algebra.
    """

    def __init__(self):
        super(BaseElement, self).__init__()
        self.sort_order = 0
        self.iscanonical = True
        # The dual base element (TRUE.dual is FALSE and vice versa). This is
        # a cyclic reference, assigned only after both singletons exist.
        self.dual = None

    def __lt__(self, other):
        if not isinstance(other, BaseElement):
            return NotImplemented
        # FALSE sorts before TRUE
        return self == self.FALSE

    __nonzero__ = __bool__ = lambda s: None

    def pretty(self, indent=0, debug=False):
        """
        Return a pretty formatted representation of self.
        """
        return f"{' ' * indent}{self!r}"
class _TRUE(BaseElement):
    """
    Boolean base element TRUE.
    Not meant to be subclassed nor instantiated directly.
    """

    def __init__(self):
        super(_TRUE, self).__init__()
        # the dual (FALSE) is wired in at singleton creation time

    def __call__(self):
        return self

    def __eq__(self, other):
        if self is other or other is True:
            return True
        return isinstance(other, _TRUE)

    def __hash__(self):
        return hash(True)

    def __str__(self):
        return "1"

    def __repr__(self):
        return "TRUE"

    def __bool__(self):
        return True

    __nonzero__ = __bool__
class _FALSE(BaseElement):
    """
    Boolean base element FALSE.
    Not meant to be subclassed nor instantiated directly.
    """

    def __init__(self):
        super(_FALSE, self).__init__()
        # the dual (TRUE) is wired in at singleton creation time

    def __call__(self):
        return self

    def __eq__(self, other):
        if self is other or other is False:
            return True
        return isinstance(other, _FALSE)

    def __hash__(self):
        return hash(False)

    def __str__(self):
        return "0"

    def __repr__(self):
        return "FALSE"

    def __bool__(self):
        return False

    __nonzero__ = __bool__
class Symbol(Expression):
    """
    Boolean variable.
    A Symbol can hold an object used to determine equality between symbols.
    """

    def __init__(self, obj):
        super(Symbol, self).__init__()
        self.sort_order = 5
        # The associated object; it alone defines equality and hashing.
        self.obj = obj
        self.iscanonical = True
        self.isliteral = True

    def __call__(self, **kwargs):
        """
        Return the evaluated value for this symbol from kwargs
        """
        return kwargs[self.obj]

    def __hash__(self):
        # Anonymous symbols (obj is None) hash by identity.
        return id(self) if self.obj is None else hash(self.obj)

    def __eq__(self, other):
        if self is other:
            return True
        if isinstance(other, self.__class__):
            return self.obj == other.obj
        return NotImplemented

    def __lt__(self, other):
        ordered = Expression.__lt__(self, other)
        if ordered is not NotImplemented:
            return ordered
        if isinstance(other, Symbol):
            # same sort_order: fall back to comparing the held objects
            return self.obj < other.obj
        return NotImplemented

    def __str__(self):
        return str(self.obj)

    def __repr__(self):
        shown = f"'{self.obj}'" if isinstance(self.obj, str) else repr(self.obj)
        return f"{self.__class__.__name__}({shown})"

    def pretty(self, indent=0, debug=False):
        """
        Return a pretty formatted representation of self.
        """
        details = ""
        if debug:
            details += f"<isliteral={self.isliteral!r}, iscanonical={self.iscanonical!r}>"
        shown = f"'{self.obj}'" if isinstance(self.obj, str) else repr(self.obj)
        return (" " * indent) + f"{self.__class__.__name__}({details}{shown})"
class Function(Expression):
    """
    Boolean function.
    A boolean function takes n (one or more) boolean expressions as arguments
    where n is called the order of the function and maps them to one of the base
    elements TRUE or FALSE. Implemented functions are AND, OR and NOT.
    """
    def __init__(self, *args):
        super(Function, self).__init__()
        # Specifies an infix notation of an operator for printing such as | or &.
        self.operator = None
        assert all(
            isinstance(arg, Expression) for arg in args
        ), f"Bad arguments: all arguments must be an Expression: {args!r}"
        self.args = tuple(args)
    def __str__(self):
        args = self.args
        if len(args) == 1:
            # unary: prefix notation; parenthesize non-literal arguments
            if self.isliteral:
                return f"{self.operator}{args[0]}"
            return f"{self.operator}({args[0]})"
        # n-ary: infix notation, parenthesizing non-literal arguments
        args_str = []
        for arg in args:
            if arg.isliteral:
                args_str.append(str(arg))
            else:
                args_str.append(f"({arg})")
        return self.operator.join(args_str)
    def __repr__(self):
        args = ", ".join(map(repr, self.args))
        return f"{self.__class__.__name__}({args})"
    def pretty(self, indent=0, debug=False):
        """
        Return a pretty formatted representation of self as an indented tree.
        If debug is True, also prints debug information for each expression arg.
        For example:
        >>> print(BooleanAlgebra().parse(
        ...    u'not a and not b and not (a and ba and c) and c or c').pretty())
        OR(
          AND(
            NOT(Symbol('a')),
            NOT(Symbol('b')),
            NOT(
              AND(
                Symbol('a'),
                Symbol('ba'),
                Symbol('c')
              )
            ),
            Symbol('c')
          ),
          Symbol('c')
        )
        """
        debug_details = ""
        if debug:
            debug_details += f"<isliteral={self.isliteral!r}, iscanonical={self.iscanonical!r}"
            identity = getattr(self, "identity", None)
            if identity is not None:
                debug_details += f", identity={identity!r}"
            annihilator = getattr(self, "annihilator", None)
            if annihilator is not None:
                debug_details += f", annihilator={annihilator!r}"
            dual = getattr(self, "dual", None)
            if dual is not None:
                debug_details += f", dual={dual!r}"
            debug_details += ">"
        cls = self.__class__.__name__
        # render each argument two spaces deeper, then join on one line each
        args = [a.pretty(indent=indent + 2, debug=debug) for a in self.args]
        pfargs = ",\n".join(args)
        cur_indent = " " * indent
        new_line = "" if self.isliteral else "\n"
        return f"{cur_indent}{cls}({debug_details}{new_line}{pfargs}\n{cur_indent})"
class NOT(Function):
    """
    Boolean NOT operation.
    The NOT operation takes exactly one argument. If this argument is a Symbol
    the resulting expression is also called a literal.
    The operator "~" can be used as abbreviation for NOT, e.g. instead of NOT(x)
    one can write ~x (where x is some boolean expression). Also for printing "~"
    is used for better readability.
    You can subclass to define alternative string representation.
    For example:
    >>> class NOT2(NOT):
    ...     def __init__(self, *args):
    ...         super(NOT2, self).__init__(*args)
    ...         self.operator = '!'
    """

    def __init__(self, arg1):
        super(NOT, self).__init__(arg1)
        # The negation of a plain Symbol (e.g. ~x) is itself a literal.
        self.isliteral = isinstance(self.args[0], Symbol)
        self.operator = "~"

    def literalize(self):
        """
        Return an expression where NOTs are only occurring as literals.
        """
        # De Morgan pushes this NOT inward; if the result is still a NOT it
        # now wraps a literal and we are done, otherwise recurse into it.
        expr = self.demorgan()
        if isinstance(expr, self.__class__):
            return expr
        return expr.literalize()

    def simplify(self):
        """
        Return a simplified expr in canonical form.
        This means double negations are canceled out and all contained boolean
        objects are in their canonical form.
        """
        if self.iscanonical:
            return self
        # Cancel double negations first: ~~A -> A.
        expr = self.cancel()
        if not isinstance(expr, self.__class__):
            # The negations collapsed entirely; simplify what remains.
            return expr.simplify()
        # ~TRUE -> FALSE and ~FALSE -> TRUE via the base elements' duals.
        if expr.args[0] in (
            self.TRUE,
            self.FALSE,
        ):
            return expr.args[0].dual
        expr = self.__class__(expr.args[0].simplify())
        expr.iscanonical = True
        return expr

    def cancel(self):
        """
        Cancel itself and following NOTs as far as possible.
        Returns the simplified expression.
        """
        # Walk down chains of nested NOTs two at a time; an even count
        # cancels completely, an odd count leaves one NOT standing.
        expr = self
        while True:
            arg = expr.args[0]
            if not isinstance(arg, self.__class__):
                return expr
            expr = arg.args[0]
            if not isinstance(expr, self.__class__):
                return expr

    def demorgan(self):
        """
        Return a expr where the NOT function is moved inward.
        This is achieved by canceling double NOTs and using De Morgan laws.
        """
        expr = self.cancel()
        if expr.isliteral or not isinstance(expr, self.NOT):
            return expr
        op = expr.args[0]
        # De Morgan: ~(A & B) -> ~A | ~B and ~(A | B) -> ~A & ~B,
        # canceling any double negations created along the way.
        return op.dual(*(self.__class__(arg).cancel() for arg in op.args))

    def __call__(self, **kwargs):
        """
        Return the evaluated (negated) value for this function.
        """
        return not self.args[0](**kwargs)

    def __lt__(self, other):
        # A negation sorts like the subexpression it negates.
        return self.args[0] < other

    def pretty(self, indent=1, debug=False):
        """
        Return a pretty formatted representation of self.
        Include additional debug details if `debug` is True.
        """
        debug_details = ""
        if debug:
            debug_details += f"<isliteral={self.isliteral!r}, iscanonical={self.iscanonical!r}>"
        if self.isliteral:
            # Literal negations (~symbol) are rendered on a single line.
            pretty_literal = self.args[0].pretty(indent=0, debug=debug)
            return (" " * indent) + f"{self.__class__.__name__}({debug_details}{pretty_literal})"
        else:
            return super(NOT, self).pretty(indent=indent, debug=debug)
class DualBase(Function):
    """
    Base class for AND and OR function.
    This class uses the duality principle to combine similar methods of AND
    and OR. Both operations take two or more arguments and can be created using
    "|" for OR and "&" for AND.
    """

    # Python-level operator (operator.and_ / operator.or_) applied by __call__.
    _pyoperator = None

    def __init__(self, arg1, arg2, *args):
        super(DualBase, self).__init__(arg1, arg2, *args)
        # identity element for the specific operation.
        # This will be TRUE for the AND operation and FALSE for the OR operation.
        self.identity = None
        # annihilator element for this function.
        # This will be FALSE for the AND operation and TRUE for the OR operation.
        self.annihilator = None
        # dual class of this function.
        # This means OR.dual returns AND and AND.dual returns OR.
        self.dual = None

    def __contains__(self, expr):
        """
        Test if expr is a subterm of this expression.
        """
        if expr in self.args:
            return True
        if isinstance(expr, self.__class__):
            return all(arg in self.args for arg in expr.args)
        # FIX: previously the method fell through and returned None implicitly.
        # Return an explicit bool; None and False are both falsy, so existing
        # truthiness-based callers are unaffected.
        return False

    def simplify(self, sort=True):
        """
        Return a new simplified expression in canonical form from this
        expression.
        For simplification of AND and OR the following rules are used
        recursively bottom up:
        - Associativity (output does not contain same operations nested)::
            (A & B) & C = A & (B & C) = A & B & C
            (A | B) | C = A | (B | C) = A | B | C
        - Annihilation::
            A & 0 = 0, A | 1 = 1
        - Idempotence (e.g. removing duplicates)::
            A & A = A, A | A = A
        - Identity::
            A & 1 = A, A | 0 = A
        - Complementation::
            A & ~A = 0, A | ~A = 1
        - Elimination::
            (A & B) | (A & ~B) = A, (A | B) & (A | ~B) = A
        - Absorption::
            A & (A | B) = A, A | (A & B) = A
        - Negative absorption::
            A & (~A | B) = A & B, A | (~A & B) = A | B
        - Commutativity (output is always sorted)::
            A & B = B & A, A | B = B | A
        Other boolean objects are also in their canonical form.
        """
        # TODO: Refactor DualBase.simplify into different "sub-evals".
        # If self is already canonical do nothing.
        if self.iscanonical:
            return self
        # Otherwise bring arguments into canonical form.
        args = [arg.simplify() for arg in self.args]
        # Create new instance of own class with canonical args.
        # TODO: Only create new class if some args changed.
        expr = self.__class__(*args)
        # Literalize before doing anything, this also applies De Morgan's Law
        expr = expr.literalize()
        # Associativity:
        #     (A & B) & C = A & (B & C) = A & B & C
        #     (A | B) | C = A | (B | C) = A | B | C
        expr = expr.flatten()
        # Annihilation: A & 0 = 0, A | 1 = 1
        if self.annihilator in expr.args:
            return self.annihilator
        # Idempotence: A & A = A, A | A = A
        # this boils down to removing duplicates
        args = []
        for arg in expr.args:
            if arg not in args:
                args.append(arg)
        if len(args) == 1:
            return args[0]
        # Identity: A & 1 = A, A | 0 = A
        if self.identity in args:
            args.remove(self.identity)
            if len(args) == 1:
                return args[0]
        # Complementation: A & ~A = 0, A | ~A = 1
        for arg in args:
            if self.NOT(arg) in args:
                return self.annihilator
        # Elimination: (A & B) | (A & ~B) = A, (A | B) & (A | ~B) = A
        i = 0
        while i < len(args) - 1:
            j = i + 1
            ai = args[i]
            if not isinstance(ai, self.dual):
                i += 1
                continue
            while j < len(args):
                aj = args[j]
                if not isinstance(aj, self.dual) or len(ai.args) != len(aj.args):
                    j += 1
                    continue
                # Find terms where only one arg is different.
                negated = None
                for arg in ai.args:
                    if arg in aj.args:
                        # Shared argument: it cannot be the single differing
                        # arg, so keep scanning the remaining args.
                        pass
                    elif self.NOT(arg).cancel() in aj.args:
                        if negated is None:
                            negated = arg
                        else:
                            # More than one negated difference: not eliminable.
                            negated = None
                            break
                    else:
                        negated = None
                        break
                # If the different arg is a negation simplify the expr.
                if negated is not None:
                    # Cancel out one of the two terms.
                    del args[j]
                    aiargs = list(ai.args)
                    aiargs.remove(negated)
                    if len(aiargs) == 1:
                        args[i] = aiargs[0]
                    else:
                        args[i] = self.dual(*aiargs)
                    if len(args) == 1:
                        return args[0]
                    else:
                        # Now the other simplifications have to be redone.
                        return self.__class__(*args).simplify()
                j += 1
            i += 1
        # Absorption: A & (A | B) = A, A | (A & B) = A
        # Negative absorption: A & (~A | B) = A & B, A | (~A & B) = A | B
        args = self.absorb(args)
        if len(args) == 1:
            return args[0]
        # Commutativity: A & B = B & A, A | B = B | A
        if sort:
            args.sort()
        # Create new (now canonical) expression.
        expr = self.__class__(*args)
        expr.iscanonical = True
        return expr

    def flatten(self):
        """
        Return a new expression where nested terms of this expression are
        flattened as far as possible.
        E.g.::
            A & (B & C) becomes A & B & C.
        """
        args = list(self.args)
        i = 0
        for arg in self.args:
            if isinstance(arg, self.__class__):
                # Splice the same-operation nested args in place.
                args[i : i + 1] = arg.args
                i += len(arg.args)
            else:
                i += 1
        return self.__class__(*args)

    def absorb(self, args):
        """
        Given an `args` sequence of expressions, return a new list of expression
        applying absorption and negative absorption.
        See https://en.wikipedia.org/wiki/Absorption_law
        Absorption::
            A & (A | B) = A, A | (A & B) = A
        Negative absorption::
            A & (~A | B) = A & B, A | (~A & B) = A | B
        """
        args = list(args)
        if not args:
            args = list(self.args)
        i = 0
        while i < len(args):
            absorber = args[i]
            j = 0
            while j < len(args):
                if j == i:
                    j += 1
                    continue
                target = args[j]
                if not isinstance(target, self.dual):
                    j += 1
                    continue
                # Absorption
                if absorber in target:
                    del args[j]
                    if j < i:
                        i -= 1
                    continue
                # Negative absorption
                neg_absorber = self.NOT(absorber).cancel()
                if neg_absorber in target:
                    b = target.subtract(neg_absorber, simplify=False)
                    if b is None:
                        del args[j]
                        if j < i:
                            i -= 1
                        continue
                    else:
                        args[j] = b
                        j += 1
                        continue
                if isinstance(absorber, self.dual):
                    remove = None
                    for arg in absorber.args:
                        narg = self.NOT(arg).cancel()
                        if arg in target.args:
                            pass
                        elif narg in target.args:
                            if remove is None:
                                remove = narg
                            else:
                                remove = None
                                break
                        else:
                            remove = None
                            break
                    if remove is not None:
                        args[j] = target.subtract(remove, simplify=True)
                j += 1
            i += 1
        return args

    def subtract(self, expr, simplify):
        """
        Return a new expression where the `expr` expression has been removed
        from this expression if it exists.
        """
        args = self.args
        if expr in self.args:
            args = list(self.args)
            args.remove(expr)
        elif isinstance(expr, self.__class__):
            if all(arg in self.args for arg in expr.args):
                args = tuple(arg for arg in self.args if arg not in expr)
        if len(args) == 0:
            return None
        if len(args) == 1:
            return args[0]
        newexpr = self.__class__(*args)
        if simplify:
            newexpr = newexpr.simplify()
        return newexpr

    def distributive(self):
        """
        Return a term where the leading AND or OR terms are switched.
        This is done by applying the distributive laws::
            A & (B|C) = (A&B) | (A&C)
            A | (B&C) = (A|B) & (A|C)
        """
        dual = self.dual
        args = list(self.args)
        for i, arg in enumerate(args):
            if isinstance(arg, dual):
                args[i] = arg.args
            else:
                args[i] = (arg,)
        # Cartesian product of the argument groups implements distribution.
        prod = itertools.product(*args)
        args = tuple(self.__class__(*arg).simplify() for arg in prod)
        if len(args) == 1:
            return args[0]
        else:
            return dual(*args)

    def __lt__(self, other):
        comparator = Expression.__lt__(self, other)
        if comparator is not NotImplemented:
            return comparator
        if isinstance(other, self.__class__):
            # Lexicographic comparison of args, then by argument count.
            lenself = len(self.args)
            lenother = len(other.args)
            for i in range(min(lenself, lenother)):
                if self.args[i] == other.args[i]:
                    continue
                comparator = self.args[i] < other.args[i]
                if comparator is not NotImplemented:
                    return comparator
            if lenself != lenother:
                return lenself < lenother
        return NotImplemented

    def __call__(self, **kwargs):
        """
        Return the evaluation of this expression by calling each of its arg as
        arg(**kwargs) and applying its corresponding Python operator (and or or)
        to the results.
        Reduce is used as in e.g. AND(a, b, c, d) == AND(a, AND(b, AND(c, d)))
        or e.g. OR(a, b, c, d) == OR(a, OR(b, OR(c, d)))
        """
        return reduce(self._pyoperator, (a(**kwargs) for a in self.args))
class AND(DualBase):
    """
    Boolean AND operation, taking two or more arguments.
    It can also be created by using "&" between two boolean expressions.
    To customize how an AND renders, subclass and override self.operator.
    For example:
    >>> class AND2(AND):
    ...     def __init__(self, *args):
    ...         super(AND2, self).__init__(*args)
    ...         self.operator = 'AND'
    """

    # Python-level operator used by DualBase.__call__ to evaluate arguments.
    _pyoperator = and_operator

    def __init__(self, arg1, arg2, *args):
        super(AND, self).__init__(arg1, arg2, *args)
        # Infix symbol used when printing this operation.
        self.operator = "&"
        # ANDs sort before ORs (see Expression comparison ordering).
        self.sort_order = 10
        # A & 1 = A (identity), A & 0 = 0 (annihilator); OR is the dual.
        self.identity = self.TRUE
        self.annihilator = self.FALSE
        self.dual = self.OR
class OR(DualBase):
    """
    Boolean OR operation, taking two or more arguments
    It can also be created by using "|" between two boolean expressions.
    To customize how an OR renders, subclass and override self.operator.
    For example:
    >>> class OR2(OR):
    ...     def __init__(self, *args):
    ...         super(OR2, self).__init__(*args)
    ...         self.operator = 'OR'
    """

    # Python-level operator used by DualBase.__call__ to evaluate arguments.
    _pyoperator = or_operator

    def __init__(self, arg1, arg2, *args):
        super(OR, self).__init__(arg1, arg2, *args)
        # Infix symbol used when printing this operation.
        self.operator = "|"
        # ORs sort after ANDs (see Expression comparison ordering).
        self.sort_order = 25
        # A | 0 = A (identity), A | 1 = 1 (annihilator); AND is the dual.
        self.identity = self.FALSE
        self.annihilator = self.TRUE
        self.dual = self.AND
|
boolean/boolean.py
|
codereval_python_data_91
|
Given an `args` sequence of expressions, return a new list of expression
applying absorption and negative absorption.
See https://en.wikipedia.org/wiki/Absorption_law
Absorption::
A & (A | B) = A, A | (A & B) = A
Negative absorption::
A & (~A | B) = A & B, A | (~A & B) = A | B
def absorb(self, args):
    """
    Given an `args` sequence of expressions, return a new list of expression
    applying absorption and negative absorption.
    See https://en.wikipedia.org/wiki/Absorption_law
    Absorption::
        A & (A | B) = A, A | (A & B) = A
    Negative absorption::
        A & (~A | B) = A & B, A | (~A & B) = A | B
    """
    # NOTE(review): this module-level `absorb` takes `self` and is a verbatim
    # copy of DualBase.absorb defined above — it looks like a file
    # concatenation/extraction artifact rather than an intentional free
    # function; confirm and remove it.
    args = list(args)
    if not args:
        # Fall back to this expression's own args when none were given.
        args = list(self.args)
    i = 0
    while i < len(args):
        absorber = args[i]
        j = 0
        while j < len(args):
            if j == i:
                # Never compare a term against itself.
                j += 1
                continue
            target = args[j]
            if not isinstance(target, self.dual):
                # Only dual-operation subterms can be absorbed.
                j += 1
                continue
            # Absorption
            if absorber in target:
                del args[j]
                # Deleting before position i shifts i left by one.
                if j < i:
                    i -= 1
                continue
            # Negative absorption
            neg_absorber = self.NOT(absorber).cancel()
            if neg_absorber in target:
                # Drop the negated absorber from the target term.
                b = target.subtract(neg_absorber, simplify=False)
                if b is None:
                    del args[j]
                    if j < i:
                        i -= 1
                    continue
                else:
                    args[j] = b
                    j += 1
                    continue
            if isinstance(absorber, self.dual):
                # Compound absorber: look for a target differing only by one
                # negated argument, then remove that negation from the target.
                remove = None
                for arg in absorber.args:
                    narg = self.NOT(arg).cancel()
                    if arg in target.args:
                        pass
                    elif narg in target.args:
                        if remove is None:
                            remove = narg
                        else:
                            # More than one negated difference: no match.
                            remove = None
                            break
                    else:
                        remove = None
                        break
                if remove is not None:
                    args[j] = target.subtract(remove, simplify=True)
            j += 1
        i += 1
    return args
"""
Boolean expressions algebra.
This module defines a Boolean algebra over the set {TRUE, FALSE} with boolean
variables called Symbols and the boolean functions AND, OR, NOT.
Some basic logic comparison is supported: two expressions can be
compared for equivalence or containment. Furthermore you can simplify
an expression and obtain its normal form.
You can create expressions in Python using familiar boolean operators
or parse expressions from strings. The parsing can be extended with
your own tokenizer. You can also customize how expressions behave and
how they are presented.
For extensive documentation look either into the docs directory or view it
online, at https://booleanpy.readthedocs.org/en/latest/.
Copyright (c) Sebastian Kraemer, basti.kr@gmail.com and others
SPDX-License-Identifier: BSD-2-Clause
"""
import inspect
import itertools
from functools import reduce # NOQA
from operator import and_ as and_operator
from operator import or_ as or_operator
# Set to True to enable tracing for parsing
TRACE_PARSE = False

# Token types for standard operators and parens
TOKEN_AND = 1
TOKEN_OR = 2
TOKEN_NOT = 3
TOKEN_LPAR = 4
TOKEN_RPAR = 5
TOKEN_TRUE = 6
TOKEN_FALSE = 7
TOKEN_SYMBOL = 8

# Mapping of token type id -> human-readable name, for diagnostics.
TOKEN_TYPES = {
    TOKEN_AND: "AND",
    TOKEN_OR: "OR",
    TOKEN_NOT: "NOT",
    TOKEN_LPAR: "(",
    TOKEN_RPAR: ")",
    TOKEN_TRUE: "TRUE",
    TOKEN_FALSE: "FALSE",
    TOKEN_SYMBOL: "SYMBOL",
}

# parsing error code and messages
PARSE_UNKNOWN_TOKEN = 1
PARSE_UNBALANCED_CLOSING_PARENS = 2
PARSE_INVALID_EXPRESSION = 3
PARSE_INVALID_NESTING = 4
PARSE_INVALID_SYMBOL_SEQUENCE = 5
PARSE_INVALID_OPERATOR_SEQUENCE = 6

# Mapping of parse error code -> message, used by ParseError.__str__.
PARSE_ERRORS = {
    PARSE_UNKNOWN_TOKEN: "Unknown token",
    PARSE_UNBALANCED_CLOSING_PARENS: "Unbalanced parenthesis",
    PARSE_INVALID_EXPRESSION: "Invalid expression",
    PARSE_INVALID_NESTING: "Invalid expression nesting such as (AND xx)",
    PARSE_INVALID_SYMBOL_SEQUENCE: "Invalid symbols sequence such as (A B)",
    PARSE_INVALID_OPERATOR_SEQUENCE: "Invalid operator sequence without symbols such as AND OR or OR OR",
}
class ParseError(Exception):
    """
    Raised when the parser or tokenizer encounters a syntax error. Instances of
    this class have attributes token_type, token_string, position, error_code to
    access the details of the error. str() of the exception instance returns a
    formatted message.
    """

    def __init__(self, token_type=None, token_string="", position=-1, error_code=0):
        self.token_type = token_type
        self.token_string = token_string
        # Character offset of the error in the parsed string; -1 means the
        # position is unknown/not applicable.
        self.position = position
        self.error_code = error_code

    def __str__(self, *args, **kwargs):
        # *args/**kwargs are unused; kept for backward compatibility with
        # existing callers.
        emsg = PARSE_ERRORS.get(self.error_code, "Unknown parsing error")
        tstr = ""
        if self.token_string:
            tstr = f' for token: "{self.token_string}"'
        pos = ""
        # FIX: was `> 0`, which silently hid errors at position 0 — a valid
        # offset produced by the tokenizer for the first character. Only the
        # default sentinel -1 means "no position available".
        if self.position >= 0:
            pos = f" at position: {self.position}"
        return f"{emsg}{tstr}{pos}"
class BooleanAlgebra(object):
    """
    An algebra is defined by:
    - the types of its operations and Symbol.
    - the tokenizer used when parsing expressions from strings.
    This class also serves as a base class for all boolean expressions,
    including base elements, functions and variable symbols.
    """

    def __init__(
        self,
        TRUE_class=None,
        FALSE_class=None,
        Symbol_class=None,
        NOT_class=None,
        AND_class=None,
        OR_class=None,
        allowed_in_token=(".", ":", "_"),
    ):
        """
        The types for TRUE, FALSE, NOT, AND, OR and Symbol define the boolean
        algebra elements, operations and Symbol variable. They default to the
        standard classes if not provided.
        You can customize an algebra by providing alternative subclasses of the
        standard types.
        """
        # TRUE and FALSE base elements are algebra-level "singleton" instances
        self.TRUE = TRUE_class or _TRUE
        self.TRUE = self.TRUE()
        self.FALSE = FALSE_class or _FALSE
        self.FALSE = self.FALSE()
        # they cross-reference each other
        self.TRUE.dual = self.FALSE
        self.FALSE.dual = self.TRUE
        # boolean operation types, defaulting to the standard types
        self.NOT = NOT_class or NOT
        self.AND = AND_class or AND
        self.OR = OR_class or OR
        # class used for Symbols
        self.Symbol = Symbol_class or Symbol
        tf_nao = {
            "TRUE": self.TRUE,
            "FALSE": self.FALSE,
            "NOT": self.NOT,
            "AND": self.AND,
            "OR": self.OR,
            "Symbol": self.Symbol,
        }
        # setup cross references such that all algebra types and
        # objects hold a named attribute for every other types and
        # objects, including themselves.
        for obj in tf_nao.values():
            for name, value in tf_nao.items():
                setattr(obj, name, value)
        # Set the set of characters allowed in tokens
        self.allowed_in_token = allowed_in_token

    def definition(self):
        """
        Return a tuple of this algebra defined elements and types as:
        (TRUE, FALSE, NOT, AND, OR, Symbol)
        """
        return self.TRUE, self.FALSE, self.NOT, self.AND, self.OR, self.Symbol

    def symbols(self, *args):
        """
        Return a tuple of symbols building a new Symbol from each argument.
        """
        return tuple(map(self.Symbol, args))

    def parse(self, expr, simplify=False):
        """
        Return a boolean expression parsed from `expr` either a unicode string
        or tokens iterable.
        Optionally simplify the expression if `simplify` is True.
        Raise ParseError on errors.
        If `expr` is a string, the standard `tokenizer` is used for tokenization
        and the algebra configured Symbol type is used to create Symbol
        instances from Symbol tokens.
        If `expr` is an iterable, it should contain 3-tuples of: (token_type,
        token_string, token_position). In this case, the `token_type` can be
        a Symbol instance or one of the TOKEN_* constant types.
        See the `tokenize()` method for detailed specification.
        """
        # Lower value means tighter binding when deciding how to nest the AST.
        precedence = {self.NOT: 5, self.AND: 10, self.OR: 15, TOKEN_LPAR: 20}
        if isinstance(expr, str):
            tokenized = self.tokenize(expr)
        else:
            tokenized = iter(expr)
        if TRACE_PARSE:
            tokenized = list(tokenized)
            print("tokens:")
            for t in tokenized:
                print(t)
            tokenized = iter(tokenized)
        # the abstract syntax tree for this expression that will be build as we
        # process tokens
        # the first two items are None
        # symbol items are appended to this structure
        # Each AST node is a list shaped as: [parent_node, operation, *args].
        ast = [None, None]

        def is_sym(_t):
            return isinstance(_t, Symbol) or _t in (TOKEN_TRUE, TOKEN_FALSE, TOKEN_SYMBOL)

        def is_operator(_t):
            return _t in (TOKEN_AND, TOKEN_OR)

        prev_token = None
        for token_type, token_string, token_position in tokenized:
            if TRACE_PARSE:
                print(
                    "\nprocessing token_type:",
                    repr(token_type),
                    "token_string:",
                    repr(token_string),
                    "token_position:",
                    repr(token_position),
                )
            if prev_token:
                prev_token_type, _prev_token_string, _prev_token_position = prev_token
                if TRACE_PARSE:
                    print(" prev_token:", repr(prev_token))
                # Two adjacent symbols such as "A B" are invalid.
                if is_sym(prev_token_type) and (
                    is_sym(token_type)
                ):  # or token_type == TOKEN_LPAR) :
                    raise ParseError(
                        token_type, token_string, token_position, PARSE_INVALID_SYMBOL_SEQUENCE
                    )
                # An operator cannot be followed by another operator or ")".
                if is_operator(prev_token_type) and (
                    is_operator(token_type) or token_type == TOKEN_RPAR
                ):
                    raise ParseError(
                        token_type, token_string, token_position, PARSE_INVALID_OPERATOR_SEQUENCE
                    )
            else:
                # An expression cannot start with a binary operator.
                if is_operator(token_type):
                    raise ParseError(
                        token_type, token_string, token_position, PARSE_INVALID_OPERATOR_SEQUENCE
                    )
            if token_type == TOKEN_SYMBOL:
                ast.append(self.Symbol(token_string))
                if TRACE_PARSE:
                    print(" ast: token_type is TOKEN_SYMBOL: append new symbol", repr(ast))
            elif isinstance(token_type, Symbol):
                ast.append(token_type)
                if TRACE_PARSE:
                    print(" ast: token_type is Symbol): append existing symbol", repr(ast))
            elif token_type == TOKEN_TRUE:
                ast.append(self.TRUE)
                if TRACE_PARSE:
                    print(" ast: token_type is TOKEN_TRUE:", repr(ast))
            elif token_type == TOKEN_FALSE:
                ast.append(self.FALSE)
                if TRACE_PARSE:
                    print(" ast: token_type is TOKEN_FALSE:", repr(ast))
            elif token_type == TOKEN_NOT:
                # NOT starts a new nested node with the current ast as parent.
                ast = [ast, self.NOT]
                if TRACE_PARSE:
                    print(" ast: token_type is TOKEN_NOT:", repr(ast))
            elif token_type == TOKEN_AND:
                ast = self._start_operation(ast, self.AND, precedence)
                if TRACE_PARSE:
                    print(" ast:token_type is TOKEN_AND: start_operation", ast)
            elif token_type == TOKEN_OR:
                ast = self._start_operation(ast, self.OR, precedence)
                if TRACE_PARSE:
                    print(" ast:token_type is TOKEN_OR: start_operation", ast)
            elif token_type == TOKEN_LPAR:
                if prev_token:
                    # Check that an opening parens is preceded by a function
                    # or an opening parens
                    if prev_token_type not in (TOKEN_NOT, TOKEN_AND, TOKEN_OR, TOKEN_LPAR):
                        raise ParseError(
                            token_type, token_string, token_position, PARSE_INVALID_NESTING
                        )
                ast = [ast, TOKEN_LPAR]
            elif token_type == TOKEN_RPAR:
                # Unwind nested nodes until the matching "(" marker is found.
                while True:
                    if ast[0] is None:
                        raise ParseError(
                            token_type,
                            token_string,
                            token_position,
                            PARSE_UNBALANCED_CLOSING_PARENS,
                        )
                    if ast[1] is TOKEN_LPAR:
                        ast[0].append(ast[2])
                        if TRACE_PARSE:
                            print("ast9:", repr(ast))
                        ast = ast[0]
                        if TRACE_PARSE:
                            print("ast10:", repr(ast))
                        break
                    if isinstance(ast[1], int):
                        raise ParseError(
                            token_type,
                            token_string,
                            token_position,
                            PARSE_UNBALANCED_CLOSING_PARENS,
                        )
                    # the parens are properly nested
                    # the top ast node should be a function subclass
                    if not (inspect.isclass(ast[1]) and issubclass(ast[1], Function)):
                        raise ParseError(
                            token_type, token_string, token_position, PARSE_INVALID_NESTING
                        )
                    subex = ast[1](*ast[2:])
                    ast[0].append(subex)
                    if TRACE_PARSE:
                        print("ast11:", repr(ast))
                    ast = ast[0]
                    if TRACE_PARSE:
                        print("ast12:", repr(ast))
            else:
                raise ParseError(token_type, token_string, token_position, PARSE_UNKNOWN_TOKEN)
            prev_token = (token_type, token_string, token_position)
        try:
            # Collapse any remaining open nodes into their parents, bottom up.
            while True:
                if ast[0] is None:
                    if TRACE_PARSE:
                        print("ast[0] is None:", repr(ast))
                    if ast[1] is None:
                        if TRACE_PARSE:
                            print(" ast[1] is None:", repr(ast))
                        if len(ast) != 3:
                            raise ParseError(error_code=PARSE_INVALID_EXPRESSION)
                        parsed = ast[2]
                        if TRACE_PARSE:
                            print(" parsed = ast[2]:", repr(parsed))
                    else:
                        # call the function in ast[1] with the rest of the ast as args
                        parsed = ast[1](*ast[2:])
                        if TRACE_PARSE:
                            print(" parsed = ast[1](*ast[2:]):", repr(parsed))
                    break
                else:
                    if TRACE_PARSE:
                        print("subex = ast[1](*ast[2:]):", repr(ast))
                    subex = ast[1](*ast[2:])
                    ast[0].append(subex)
                    if TRACE_PARSE:
                        print(" ast[0].append(subex):", repr(ast))
                    ast = ast[0]
                    if TRACE_PARSE:
                        print(" ast = ast[0]:", repr(ast))
        except TypeError:
            # e.g. a function called with too few args (dangling operator).
            raise ParseError(error_code=PARSE_INVALID_EXPRESSION)
        if simplify:
            return parsed.simplify()
        if TRACE_PARSE:
            print("final parsed:", repr(parsed))
        return parsed

    def _start_operation(self, ast, operation, precedence):
        """
        Return an AST where all operations of lower precedence are finalized.
        """
        if TRACE_PARSE:
            print(" start_operation:", repr(operation), "AST:", ast)
        op_prec = precedence[operation]
        while True:
            if ast[1] is None:
                # [None, None, x]
                if TRACE_PARSE:
                    print(" start_op: ast[1] is None:", repr(ast))
                ast[1] = operation
                if TRACE_PARSE:
                    print(" --> start_op: ast[1] is None:", repr(ast))
                return ast
            prec = precedence[ast[1]]
            if prec > op_prec:  # op=&, [ast, |, x, y] -> [[ast, |, x], &, y]
                if TRACE_PARSE:
                    print(" start_op: prec > op_prec:", repr(ast))
                ast = [ast, operation, ast.pop(-1)]
                if TRACE_PARSE:
                    print(" --> start_op: prec > op_prec:", repr(ast))
                return ast
            if prec == op_prec:  # op=&, [ast, &, x] -> [ast, &, x]
                if TRACE_PARSE:
                    print(" start_op: prec == op_prec:", repr(ast))
                return ast
            if not (inspect.isclass(ast[1]) and issubclass(ast[1], Function)):
                # the top ast node should be a function subclass at this stage
                raise ParseError(error_code=PARSE_INVALID_NESTING)
            if ast[0] is None:  # op=|, [None, &, x, y] -> [None, |, x&y]
                if TRACE_PARSE:
                    print(" start_op: ast[0] is None:", repr(ast))
                subexp = ast[1](*ast[2:])
                new_ast = [ast[0], operation, subexp]
                if TRACE_PARSE:
                    print(" --> start_op: ast[0] is None:", repr(new_ast))
                return new_ast
            else:  # op=|, [[ast, &, x], ~, y] -> [ast, &, x, ~y]
                if TRACE_PARSE:
                    print(" start_op: else:", repr(ast))
                ast[0].append(ast[1](*ast[2:]))
                ast = ast[0]
                if TRACE_PARSE:
                    print(" --> start_op: else:", repr(ast))

    def tokenize(self, expr):
        """
        Return an iterable of 3-tuple describing each token given an expression
        unicode string.
        This 3-tuple contains (token, token string, position):
        - token: either a Symbol instance or one of TOKEN_* token types.
        - token string: the original token unicode string.
        - position: some simple object describing the starting position of the
          original token string in the `expr` string. It can be an int for a
          character offset, or a tuple of starting (row/line, column).
        The token position is used only for error reporting and can be None or
        empty.
        Raise ParseError on errors. The ParseError.args is a tuple of:
        (token_string, position, error message)
        You can use this tokenizer as a base to create specialized tokenizers
        for your custom algebra by subclassing BooleanAlgebra. See also the
        tests for other examples of alternative tokenizers.
        This tokenizer has these characteristics:
        - The `expr` string can span multiple lines,
        - Whitespace is not significant.
        - The returned position is the starting character offset of a token.
        - A TOKEN_SYMBOL is returned for valid identifiers which is a string
          without spaces.
        - These are valid identifiers:
          - Python identifiers.
          - a string even if starting with digits
          - digits (except for 0 and 1).
          - dotted names : foo.bar consist of one token.
          - names with colons: foo:bar consist of one token.
        - These are not identifiers:
          - quoted strings.
          - any punctuation which is not an operation
        - Recognized operators are (in any upper/lower case combinations):
          - for and: '*', '&', 'and'
          - for or: '+', '|', 'or'
          - for not: '~', '!', 'not'
        - Recognized special symbols are (in any upper/lower case combinations):
          - True symbols: 1 and True
          - False symbols: 0, False and None
        """
        if not isinstance(expr, str):
            raise TypeError(f"expr must be string but it is {type(expr)}.")
        # mapping of lowercase token strings to a token type id for the standard
        # operators, parens and common true or false symbols, as used in the
        # default tokenizer implementation.
        TOKENS = {
            "*": TOKEN_AND,
            "&": TOKEN_AND,
            "and": TOKEN_AND,
            "+": TOKEN_OR,
            "|": TOKEN_OR,
            "or": TOKEN_OR,
            "~": TOKEN_NOT,
            "!": TOKEN_NOT,
            "not": TOKEN_NOT,
            "(": TOKEN_LPAR,
            ")": TOKEN_RPAR,
            "[": TOKEN_LPAR,
            "]": TOKEN_RPAR,
            "true": TOKEN_TRUE,
            "1": TOKEN_TRUE,
            "false": TOKEN_FALSE,
            "0": TOKEN_FALSE,
            "none": TOKEN_FALSE,
        }
        position = 0
        length = len(expr)
        while position < length:
            tok = expr[position]
            # A token is either a symbol-ish run of characters or a single
            # punctuation/operator character.
            sym = tok.isalnum() or tok == "_"
            if sym:
                position += 1
                while position < length:
                    char = expr[position]
                    if char.isalnum() or char in self.allowed_in_token:
                        position += 1
                        tok += char
                    else:
                        break
                position -= 1
            try:
                # NOTE(review): for multi-character tokens this yields the
                # offset of the token's LAST character, not its start, despite
                # the docstring — confirm before relying on exact positions.
                yield TOKENS[tok.lower()], tok, position
            except KeyError:
                if sym:
                    yield TOKEN_SYMBOL, tok, position
                elif tok not in (" ", "\t", "\r", "\n"):
                    raise ParseError(
                        token_string=tok, position=position, error_code=PARSE_UNKNOWN_TOKEN
                    )
            position += 1

    def _recurse_distributive(self, expr, operation_inst):
        """
        Recursively flatten, simplify and apply the distributive laws to the
        `expr` expression. Distributivity is considered for the AND or OR
        `operation_inst` instance.
        """
        if expr.isliteral:
            return expr
        args = (self._recurse_distributive(arg, operation_inst) for arg in expr.args)
        args = tuple(arg.simplify() for arg in args)
        if len(args) == 1:
            return args[0]
        flattened_expr = expr.__class__(*args)
        dualoperation = operation_inst.dual
        # Only distribute when this node is the dual of the target operation.
        if isinstance(flattened_expr, dualoperation):
            flattened_expr = flattened_expr.distributive()
        return flattened_expr

    def normalize(self, expr, operation):
        """
        Return a normalized expression transformed to its normal form in the
        given AND or OR operation.
        The new expression arguments will satisfy these conditions:
        - ``operation(*args) == expr`` (here mathematical equality is meant)
        - the operation does not occur in any of its arg.
        - NOT is only appearing in literals (aka. Negation normal form).
        The operation must be an AND or OR operation or a subclass.
        """
        # Ensure that the operation is not NOT
        assert operation in (
            self.AND,
            self.OR,
        )
        # Move NOT inwards.
        expr = expr.literalize()
        # Simplify first otherwise _recurse_distributive() may take forever.
        expr = expr.simplify()
        operation_example = operation(self.TRUE, self.FALSE)
        # For large dual operations build up from normalized subexpressions,
        # otherwise we can get exponential blowup midway through
        expr.args = tuple(self.normalize(a, operation) for a in expr.args)
        if len(expr.args) > 1 and (
            (operation == self.AND and isinstance(expr, self.OR))
            or (operation == self.OR and isinstance(expr, self.AND))
        ):
            # Fold args in one at a time, normalizing after each fold so
            # intermediate expressions stay small.
            args = expr.args
            expr_class = expr.__class__
            expr = args[0]
            for arg in args[1:]:
                expr = expr_class(expr, arg)
                expr = self._recurse_distributive(expr, operation_example)
                # Canonicalize
                expr = expr.simplify()
        else:
            expr = self._recurse_distributive(expr, operation_example)
            # Canonicalize
            expr = expr.simplify()
        return expr

    def cnf(self, expr):
        """
        Return a conjunctive normal form of the `expr` expression.
        """
        return self.normalize(expr, self.AND)

    conjunctive_normal_form = cnf

    def dnf(self, expr):
        """
        Return a disjunctive normal form of the `expr` expression.
        """
        return self.normalize(expr, self.OR)

    disjunctive_normal_form = dnf
class Expression(object):
    """
    Abstract base class for all boolean expressions, including functions and
    variable symbols.
    """

    # these class attributes are configured when a new BooleanAlgebra is created
    # (BooleanAlgebra.__init__ installs cross-references on all algebra types)
    TRUE = None
    FALSE = None
    NOT = None
    AND = None
    OR = None
    Symbol = None
    def __init__(self):
        """Initialize a bare expression with no arguments."""
        # Defines sort and comparison order between expressions arguments
        self.sort_order = None
        # Store arguments aka. subterms of this expressions.
        # subterms are either literals or expressions.
        self.args = tuple()
        # True is this is a literal expression such as a Symbol, TRUE or FALSE
        self.isliteral = False
        # True if this expression has been simplified to in canonical form.
        self.iscanonical = False
@property
def objects(self):
"""
Return a set of all associated objects with this expression symbols.
Include recursively subexpressions objects.
"""
return set(s.obj for s in self.symbols)
def get_literals(self):
"""
Return a list of all the literals contained in this expression.
Include recursively subexpressions symbols.
This includes duplicates.
"""
if self.isliteral:
return [self]
if not self.args:
return []
return list(itertools.chain.from_iterable(arg.get_literals() for arg in self.args))
    @property
    def literals(self):
        """
        Return a set of all literals contained in this expression.
        Include recursively subexpressions literals.
        """
        # Deduplicated; use get_literals() to keep duplicates.
        return set(self.get_literals())
    def literalize(self):
        """
        Return an expression where NOTs are only occurring as literals.
        Applied recursively to subexpressions.
        """
        if self.isliteral:
            return self
        args = tuple(arg.literalize() for arg in self.args)
        # Preserve identity: return self unchanged when no subterm changed.
        if all(arg is self.args[i] for i, arg in enumerate(args)):
            return self
        return self.__class__(*args)
def get_symbols(self):
"""
Return a list of all the symbols contained in this expression.
Include subexpressions symbols recursively.
This includes duplicates.
"""
return [s if isinstance(s, Symbol) else s.args[0] for s in self.get_literals()]
    @property
    def symbols(
        self,
    ):
        """
        Return a list of all the symbols contained in this expression.
        Include subexpressions symbols recursively.
        This includes duplicates.
        """
        # NOTE: despite the docstring, this returns a SET (deduplicated);
        # use get_symbols() for the list with duplicates.
        return set(self.get_symbols())
def subs(self, substitutions, default=None, simplify=False):
"""
Return an expression where all subterms of this expression are
by the new expression using a `substitutions` mapping of:
{expr: replacement}
Return the provided `default` value if this expression has no elements,
e.g. is empty.
Simplify the results if `simplify` is True.
Return this expression unmodified if nothing could be substituted. Note
that a possible usage of this function is to check for expression
containment as the expression will be returned unmodified if if does not
contain any of the provided substitutions.
"""
# shortcut: check if we have our whole expression as a possible
# subsitution source
for expr, substitution in substitutions.items():
if expr == self:
return substitution
# otherwise, do a proper substitution of subexpressions
expr = self._subs(substitutions, default, simplify)
return self if expr is None else expr
    def _subs(self, substitutions, default, simplify):
        """
        Return an expression where all subterms are substituted by the new
        expression using a `substitutions` mapping of: {expr: replacement}.

        Return None when nothing was substituted, and `default` when this
        expression has no arguments (is empty). `simplify` triggers a
        simplification of any newly built expression.
        """
        # track the new list of unchanged args or replaced args through
        # a substitution
        new_arguments = []
        changed_something = False

        # shortcut for basic logic True or False
        if self is self.TRUE or self is self.FALSE:
            return self

        # if the expression has no elements, e.g. is empty, do not apply
        # substitutions
        if not self.args:
            return default

        # iterate the subexpressions: either plain symbols or a subexpressions
        for arg in self.args:
            # collect substitutions for exact matches
            # break as soon as we have a match
            for expr, substitution in substitutions.items():
                if arg == expr:
                    new_arguments.append(substitution)
                    changed_something = True
                    break

            # this will execute only if we did not break out of the
            # loop, e.g. if we did not change anything and did not
            # collect any substitutions
            else:
                # recursively call _subs on each arg to see if we get a
                # substituted arg
                new_arg = arg._subs(substitutions, default, simplify)
                if new_arg is None:
                    # if we did not collect a substitution for this arg,
                    # keep the arg as-is, it is not replaced by anything
                    new_arguments.append(arg)
                else:
                    # otherwise, we add the substitution for this arg instead
                    new_arguments.append(new_arg)
                    changed_something = True

        if not changed_something:
            return

        # here we did some substitution: we return a new expression
        # built from the new_arguments
        newexpr = self.__class__(*new_arguments)
        return newexpr.simplify() if simplify else newexpr
    def simplify(self):
        """
        Return a new simplified expression in canonical form built from this
        expression. The simplified expression may be exactly the same as this
        expression.

        Subclasses override this method to compute actual simplification;
        the base implementation is the identity.
        """
        return self
def __hash__(self):
"""
Expressions are immutable and hashable. The hash of Functions is
computed by respecting the structure of the whole expression by mixing
the class name hash and the recursive hash of a frozenset of arguments.
Hash of elements is based on their boolean equivalent. Hash of symbols
is based on their object.
"""
if not self.args:
arghash = id(self)
else:
arghash = hash(frozenset(map(hash, self.args)))
return hash(self.__class__.__name__) ^ arghash
    def __eq__(self, other):
        """
        Test if other element is structurally the same as itself.

        This method does not make any simplification or transformation, so it
        will return False although the expression terms may be mathematically
        equal. Use simplify() before testing equality to check the mathematical
        equality.

        For literals, plain equality is used.
        For functions, equality uses the facts that operations are:

        - commutative: order does not matter and different orders are equal.
        - idempotent: so args can appear more often in one term than in the other.
        """
        if self is other:
            return True

        if isinstance(other, self.__class__):
            # frozenset comparison implements commutativity (order ignored)
            # and idempotence (duplicates ignored) in one step.
            return frozenset(self.args) == frozenset(other.args)

        return NotImplemented
    def __ne__(self, other):
        """Inverse of __eq__, delegating to the full `==` protocol."""
        return not self == other
    def __lt__(self, other):
        """
        Order expressions by their `sort_order` class rank; equal ranks are
        left undecided (NotImplemented) so subclass comparisons can refine.
        """
        if self.sort_order is not None and other.sort_order is not None:
            if self.sort_order == other.sort_order:
                # Same rank: let the subclass-specific comparison decide.
                return NotImplemented
            return self.sort_order < other.sort_order
        return NotImplemented
    def __gt__(self, other):
        """
        Greater-than built from the reflected __lt__, falling back to negating
        self.__lt__ when the reflection is undecided.
        """
        lt = other.__lt__(self)
        if lt is NotImplemented:
            # NOTE(review): if self.__lt__ also returns NotImplemented this
            # negates the (truthy) NotImplemented singleton and yields False —
            # presumably intended as a total-order fallback; confirm.
            return not self.__lt__(other)
        return lt
    def __and__(self, other):
        """Build an AND term with `self & other` (also spelled `*`)."""
        return self.AND(self, other)

    __mul__ = __and__

    def __invert__(self):
        """Build a NOT term with `~self`."""
        return self.NOT(self)

    def __or__(self, other):
        """Build an OR term with `self | other` (also spelled `+`)."""
        return self.OR(self, other)

    __add__ = __or__

    def __bool__(self):
        """Symbolic expressions have no truth value: refuse boolean coercion."""
        raise TypeError("Cannot evaluate expression as a Python Boolean.")

    __nonzero__ = __bool__
class BaseElement(Expression):
    """
    Abstract base class for the base elements TRUE and FALSE of the boolean
    algebra.
    """

    def __init__(self):
        super(BaseElement, self).__init__()
        # Base elements sort before everything else and are always canonical.
        self.sort_order = 0
        self.iscanonical = True

        # The dual Base Element class for this element: TRUE.dual returns
        # _FALSE() and FALSE.dual returns _TRUE(). This is a cyclic reference
        # and therefore only assigned after creation of the singletons,
        self.dual = None

    def __lt__(self, other):
        """FALSE compares less-than any base element; TRUE does not."""
        if isinstance(other, BaseElement):
            return self == self.FALSE
        return NotImplemented

    # NOTE(review): returning None from __bool__ raises a TypeError at
    # runtime, so the abstract base refuses Python truth-testing; the
    # concrete _TRUE/_FALSE singletons override this with real booleans.
    __nonzero__ = __bool__ = lambda s: None

    def pretty(self, indent=0, debug=False):
        """
        Return a pretty formatted representation of self.
        """
        return (" " * indent) + repr(self)
class _TRUE(BaseElement):
    """
    The TRUE element of the boolean algebra.

    Not meant to be subclassed nor instantiated directly.
    """

    def __init__(self):
        super(_TRUE, self).__init__()
        # self.dual is wired to the FALSE singleton once both exist.

    def __hash__(self):
        # Hash like Python's True so TRUE and True agree in hashed containers.
        return hash(True)

    def __eq__(self, other):
        if self is other:
            return True
        return other is True or isinstance(other, _TRUE)

    def __str__(self):
        return "1"

    def __repr__(self):
        return "TRUE"

    def __call__(self):
        # Evaluating TRUE yields TRUE itself.
        return self

    __nonzero__ = __bool__ = lambda s: True
class _FALSE(BaseElement):
    """
    The FALSE element of the boolean algebra.

    Not meant to be subclassed nor instantiated directly.
    """

    def __init__(self):
        super(_FALSE, self).__init__()
        # self.dual is wired to the TRUE singleton once both exist.

    def __hash__(self):
        # Hash like Python's False so FALSE and False agree in hashed containers.
        return hash(False)

    def __eq__(self, other):
        if self is other:
            return True
        return other is False or isinstance(other, _FALSE)

    def __str__(self):
        return "0"

    def __repr__(self):
        return "FALSE"

    def __call__(self):
        # Evaluating FALSE yields FALSE itself.
        return self

    __nonzero__ = __bool__ = lambda s: False
class Symbol(Expression):
    """
    A boolean variable.

    A Symbol can carry an associated object; that object drives equality and
    hashing between symbols.
    """

    def __init__(self, obj):
        super(Symbol, self).__init__()
        self.sort_order = 5
        # The associated object determines equality between symbols.
        self.obj = obj
        # Symbols are literals and always canonical.
        self.iscanonical = True
        self.isliteral = True

    def __call__(self, **kwargs):
        """Evaluate this symbol by looking up its object in kwargs."""
        return kwargs[self.obj]

    def __hash__(self):
        # Anonymous symbols (obj is None) hash on identity.
        return id(self) if self.obj is None else hash(self.obj)

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.obj == other.obj

    def __lt__(self, other):
        ordered = Expression.__lt__(self, other)
        if ordered is not NotImplemented:
            return ordered
        if isinstance(other, Symbol):
            # Same rank: order by the associated objects.
            return self.obj < other.obj
        return NotImplemented

    def __str__(self):
        return str(self.obj)

    def __repr__(self):
        obj = f"'{self.obj}'" if isinstance(self.obj, str) else repr(self.obj)
        return f"{self.__class__.__name__}({obj})"

    def pretty(self, indent=0, debug=False):
        """
        Return a pretty formatted representation of self.
        """
        debug_details = ""
        if debug:
            debug_details += f"<isliteral={self.isliteral!r}, iscanonical={self.iscanonical!r}>"
        obj = f"'{self.obj}'" if isinstance(self.obj, str) else repr(self.obj)
        return (" " * indent) + f"{self.__class__.__name__}({debug_details}{obj})"
class Function(Expression):
    """
    Boolean function.

    A boolean function takes n (one or more) boolean expressions as arguments
    where n is called the order of the function and maps them to one of the base
    elements TRUE or FALSE. Implemented functions are AND, OR and NOT.
    """

    def __init__(self, *args):
        super(Function, self).__init__()

        # Specifies an infix notation of an operator for printing such as | or &.
        self.operator = None

        assert all(
            isinstance(arg, Expression) for arg in args
        ), f"Bad arguments: all arguments must be an Expression: {args!r}"
        self.args = tuple(args)

    def __str__(self):
        """Render infix notation, parenthesizing non-literal arguments."""
        args = self.args
        if len(args) == 1:
            # Unary case (NOT): prefix notation.
            if self.isliteral:
                return f"{self.operator}{args[0]}"
            return f"{self.operator}({args[0]})"

        args_str = []
        for arg in args:
            if arg.isliteral:
                args_str.append(str(arg))
            else:
                args_str.append(f"({arg})")

        return self.operator.join(args_str)

    def __repr__(self):
        args = ", ".join(map(repr, self.args))
        return f"{self.__class__.__name__}({args})"

    def pretty(self, indent=0, debug=False):
        """
        Return a pretty formatted representation of self as an indented tree.

        If debug is True, also prints debug information for each expression arg.

        For example:
        >>> print(BooleanAlgebra().parse(
        ...     u'not a and not b and not (a and ba and c) and c or c').pretty())
        OR(
          AND(
            NOT(Symbol('a')),
            NOT(Symbol('b')),
            NOT(
              AND(
                Symbol('a'),
                Symbol('ba'),
                Symbol('c')
              )
            ),
            Symbol('c')
          ),
          Symbol('c')
        )
        """
        debug_details = ""
        if debug:
            debug_details += f"<isliteral={self.isliteral!r}, iscanonical={self.iscanonical!r}"
            identity = getattr(self, "identity", None)
            if identity is not None:
                debug_details += f", identity={identity!r}"

            annihilator = getattr(self, "annihilator", None)
            if annihilator is not None:
                debug_details += f", annihilator={annihilator!r}"

            dual = getattr(self, "dual", None)
            if dual is not None:
                debug_details += f", dual={dual!r}"
            debug_details += ">"
        cls = self.__class__.__name__
        # Children are indented two extra spaces and joined with commas.
        args = [a.pretty(indent=indent + 2, debug=debug) for a in self.args]
        pfargs = ",\n".join(args)
        cur_indent = " " * indent
        new_line = "" if self.isliteral else "\n"
        return f"{cur_indent}{cls}({debug_details}{new_line}{pfargs}\n{cur_indent})"
class NOT(Function):
    """
    Boolean NOT operation.

    The NOT operation takes exactly one argument. If this argument is a Symbol
    the resulting expression is also called a literal.

    The operator "~" can be used as abbreviation for NOT, e.g. instead of NOT(x)
    one can write ~x (where x is some boolean expression). Also for printing "~"
    is used for better readability.

    You can subclass to define alternative string representation.
    For example:
    >>> class NOT2(NOT):
    ...     def __init__(self, *args):
    ...         super(NOT2, self).__init__(*args)
    ...         self.operator = '!'
    """

    def __init__(self, arg1):
        super(NOT, self).__init__(arg1)
        # A NOT wrapping a plain Symbol is itself a literal.
        self.isliteral = isinstance(self.args[0], Symbol)
        self.operator = "~"

    def literalize(self):
        """
        Return an expression where NOTs are only occurring as literals.
        """
        expr = self.demorgan()
        if isinstance(expr, self.__class__):
            # Still a NOT after De Morgan: it is already a literal.
            return expr
        return expr.literalize()

    def simplify(self):
        """
        Return a simplified expr in canonical form.

        This means double negations are canceled out and all contained boolean
        objects are in their canonical form.
        """
        if self.iscanonical:
            return self

        expr = self.cancel()
        if not isinstance(expr, self.__class__):
            # Double negations cancelled away the NOT entirely.
            return expr.simplify()

        if expr.args[0] in (
            self.TRUE,
            self.FALSE,
        ):
            # NOT(TRUE) -> FALSE and NOT(FALSE) -> TRUE via the dual link.
            return expr.args[0].dual

        expr = self.__class__(expr.args[0].simplify())
        expr.iscanonical = True
        return expr

    def cancel(self):
        """
        Cancel itself and following NOTs as far as possible.
        Returns the simplified expression.
        """
        expr = self
        while True:
            arg = expr.args[0]
            if not isinstance(arg, self.__class__):
                # Odd number of NOTs so far: one negation remains.
                return expr
            expr = arg.args[0]
            if not isinstance(expr, self.__class__):
                # Even number of NOTs: they all cancel out.
                return expr

    def demorgan(self):
        """
        Return a expr where the NOT function is moved inward.
        This is achieved by canceling double NOTs and using De Morgan laws.
        """
        expr = self.cancel()
        if expr.isliteral or not isinstance(expr, self.NOT):
            return expr
        op = expr.args[0]
        # De Morgan: NOT(a AND b) -> NOT(a) OR NOT(b) and vice versa, using
        # the inner operation's dual class.
        return op.dual(*(self.__class__(arg).cancel() for arg in op.args))

    def __call__(self, **kwargs):
        """
        Return the evaluated (negated) value for this function.
        """
        return not self.args[0](**kwargs)

    def __lt__(self, other):
        # Sort a negation immediately relative to the expression it negates.
        return self.args[0] < other

    def pretty(self, indent=1, debug=False):
        """
        Return a pretty formatted representation of self.
        Include additional debug details if `debug` is True.
        """
        debug_details = ""
        if debug:
            debug_details += f"<isliteral={self.isliteral!r}, iscanonical={self.iscanonical!r}>"

        if self.isliteral:
            pretty_literal = self.args[0].pretty(indent=0, debug=debug)
            return (" " * indent) + f"{self.__class__.__name__}({debug_details}{pretty_literal})"
        else:
            return super(NOT, self).pretty(indent=indent, debug=debug)
class DualBase(Function):
    """
    Base class for AND and OR function.

    This class uses the duality principle to combine similar methods of AND
    and OR. Both operations take two or more arguments and can be created using
    "|" for OR and "&" for AND.
    """

    # Python binary operator used by __call__; set by AND/OR subclasses.
    _pyoperator = None

    def __init__(self, arg1, arg2, *args):
        super(DualBase, self).__init__(arg1, arg2, *args)

        # identity element for the specific operation.
        # This will be TRUE for the AND operation and FALSE for the OR operation.
        self.identity = None

        # annihilator element for this function.
        # This will be FALSE for the AND operation and TRUE for the OR operation.
        self.annihilator = None

        # dual class of this function.
        # This means OR.dual returns AND and AND.dual returns OR.
        self.dual = None

    def __contains__(self, expr):
        """
        Test if expr is a subterm of this expression.
        """
        # NOTE(review): falls through to an implicit None (falsy) when expr
        # is neither an arg nor a same-class argument subset — equivalent to
        # False in boolean context.
        if expr in self.args:
            return True

        if isinstance(expr, self.__class__):
            return all(arg in self.args for arg in expr.args)

    def simplify(self, sort=True):
        """
        Return a new simplified expression in canonical form from this
        expression.

        For simplification of AND and OR the following rules are used
        recursively bottom up:

        - Associativity (output does not contain same operations nested)::

            (A & B) & C = A & (B & C) = A & B & C
            (A | B) | C = A | (B | C) = A | B | C

        - Annihilation::

            A & 0 = 0, A | 1 = 1

        - Idempotence (e.g. removing duplicates)::

            A & A = A, A | A = A

        - Identity::

            A & 1 = A, A | 0 = A

        - Complementation::

            A & ~A = 0, A | ~A = 1

        - Elimination::

            (A & B) | (A & ~B) = A, (A | B) & (A | ~B) = A

        - Absorption::

            A & (A | B) = A, A | (A & B) = A

        - Negative absorption::

            A & (~A | B) = A & B, A | (~A & B) = A | B

        - Commutativity (output is always sorted)::

            A & B = B & A, A | B = B | A

        Other boolean objects are also in their canonical form.
        """
        # TODO: Refactor DualBase.simplify into different "sub-evals".

        # If self is already canonical do nothing.
        if self.iscanonical:
            return self

        # Otherwise bring arguments into canonical form.
        args = [arg.simplify() for arg in self.args]

        # Create new instance of own class with canonical args.
        # TODO: Only create new class if some args changed.
        expr = self.__class__(*args)

        # Literalize before doing anything, this also applies De Morgan's Law
        expr = expr.literalize()

        # Associativity:
        #     (A & B) & C = A & (B & C) = A & B & C
        #     (A | B) | C = A | (B | C) = A | B | C
        expr = expr.flatten()

        # Annihilation: A & 0 = 0, A | 1 = 1
        if self.annihilator in expr.args:
            return self.annihilator

        # Idempotence: A & A = A, A | A = A
        # this boils down to removing duplicates
        args = []
        for arg in expr.args:
            if arg not in args:
                args.append(arg)
        if len(args) == 1:
            return args[0]

        # Identity: A & 1 = A, A | 0 = A
        if self.identity in args:
            args.remove(self.identity)
            if len(args) == 1:
                return args[0]

        # Complementation: A & ~A = 0, A | ~A = 1
        for arg in args:
            if self.NOT(arg) in args:
                return self.annihilator

        # Elimination: (A & B) | (A & ~B) = A, (A | B) & (A | ~B) = A
        # Scan pairs of same-length dual terms that differ in exactly one
        # negated argument.
        i = 0
        while i < len(args) - 1:
            j = i + 1
            ai = args[i]
            if not isinstance(ai, self.dual):
                i += 1
                continue
            while j < len(args):
                aj = args[j]
                if not isinstance(aj, self.dual) or len(ai.args) != len(aj.args):
                    j += 1
                    continue

                # Find terms where only one arg is different.
                negated = None
                for arg in ai.args:
                    # FIXME: what does this pass Do?
                    # NOTE(review): the `pass` skips args shared by both
                    # terms; only a single negated difference may remain for
                    # elimination to apply.
                    if arg in aj.args:
                        pass
                    elif self.NOT(arg).cancel() in aj.args:
                        if negated is None:
                            negated = arg
                        else:
                            # More than one negated difference: no elimination.
                            negated = None
                            break
                    else:
                        negated = None
                        break

                # If the different arg is a negation simplify the expr.
                if negated is not None:
                    # Cancel out one of the two terms.
                    del args[j]
                    aiargs = list(ai.args)
                    aiargs.remove(negated)
                    if len(aiargs) == 1:
                        args[i] = aiargs[0]
                    else:
                        args[i] = self.dual(*aiargs)

                    if len(args) == 1:
                        return args[0]
                    else:
                        # Now the other simplifications have to be redone.
                        return self.__class__(*args).simplify()
                j += 1
            i += 1

        # Absorption: A & (A | B) = A, A | (A & B) = A
        # Negative absorption: A & (~A | B) = A & B, A | (~A & B) = A | B
        args = self.absorb(args)
        if len(args) == 1:
            return args[0]

        # Commutativity: A & B = B & A, A | B = B | A
        if sort:
            args.sort()

        # Create new (now canonical) expression.
        expr = self.__class__(*args)
        expr.iscanonical = True
        return expr

    def flatten(self):
        """
        Return a new expression where nested terms of this expression are
        flattened as far as possible.

        E.g.::

            A & (B & C) becomes A & B & C.
        """
        args = list(self.args)
        i = 0
        for arg in self.args:
            if isinstance(arg, self.__class__):
                # Splice the nested same-operation args in place.
                args[i : i + 1] = arg.args
                i += len(arg.args)
            else:
                i += 1

        return self.__class__(*args)

    def absorb(self, args):
        """
        Given an `args` sequence of expressions, return a new list of expression
        applying absorption and negative absorption.

        See https://en.wikipedia.org/wiki/Absorption_law

        Absorption::

            A & (A | B) = A, A | (A & B) = A

        Negative absorption::

            A & (~A | B) = A & B, A | (~A & B) = A | B
        """
        args = list(args)
        if not args:
            args = list(self.args)
        i = 0
        while i < len(args):
            absorber = args[i]
            j = 0
            while j < len(args):
                if j == i:
                    j += 1
                    continue
                target = args[j]
                if not isinstance(target, self.dual):
                    j += 1
                    continue

                # Absorption
                if absorber in target:
                    del args[j]
                    if j < i:
                        # Deleting before i shifts it one position left.
                        i -= 1
                    continue

                # Negative absorption
                neg_absorber = self.NOT(absorber).cancel()
                if neg_absorber in target:
                    b = target.subtract(neg_absorber, simplify=False)
                    if b is None:
                        del args[j]
                        if j < i:
                            i -= 1
                        continue
                    else:
                        args[j] = b
                        j += 1
                        continue

                if isinstance(absorber, self.dual):
                    # Partial-match case: target contains all of absorber's
                    # args except exactly one, present negated.
                    remove = None
                    for arg in absorber.args:
                        narg = self.NOT(arg).cancel()
                        if arg in target.args:
                            pass
                        elif narg in target.args:
                            if remove is None:
                                remove = narg
                            else:
                                remove = None
                                break
                        else:
                            remove = None
                            break
                    if remove is not None:
                        args[j] = target.subtract(remove, simplify=True)
                j += 1
            i += 1

        return args

    def subtract(self, expr, simplify):
        """
        Return a new expression where the `expr` expression has been removed
        from this expression if it exists.
        """
        args = self.args
        if expr in self.args:
            args = list(self.args)
            args.remove(expr)
        elif isinstance(expr, self.__class__):
            if all(arg in self.args for arg in expr.args):
                args = tuple(arg for arg in self.args if arg not in expr)
        if len(args) == 0:
            # Everything was removed: signal emptiness with None.
            return None
        if len(args) == 1:
            return args[0]

        newexpr = self.__class__(*args)
        if simplify:
            newexpr = newexpr.simplify()
        return newexpr

    def distributive(self):
        """
        Return a term where the leading AND or OR terms are switched.

        This is done by applying the distributive laws::

            A & (B|C) = (A&B) | (A&C)
            A | (B&C) = (A|B) & (A|C)
        """
        dual = self.dual

        # Wrap every non-dual arg in a 1-tuple so the cartesian product
        # below treats all args uniformly.
        args = list(self.args)
        for i, arg in enumerate(args):
            if isinstance(arg, dual):
                args[i] = arg.args
            else:
                args[i] = (arg,)

        prod = itertools.product(*args)
        args = tuple(self.__class__(*arg).simplify() for arg in prod)

        if len(args) == 1:
            return args[0]
        else:
            return dual(*args)

    def __lt__(self, other):
        """Order same-class terms lexicographically by their args, then length."""
        comparator = Expression.__lt__(self, other)
        if comparator is not NotImplemented:
            return comparator

        if isinstance(other, self.__class__):
            lenself = len(self.args)
            lenother = len(other.args)
            for i in range(min(lenself, lenother)):
                if self.args[i] == other.args[i]:
                    continue
                comparator = self.args[i] < other.args[i]
                if comparator is not NotImplemented:
                    return comparator

            if lenself != lenother:
                return lenself < lenother

        return NotImplemented

    def __call__(self, **kwargs):
        """
        Return the evaluation of this expression by calling each of its arg as
        arg(**kwargs) and applying its corresponding Python operator (and or or)
        to the results.
        Reduce is used as in e.g. AND(a, b, c, d) == AND(a, AND(b, AND(c, d)))
        or e.g. OR(a, b, c, d) == OR(a, OR(b, OR(c, d)))
        """
        return reduce(self._pyoperator, (a(**kwargs) for a in self.args))
class AND(DualBase):
    """
    Boolean AND operation, taking two or more arguments.

    It can also be created by using "&" between two boolean expressions.

    You can subclass to define alternative string representation by overriding
    self.operator.
    For example:
    >>> class AND2(AND):
    ...     def __init__(self, *args):
    ...         super(AND2, self).__init__(*args)
    ...         self.operator = 'AND'
    """

    # Python operator applied pairwise by DualBase.__call__.
    _pyoperator = and_operator

    def __init__(self, arg1, arg2, *args):
        super(AND, self).__init__(arg1, arg2, *args)
        self.sort_order = 10
        # A & 1 = A
        self.identity = self.TRUE
        # A & 0 = 0
        self.annihilator = self.FALSE
        self.dual = self.OR
        self.operator = "&"
class OR(DualBase):
    """
    Boolean OR operation, taking two or more arguments

    It can also be created by using "|" between two boolean expressions.

    You can subclass to define alternative string representation by overriding
    self.operator.
    For example:
    >>> class OR2(OR):
    ...     def __init__(self, *args):
    ...         super(OR2, self).__init__(*args)
    ...         self.operator = 'OR'
    """

    # Python operator applied pairwise by DualBase.__call__.
    _pyoperator = or_operator

    def __init__(self, arg1, arg2, *args):
        super(OR, self).__init__(arg1, arg2, *args)
        self.sort_order = 25
        # A | 0 = A
        self.identity = self.FALSE
        # A | 1 = 1
        self.annihilator = self.TRUE
        self.dual = self.AND
        self.operator = "|"
|
boolean/boolean.py
|
codereval_python_data_92
|
Decorator function to add a new handler to the registry.
Args:
hook (HookType): Hook attribute for which to register the handler.
Returns:
callable: Decorator for registering listeners for the specified
hook.
    def on(self, hook):
        """Decorator function to add a new handler to the registry.

        Args:
            hook (HookType): Hook attribute for which to register the handler.

        Returns:
            callable: Decorator for registering listeners for the specified
            hook.
        """
        # NOTE(review): this fragment duplicates HookDispatcher.on defined
        # below — presumably a copy/paste artifact; confirm and deduplicate.

        def decorator(handler):
            self.registry[hook].append(handler)
            return handler

        return decorator
"""This module defines the Hook Dispatcher and listeners for registering and
executing hooks. Hook Dispatcher emits hooks based on :class:`Hook` attributes which
define when the hook will be executed.
"""
import asyncio
import logging
import random
from base64 import b64encode
from collections import defaultdict
from contextlib import suppress
from copy import deepcopy
from datetime import datetime
from functools import reduce
from operator import getitem
from enum import Enum, auto
from inspect import iscoroutinefunction
from OpenSSL import crypto
from typing import NamedTuple
import yarl
from aiohttp import ClientConnectorError
from krake.controller import Observer
from krake.controller.kubernetes.client import KubernetesClient, InvalidManifestError
from krake.utils import camel_to_snake_case, get_kubernetes_resource_idx
from kubernetes_asyncio.client.rest import ApiException
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio import client
from krake.data.kubernetes import ClusterState, Application, Cluster
from yarl import URL
from secrets import token_urlsafe
from kubernetes_asyncio.client import (
Configuration,
V1Secret,
V1EnvVar,
V1VolumeMount,
V1Volume,
V1SecretKeySelector,
V1EnvVarSource,
)
from kubernetes_asyncio.config.kube_config import KubeConfigLoader
# Module-level logger, one per module as usual.
logger = logging.getLogger(__name__)
class HookType(Enum):
    """Enumeration of the lifecycle moments at which hooks are emitted.

    ``Resource*`` members bracket operations on individual Kubernetes
    resources, ``Application*`` members bracket whole-application workflows,
    and ``Cluster*`` members bracket cluster lifecycle events.
    """

    ResourcePreCreate = auto()
    ResourcePostCreate = auto()
    ResourcePreUpdate = auto()
    ResourcePostUpdate = auto()
    ResourcePreDelete = auto()
    ResourcePostDelete = auto()
    ApplicationMangling = auto()
    ApplicationPreMigrate = auto()
    ApplicationPostMigrate = auto()
    ApplicationPreReconcile = auto()
    ApplicationPostReconcile = auto()
    ApplicationPreDelete = auto()
    ApplicationPostDelete = auto()
    ClusterCreation = auto()
    ClusterDeletion = auto()
class HookDispatcher(object):
    """Simple wrapper around a registry of handlers associated to
    :class:`HookType` attributes. Each :class:`HookType` attribute defines
    when the handler will be executed.

    Listeners for certain hooks can be registered via :meth:`on`. Registered
    listeners are executed via :meth:`hook`.

    Example:
        .. code:: python

            listen = HookDispatcher()

            @listen.on(HookType.ResourcePreCreate)
            def to_perform_before_resource_creation(app, cluster, resource, controller):
                # Do Stuff

            @listen.on(HookType.ResourcePostCreate)
            def another_to_perform_after_resource_creation(app, cluster, resource, resp):
                # Do Stuff

            @listen.on(HookType.ResourcePostDelete)
            def to_perform_after_resource_deletion(app, cluster, resource, resp):
                # Do Stuff

    """

    def __init__(self):
        # Maps a hook type to the list of handlers registered for it, in
        # registration order.
        self.registry = defaultdict(list)

    def on(self, hook):
        """Decorator function to add a new handler to the registry.

        Args:
            hook (HookType): Hook attribute for which to register the handler.

        Returns:
            callable: Decorator for registering listeners for the specified
            hook.
        """

        def decorator(handler):
            self.registry[hook].append(handler)
            return handler

        return decorator

    async def hook(self, hook, **kwargs):
        """Execute the list of handlers associated to the provided
        :class:`HookType` attribute.

        Args:
            hook (HookType): The hook attribute for which to execute handlers.
        """
        # `registry` is a defaultdict, so the previous `try/except KeyError`
        # around a subscript could never trigger and — worse — subscripting
        # inserted an empty list into the registry for every unknown hook.
        # A plain `.get` keeps the dispatch side-effect free.
        for handler in self.registry.get(hook, []):
            if iscoroutinefunction(handler):
                # Coroutine handlers are awaited; plain callables run inline.
                await handler(**kwargs)
            else:
                handler(**kwargs)
# Single module-wide dispatcher; the hook handlers below register themselves
# on it via @listen.on(...) decorators.
listen = HookDispatcher()
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
async def register_service(app, cluster, resource, response):
    """Register endpoint of Kubernetes Service object on creation and update.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        cluster (krake.data.kubernetes.Cluster): The cluster on which the
            application is running
        resource (dict): Kubernetes object description as specified in the
            specification of the application.
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    """
    if resource["kind"] != "Service":
        return

    service_name = resource["metadata"]["name"]

    if response.spec and response.spec.type == "LoadBalancer":
        # For a "LoadBalancer" type of Service, an external IP is given in the cluster
        # by a load balancer controller to the service. In this case, the "port"
        # specified in the spec is reachable from the outside.
        if (
            not response.status.load_balancer
            or not response.status.load_balancer.ingress
        ):
            # When a "LoadBalancer" type of service is created, the IP is given by an
            # additional controller (e.g. a controller that requests a floating IP to an
            # OpenStack infrastructure). This process can take some time, but the
            # Service itself already exist before the IP is assigned. In the case of an
            # error with the controller, the IP is also not given. This "<pending>" IP
            # just expresses that the Service exists, but the IP is not ready yet.
            external_ip = "<pending>"
        else:
            external_ip = response.status.load_balancer.ingress[0].ip

        if not response.spec.ports:
            external_port = "<pending>"
        else:
            external_port = response.spec.ports[0].port

        app.status.services[service_name] = f"{external_ip}:{external_port}"
        return

    # Non-LoadBalancer path: fall back to the node port, if any.
    node_port = None
    # Ensure that ports are specified
    if response.spec and response.spec.ports:
        node_port = response.spec.ports[0].node_port

    # If the service does not have a node port, remove a potential reference
    # and return.
    if node_port is None:
        try:
            del app.status.services[service_name]
        except KeyError:
            pass
        return

    # Determine URL of Kubernetes cluster API
    # NOTE(review): the service is then advertised on the API server host at
    # the node port — presumably valid only when the API host is also a
    # worker node; confirm against the deployment topology.
    loader = KubeConfigLoader(cluster.spec.kubeconfig)
    config = Configuration()
    await loader.load_and_set(config)
    cluster_url = yarl.URL(config.host)

    app.status.services[service_name] = f"{cluster_url.host}:{node_port}"
@listen.on(HookType.ResourcePostDelete)
async def unregister_service(app, resource, **kwargs):
    """Unregister endpoint of Kubernetes Service object on deletion.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    if resource["kind"] != "Service":
        return

    # Drop the registered endpoint, tolerating services never registered.
    with suppress(KeyError):
        del app.status.services[resource["metadata"]["name"]]
@listen.on(HookType.ResourcePostDelete)
async def remove_resource_from_last_observed_manifest(app, resource, **kwargs):
    """Remove a given resource from the last_observed_manifest after its deletion

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        resource (dict): Kubernetes object description as specified in the
            specification of the application.

    """
    manifest = app.status.last_observed_manifest
    try:
        resource_index = get_kubernetes_resource_idx(manifest, resource)
    except IndexError:
        # The resource was never observed; nothing to clean up.
        return
    del manifest[resource_index]
def update_last_applied_manifest_dict_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_list_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response

    Args:
        last_applied_manifest (dict): partial ``last_applied_manifest`` being
            updated
        observer_schema (dict): partial ``observer_schema``
        response (dict): partial response from the Kubernetes API.

    Raises:
        KeyError: If the observed field is not present in the Kubernetes response

    This function goes through all observed fields and initializes their value
    in ``last_applied_manifest`` if they are not yet present.

    """
    for key, value in observer_schema.items():
        # Manifest keys are looked up in the response after a
        # camel_to_snake_case conversion (the deserialized response uses
        # snake_case attribute names).
        camel_key = camel_to_snake_case(key)

        if camel_key not in response:
            # An observed key should always be present in the k8s response
            raise KeyError(
                f"Observed key {camel_key} is not present in response {response}"
            )

        if isinstance(value, dict):
            # Observed sub-dict: ensure it exists, then recurse into it.
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest.setdefault(key, {}), value, response[camel_key]
            )
        elif isinstance(value, list):
            # Observed sub-list: ensure it exists, then recurse into it.
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest.setdefault(key, []), value, response[camel_key]
            )
        elif key not in last_applied_manifest:
            # Plain observed value not yet initialized: copy it from the
            # response, leaving already-present values untouched.
            last_applied_manifest[key] = response[camel_key]
def update_last_applied_manifest_list_from_resp(
    last_applied_manifest, observer_schema, response
):
    """Together with :func:``update_last_applied_manifest_dict_from_resp``, this
    function is called recursively to update a partial ``last_applied_manifest``
    from a partial Kubernetes response

    Args:
        last_applied_manifest (list): partial ``last_applied_manifest`` being
            updated
        observer_schema (list): partial ``observer_schema``
        response (list): partial response from the Kubernetes API.

    This function goes through all observed fields and initializes their value
    in ``last_applied_manifest`` if they are not yet present.

    """
    # Looping over the observed resource, except the last element which is the
    # special control dictionary
    for idx, val in enumerate(observer_schema[:-1]):
        if idx >= len(response):
            # Element is observed but not present in k8s response, so following
            # elements will also not exist.
            #
            # This doesn't raise an Exception as observing the element of a
            # list doesn't ensure its presence. The list length is controlled
            # by the special control dictionary
            return
        if isinstance(val, dict):
            if idx >= len(last_applied_manifest):
                # The dict is observed, but not present in last_applied_manifest
                last_applied_manifest.append({})
            update_last_applied_manifest_dict_from_resp(
                last_applied_manifest[idx], observer_schema[idx], response[idx]
            )
        elif isinstance(val, list):
            # BUGFIX: dispatch on the observer-schema element (like the dict
            # branch and the sibling dict function), not on the response
            # element: checking `response[idx]` recursed with a non-list
            # schema element when an unobserved value happened to be a list,
            # crashing on `observer_schema[idx][:-1]`.
            if idx >= len(last_applied_manifest):
                # The list is observed, but not present in last_applied_manifest
                last_applied_manifest.append([])
            update_last_applied_manifest_list_from_resp(
                last_applied_manifest[idx], observer_schema[idx], response[idx]
            )
        elif idx >= len(last_applied_manifest):
            # Element is not yet present in last_applied_manifest. Adding it.
            last_applied_manifest.append(response[idx])
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_applied_manifest_from_resp(app, response, **kwargs):
    """Hook run after the creation or update of an application in order to update the
    `status.last_applied_manifest` using the k8s response.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        response (kubernetes_asyncio.client.V1Status): Response of the Kubernetes API

    After a Kubernetes resource has been created/updated, the
    `status.last_applied_manifest` has to be updated. All fields already initialized
    (either from the mangling of `spec.manifest`, or by a previous call to this
    function) should be left untouched. Only observed fields which are not present in
    `status.last_applied_manifest` should be initialized.

    """
    # A dict response means the Kubernetes client could not deserialize the
    # payload into an API object; otherwise convert the object to a dict.
    resp = response if isinstance(response, dict) else response.to_dict()

    idx_applied = get_kubernetes_resource_idx(app.status.last_applied_manifest, resp)
    idx_observed = get_kubernetes_resource_idx(app.status.mangled_observer_schema, resp)

    update_last_applied_manifest_dict_from_resp(
        app.status.last_applied_manifest[idx_applied],
        app.status.mangled_observer_schema[idx_observed],
        resp,
    )
@listen.on(HookType.ResourcePostCreate)
@listen.on(HookType.ResourcePostUpdate)
def update_last_observed_manifest_from_resp(app, response, **kwargs):
    """Handler to run after the creation or update of a Kubernetes resource to
    update the last_observed_manifest from the response of the Kubernetes API.

    Args:
        app (krake.data.kubernetes.Application): Application the service belongs to
        response (kubernetes_asyncio.client.V1Service): Response of the
            Kubernetes API

    The target last_observed_manifest holds the value of all observed fields plus
    the special control dictionaries for the list length.

    Raises:
        IndexError: if the resource is not present in the observer schema. All
            created resources should be observed, so this is a genuine error.
    """
    if isinstance(response, dict):
        # The Kubernetes API couldn't deserialize the k8s response into an object
        resp = response
    else:
        # The Kubernetes API deserialized the k8s response into an object
        resp = response.to_dict()
    # All created resources should be observed: let a lookup failure propagate.
    # (The previous ``try/except IndexError: raise`` was a no-op wrapper.)
    idx_observed = get_kubernetes_resource_idx(
        app.status.mangled_observer_schema,
        resp,
    )
    try:
        idx_last_observed = get_kubernetes_resource_idx(
            app.status.last_observed_manifest,
            resp,
        )
    except IndexError:
        # If the resource is not yet present in last_observed_manifest, append it.
        idx_last_observed = len(app.status.last_observed_manifest)
        app.status.last_observed_manifest.append({})
    # Overwrite the last_observed_manifest for this resource
    app.status.last_observed_manifest[
        idx_last_observed
    ] = update_last_observed_manifest_dict(
        app.status.mangled_observer_schema[idx_observed], resp
    )
def update_last_observed_manifest_dict(observed_resource, response):
    """Recursive helper (dict flavor) crafting the ``last_observed_manifest``
    from the Kubernetes :attr:``response``.

    Works together with :func:``update_last_observed_manifest_list``.

    Args:
        observed_resource (dict): The schema to observe for the partial given
            resource
        response (dict): The partial Kubernetes response for this resource.

    Raises:
        KeyError: If an observed key is not present in the Kubernetes response

    Returns:
        dict: The dictionary of observed keys and their value
    """
    collected = {}
    for key, subschema in observed_resource.items():
        # The observer schema uses the manifest's original keys while the
        # Kubernetes client's ``to_dict`` output uses snake_case keys.
        snake_key = camel_to_snake_case(key)
        if snake_key not in response:
            raise KeyError(
                f"Observed key {snake_key} is not present in response {response}"
            )
        value = response[snake_key]
        if isinstance(subschema, dict):
            collected[key] = update_last_observed_manifest_dict(subschema, value)
        elif isinstance(subschema, list):
            collected[key] = update_last_observed_manifest_list(subschema, value)
        else:
            collected[key] = value
    return collected
def update_last_observed_manifest_list(observed_resource, response):
    """Together with :func:``update_last_observed_manifest_dict``, recursively
    crafts the ``last_observed_manifest`` from the Kubernetes :attr:``response``.

    Args:
        observed_resource (list): the schema to observe for the partial given
            resource
        response (list): the partial Kubernetes response for this resource.

    Returns:
        list: The list of observed elements, plus the special list length control
            dictionary
    """
    if not response:
        return [{"observer_schema_list_current_length": 0}]
    res = []
    # Looping over the observed resource, except the last element which is the
    # special control dictionary
    for idx, val in enumerate(observed_resource[:-1]):
        if idx >= len(response):
            # Element is not present in the Kubernetes response, nothing more
            # to do
            break
        elem = response[idx]
        # ``isinstance`` (not ``type(...) ==``) also accepts dict/list
        # subclasses, consistent with the other recursive helpers.
        if isinstance(elem, dict):
            res.append(update_last_observed_manifest_dict(val, elem))
        elif isinstance(elem, list):
            res.append(update_last_observed_manifest_list(val, elem))
        else:
            res.append(elem)
    # Append the special control dictionary to the list
    res.append({"observer_schema_list_current_length": len(response)})
    return res
def update_last_applied_manifest_dict_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Recursive helper (dict flavor) updating a partial
    ``last_applied_manifest`` from the current one.

    Works together with :func:``update_last_applied_manifest_list_from_spec``.

    Args:
        resource_status_new (dict): partial ``last_applied_manifest`` being
            updated
        resource_status_old (dict): partial of the current
            ``last_applied_manifest``
        resource_observed (dict): partial observer_schema for the manifest file
            being updated
    """
    for key, observed_value in resource_observed.items():
        if key not in resource_status_old:
            # Nothing known for this observed key: nothing to carry over.
            continue
        if key not in resource_status_new:
            # The key is not present in spec.manifest: initialize it first.
            if isinstance(observed_value, dict):
                resource_status_new[key] = {}
            elif isinstance(observed_value, list):
                resource_status_new[key] = []
            else:
                # Scalars are copied straight from the current manifest.
                resource_status_new[key] = resource_status_old[key]
                continue
        # Recurse into containers; scalars already present in spec.manifest
        # are deliberately left untouched.
        if isinstance(observed_value, dict):
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[key],
                resource_status_old[key],
                resource_observed[key],
            )
        elif isinstance(observed_value, list):
            update_last_applied_manifest_list_from_spec(
                resource_status_new[key],
                resource_status_old[key],
                resource_observed[key],
            )
def update_last_applied_manifest_list_from_spec(
    resource_status_new, resource_status_old, resource_observed
):
    """Recursive helper (list flavor) updating a partial
    ``last_applied_manifest`` from the current one.

    Works together with :func:``update_last_applied_manifest_dict_from_spec``.

    Args:
        resource_status_new (list): partial ``last_applied_manifest`` being
            updated
        resource_status_old (list): partial of the current
            ``last_applied_manifest``
        resource_observed (list): partial observer_schema for the manifest file
            being updated
    """
    # Skip the trailing special control dictionary of the observed list.
    for idx, observed_value in enumerate(resource_observed[:-1]):
        if idx >= len(resource_status_old):
            # Neither this element nor the following ones are present in the
            # current last_applied_manifest.
            break
        known_in_new = idx < len(resource_status_new)
        if isinstance(observed_value, dict):
            if not known_in_new:
                # Not present in spec.manifest: initialize before recursing.
                resource_status_new.append({})
            update_last_applied_manifest_dict_from_spec(
                resource_status_new[idx],
                resource_status_old[idx],
                resource_observed[idx],
            )
        elif isinstance(observed_value, list):
            if not known_in_new:
                # Not present in spec.manifest: initialize before recursing.
                resource_status_new.append([])
            update_last_applied_manifest_list_from_spec(
                resource_status_new[idx],
                resource_status_old[idx],
                resource_observed[idx],
            )
        elif not known_in_new:
            # Scalar missing from spec.manifest: copy the current value.
            resource_status_new.append(resource_status_old[idx])
def update_last_applied_manifest_from_spec(app):
    """Update the status.last_applied_manifest of an application from
    spec.manifests.

    Args:
        app (krake.data.kubernetes.Application): Application to update

    Called on application creation and updates. The last_applied_manifest is
    initialized as a copy of spec.manifest, then augmented by every observed
    field or resource present in the current last_applied_manifest but missing
    from spec.manifest.
    """
    # Start from a copy of spec.manifest; observed fields only known in the
    # current last_applied_manifest are merged in below.
    merged_manifest = deepcopy(app.spec.manifest)
    for resource_observed in app.status.mangled_observer_schema:
        try:
            idx_old = get_kubernetes_resource_idx(
                app.status.last_applied_manifest, resource_observed
            )
        except IndexError:
            # Resource unknown in the current last_applied_manifest: nothing to
            # merge, regardless of its presence in spec.manifest.
            continue
        try:
            # Check if the observed resource is present in spec.manifest
            idx_new = get_kubernetes_resource_idx(merged_manifest, resource_observed)
        except IndexError:
            # Observed resource missing from spec.manifest: start from an empty
            # dict which the merge below populates with observed known fields.
            merged_manifest.append({})
            idx_new = len(merged_manifest) - 1
        # Carry over observed fields present in the current
        # last_applied_manifest but not in spec.manifest.
        update_last_applied_manifest_dict_from_spec(
            merged_manifest[idx_new],
            app.status.last_applied_manifest[idx_old],
            resource_observed,
        )
    app.status.last_applied_manifest = merged_manifest
class KubernetesApplicationObserver(Observer):
    """Observer specific for Kubernetes Applications. One observer is created for
    each Application managed by the Controller, but not one per Kubernetes
    resource (Deployment, Service...). If several resources are defined by an
    Application, they are all monitored by the same observer.

    The observer gets the actual status of the resources on the cluster using the
    Kubernetes API, and compares it to the status stored in the API.

    The observer is:
     * started at initial Krake resource creation;
     * deleted when a resource needs to be updated, then started again when it is
       done;
     * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster on which the
            observed Application is created.
        resource (krake.data.kubernetes.Application): the application that will
            be observed.
        on_res_update (coroutine): a coroutine called when a resource's actual
            status differs from the status sent by the database. Its signature
            is: ``(resource) -> updated_resource``. ``updated_resource`` is the
            instance of the resource that is up-to-date with the API. The
            Observer internal instance of the resource to observe will be
            updated. If the API cannot be contacted, ``None`` can be returned.
            In this case the internal instance of the Observer will not be
            updated.
        time_step (int, optional): how frequently the Observer should watch the
            actual status of the resources.
    """

    def __init__(self, cluster, resource, on_res_update, time_step=2):
        super().__init__(resource, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Application monitored by the Observer.

        Returns:
            krake.data.core.Status: the status object created using information
                from the real world Applications resource.

        Raises:
            ApiException: on unexpected (non-404) Kubernetes API errors.
        """
        app = self.resource
        status = deepcopy(app.status)
        status.last_observed_manifest = []
        # For each observed kubernetes resource of the Application,
        # get its current status on the cluster.
        for desired_resource in app.status.last_applied_manifest:
            kube = KubernetesClient(self.cluster.spec.kubeconfig)
            idx_observed = get_kubernetes_resource_idx(
                app.status.mangled_observer_schema, desired_resource
            )
            observed_resource = app.status.mangled_observer_schema[idx_observed]
            async with kube:
                try:
                    group, version, kind, name, namespace = kube.get_immutables(
                        desired_resource
                    )
                    resource_api = await kube.get_resource_api(group, version, kind)
                    resp = await resource_api.read(kind, name, namespace)
                except ApiException as err:
                    if err.status == 404:
                        # Resource does not exist
                        continue
                    # Unexpected error: log and re-raise. (Previously the code
                    # fell through here and crashed on the unbound ``resp``.)
                    logger.error(err)
                    raise
            observed_manifest = update_last_observed_manifest_dict(
                observed_resource, resp.to_dict()
            )
            status.last_observed_manifest.append(observed_manifest)
        return status
class KubernetesClusterObserver(Observer):
    """Observer specific for Kubernetes Clusters. One observer is created for
    each Cluster managed by the Controller.

    The observer gets the actual status of the cluster using the Kubernetes API,
    and compares it to the status stored in the API.

    The observer is:
     * started at initial Krake resource creation;
     * deleted when a resource needs to be updated, then started again when it is
       done;
     * simply deleted on resource deletion.

    Args:
        cluster (krake.data.kubernetes.Cluster): the cluster which will be
            observed.
        on_res_update (coroutine): a coroutine called when a resource's actual
            status differs from the status sent by the database. Its signature
            is: ``(resource) -> updated_resource``. ``updated_resource`` is the
            instance of the resource that is up-to-date with the API. The
            Observer internal instance of the resource to observe will be
            updated. If the API cannot be contacted, ``None`` can be returned.
            In this case the internal instance of the Observer will not be
            updated.
        time_step (int, optional): how frequently the Observer should watch the
            actual status of the resources.
    """

    def __init__(self, cluster, on_res_update, time_step=2):
        super().__init__(cluster, on_res_update, time_step)
        self.cluster = cluster

    async def poll_resource(self):
        """Fetch the current status of the Cluster monitored by the Observer.

        Returns:
            krake.data.core.Status: the status object created using information
                from the real world Cluster.
        """
        status = deepcopy(self.cluster.status)
        # For each observed kubernetes cluster registered in Krake,
        # get its current node status.
        loader = KubeConfigLoader(self.cluster.spec.kubeconfig)
        config = Configuration()
        await loader.load_and_set(config)
        kube = ApiClient(config)
        async with kube as api:
            v1 = client.CoreV1Api(api)
            try:
                response = await v1.list_node()
            except ClientConnectorError as err:
                status.state = ClusterState.OFFLINE
                self.cluster.status.state = ClusterState.OFFLINE
                # Log the error
                logger.debug(err)
                return status
            condition_dict = {
                "MemoryPressure": [],
                "DiskPressure": [],
                "PIDPressure": [],
                "Ready": [],
            }
            for item in response.items:
                for condition in item.status.conditions:
                    # Nodes can report additional condition types (e.g.
                    # "NetworkUnavailable"); only collect the ones evaluated
                    # below instead of crashing with a KeyError.
                    if condition.type in condition_dict:
                        condition_dict[condition.type].append(condition.status)
            # NOTE(review): the ``== ["True"]`` comparisons assume exactly one
            # node; with several nodes the lists hold one entry per node and
            # every check falls through to NOTREADY — confirm intended behavior.
            if (
                condition_dict["MemoryPressure"] == ["True"]
                or condition_dict["DiskPressure"] == ["True"]
                or condition_dict["PIDPressure"] == ["True"]
            ):
                status.state = ClusterState.UNHEALTHY
                self.cluster.status.state = ClusterState.UNHEALTHY
                return status
            elif (
                condition_dict["Ready"] == ["True"]
                and status.state is ClusterState.OFFLINE
            ):
                status.state = ClusterState.CONNECTING
                self.cluster.status.state = ClusterState.CONNECTING
                return status
            elif condition_dict["Ready"] == ["True"]:
                status.state = ClusterState.ONLINE
                self.cluster.status.state = ClusterState.ONLINE
                return status
            else:
                status.state = ClusterState.NOTREADY
                self.cluster.status.state = ClusterState.NOTREADY
                return status
@listen.on(HookType.ApplicationPostReconcile)
@listen.on(HookType.ApplicationPostMigrate)
@listen.on(HookType.ClusterCreation)
async def register_observer(controller, resource, start=True, **kwargs):
    """Create an observer for the given Application or Cluster, and start it as a
    background task if wanted.

    If an observer already existed for this Application or Cluster, it is stopped
    and deleted.

    Args:
        controller (KubernetesController): the controller for which the observer
            will be added in the list of working observers.
        resource (krake.data.kubernetes.Application): the Application to observe
            or
        resource (krake.data.kubernetes.Cluster): the Cluster to observe.
        start (bool, optional): if False, does not start the observer as
            background task.
    """
    if resource.kind == Application.kind:
        cluster = await controller.kubernetes_api.read_cluster(
            namespace=resource.status.running_on.namespace,
            name=resource.status.running_on.name,
        )
        observer = KubernetesApplicationObserver(
            cluster,
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    elif resource.kind == Cluster.kind:
        observer = KubernetesClusterObserver(
            resource,
            controller.on_status_update,
            time_step=controller.observer_time_step,
        )
    else:
        # The message previously had no format placeholder for ``resource``,
        # which makes the logging module fail to format the record.
        logger.debug("Unknown resource kind %r. No observer was registered.", resource)
        return
    # Lazy %-style arguments instead of an f-string mixed with %r.
    logger.debug("Start observer for %s %r", resource.kind, resource.metadata.name)
    task = None
    if start:
        task = controller.loop.create_task(observer.run())
    controller.observers[resource.metadata.uid] = (observer, task)
@listen.on(HookType.ApplicationPreReconcile)
@listen.on(HookType.ApplicationPreMigrate)
@listen.on(HookType.ApplicationPreDelete)
@listen.on(HookType.ClusterDeletion)
async def unregister_observer(controller, resource, **kwargs):
    """Stop and delete the observer for the given Application or Cluster. If no
    observer is started, do nothing.

    Args:
        controller (KubernetesController): the controller for which the observer
            will be removed from the list of working observers.
        resource (krake.data.kubernetes.Application): the Application whose
            observer will be stopped or
        resource (krake.data.kubernetes.Cluster): the Cluster whose observer will
            be stopped.
    """
    uid = resource.metadata.uid
    if uid not in controller.observers:
        # No observer registered for this resource: nothing to stop.
        return
    logger.debug(f"Stop observer for {resource.kind} %r", resource.metadata.name)
    _, task = controller.observers.pop(uid)
    # Cancel the background task and wait for it to terminate.
    task.cancel()
    with suppress(asyncio.CancelledError):
        await task
def utc_difference():
    """Get the difference in seconds between the current time and the current UTC
    time.

    Returns:
        int: the time difference in seconds, in the range [0, 86400).
    """
    # NOTE(review): for timezones behind UTC the delta is negative and
    # ``timedelta.seconds`` wraps into [0, 86400) instead of being negative —
    # presumably acceptable for the notBefore adjustment, but worth confirming.
    local_now = datetime.now()
    utc_now = datetime.utcnow()
    return (local_now - utc_now).seconds
def generate_certificate(config):
    """Create and sign a new certificate using the one defined in the complete hook
    configuration as intermediate certificate.

    Args:
        config (krake.data.config.CompleteHookConfiguration): the configuration of the
            complete hook.

    Returns:
        CertificatePair: the content of the certificate created and its corresponding
            key, both PEM-encoded.
    """
    # Load the intermediate certificate and its private key from disk.
    with open(config.intermediate_src, "rb") as f:
        intermediate_src = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
    with open(config.intermediate_key_src, "rb") as f:
        intermediate_key_src = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())
    client_cert = crypto.X509()
    # Set general information
    # NOTE(review): OpenSSL's X509 version field is zero-based (2 == X509v3);
    # confirm that the value 3 is intended here.
    client_cert.set_version(3)
    # NOTE(review): ``random`` is not cryptographically secure; presumably the
    # serial only needs uniqueness — confirm, or switch to ``secrets``.
    client_cert.set_serial_number(random.randint(50000000000000, 100000000000000))
    # If not set before, TLS will not accept to use this certificate in UTC cases, as
    # the server time may be earlier.
    time_offset = utc_difference() * -1
    client_cert.gmtime_adj_notBefore(time_offset)
    # The certificate is valid for one year.
    client_cert.gmtime_adj_notAfter(1 * 365 * 24 * 60 * 60)
    # Set issuer and subject
    intermediate_subject = intermediate_src.get_subject()
    client_cert.set_issuer(intermediate_subject)
    # The subject is copied from the intermediate certificate, with the CN
    # replaced by the configured hook user.
    client_subj = crypto.X509Name(intermediate_subject)
    client_subj.CN = config.hook_user
    client_cert.set_subject(client_subj)
    # Create and set the private key
    client_key = crypto.PKey()
    client_key.generate_key(crypto.TYPE_RSA, 2048)
    client_cert.set_pubkey(client_key)
    # Sign the new certificate with the intermediate key.
    client_cert.sign(intermediate_key_src, "sha256")
    cert_dump = crypto.dump_certificate(crypto.FILETYPE_PEM, client_cert).decode()
    key_dump = crypto.dump_privatekey(crypto.FILETYPE_PEM, client_key).decode()
    return CertificatePair(cert=cert_dump, key=key_dump)
def generate_default_observer_schema(app):
    """Generate the default observer schema for each Kubernetes resource present
    in ``spec.manifest`` for which a custom observer schema hasn't been
    specified.

    Args:
        app (krake.data.kubernetes.Application): The application for which to
            generate a default observer schema
    """
    app.status.mangled_observer_schema = deepcopy(app.spec.observer_schema)
    for resource_manifest in app.spec.manifest:
        try:
            # A successful lookup means the user supplied a custom schema.
            get_kubernetes_resource_idx(
                app.status.mangled_observer_schema, resource_manifest
            )
        except IndexError:
            # No custom observer schema for this resource: fall back to the
            # generated default one.
            default_schema = generate_default_observer_schema_dict(
                resource_manifest, first_level=True
            )
            app.status.mangled_observer_schema.append(default_schema)
def generate_default_observer_schema_dict(manifest_dict, first_level=False):
    """Recursive helper (dict flavor) generating part of a default
    ``observer_schema`` from part of a Kubernetes resource.

    Works together with :func:``generate_default_observer_schema_list``.

    Args:
        manifest_dict (dict): Partial Kubernetes resources
        first_level (bool, optional): If True, indicates that the dictionary
            represents the whole observer schema of a Kubernetes resource

    Returns:
        dict: Generated partial observer_schema

    A new dictionary is built from ``manifest_dict`` with every non-container
    value replaced by ``None``. For a ``first_level`` dictionary (complete
    ``observer_schema`` of a resource), the identifying fields keep their
    concrete values from the manifest file.
    """

    def _default_for(value):
        # Containers are recursed into; everything else is observed as None.
        if isinstance(value, dict):
            return generate_default_observer_schema_dict(value)
        if isinstance(value, list):
            return generate_default_observer_schema_list(value)
        return None

    observer_schema_dict = {
        key: _default_for(value) for key, value in manifest_dict.items()
    }
    if first_level:
        # Identifying fields keep their concrete values so the resource can be
        # matched against its manifest.
        observer_schema_dict["apiVersion"] = manifest_dict["apiVersion"]
        observer_schema_dict["kind"] = manifest_dict["kind"]
        observer_schema_dict["metadata"]["name"] = manifest_dict["metadata"]["name"]
        if (
            "spec" in manifest_dict
            and "type" in manifest_dict["spec"]
            and manifest_dict["spec"]["type"] == "LoadBalancer"
        ):
            # LoadBalancer services additionally observe their ingress status.
            observer_schema_dict["status"] = {"load_balancer": {"ingress": None}}
    return observer_schema_dict
def generate_default_observer_schema_list(manifest_list):
    """Recursive helper (list flavor) generating part of a default
    ``observer_schema`` from part of a Kubernetes resource.

    Works together with :func:``generate_default_observer_schema_dict``.

    Args:
        manifest_list (list): Partial Kubernetes resources

    Returns:
        list: Generated partial observer_schema

    A new list is built from ``manifest_list`` with every non-container element
    replaced by ``None``, followed by the default list control dictionary whose
    minimum and maximum lengths are both the current list length.
    """

    def _default_for(element):
        # Containers are recursed into; everything else is observed as None.
        if isinstance(element, dict):
            return generate_default_observer_schema_dict(element)
        if isinstance(element, list):
            return generate_default_observer_schema_list(element)
        return None

    observer_schema_list = [_default_for(element) for element in manifest_list]
    # By default the observed list must keep exactly its current length.
    observer_schema_list.append(
        {
            "observer_schema_list_min_length": len(manifest_list),
            "observer_schema_list_max_length": len(manifest_list),
        }
    )
    return observer_schema_list
@listen.on(HookType.ApplicationMangling)
async def complete(app, api_endpoint, ssl_context, config):
    """Execute application complete hook defined by :class:`Complete`.

    The hook mangles the given application and injects complete hook variables.
    The application complete hook is disabled by default and enabled by the
    user through the --hook-complete argument in the rok cli.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        config (krake.data.config.HooksConfiguration): Complete hook
            configuration.
    """
    if "complete" not in app.spec.hooks:
        return
    hook_config = config.complete
    # An external endpoint, when configured, takes precedence over the API one.
    if hook_config.external_endpoint:
        api_endpoint = hook_config.external_endpoint
    # The token is generated only once per Application and then reused.
    if not app.status.complete_token:
        app.status.complete_token = token_urlsafe()
    # Generate only once the certificate and key for a specific Application
    generated_cert = CertificatePair(
        cert=app.status.complete_cert, key=app.status.complete_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(hook_config)
        app.status.complete_cert = generated_cert.cert
        app.status.complete_key = generated_cert.key
    hook = Complete(
        api_endpoint,
        ssl_context,
        hook_user=hook_config.hook_user,
        cert_dest=hook_config.cert_dest,
        env_token=hook_config.env_token,
        env_url=hook_config.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.complete_token,
        app.status.last_applied_manifest,
        hook_config.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "complete",
    )
@listen.on(HookType.ApplicationMangling)
async def shutdown(app, api_endpoint, ssl_context, config):
    """Executes an application shutdown hook defined by :class:`Shutdown`.

    The hook mangles the given application and injects shutdown hook variables.
    The application shutdown hook is disabled by default and enabled by the
    user through the --hook-shutdown argument in the rok cli.

    Args:
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API
            endpoint
        config (krake.data.config.HooksConfiguration): Shutdown hook
            configuration.
    """
    if "shutdown" not in app.spec.hooks:
        return
    hook_config = config.shutdown
    # An external endpoint, when configured, takes precedence over the API one.
    if hook_config.external_endpoint:
        api_endpoint = hook_config.external_endpoint
    # The token is generated only once per Application and then reused.
    if not app.status.shutdown_token:
        app.status.shutdown_token = token_urlsafe()
    # Generate only once the certificate and key for a specific Application
    generated_cert = CertificatePair(
        cert=app.status.shutdown_cert, key=app.status.shutdown_key
    )
    if ssl_context and generated_cert == (None, None):
        generated_cert = generate_certificate(hook_config)
        app.status.shutdown_cert = generated_cert.cert
        app.status.shutdown_key = generated_cert.key
    hook = Shutdown(
        api_endpoint,
        ssl_context,
        hook_user=hook_config.hook_user,
        cert_dest=hook_config.cert_dest,
        env_token=hook_config.env_token,
        env_url=hook_config.env_url,
    )
    hook.mangle_app(
        app.metadata.name,
        app.metadata.namespace,
        app.status.shutdown_token,
        app.status.last_applied_manifest,
        hook_config.intermediate_src,
        generated_cert,
        app.status.mangled_observer_schema,
        "shutdown",
    )
@listen.on(HookType.ResourcePreDelete)
async def pre_shutdown(controller, app, **kwargs):
    """Hook run before the deletion of an application.

    Currently a placeholder: it only checks whether the "shutdown" hook is
    enabled for the application and performs no further action. The duplicated,
    unreachable ``return`` statement was removed.

    Args:
        controller (KubernetesController): the controller processing the
            deletion.
        app (krake.data.kubernetes.Application): Application object processed
            when the hook is called
    """
    if "shutdown" not in app.spec.hooks:
        return
class SubResource(NamedTuple):
    """A piece of a Kubernetes resource injected by a hook into an application's
    manifest (e.g. an environment variable or a volume), together with the
    location where it must be inserted.
    """

    # Manifest key the body is appended under (e.g. "env", "volumes")
    group: str
    # Name identifying the sub-resource itself
    name: str
    # Manifest snippet to insert
    body: dict
    # Key-paths within the resource where ``group`` is located,
    # e.g. (("spec", "containers"),)
    path: tuple
class CertificatePair(NamedTuple):
    """Tuple which contains a certificate and its corresponding key.

    Attributes:
        cert (str): content of a certificate.
        key (str): content of the key that corresponds to the certificate.
    """

    # PEM-encoded certificate content (see generate_certificate)
    cert: str
    # PEM-encoded private key matching ``cert``
    key: str
class Hook(object):
hook_resources = ()
ca_name = "ca-bundle.pem"
cert_name = "cert.pem"
key_name = "key.pem"
def __init__(
self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
):
self.api_endpoint = api_endpoint
self.ssl_context = ssl_context
self.hook_user = hook_user
self.cert_dest = cert_dest
self.env_token = env_token
self.env_url = env_url
def mangle_app(
self,
name,
namespace,
token,
last_applied_manifest,
intermediate_src,
generated_cert,
mangled_observer_schema,
hook_type="",
):
"""Mangle a given application and inject complete hook resources and
sub-resources into the :attr:`last_applied_manifest` object by :meth:`mangle`.
Also mangle the observer_schema as new resources and sub-resources should
be observed.
:attr:`last_applied_manifest` is created as a deep copy of the desired
application resources, as defined by user. It can be updated by custom hook
resources or modified by custom hook sub-resources. It is used as a desired
state for the Krake deployment process.
Args:
name (str): Application name
namespace (str): Application namespace
token (str): Complete hook authentication token
last_applied_manifest (list): Application resources
intermediate_src (str): content of the certificate that is used to sign new
certificates for the complete hook.
generated_cert (CertificatePair): tuple that contains the content of the
new signed certificate for the Application, and the content of its
corresponding key.
mangled_observer_schema (list): Observed fields
hook_type (str, optional): Name of the hook the app should be mangled for
"""
secret_certs_name = "-".join([name, "krake", hook_type, "secret", "certs"])
secret_token_name = "-".join([name, "krake", hook_type, "secret", "token"])
volume_name = "-".join([name, "krake", hook_type, "volume"])
ca_certs = (
self.ssl_context.get_ca_certs(binary_form=True)
if self.ssl_context
else None
)
# Extract all different namespaces
# FIXME: too many assumptions here: do we create one ConfigMap for each
# namespace?
resource_namespaces = {
resource["metadata"].get("namespace", "default")
for resource in last_applied_manifest
}
hook_resources = []
hook_sub_resources = []
if ca_certs:
hook_resources.extend(
[
self.secret_certs(
secret_certs_name,
resource_namespace,
intermediate_src=intermediate_src,
generated_cert=generated_cert,
ca_certs=ca_certs,
)
for resource_namespace in resource_namespaces
]
)
hook_sub_resources.extend(
[*self.volumes(secret_certs_name, volume_name, self.cert_dest)]
)
hook_resources.extend(
[
self.secret_token(
secret_token_name,
name,
namespace,
resource_namespace,
self.api_endpoint,
token,
)
for resource_namespace in resource_namespaces
]
)
hook_sub_resources.extend(
[
*self.env_vars(secret_token_name),
]
)
self.mangle(
hook_resources,
last_applied_manifest,
mangled_observer_schema,
)
self.mangle(
hook_sub_resources,
last_applied_manifest,
mangled_observer_schema,
is_sub_resource=True,
)
def mangle(
    self,
    items,
    last_applied_manifest,
    mangled_observer_schema,
    is_sub_resource=False,
):
    """Mangle applications desired state with custom hook resources or
    sub-resources.

    Example:
        .. code:: python

        last_applied_manifest = [
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {'name': 'test', 'namespace': 'default'},
                'spec': {'containers': [{'name': 'test'}]}
            }
        ]
        mangled_observer_schema = [
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {'name': 'test', 'namespace': 'default'},
                'spec': {
                    'containers': [
                        {'name': None},
                        {
                            'observer_schema_list_max_length': 1,
                            'observer_schema_list_min_length': 1,
                        },
                    ]
                },
            }
        ]
        hook_resources = [
            {
                'apiVersion': 'v1',
                'kind': 'Secret',
                'metadata': {'name': 'sct', 'namespace': 'default'}
            }
        ]
        hook_sub_resources = [
            SubResource(
                group='env', name='env', body={'name': 'test', 'value': 'test'},
                path=(('spec', 'containers'),)
            )
        ]

        mangle(
            hook_resources,
            last_applied_manifest,
            mangled_observer_schema,
        )
        mangle(
            hook_sub_resources,
            last_applied_manifest,
            mangled_observer_schema,
            is_sub_resource=True
        )

        assert last_applied_manifest == [
            {
                "apiVersion": "v1",
                "kind": "Pod",
                "metadata": {"name": "test", 'namespace': 'default'},
                "spec": {
                    "containers": [
                        {
                            "name": "test",
                            "env": [{"name": "test", "value": "test"}]
                        }
                    ]
                },
            },
            {"apiVersion": "v1", "kind": "Secret", "metadata": {"name": "sct"}},
        ]
        assert mangled_observer_schema == [
            {
                "apiVersion": "v1",
                "kind": "Pod",
                "metadata": {"name": "test", "namespace": None},
                "spec": {
                    "containers": [
                        {
                            "name": None,
                            "env": [
                                {"name": None, "value": None},
                                {
                                    "observer_schema_list_max_length": 1,
                                    "observer_schema_list_min_length": 1,
                                },
                            ],
                        },
                        {
                            "observer_schema_list_max_length": 1,
                            "observer_schema_list_min_length": 1,
                        },
                    ]
                },
            },
            {
                "apiVersion": "v1",
                "kind": "Secret",
                "metadata": {"name": "sct", "namespace": None},
            },
        ]

    Args:
        items (list[SubResource]): Custom hook resources or sub-resources
        last_applied_manifest (list): Application resources
        mangled_observer_schema (list): Observed resources
        is_sub_resource (bool, optional): if False, the function only extend the
            list of Kubernetes resources defined in :attr:`last_applied_manifest`
            with new hook resources. Otherwise, the function injects each new hook
            sub-resource into the :attr:`last_applied_manifest` object
            sub-resources. Defaults to False.

    """
    # Nothing to mangle.
    if not items:
        return

    # Full resources (e.g. Secrets) are simply appended to the manifest and
    # get a default observer schema; no injection into existing resources.
    if not is_sub_resource:
        last_applied_manifest.extend(items)
        for sub_resource in items:
            # Generate the default observer schema for each resource
            mangled_observer_schema.append(
                generate_default_observer_schema_dict(
                    sub_resource,
                    first_level=True,
                )
            )
        return

    def inject(sub_resource, sub_resource_to_mangle, observed_resource_to_mangle):
        """Inject a hooks defined sub-resource into a Kubernetes sub-resource.

        Args:
            sub_resource (SubResource): Hook sub-resource that needs to be injected
                into :attr:`last_applied_manifest`
            sub_resource_to_mangle (object): Kubernetes sub-resources from
                :attr:`last_applied_manifest` which need to be processed
            observed_resource_to_mangle (dict): partial mangled_observer_schema
                corresponding to the Kubernetes sub-resource.

        Raises:
            InvalidManifestError: if the sub-resource which will be mangled is not a
                list or a dict.

        """
        # Create sub-resource group if not present in the Kubernetes sub-resource
        if sub_resource.group not in sub_resource_to_mangle:
            # FIXME: This assumes the subresource group contains a list
            sub_resource_to_mangle.update({sub_resource.group: []})

        # Create sub-resource group if not present in the observed fields.
        # The trailing control dict holds the list-length constraints.
        if sub_resource.group not in observed_resource_to_mangle:
            observed_resource_to_mangle.update(
                {
                    sub_resource.group: [
                        {
                            "observer_schema_list_min_length": 0,
                            "observer_schema_list_max_length": 0,
                        }
                    ]
                }
            )

        # Inject sub-resource
        # If sub-resource name is already there update it, if not, append it
        if sub_resource.name in [
            g["name"] for g in sub_resource_to_mangle[sub_resource.group]
        ]:
            # FIXME: Assuming we are dealing with a list
            # NOTE(review): this update branch looks suspect: `item["name"]`
            # is a truthiness test, not a comparison with sub_resource.name;
            # `hasattr(item, "body")` is always False for plain dicts; and
            # `item.group`/`item["body"]` mix attribute and key access.
            # Verify the intended behavior before relying on updates.
            for idx, item in enumerate(sub_resource_to_mangle[sub_resource.group]):
                if item["name"]:
                    if hasattr(item, "body"):
                        sub_resource_to_mangle[item.group][idx] = item["body"]
        else:
            sub_resource_to_mangle[sub_resource.group].append(sub_resource.body)

        # Make sure the value is observed: everything before the last element
        # of the observed group list are observed items; the last element is
        # the control dict whose min/max lengths are incremented.
        if sub_resource.name not in [
            g["name"] for g in observed_resource_to_mangle[sub_resource.group][:-1]
        ]:
            observed_resource_to_mangle[sub_resource.group].insert(
                -1, generate_default_observer_schema_dict(sub_resource.body)
            )
            observed_resource_to_mangle[sub_resource.group][-1][
                "observer_schema_list_min_length"
            ] += 1
            observed_resource_to_mangle[sub_resource.group][-1][
                "observer_schema_list_max_length"
            ] += 1

    for resource in last_applied_manifest:
        # Complete hook is applied only on defined Kubernetes resources
        if resource["kind"] not in self.hook_resources:
            continue

        for sub_resource in items:
            sub_resources_to_mangle = None
            idx_observed = get_kubernetes_resource_idx(
                mangled_observer_schema, resource
            )

            # Try each candidate path in order; the first one present in the
            # resource wins (`keys` keeps that winning path after the loop).
            for keys in sub_resource.path:
                try:
                    sub_resources_to_mangle = reduce(getitem, keys, resource)
                except KeyError:
                    continue
                break

            # Create the path to the observed sub-resource, if it doesn't yet exist
            try:
                observed_sub_resources = reduce(
                    getitem, keys, mangled_observer_schema[idx_observed]
                )
            except KeyError:
                # NOTE(review): calls the staticmethod through the Complete
                # subclass even though it is defined on the base class;
                # Hook.create_path would be equivalent — confirm intent.
                Complete.create_path(
                    mangled_observer_schema[idx_observed], list(keys)
                )
                observed_sub_resources = reduce(
                    getitem, keys, mangled_observer_schema[idx_observed]
                )

            if isinstance(sub_resources_to_mangle, list):
                for idx, sub_resource_to_mangle in enumerate(
                    sub_resources_to_mangle
                ):
                    # Ensure that each element of the list is observed.
                    idx_observed = idx
                    if idx >= len(observed_sub_resources[:-1]):
                        idx_observed = len(observed_sub_resources[:-1])
                        # FIXME: Assuming each element of the list contains a
                        # dictionary, therefore initializing new elements with an
                        # empty dict
                        observed_sub_resources.insert(-1, {})
                    observed_sub_resource = observed_sub_resources[idx_observed]
                    # FIXME: This is assuming a list always contains dict
                    inject(
                        sub_resource, sub_resource_to_mangle, observed_sub_resource
                    )
            elif isinstance(sub_resources_to_mangle, dict):
                inject(
                    sub_resource, sub_resources_to_mangle, observed_sub_resources
                )
            else:
                message = (
                    f"The sub-resource to mangle {sub_resources_to_mangle!r} has an"
                    "invalid type, should be in '[dict, list]'"
                )
                raise InvalidManifestError(message)
@staticmethod
def attribute_map(obj):
    """Convert a Kubernetes object to dict based on its attribute mapping.

    Example:
        .. code:: python

        from kubernetes_asyncio.client import V1VolumeMount

        d = attribute_map(
            V1VolumeMount(name="name", mount_path="path")
        )
        assert d == {'mountPath': 'path', 'name': 'name'}

    Args:
        obj (object): Kubernetes object

    Returns:
        dict: Converted Kubernetes object

    """
    # Iterate only the attribute names of the serialized representation:
    # the serialized values are unused, the actual values are read via
    # getattr() so nested objects are returned as-is. Attributes whose
    # value is None are dropped from the result.
    return {
        obj.attribute_map[attr]: getattr(obj, attr)
        for attr in obj.to_dict()
        if getattr(obj, attr) is not None
    }
@staticmethod
def create_path(mangled_observer_schema, keys):
    """Create the path to the observed field in the observer schema.

    When a sub-resource is mangled, it should be observed. This method walks
    down the partial observer schema, creating intermediate dictionaries for
    every key of the path, and marks the final key as observed (``None``).

    Args:
        mangled_observer_schema (dict): Partial observer schema of a resource
        keys (list): list of keys forming the path to the sub-resource to
            observe. The list is consumed (emptied) by this call.

    FIXME: This assumes we are only adding keys to dict. We don't consider lists
    """
    node = mangled_observer_schema
    # Descend through all intermediate keys, creating empty dicts as needed.
    while len(keys) > 1:
        node = node.setdefault(keys.pop(0), {})
    # The last key marks the observed field itself.
    node[keys.pop(0)] = None
def secret_certs(
    self,
    secret_name,
    namespace,
    ca_certs=None,
    intermediate_src=None,
    generated_cert=None,
):
    """Create a complete hooks secret resource.

    The Secret stores the Krake CA chain plus the client certificate and key
    used to communicate with the Krake API.

    Args:
        secret_name (str): Secret name
        namespace (str): Kubernetes namespace where the Secret will be created.
        ca_certs (list): Krake CA list (DER-encoded certificates)
        intermediate_src (str): content of the certificate that is used to sign new
            certificates for the complete hook.
        generated_cert (CertificatePair): tuple that contains the content of the
            new signed certificate for the Application, and the content of its
            corresponding key.

    Returns:
        dict: complete hook secret resource

    """
    # Convert every CA certificate from DER to PEM and concatenate them.
    pem_chain = ""
    for der_cert in ca_certs:
        cert_obj = crypto.load_certificate(crypto.FILETYPE_ASN1, der_cert)
        pem_chain += crypto.dump_certificate(crypto.FILETYPE_PEM, cert_obj).decode()

    # Add the intermediate certificate into the chain
    with open(intermediate_src, "r") as intermediate_file:
        pem_chain += intermediate_file.read()

    secret_data = {
        self.ca_name: self._encode_to_64(pem_chain),
        self.cert_name: self._encode_to_64(generated_cert.cert),
        self.key_name: self._encode_to_64(generated_cert.key),
    }
    return self.secret(secret_name, secret_data, namespace)
def secret_token(
    self, secret_name, name, namespace, resource_namespace, api_endpoint, token
):
    """Create a hooks secret resource.

    The hook secret stores Krake authentication token
    and hook URL for given application.

    Template method: this base implementation is a no-op (returns ``None``);
    concrete hooks such as ``Complete`` and ``Shutdown`` override it.

    Args:
        secret_name (str): Secret name
        name (str): Application name
        namespace (str): Application namespace
        resource_namespace (str): Kubernetes namespace where the
            Secret will be created.
        api_endpoint (str): Krake API endpoint
        token (str): Complete hook authentication token

    Returns:
        dict: complete hook secret resource

    """
    pass
def volumes(self, secret_name, volume_name, mount_path):
    """Create complete hooks volume and volume mount sub-resources.

    The volume exposes the hook's Secret (Krake CAs plus client certificate
    and key used to talk to the Krake API); the volume mount attaches that
    volume inside the application containers.

    Args:
        secret_name (str): Secret name
        volume_name (str): Volume name
        mount_path (list): Volume mount path

    Returns:
        list: List of complete hook volume and volume mount sub-resources

    """
    secret_volume = V1Volume(name=volume_name, secret={"secretName": secret_name})
    secret_volume_mount = V1VolumeMount(name=volume_name, mount_path=mount_path)

    volume_sub_resource = SubResource(
        group="volumes",
        name=secret_volume.name,
        body=self.attribute_map(secret_volume),
        path=(("spec", "template", "spec"), ("spec",)),
    )
    mount_sub_resource = SubResource(
        group="volumeMounts",
        name=secret_volume_mount.name,
        body=self.attribute_map(secret_volume_mount),
        path=(
            ("spec", "template", "spec", "containers"),
            ("spec", "containers"),  # kind: Pod
        ),
    )
    return [volume_sub_resource, mount_sub_resource]
@staticmethod
def _encode_to_64(string):
    """Compute the base 64 encoding of a string.

    Args:
        string (str): the string to encode.

    Returns:
        str: the result of the encoding.

    """
    raw = string.encode()
    return b64encode(raw).decode()
def secret(self, secret_name, secret_data, namespace, _type="Opaque"):
    """Create a secret resource.

    Args:
        secret_name (str): Secret name
        secret_data (dict): Secret data
        namespace (str): Kubernetes namespace where the Secret will be created.
        _type (str, optional): Secret type. Defaults to Opaque.

    Returns:
        dict: secret resource

    """
    metadata = {"name": secret_name, "namespace": namespace}
    resource = V1Secret(
        api_version="v1",
        kind="Secret",
        data=secret_data,
        metadata=metadata,
        type=_type,
    )
    return self.attribute_map(resource)
@staticmethod
def create_hook_url(name, namespace, api_endpoint):
    """Create an applications' hook URL.

    Template method: this base implementation is a no-op (returns ``None``);
    each hook (e.g. ``Complete``, ``Shutdown``) provides its own URL.

    Args:
        name (str): Application name
        namespace (str): Application namespace
        api_endpoint (str): Krake API endpoint

    Returns:
        str: Application shutdown url

    """
    pass
def env_vars(self, secret_name):
    """Create the hooks' environment variables sub-resources.

    Template method: this base implementation is a no-op (returns ``None``);
    each hook (e.g. ``Complete``, ``Shutdown``) builds its own environment
    variables to store the Krake authentication token and hook URL.

    Args:
        secret_name (str): Secret name

    Returns:
        list: List of shutdown hook environment variables sub-resources

    """
    pass
class Complete(Hook):
    """Mangle given application and inject complete hooks variables into it.

    The hook injects a Kubernetes Secret storing the Krake authentication
    token and the Krake complete hook URL for the given application. Those
    values are imported as environment variables into the application
    resource definition. Only resources listed in :attr:`hook_resources`
    can be modified.

    The environment variable names come from the application controller
    configuration file.

    If TLS is enabled on the Krake API, the complete hook also injects a
    Kubernetes Secret — with the corresponding volume and volume mount — for
    the Krake CA, the client certificate with the right CN, and its key. The
    mount directory for that Secret is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API endpoint
        hook_user (str): user set for the hook's certificates
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which stores Krake
            authentication token.
        env_url (str, optional): Name of the environment variable,
            which stores Krake complete hook URL.

    """

    hook_resources = ("Pod", "Deployment", "ReplicationController")

    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        super().__init__(
            api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
        )
        self.env_url = env_url

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create complete hooks secret resource.

        The Secret stores the Krake authentication token and the complete
        hook URL for the given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Complete hook authentication token

        Returns:
            dict: complete hook secret resource

        """
        complete_url = self.create_hook_url(name, namespace, api_endpoint)
        secret_data = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(complete_url),
        }
        return self.secret(secret_name, secret_data, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' complete URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application complete url

        """
        hook_path = f"/kubernetes/namespaces/{namespace}/applications/{name}/complete"
        return str(URL(api_endpoint).with_path(hook_path))

    def _secret_env_var(self, secret_name, env_name):
        """Build one V1EnvVar whose value is read from a key of the Secret."""
        selector = V1SecretKeySelector(name=secret_name, key=env_name.lower())
        source = V1EnvVarSource(secret_key_ref=self.attribute_map(selector))
        return V1EnvVar(name=env_name, value_from=self.attribute_map(source))

    def env_vars(self, secret_name):
        """Create complete hooks environment variables sub-resources.

        The variables expose the Krake authentication token and the complete
        hook URL (stored in the hook Secret) to the application containers.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of complete hook environment variables sub-resources

        """
        injection_paths = (
            ("spec", "template", "spec", "containers"),
            ("spec", "containers"),  # kind: Pod
        )
        return [
            SubResource(
                group="env",
                name=env.name,
                body=self.attribute_map(env),
                path=injection_paths,
            )
            for env in (
                self._secret_env_var(secret_name, self.env_token),
                self._secret_env_var(secret_name, self.env_url),
            )
        ]
class Shutdown(Hook):
    """Mangle given application and inject shutdown hooks variables into it.

    Hook injects a Kubernetes secret, which stores Krake authentication token
    and the Krake shutdown hook URL for the given application. The variables
    from the Kubernetes secret are imported as environment variables
    into the application resource definition. Only resources defined in
    :args:`hook_resources` can be modified.

    Names of environment variables are defined in the application controller
    configuration file.

    If TLS is enabled on the Krake API, the shutdown hook injects a Kubernetes secret,
    and it's corresponding volume and volume mount definitions for the Krake CA,
    the client certificate with the right CN, and its key. The directory where the
    secret is mounted is defined in the configuration.

    Args:
        api_endpoint (str): the given API endpoint
        ssl_context (ssl.SSLContext): SSL context to communicate with the API endpoint
        hook_user (str): user set for the hook's certificates
        cert_dest (str, optional): Path of the directory where the CA, client
            certificate and key to the Krake API will be stored.
        env_token (str, optional): Name of the environment variable, which stores Krake
            authentication token.
        env_url (str, optional): Name of the environment variable,
            which stores Krake shutdown hook URL.

    """

    hook_resources = ("Pod", "Deployment", "ReplicationController")

    def __init__(
        self, api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
    ):
        super().__init__(
            api_endpoint, ssl_context, hook_user, cert_dest, env_token, env_url
        )
        self.env_url = env_url

    def secret_token(
        self, secret_name, name, namespace, resource_namespace, api_endpoint, token
    ):
        """Create shutdown hooks secret resource.

        Shutdown hook secret stores Krake authentication token
        and shutdown hook URL for given application.

        Args:
            secret_name (str): Secret name
            name (str): Application name
            namespace (str): Application namespace
            resource_namespace (str): Kubernetes namespace where the
                Secret will be created.
            api_endpoint (str): Krake API endpoint
            token (str): Shutdown hook authentication token

        Returns:
            dict: shutdown hook secret resource

        """
        shutdown_url = self.create_hook_url(name, namespace, api_endpoint)
        data = {
            self.env_token.lower(): self._encode_to_64(token),
            self.env_url.lower(): self._encode_to_64(shutdown_url),
        }
        return self.secret(secret_name, data, resource_namespace)

    @staticmethod
    def create_hook_url(name, namespace, api_endpoint):
        """Create an applications' shutdown URL.

        Args:
            name (str): Application name
            namespace (str): Application namespace
            api_endpoint (str): Krake API endpoint

        Returns:
            str: Application shutdown url

        """
        api_url = URL(api_endpoint)
        return str(
            api_url.with_path(
                f"/kubernetes/namespaces/{namespace}/applications/{name}/shutdown"
            )
        )

    def env_vars(self, secret_name):
        """Create shutdown hooks environment variables sub-resources.

        Creates shutdown hook environment variables to store Krake authentication token
        and a shutdown hook URL for given applications.

        Args:
            secret_name (str): Secret name

        Returns:
            list: List of shutdown hook environment variables sub-resources

        """
        # Both variables read their value from a key of the hook Secret
        # (same structure as Complete.env_vars).
        env_token = V1EnvVar(
            name=self.env_token,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(
                            name=secret_name, key=self.env_token.lower()
                        )
                    )
                )
            ),
        )
        env_url = V1EnvVar(
            name=self.env_url,
            value_from=self.attribute_map(
                V1EnvVarSource(
                    secret_key_ref=self.attribute_map(
                        V1SecretKeySelector(name=secret_name, key=self.env_url.lower())
                    )
                )
            ),
        )
        sub_resources = []
        for env in (env_token, env_url):
            sub_resources.append(
                SubResource(
                    group="env",
                    name=env.name,
                    body=self.attribute_map(env),
                    path=(
                        ("spec", "template", "spec", "containers"),
                        ("spec", "containers"),  # kind: Pod
                    ),
                )
            )
        return sub_resources
|
krake/krake/controller/kubernetes/hooks.py
|
codereval_python_data_93
|
Creates a configuration with some simple parameters, which have a default value
that can be set.
Args:
user (str): the name of the user for the static authentication
etcd_host (str): the host for the database.
etcd_port (int): the port for the database.
Returns:
dict: the created configuration.
def base_config(user, etcd_host="localhost", etcd_port=2379):
    """Build a simple API configuration dictionary with sensible test defaults.

    Args:
        user (str): the name of the user for the static authentication
        etcd_host (str): the host for the database.
        etcd_port (int): the port for the database.

    Returns:
        dict: the created configuration.

    """
    tls_section = {
        "enabled": False,
        "cert": "cert_path",
        "key": "key_path",
        "client_ca": "client_ca_path",
    }
    # Only the "static" strategy is enabled; keystone/keycloak are stubs.
    strategy_section = {
        "keystone": {"enabled": False, "endpoint": "http://localhost"},
        "keycloak": {
            "enabled": False,
            "endpoint": "no_endpoint",
            "realm": "krake",
        },
        "static": {"enabled": True, "name": user},
    }
    return {
        "tls": tls_section,
        "authentication": {
            "allow_anonymous": True,
            "strategy": strategy_section,
            "cors_origin": "http://example.com",
        },
        "authorization": "always-allow",
        "etcd": {"host": etcd_host, "port": etcd_port, "retry_transactions": 0},
        "docs": {"problem_base_url": "http://example.com/problem"},
        "log": {},
    }
import asyncio
import os
import random
import signal
import sys
import subprocess
import urllib
from io import BytesIO
from copy import deepcopy
from tempfile import TemporaryDirectory
from pathlib import Path
from typing import NamedTuple, List
import time
import logging.config
from textwrap import dedent
import json
from zipfile import ZipFile
import requests
import pytest
import aiohttp
import shutil
from aiohttp import web
from krake.controller import create_ssl_context
from prometheus_async import aio
from prometheus_client import Gauge, CollectorRegistry, CONTENT_TYPE_LATEST
from contextlib import suppress
# Prepend package directory for working imports
from krake.data.config import (
ApiConfiguration,
HooksConfiguration,
MagnumConfiguration,
TlsClientConfiguration,
ControllerConfiguration,
SchedulerConfiguration,
KubernetesConfiguration,
)
# Make the package sources importable when the tests are run from the
# repository checkout.
package_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, package_dir)

# Route DEBUG-level output of the "krake" logger to stderr during the run.
logging.config.dictConfig(
    {
        "version": 1,
        "handlers": {"console": {"class": "logging.StreamHandler", "level": "DEBUG"}},
        "loggers": {"krake": {"handlers": ["console"]}},
    }
)
def pytest_addoption(parser):
    """Register :mod:`argparse`-style options and ini-style config values for pytest.

    Called once at the beginning of a test run. Adds the ``--runslow`` flag
    that enables the tests marked as slow.

    Args:
        parser (pytest.config.Parser): pytest parser

    """
    parser.addoption(
        "--runslow",
        action="store_true",
        default=False,
        help="run slow tests",
    )
def pytest_configure(config):
    """Allows plugins and conftest files to perform initial configuration.

    Registers the custom ``slow`` and ``timeout`` markers.

    Args:
        config (pytest.config.Config): config object

    """
    for marker_line in (
        "slow: mark test as slow to run",
        "timeout(time): mark async test with maximal duration",
    ):
        config.addinivalue_line("markers", marker_line)
def pytest_collection_modifyitems(config, items):
    """Called after pytest collection has been performed, may filter or
    re-order the items in-place.

    Unless ``--runslow`` was given, attaches a skip marker to every test
    marked as ``slow``.

    Args:
        config (pytest.config.Config): config object
        items (List[pytest.nodes.Item]): list of test item objects

    """
    if config.getoption("--runslow"):
        return
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)
def wait_for_url(url, timeout=5, condition=None):
    """Wait until an URL endpoint is reachable.

    The ``condition`` callable takes the HTTP response as argument and checks if it
    suits a certain format.
    The signature of ``condition`` is:

    .. function:: my_condition(response)

        :param requests.Response response: the Response object of the HTTP request
        :return: true if the condition is met
        :rtype: bool

    Args:
        url (str): URL endpoint
        timeout (int, optional): Timeout. Defaults to 5s
        condition (callable, optional): Condition that has to be met.

    Raises:
        TimeoutError: When timeout is reached

    """
    deadline = time.time() + timeout
    while True:
        try:
            response = requests.get(url)
            # A failed status or an unmet condition is treated like an
            # unreachable endpoint and retried until the deadline.
            assert response.status_code == 200
            if condition:
                assert condition(response)
        except (requests.ConnectionError, AssertionError):
            time.sleep(0.1)
            if time.time() > deadline:
                raise TimeoutError(f"Cannot connect to {url}")
        else:
            return
async def await_for_url(url, loop, timeout=5):
    """Asynchronously wait until an URL answers successfully.

    Args:
        url (str): URL endpoint to poll.
        loop: event loop, used as clock for the timeout.
        timeout (int, optional): maximal waiting time in seconds.

    Returns:
        aiohttp.ClientResponse: the first successful response.

    Raises:
        TimeoutError: when the endpoint could not be reached in time.

    """
    deadline = loop.time() + timeout
    while True:
        try:
            async with aiohttp.ClientSession(raise_for_status=True) as client:
                response = await client.get(url)
        except aiohttp.ClientError:
            await asyncio.sleep(0.1)
            if loop.time() > deadline:
                raise TimeoutError(f"Cannot connect to {url!r}")
        else:
            return response
@pytest.fixture(scope="session")
def etcd_server():
    """Run a throw-away etcd server in a subprocess for the whole test session.

    Yields:
        (str, int): host and client port of the running etcd instance.
    """

    def check_etcd_health(response):
        # The /health endpoint answers {"health": "true"} once etcd is ready;
        # malformed or incomplete answers simply count as "not healthy yet".
        with suppress(json.decoder.JSONDecodeError):
            jresp = response.json()
            with suppress(KeyError):
                return jresp["health"] == "true"
        return False

    etcd_host = "127.0.0.1"
    # Non-default port (2379 is etcd's default) to avoid clashing with a
    # locally running etcd; the peer port is etcd_port + 1.
    etcd_port = 3379

    with TemporaryDirectory() as tmpdir:
        command = [
            "etcd",
            "--data-dir",
            tmpdir,
            "--name",
            "krake-testing",
            "--listen-client-urls",
            f"http://{etcd_host}:{etcd_port}",
            "--advertise-client-urls",
            f"http://{etcd_host}:{etcd_port}",
            "--listen-peer-urls",
            f"http://{etcd_host}:{etcd_port + 1}",
            "--initial-advertise-peer-urls",
            f"http://{etcd_host}:{etcd_port + 1}",
        ]
        with subprocess.Popen(command) as proc:
            try:
                # Block until etcd reports itself healthy.
                wait_for_url(
                    f"http://{etcd_host}:{etcd_port}/health",
                    condition=check_etcd_health,
                )
                yield etcd_host, etcd_port
            finally:
                proc.terminate()
@pytest.fixture
async def etcd_client(etcd_server, loop):
    """Async etcd client connected to the session-scoped test server.

    The whole keyspace is wiped after the test, so tests do not leak state
    into each other.
    """
    # Use the patched etcd3 client (see #293)
    from krake.api.database import EtcdClient

    host, port = etcd_server
    async with EtcdClient(host=host, port=port) as client:
        yield client
        # Clean-up: delete every key written during the test.
        await client.delete_range(all=True)
@pytest.fixture
async def db(etcd_server, etcd_client, loop):
    """Database session bound to the session-scoped etcd test server.

    Depends on ``etcd_client`` so the keyspace gets cleaned after the test.
    """
    from krake.api.database import Session

    host, port = etcd_server
    async with Session(host=host, port=port, loop=loop) as session:
        yield session
@pytest.fixture
def user():
    """Name of the user used for static authentication in the test configs."""
    return "testuser"
def base_config(user, etcd_host="localhost", etcd_port=2379):
    """Build a simple API configuration dictionary with sensible test defaults.

    Args:
        user (str): the name of the user for the static authentication
        etcd_host (str): the host for the database.
        etcd_port (int): the port for the database.

    Returns:
        dict: the created configuration.

    """
    tls_section = {
        "enabled": False,
        "cert": "cert_path",
        "key": "key_path",
        "client_ca": "client_ca_path",
    }
    # Only the "static" strategy is enabled; keystone/keycloak are stubs.
    strategy_section = {
        "keystone": {"enabled": False, "endpoint": "http://localhost"},
        "keycloak": {
            "enabled": False,
            "endpoint": "no_endpoint",
            "realm": "krake",
        },
        "static": {"enabled": True, "name": user},
    }
    return {
        "tls": tls_section,
        "authentication": {
            "allow_anonymous": True,
            "strategy": strategy_section,
            "cors_origin": "http://example.com",
        },
        "authorization": "always-allow",
        "etcd": {"host": etcd_host, "port": etcd_port, "retry_transactions": 0},
        "docs": {"problem_base_url": "http://example.com/problem"},
        "log": {},
    }
@pytest.fixture
def config(etcd_server, user):
    """Generate a default configuration for the API, which leverages a test instance of
    etcd.

    Args:
        etcd_server ((str, int)): the information to connect to the etcd instance.
        user (str): the name of the user for the static authentication.

    Returns:
        ApiConfiguration: the generated configuration.

    """
    etcd_host, etcd_port = etcd_server
    raw_config = base_config(user, etcd_host=etcd_host, etcd_port=etcd_port)
    return ApiConfiguration.deserialize(raw_config, creation_ignored=True)
@pytest.fixture
def no_db_config(user):
    """Create a configuration for the API component without database being created and
    running in the background.

    Args:
        user (str): the name of the user for the static authentication.

    Returns:
        ApiConfiguration: the generated configuration.

    """
    return ApiConfiguration.deserialize(base_config(user), creation_ignored=True)
@pytest.fixture
def log_to_file_config(tmp_path):
    """Returns a function that can generate a dictionary that can be used as
    configuration for the logging module. Such a dictionary is part of the Krake
    components configuration.

    The generated configuration sets the "INFO" log level, and only logs to a file.
    The path to the file can be provided; otherwise a file inside a temporary
    directory is used by default.

    FIXME: This should be removed when issue #282 has been closed.
    """
    default_file_path = str(tmp_path / "krake.log")

    def generate_log_config(file_path=None):
        """Generate the actual dictionary for logging.

        Args:
            file_path (str): path to the file to which the logs will be written. If
                not specified, a temporary file is used by default.

        Returns:
            (dict[str, Any], str): a tuple that contains first the generated
                dictionary, and second the path to the file where the logs will be
                written.

        """
        target_path = default_file_path if file_path is None else file_path
        log_format = "%(asctime)s - [%(name)s] - [%(levelname)-5s] - %(message)s"
        log_config = {
            "version": 1,
            "level": "INFO",
            "formatters": {"krake": {"format": log_format}},
            "handlers": {
                "file": {
                    "class": "logging.FileHandler",
                    "formatter": "krake",
                    "filename": target_path,
                }
            },
            "loggers": {"krake": {"handlers": ["file"], "propagate": False}},
        }
        return log_config, target_path

    return generate_log_config
@pytest.fixture
def tls_client_config():
    """Create a configuration for the "tls" field in the controllers configuration.

    Returns:
        TlsClientConfiguration: the created configuration.

    """
    raw_config = dict(
        enabled=False,
        client_cert="cert_path",
        client_key="key_path",
        client_ca="client_ca_path",
    )
    return TlsClientConfiguration.deserialize(raw_config, creation_ignored=True)
@pytest.fixture
def gc_config(tls_client_config):
    """Create a configuration for the Garbage Collector.

    Returns:
        ControllerConfiguration: the created configuration.

    """
    return ControllerConfiguration.deserialize(
        {"tls": tls_client_config.serialize(), "log": {}}, creation_ignored=True
    )
@pytest.fixture
def kube_config(tls_client_config):
    """Create a configuration for the Kubernetes Controller.

    Returns:
        KubernetesConfiguration: the created configuration.

    """
    # Both hooks use the same intermediate certificate pair.
    hook_certs = {
        "intermediate_src": "/etc/krake/certs/kube.pem",
        "intermediate_key_src": "/etc/krake/certs/kube-key.pem",
    }
    raw_config = {
        "tls": tls_client_config.serialize(),
        "hooks": {
            "complete": dict(hook_certs),
            "shutdown": dict(hook_certs),
        },
        "log": {},
    }
    return KubernetesConfiguration.deserialize(raw_config, creation_ignored=True)
@pytest.fixture
def magnum_config(tls_client_config):
    """Create a configuration for the Magnum Controller.

    Returns:
        MagnumConfiguration: the created configuration.

    """
    return MagnumConfiguration.deserialize(
        {"tls": tls_client_config.serialize(), "log": {}}, creation_ignored=True
    )
@pytest.fixture
def scheduler_config(tls_client_config):
    """Create a configuration for the Scheduler.

    Returns:
        SchedulerConfiguration: the created configuration.

    """
    return SchedulerConfiguration.deserialize(
        {"tls": tls_client_config.serialize(), "log": {}}, creation_ignored=True
    )
class KeystoneInfo(NamedTuple):
    """Connection parameters of the Keystone test instance."""

    host: str
    port: int
    username: str
    user_domain_name: str
    password: str
    project_name: str
    project_domain_name: str

    @property
    def auth_url(self):
        """str: base URL of the Keystone v3 identity API."""
        return "http://{}:{}/v3".format(self.host, self.port)
@pytest.fixture(scope="session")
def keystone():
    """Run a Keystone identity service in a subprocess for the whole session.

    Skipped when the ``keystone`` package is not installed. Yields a
    :class:`KeystoneInfo` describing how to reach the bootstrapped instance.
    """
    pytest.importorskip("keystone")

    host = "localhost"
    port = 5050

    # Minimal Keystone configuration: SQLite database and Fernet/credential
    # key repositories inside a temporary directory.
    config_template = dedent(
        """
        [fernet_tokens]
        key_repository = {tempdir}/fernet-keys
        [fernet_receipts]
        key_repository = {tempdir}/fernet-keys
        [DEFAULT]
        log_dir = {tempdir}/logs
        [assignment]
        driver = sql
        [cache]
        enabled = false
        [catalog]
        driver = sql
        [policy]
        driver = rules
        [credential]
        key_repository = {tempdir}/credential-keys
        [token]
        provider = fernet
        expiration = 21600
        [database]
        connection = sqlite:///{tempdir}/keystone.db
        """
    )

    with TemporaryDirectory() as tempdir:
        config_file = Path(tempdir) / "keystone.conf"

        # Create keystone configuration
        with config_file.open("w") as fd:
            fd.write(config_template.format(tempdir=tempdir))

        # Key repositories must be private (0700) for keystone-manage.
        (Path(tempdir) / "fernet-keys").mkdir(mode=0o700)
        (Path(tempdir) / "credential-keys").mkdir(mode=0o700)
        (Path(tempdir) / "logs").mkdir()

        user = os.getuid()
        group = os.getgid()

        # Populate identity service database
        subprocess.check_call(
            ["keystone-manage", "--config-file", str(config_file), "db_sync"]
        )

        # Initialize Fernet key repositories
        subprocess.check_call(
            [
                "keystone-manage",
                "--config-file",
                str(config_file),
                "fernet_setup",
                "--keystone-user",
                str(user),
                "--keystone-group",
                str(group),
            ]
        )
        subprocess.check_call(
            [
                "keystone-manage",
                "--config-file",
                str(config_file),
                "credential_setup",
                "--keystone-user",
                str(user),
                "--keystone-group",
                str(group),
            ]
        )

        # Bootstrap identity service
        subprocess.check_call(
            [
                "keystone-manage",
                "--config-file",
                str(config_file),
                "bootstrap",
                "--bootstrap-password",
                "admin",
                "--bootstrap-admin-url",
                f"http://{host}:{port}/v3/",
                "--bootstrap-internal-url",
                f"http://{host}:{port}/v3/",
                "--bootstrap-public-url",
                f"http://{host}:{port}/v3/",
                "--bootstrap-region-id",
                "DefaultRegion",
            ]
        )

        command = [
            "keystone-wsgi-public",
            "--host",
            host,
            "--port",
            str(port),
            "--",
            "--config-file",
            str(config_file),
        ]
        with subprocess.Popen(command) as proc:
            try:
                # Keystone can take a while to come up; generous timeout.
                wait_for_url(f"http://{host}:{port}/v3", timeout=30)
                info = KeystoneInfo(
                    host=host,
                    port=port,
                    username="admin",
                    password="admin",
                    user_domain_name="Default",
                    project_name="admin",
                    project_domain_name="Default",
                )
                yield info
            finally:
                # Give in-flight requests a moment before terminating.
                time.sleep(1)
                proc.terminate()
class KeycloakInfo(NamedTuple):
    """Connection parameters of a Keycloak instance started by the
    :func:`keycloak` fixture.
    """

    # HTTP port the Keycloak server listens on (host is always localhost,
    # see :attr:`auth_url`).
    port: int
    # Name of the Keycloak realm to use.
    realm: str
    # OAuth2 client identifier and secret.
    client_id: str
    client_secret: str
    # OAuth2 grant type (presumably used when requesting tokens — see caller).
    grant_type: str
    # Credentials of the Keycloak user.
    username: str
    password: str

    @property
    def auth_url(self):
        # Base URL of the running server.
        return f"http://localhost:{self.port}"
@pytest.fixture
def keycloak():
    """Fixture to create a Keycloak instance running in the background. The instance is
    stopped after a test that uses this fixture finished.

    Returns:
        KeycloakInfo: the different values needed to connect to the running instance.
    """
    version = "11.0.2"
    with TemporaryDirectory() as tempdir:
        url = urllib.request.urlopen(
            f"https://downloads.jboss.org/keycloak/{version}/keycloak-{version}.zip"
        )
        # Download Keycloak's zip and directly unzip the downloaded file.
        # ZipFile.extract() does not keep Unix permission bits, so they are
        # restored by hand from the external attributes of each entry.
        # See https://stackoverflow.com/questions/42326428/zipfile-in-python-file-permission # noqa
        zip_unix_system = 3
        with ZipFile(BytesIO(url.read())) as zf:
            for info in zf.infolist():
                extracted_path = zf.extract(info, tempdir)
                if info.create_system == zip_unix_system:
                    unix_attributes = info.external_attr >> 16
                    if unix_attributes:
                        os.chmod(extracted_path, unix_attributes)
        keycloak_dir = Path(tempdir) / f"keycloak-{version}"
        # Initialize the Keycloak installation through the support script.
        subprocess.check_call(
            [
                "support/keycloak",
                "--temp-dir",
                tempdir,
                "init",
                "--keycloak-dir",
                keycloak_dir,
            ]
        )
        # Ask the support script for the credentials (printed as JSON).
        process = subprocess.Popen(
            ["support/keycloak", "--temp-dir", tempdir, "credentials"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        out, _ = process.communicate()
        keycloak_cred = json.loads(out)
        info = KeycloakInfo(**keycloak_cred)
        # NOTE(review): shell=True with an interpolated tempdir; the value
        # comes from TemporaryDirectory() and is trusted. os.setsid puts the
        # server in its own process group so it can be killed as a group below.
        with subprocess.Popen(
            f"support/keycloak --temp-dir {tempdir}", shell=True, preexec_fn=os.setsid
        ) as proc:
            try:
                wait_for_url(
                    f"http://localhost:{info.port}/auth/realms/{info.realm}/",
                    timeout=90,
                )
                yield info
            except TimeoutError:
                print("The URL could not be reached before the timeout.")
            finally:
                pid = proc.pid
                time.sleep(1)
                proc.terminate()
                # Also terminate any children spawned by the shell wrapper.
                os.killpg(os.getpgid(pid), signal.SIGTERM)
                time.sleep(1)
class RecordsContext(object):
    """Asynchronous context manager that inserts a list of records into a
    database on entry and deletes them again on exit.
    """

    def __init__(self, db, records):
        # Database-like object exposing asynchronous ``put`` and ``delete``.
        self.db = db
        # Records to insert; also the value bound by ``async with ... as``.
        self.records = records

    async def __aenter__(self):
        # Store the records in the given order.
        for item in self.records:
            await self.db.put(item)
        return self.records

    async def __aexit__(self, *exc):
        # Remove the records in reverse insertion order.
        for item in reversed(self.records):
            await self.db.delete(item)
@pytest.fixture
def rbac_allow(db, user):
    """Fixture factory that grants an RBAC permission for the duration of a
    context (see :class:`RecordsContext`).
    """
    from tests.factories.core import RoleFactory, RoleBindingFactory
    from krake.data.core import Verb, RoleRule

    def rbac_creator(api, resource, verb, namespace="testing", override_user=None):
        """Add a role and role binding for the provided resource in the given namespace.
        This can then be leveraged to test the RBAC mechanism.

        Args:
            api (str): name of the API of the resource for which a role has to be given
                permission:
            resource (str): name of the resource's kind for which a role has to be given
                permission:
            verb (str, Verb): verb or name of the verb that corresponds to the action
                which should be allowed on the resource.
            namespace (str): namespace where the action is allowed.
            override_user (str): if provided, change the user for which the permission
                is added. Otherwise, use the tests default.

        Returns:
            RecordsContext: context manager in which the permission is added.
        """
        role_user = user
        if override_user:
            role_user = override_user
        # The verb can be given either as a Verb member or by its name.
        if isinstance(verb, str):
            verb = Verb.__members__[verb]
        # An empty/None namespace yields a rule with no namespace restriction.
        namespaces = []
        if namespace:
            namespaces.append(namespace)
        role = RoleFactory(
            rules=[
                RoleRule(
                    api=api, namespaces=namespaces, resources=[resource], verbs=[verb]
                )
            ]
        )
        binding = RoleBindingFactory(users=[role_user], roles=[role.metadata.name])
        # Both records are inserted on entry and removed again on exit.
        return RecordsContext(db, [role, binding])

    return rbac_creator
class Certificate(NamedTuple):
    """Path to certificate issued by :class:`PublicKeyRepository` and its
    corresponding private key.
    """

    # Path to the PEM-encoded certificate file.
    cert: str
    # Path to the PEM-encoded private key file.
    key: str
class PublicKeyRepository(object):
    """Pytest fixture for testing public key infrastructure.

    The repository uses the ``cfssl`` executable for creating and signing
    certificates.

    The repository must be used with the context protocol:

    .. code:: python

        with PublicKeyRepository() as pki:
            cert = pki.gencert("me")

    Three types of certificates can be created:

    * a CA is always created;
    * an intermediate certificate can be created, which cannot be used for client
      authentication (cfssl "intermediate-ca" profile) OR;
    * a certificate ready for client authentication (cfssl "krake-test-ca" profile).

    Attributes:
        ca (Certificate): Certificate Authority of this repository created by
            :meth:`genca`.
    """

    # Certificate signing request for the repository's own CA.
    ca_csr = {
        "CN": "Krake CA",
        "key": {"algo": "ecdsa", "size": 256},
        "names": [{"O": "Acme Corporation"}],
    }

    # cfssl signing configuration defining the two supported profiles
    # (see the class docstring).
    ca_config = {
        "signing": {
            "profiles": {
                "krake-test-ca": {
                    "usages": [
                        "signing",
                        "key encipherment",
                        "server auth",
                        "client auth",
                    ],
                    "expiry": "8760h",
                },
                "intermediate-ca": {
                    "usages": [
                        "signing",
                        "key encipherment",
                        "server auth",
                        "client auth",
                        "cert sign",
                        "crl sign",
                    ],
                    "ca_constraint": {"is_ca": True, "max_path_len": 1},
                    "expiry": "8760h",
                },
            }
        }
    }

    # Template CSR for client certificates; "CN" is filled in by gencert().
    client_csr = {
        "CN": None,
        "hosts": ["127.0.0.1"],
        "key": {"algo": "ecdsa", "size": 256},
        "names": [{"O": "Acme Corporation"}],
    }

    def __init__(self):
        # All state is created lazily: the directory in __enter__(),
        # the CA in genca().
        self._tempdir = None
        self.clients = None
        self.ca = None
        self.ca_config_file = None

    def __enter__(self):
        self._tempdir = TemporaryDirectory(prefix="pki-")
        return self

    def __exit__(self, *exc):
        # Removing the directory invalidates every issued path.
        self._tempdir.cleanup()
        self.ca = None
        self.ca_config_file = None

    @property
    def tempdir(self):
        """Temporary directory holding all certificates, keys and config
        files. It is created when entering the context and removed on exit.
        """
        if self._tempdir is None:
            return None
        return Path(self._tempdir.name)

    def gencert(self, name, is_intermediate=False):
        """Generate client certificate signed by the CA managed by this repository.

        Args:
            name (str): Common name of the certificate
            is_intermediate (bool): if True, the certificate will be able to sign other
                certificates, but cannot be used for client authentication.

        Returns:
            Certificate: Named tuple of paths to the certificate and
                corresponding private key.
        """
        if self.ca is None:
            self.genca()

        client_csr = dict(self.client_csr, CN=name)

        client_csr_file = self.tempdir / f"{name}-csr.json"
        client_cert_file = self.tempdir / f"{name}.pem"
        client_key_file = self.tempdir / f"{name}-key.pem"

        profile = "krake-test-ca"
        if is_intermediate:
            profile = "intermediate-ca"

        # Certificates are cached per name: an existing key file means the
        # certificate was already generated by a previous call.
        if not client_key_file.exists():
            with client_csr_file.open("w") as fd:
                json.dump(client_csr, fd, indent=4)
            certs = self.cfssl(
                "gencert",
                "-profile",
                profile,
                "-config",
                str(self.ca_config_file),
                "-ca",
                self.ca.cert,
                "-ca-key",
                self.ca.key,
                str(client_csr_file),
            )
            with client_key_file.open("w") as fd:
                fd.write(certs["key"])
            # Private keys must be readable by the owner only.
            client_key_file.chmod(0o600)
            with client_cert_file.open("w") as fd:
                fd.write(certs["cert"])

        return Certificate(cert=str(client_cert_file), key=str(client_key_file))

    def genca(self):
        """Initialize the CA certificate of the repository.

        This method is automatically called by :meth:`gencert` if :attr:`ca`
        is None.
        """
        ca_csr_file = self.tempdir / "ca-csr.json"
        ca_key_file = self.tempdir / "ca-key.pem"
        ca_cert_file = self.tempdir / "ca.pem"
        self.ca_config_file = self.tempdir / "ca-config.json"

        self.ca = Certificate(cert=str(ca_cert_file), key=str(ca_key_file))

        with ca_csr_file.open("w") as fd:
            json.dump(self.ca_csr, fd, indent=4)

        # Self-signed root certificate.
        certs = self.cfssl("gencert", "-initca", str(ca_csr_file))

        with ca_key_file.open("w") as fd:
            fd.write(certs["key"])
        ca_key_file.chmod(0o600)

        with ca_cert_file.open("w") as fd:
            fd.write(certs["cert"])

        # Signing configuration used by later gencert() calls.
        with open(self.ca_config_file, "w") as fd:
            json.dump(self.ca_config, fd, indent=4)

    @staticmethod
    def cfssl(*command):
        """Execute an ``cfssl`` command. The output is directly parsed as JSON
        and returned.

        Args:
            *command: command line arguments for ``cfssl``

        Returns:
            JSON output of the cfssl command
        """
        with subprocess.Popen(("cfssl",) + command, stdout=subprocess.PIPE) as proc:
            try:
                data = json.load(proc.stdout)
            except json.JSONDecodeError:
                # Prefer reporting a failed process over a parse error if the
                # tool already exited with a non-zero status.
                returncode = proc.poll()
                if returncode is not None and returncode != 0:
                    raise subprocess.CalledProcessError(returncode, command)
                raise
        if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode, command)
        return data
@pytest.fixture(scope="session")
def pki():
    """Session-scoped public key infrastructure fixture.

    Skips the test when the ``cfssl`` executable is not installed; otherwise
    yields a ready-to-use :class:`PublicKeyRepository`.
    """
    if shutil.which("cfssl") is None:
        pytest.skip("Executable 'cfssl' not found")
    with PublicKeyRepository() as repository:
        yield repository
@pytest.fixture
def client_ssl_context(pki):
    """Provide a factory building SSL contexts for a :class:`krake.client.Client`.

    Args:
        pki (PublicKeyRepository): the SSL components generated by the pki fixture.

    Returns:
        function: factory taking the certificate CN ("user") as parameter.
    """

    def create_client_ssl_context(user):
        """Generate an SSL context, with the CA, the certificate and key. The
        certificate's CN has the provided user.

        Args:
            user (str): the CN for which the certificate should be generated.

        Returns:
            ssl.SSLContext: the created SSL context.
        """
        certificate = pki.gencert(user)
        tls_config = TlsClientConfiguration(
            enabled=True,
            client_ca=pki.ca.cert,
            client_cert=certificate.cert,
            client_key=certificate.key,
        )
        return create_ssl_context(tls_config)

    return create_client_ssl_context
@pytest.fixture
def hooks_config(pki):
    """Generate the configuration for using the hooks of the KubernetesController.

    Args:
        pki (PublicKeyRepository): Already-prepared certificate environment.

    Returns:
        HooksConfiguration: the generated configuration.
    """
    # Both hooks get an intermediate certificate (is_intermediate=True) so
    # that they are able to sign further certificates.
    client_complete_cert = pki.gencert("test-complete-hook-signing",
                                       is_intermediate=True)
    client_shutdown_cert = pki.gencert("test-shutdown-hook-signing",
                                       is_intermediate=True)
    # deepcopy presumably shields the shared fixture value from mutation by
    # individual tests — TODO confirm against callers.
    return deepcopy(
        HooksConfiguration.deserialize(
            {
                "complete": {
                    "hook_user": "test-complete-hook-user",
                    "intermediate_src": client_complete_cert.cert,
                    "intermediate_key_src": client_complete_cert.key,
                    "cert_dest": "/etc/krake_complete_certs",
                    "env_token": "KRAKE_COMPLETE_TOKEN",
                    "env_url": "KRAKE_COMPLETE_URL",
                },
                "shutdown": {
                    "hook_user": "test-shutdown-hook-user",
                    "intermediate_src": client_shutdown_cert.cert,
                    "intermediate_key_src": client_shutdown_cert.key,
                    "cert_dest": "/etc/krake_shutdown_certs",
                    "env_token": "KRAKE_SHUTDOWN_TOKEN",
                    "env_url": "KRAKE_SHUTDOWN_URL",
                }
            }
        )
    )
class PrometheusExporter(NamedTuple):
    """Tuple yielded by the :func:`prometheus_exporter` fixture describing
    server connection information and the name of the provided metric.
    """

    # Host and port of the exporter's HTTP endpoint.
    host: str
    port: int
    # Name of the single metric the exporter publishes.
    metric: str
@pytest.fixture
async def prometheus_exporter(loop, aiohttp_server):
    """Heat-demand exporter fixture. Heat demand exporter generates heat
    demand metric `heat_demand_zone_1` with random value.
    """
    metric_name = "heat_demand_zone_1"
    interval = 1  # refresh metric value interval[s]
    registry = CollectorRegistry(auto_describe=True)

    async def heat_demand_metric():
        # Background task: refresh the gauge with a random value forever.
        metric = Gauge(metric_name, "float - heat demand (kW)", registry=registry)
        while True:
            metric.set(round(random.random(), 2))
            await asyncio.sleep(interval)

    async def start_metric(app):
        app["metric"] = loop.create_task(heat_demand_metric())

    async def cleanup_metric(app):
        # Cancel the refresh task and wait for it to finish.
        app["metric"].cancel()
        try:
            await app["metric"]
        except asyncio.CancelledError:
            pass

    async def server_stats(request):
        """Return a web response with the plain text version of the metrics."""
        resp = web.Response(body=aio.web.generate_latest(registry))
        # This is set separately because aiohttp complains about ";"" in
        # content_type thinking it means there's also a charset.
        # @see https://github.com/aio-libs/aiohttp/issues/2197
        resp.content_type = CONTENT_TYPE_LATEST
        return resp

    app = web.Application()
    app.router.add_get("/metrics", server_stats)
    app.on_startup.append(start_metric)
    app.on_cleanup.append(cleanup_metric)

    server = await aiohttp_server(app)
    yield PrometheusExporter(host=server.host, port=server.port, metric=metric_name)
class Prometheus(NamedTuple):
    """Tuple yielded by the :func:`prometheus` fixture. It contains
    information about the Prometheus server connection.
    """

    # URL scheme ("http"), host and port of the Prometheus server.
    scheme: str
    host: str
    port: int
    # The exporter scraped by this Prometheus instance.
    exporter: PrometheusExporter
@pytest.fixture
async def prometheus(prometheus_exporter, loop):
    """Start a Prometheus server scraping the :func:`prometheus_exporter`
    fixture, and yield its connection information. The server is terminated
    afterwards.
    """
    prometheus_host = "localhost"
    prometheus_port = 5055

    if not shutil.which("prometheus"):
        pytest.skip("Executable 'prometheus' not found")

    # Template for the Prometheus configuration; the placeholders are filled
    # in with str.format() below.
    config = dedent(
        """
        global:
          scrape_interval: 1s
        scrape_configs:
        - job_name: prometheus
          static_configs:
          - targets:
            - {prometheus_host}:{prometheus_port}
        - job_name: heat-demand-exporter
          static_configs:
          - targets:
            - {exporter_host}:{exporter_port}
        """
    )

    async def await_for_prometheus(url, timeout=10):
        """Wait until the Prometheus server is booted up and the first metric
        is scraped.
        """
        start = loop.time()
        while True:
            resp = await await_for_url(url, loop)
            body = await resp.json()
            # If the returned metric list is not empty, stop waiting.
            if body["data"]["result"]:
                return
            if loop.time() - start > timeout:
                raise TimeoutError(f"Cannot get metric from {url!r}")
            # Prometheus' first scrap takes some time
            await asyncio.sleep(0.25)

    with TemporaryDirectory() as tempdir:
        config_file = Path(tempdir) / "prometheus.yml"
        # Create prometheus configuration
        with config_file.open("w") as fd:
            fd.write(
                config.format(
                    prometheus_host=prometheus_host,
                    prometheus_port=prometheus_port,
                    exporter_host=prometheus_exporter.host,
                    exporter_port=prometheus_exporter.port,
                )
            )
        command = [
            "prometheus",
            "--config.file",
            str(config_file),
            "--storage.tsdb.path",
            str(Path(tempdir) / "data"),
            "--web.enable-admin-api",
            "--web.listen-address",
            ":" + str(prometheus_port),
        ]
        with subprocess.Popen(command) as prometheus:
            try:
                # Block until the exporter's metric is actually scraped.
                await await_for_prometheus(
                    f"http://{prometheus_host}:{prometheus_port}"
                    f"/api/v1/query?query={prometheus_exporter.metric}"
                )
                yield Prometheus(
                    scheme="http",
                    host=prometheus_host,
                    port=prometheus_port,
                    exporter=prometheus_exporter,
                )
            finally:
                prometheus.terminate()
def write_properties(properties, file_path):
    """Write key/value pairs to a file, one ``<key>=<value>`` pair per line.

    Args:
        properties (dict[str, Any]): dictionary that contains the parameters to write.
        file_path (pathlib.Path): name of the file in which the properties will be
            written.
    """
    with open(file_path, "w") as stream:
        stream.writelines(f"{key}={value}\n" for key, value in properties.items())
class Zookeeper(NamedTuple):
    """Contains the information to connect to a Zookeeper instance.

    Attributes:
        host (str): host of the Zookeeper instance.
        port (int): port of the Zookeeper instance.
    """

    # Host of the instance started by the :func:`zookeeper` fixture.
    host: str
    # Client port of the instance.
    port: int
async def write_command_to_port(loop, host, port, command=b"dump"):
    """Send a byte string to a specific port on the provided host. Read the complete
    output and return it.

    Args:
        loop (asyncio.AbstractEventLoop): the current loop. Unused, kept for
            backward compatibility: the coroutine always runs on the loop that
            awaits it.
        host (str): the host to which the command should be sent.
        port (int): the port on which the command should be sent.
        command (bytes): the command to send.

    Returns:
        bytes: the output read from the host, or ``None`` if the endpoint
            could not be reached.
    """
    # If the process that listens at the endpoint is not ready, the socket connector
    # raises an OSError (e.g. ConnectionRefusedError); the function then falls
    # through and implicitly returns None.
    # FIXME: the OSError may be changed with another error, for instance
    # ConnectionRefusedError. This works locally but not on the pipeline.
    with suppress(OSError):
        reader, writer = await asyncio.open_connection(host, port)
        try:
            writer.write(command)
            # Ensure the command left the write buffer before reading the answer.
            await writer.drain()
            data = await reader.read(512)
        finally:
            # Always release the connection, even if reading failed, and wait
            # for the transport to actually close. Close-time errors must not
            # mask the data that was already read.
            writer.close()
            with suppress(OSError):
                await writer.wait_closed()
        return data
@pytest.fixture
async def zookeeper(tmp_path, loop):
    """Start a Zookeeper instance in the background and yield its connection
    information. The instance is terminated afterwards.
    """
    if not shutil.which("zookeeper-server-start"):
        pytest.skip("Executable 'zookeeper-server-start' not found")

    zookeeper_port = 30007
    properties = {
        "4lw.commands.whitelist": "*",  # Allows sending all commands to Zookeeper
        "admin.enableServer": False,
        "clientPort": zookeeper_port,
        "dataDir": tmp_path,
        "maxClientCnxns": 0,
    }
    properties_path = tmp_path / "zookeeper.properties"
    write_properties(properties, properties_path)

    command = ["zookeeper-server-start", properties_path]
    with subprocess.Popen(command) as zookeeper:
        try:
            timeout = 10
            start = loop.time()
            # Wait for the Zookeeper instance to be ready: any answer to the
            # default "dump" command means the server accepts connections.
            while True:
                data = await write_command_to_port(loop, "localhost", zookeeper_port)
                if data:
                    break
                if loop.time() - start > timeout:
                    raise TimeoutError("The instance was not ready before the timeout")
                await asyncio.sleep(0.25)

            yield Zookeeper(host="localhost", port=zookeeper_port)
        finally:
            zookeeper.terminate()
class Kafka(NamedTuple):
    """Contains the information to connect to a Kafka instance.

    Attributes:
        host (str): host of the Kafka instance.
        port (int): port of the Kafka instance.
    """

    # Host of the broker started by the :func:`kafka` fixture.
    host: str
    # Plaintext listener port of the broker.
    port: int
@pytest.fixture
async def kafka(zookeeper, tmp_path, loop):
    """Start a single-broker Kafka instance connected to the Zookeeper
    instance of the :func:`zookeeper` fixture, and yield its connection
    information. The broker is terminated afterwards.
    """
    if not shutil.which("kafka-server-start"):
        pytest.skip("Executable 'kafka-server-start' not found")

    broker_id = 42
    kafka_host = "localhost"
    kafka_port = 31007
    properties = {
        "auto.create.topics.enable": True,
        "broker.id": broker_id,
        "delete.topic.enable": True,
        "listeners": f"PLAINTEXT://{kafka_host}:{kafka_port}",
        "log.cleaner.enable": True,
        "log.dirs": tmp_path,
        "offsets.topic.replication.factor": 1,  # Allows having only one broker
        "transaction.state.log.replication.factor": 1,  # Allows having only one broker
        "transaction.state.log.min.isr": 1,  # Allows having only one broker
        "zookeeper.connect": f"{zookeeper.host}:{zookeeper.port}",
        "zookeeper.connection.timeout.ms": 6000,
    }
    properties_path = tmp_path / "kafka.properties"
    write_properties(properties, properties_path)

    command = ["kafka-server-start", properties_path]
    with subprocess.Popen(command) as kafka:
        try:
            timeout = 20
            start = loop.time()
            # Wait for the Kafka instance to be ready.
            while True:
                dump_return = await write_command_to_port(
                    loop, zookeeper.host, zookeeper.port
                )
                # write_command_to_port() returns None when the endpoint could
                # not be reached; guard against it before the membership test.
                # If the ID appears in the list of broker IDs in the Zookeeper
                # status, it means the broker is ready.
                if dump_return and f"/brokers/ids/{broker_id}".encode() in dump_return:
                    break
                if loop.time() - start > timeout:
                    raise TimeoutError("The instance was not ready before the timeout")
                await asyncio.sleep(1)

            yield Kafka(host=kafka_host, port=kafka_port)
        finally:
            kafka.terminate()
class KsqlMetric(NamedTuple):
    """Entry in a KSQL table, where each row corresponds to a metric and its value. The
    value can be updated any time by a new input from Kafka.

    Attributes:
        name (str): name attribute of an entry in the KSQL database.
        value (int): value attribute of an entry in the KSQL database.
    """

    name: str
    value: int
class KafkaTable(NamedTuple):
    """Data about a KSQL table that contains the value of different metrics, one per
    row.

    Attributes:
        metrics (list[KsqlMetric]): definitions of the metrics inserted into the
            database.
        comparison_column (str): name of the KSQL column which contains the metrics
            names, and thus whose content is compared to the name of the chosen metric.
        value_column (str): name of the KSQL column which contains the current value of
            all metrics.
        table (str): name of the table in which the metrics have been added (so this
            table has at least two columns, namely "<comparison_column>" and
            "<value_column>").
    """

    metrics: List[KsqlMetric]
    comparison_column: str
    value_column: str
    table: str
class KsqlServer(NamedTuple):
    """Contains the information to connect to a KSQL database.

    Attributes:
        host (str): host of the KSQL database.
        port (int): port of the KSQL database.
        kafka_table (KafkaTable): information regarding the KSQL table present in the
            KSQL database.
        scheme (str): scheme to connect to the KSQL database.
    """

    host: str
    port: int
    kafka_table: KafkaTable
    # Defaults to plain HTTP, matching the :func:`ksql` fixture.
    scheme: str = "http"
async def send_command(client, url, command):
    """Post a KSQL statement to the ``/ksql`` endpoint of the given server.

    Args:
        client (aiohttp.ClientSession): client to use for sending the command.
        url (str): base URL of the KSQL server.
        command (dict): command to send to the KSQL database.
    """
    response = await client.post(url + "/ksql", json=command)
    assert response.status == 200, f"The following command failed: {command!r}"
async def insert_entries(url):
    """Prepare a KSQL database by adding a stream, a table constructed from the stream,
    and by sending some elements to the stream. The stream has two columns: the metric
    name, and the number of time it appeared in the stream.

    Args:
        url (str): URL of the KSQL database.

    Returns:
        KafkaTable: necessary information regarding all elements inserted in the
            database.
    """
    value_column = "num_write"
    comparison_column = "zone"
    table = "heat_demand_zones_metrics"
    # Expected end state: the "value" of each metric is the number of rows
    # inserted for it below.
    metrics = [
        KsqlMetric(name="heat_demand_1", value=2),  # Because it is inserted twice.
        KsqlMetric(name="heat_demand_2", value=1),
    ]

    # Template payload; "ksql" is filled with each statement in turn.
    base_command = {"ksql": None, "streamsProperties": {}}
    build_commands = [
        (
            "CREATE STREAM heat_demand_zones"
            f" ({comparison_column} STRING KEY, value INTEGER) WITH"
            " (kafka_topic='heat_demand_zones', value_format='json', partitions=1);"
        ),
        (
            f"CREATE TABLE {table} AS SELECT {comparison_column}, COUNT(*)"
            f" AS {value_column}"
            f" FROM heat_demand_zones GROUP BY {comparison_column} EMIT CHANGES;"
        ),  # Table from the stream, counts the number of inserted entries for each zone
    ]
    insert_commands = [
        (
            f"INSERT INTO heat_demand_zones ({comparison_column}, value) VALUES"
            f" ('{metrics[0].name}', 84);"
        ),
        (
            f"INSERT INTO heat_demand_zones ({comparison_column}, value) VALUES"
            f" ('{metrics[1].name}', 23);"
        ),
        (
            f"INSERT INTO heat_demand_zones ({comparison_column}, value) VALUES"
            f" ('{metrics[0].name}', 17);"
        ),
    ]

    async with aiohttp.ClientSession() as client:
        for command in build_commands:
            base_command["ksql"] = command
            await send_command(client, url, base_command)

        # Between the creation of the stream/table and the insertion of entries, some
        # time is necessary.
        await asyncio.sleep(15)

        for command in insert_commands:
            base_command["ksql"] = command
            await send_command(client, url, base_command)

    return KafkaTable(
        metrics=metrics,
        comparison_column=comparison_column,
        value_column=value_column,
        table=table,
    )
@pytest.fixture
async def ksql(kafka, tmp_path, loop):
    """Start a KSQL database. Insert some dummy metrics inside. The state of the
    database at the end of this fixture is the following:

    * a stream called "heat_demand_zones", with the following attributes:
        * zone (as string): the name of the zone;
        * value (as integer): an arbitrary value;
    * a table created from the stream, it has the following attributes:
        * zone (as string): same as for the stream;
        * num_write (as integer): the amount of time an entry was added for the current
          zone.
    * Three entries added to the stream:
        * two for the zone "heat_demand_1";
        * one for the zone "heat_demand_2".
    """
    if not shutil.which("ksql-server-start"):
        pytest.skip("Executable 'ksql-server-start' not found")

    ksql_host = "0.0.0.0"
    ksql_port = 32007
    url = f"http://{ksql_host}:{ksql_port}"
    properties = {
        "listeners": url,
        "ksql.logging.processing.topic.auto.create": "true",
        "ksql.logging.processing.stream.auto.create": "true",
        "bootstrap.servers": f"{kafka.host}:{kafka.port}",
        "compression.type": "snappy",
        "ksql.streams.state.dir": tmp_path,
    }
    properties_path = tmp_path / "ksql-server.properties"
    write_properties(properties, properties_path)

    command = ["ksql-server-start", properties_path]
    with subprocess.Popen(command) as ksql:
        try:
            timeout = 60
            start = loop.time()
            # Wait for the KSQL instance to be ready: poll the /info endpoint
            # until it answers with HTTP 200.
            while True:
                resp = None
                async with aiohttp.ClientSession() as client:
                    try:
                        resp = await client.get(url + "/info")
                    except aiohttp.ClientConnectorError:
                        # Server not accepting connections yet.
                        pass
                if resp and resp.status == 200:
                    break
                if loop.time() - start > timeout:
                    raise TimeoutError("The instance was not ready before the timeout")
                await asyncio.sleep(1)

            # Populate the database with the dummy stream/table/entries.
            kafka_table = await insert_entries(url)
            yield KsqlServer(host=ksql_host, port=ksql_port, kafka_table=kafka_table)
        finally:
            ksql.terminate()
|
krake/tests/conftest.py
|
codereval_python_data_94
|
Given a timezone-aware datetime in a given timezone, calculates a
timezone-aware datetime in a new timezone.
Since this is the one time that we *know* we have an unambiguous
datetime object, we take this opportunity to determine whether the
datetime is ambiguous and in a "fold" state (e.g. if it's the first
occurrence, chronologically, of the ambiguous datetime).
:param dt:
A timezone-aware :class:`datetime.datetime` object.
def _fromutc(self, dt):
    """
    Given a timezone-aware datetime in a given timezone, calculates a
    timezone-aware datetime in a new timezone.

    Since this is the one time that we *know* we have an unambiguous
    datetime object, we take this opportunity to determine whether the
    datetime is ambiguous and in a "fold" state (e.g. if it's the first
    occurrence, chronologically, of the ambiguous datetime).

    :param dt:
        A timezone-aware :class:`datetime.datetime` object.
    """
    # NOTE(review): this appears to duplicate ``_tzinfo._fromutc`` below.

    # Re-implement the algorithm from Python's datetime.py
    dtoff = dt.utcoffset()
    if dtoff is None:
        raise ValueError("fromutc() requires a non-None utcoffset() "
                         "result")

    # The original datetime.py code assumes that `dst()` defaults to
    # zero during ambiguous times. PEP 495 inverts this presumption, so
    # for pre-PEP 495 versions of python, we need to tweak the algorithm.
    dtdst = dt.dst()
    if dtdst is None:
        raise ValueError("fromutc() requires a non-None dst() result")
    # Standard offset (UTC offset minus DST component).
    delta = dtoff - dtdst
    dt += delta
    # Set fold=1 so we can default to being in the fold for
    # ambiguous dates.
    dtdst = enfold(dt, fold=1).dst()
    if dtdst is None:
        raise ValueError("fromutc(): dt.dst gave inconsistent "
                         "results; cannot convert")
    return dt + dtdst
from six import PY2
from functools import wraps
from datetime import datetime, timedelta, tzinfo
# Shared zero offset, used e.g. as the dst() result outside of DST.
ZERO = timedelta(0)
# Public API of this helper module.
__all__ = ['tzname_in_python2', 'enfold']
def tzname_in_python2(namefunc):
    """Change unicode output into bytestrings in Python 2

    tzname() API changed in Python 3. It used to return bytes, but was changed
    to unicode strings
    """
    if not PY2:
        # Nothing to adapt on Python 3.
        return namefunc

    @wraps(namefunc)
    def adjust_encoding(*args, **kwargs):
        result = namefunc(*args, **kwargs)
        return None if result is None else result.encode()

    return adjust_encoding
# The following is adapted from Alexander Belopolsky's tz library
# https://github.com/abalkin/tz
if hasattr(datetime, 'fold'):
    # Python 3.6+ (PEP 495): datetime natively carries a ``fold`` attribute,
    # so enfold() can simply delegate to ``replace``. (The previous comment
    # labelled this branch "pre-python 3.6", which was backwards.)
    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        return dt.replace(fold=fold)
else:
    # Pre-3.6 fallback: emulate ``fold`` with a datetime subclass.
    class _DatetimeWithFold(datetime):
        """
        This is a class designed to provide a PEP 495-compliant interface for
        Python versions before 3.6. It is used only for dates in a fold, so
        the ``fold`` attribute is fixed at ``1``.

        .. versionadded:: 2.6.0
        """
        __slots__ = ()

        def replace(self, *args, **kwargs):
            """
            Return a datetime with the same attributes, except for those
            attributes given new values by whichever keyword arguments are
            specified. Note that tzinfo=None can be specified to create a naive
            datetime from an aware datetime with no conversion of date and time
            data.

            This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
            return a ``datetime.datetime`` even if ``fold`` is unchanged.
            """
            argnames = (
                'year', 'month', 'day', 'hour', 'minute', 'second',
                'microsecond', 'tzinfo'
            )

            # Merge positional arguments into kwargs, rejecting duplicates.
            for arg, argname in zip(args, argnames):
                if argname in kwargs:
                    raise TypeError('Duplicate argument: {}'.format(argname))

                kwargs[argname] = arg

            # Fill in any attribute not overridden by the caller.
            for argname in argnames:
                if argname not in kwargs:
                    kwargs[argname] = getattr(self, argname)

            # Dropping fold (fold=0) must produce a plain datetime again.
            dt_class = self.__class__ if kwargs.get('fold', 1) else datetime

            return dt_class(**kwargs)

        @property
        def fold(self):
            return 1

    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        # Already carries the requested fold value: nothing to do.
        if getattr(dt, 'fold', 0) == fold:
            return dt

        args = dt.timetuple()[:6]
        args += (dt.microsecond, dt.tzinfo)

        if fold:
            return _DatetimeWithFold(*args)
        else:
            return datetime(*args)
def _validate_fromutc_inputs(f):
"""
The CPython version of ``fromutc`` checks that the input is a ``datetime``
object and that ``self`` is attached as its ``tzinfo``.
"""
@wraps(f)
def fromutc(self, dt):
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
return f(self, dt)
return fromutc
class _tzinfo(tzinfo):
    """
    Base class for all ``dateutil`` ``tzinfo`` objects.
    """

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        dt = dt.replace(tzinfo=self)

        # Compare both sides of the fold for the same wall time.
        wall_0 = enfold(dt, fold=0)
        wall_1 = enfold(dt, fold=1)

        same_offset = wall_0.utcoffset() == wall_1.utcoffset()
        same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)

        # Ambiguous: identical wall time but different UTC offsets.
        return same_dt and not same_offset

    def _fold_status(self, dt_utc, dt_wall):
        """
        Determine the fold status of a "wall" datetime, given a representation
        of the same datetime as a (naive) UTC datetime. This is calculated based
        on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
        datetimes, and that this offset is the actual number of hours separating
        ``dt_utc`` and ``dt_wall``.

        :param dt_utc:
            Representation of the datetime as UTC

        :param dt_wall:
            Representation of the datetime as "wall time". This parameter must
            either have a `fold` attribute or have a fold-naive
            :class:`datetime.tzinfo` attached, otherwise the calculation may
            fail.
        """
        if self.is_ambiguous(dt_wall):
            delta_wall = dt_wall - dt_utc
            # In the fold iff the wall delta equals the standard offset.
            _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
        else:
            _fold = 0

        return _fold

    def _fold(self, dt):
        # PEP 495 ``fold`` defaults to 0 on objects lacking the attribute.
        return getattr(dt, 'fold', 0)

    def _fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """
        # Re-implement the algorithm from Python's datetime.py
        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")

        # The original datetime.py code assumes that `dst()` defaults to
        # zero during ambiguous times. PEP 495 inverts this presumption, so
        # for pre-PEP 495 versions of python, we need to tweak the algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        # Standard offset: UTC offset minus the DST component.
        delta = dtoff - dtdst

        dt += delta
        # Set fold=1 so we can default to being in the fold for
        # ambiguous dates.
        dtdst = enfold(dt, fold=1).dst()
        if dtdst is None:
            raise ValueError("fromutc(): dt.dst gave inconsistent "
                             "results; cannot convert")
        return dt + dtdst

    @_validate_fromutc_inputs
    def fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """
        dt_wall = self._fromutc(dt)

        # Calculate the fold status given the two datetimes.
        _fold = self._fold_status(dt, dt_wall)

        # Set the default fold value for ambiguous dates
        return enfold(dt_wall, fold=_fold)
class tzrangebase(_tzinfo):
    """
    This is an abstract base class for time zones represented by an annual
    transition into and out of DST. Child classes should implement the following
    methods:

        * ``__init__(self, *args, **kwargs)``
        * ``transitions(self, year)`` - this is expected to return a tuple of
          datetimes representing the DST on and off transitions in standard
          time.

    A fully initialized ``tzrangebase`` subclass should also provide the
    following attributes:

        * ``hasdst``: Boolean whether or not the zone uses DST.
        * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
          representing the respective UTC offsets.
        * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
          abbreviations in DST and STD, respectively.
        * ``_hasdst``: Whether or not the zone has DST.

    .. versionadded:: 2.6.0
    """
    def __init__(self):
        # Subclasses must supply their own constructor; this base class only
        # provides shared behavior on top of the attributes listed above.
        raise NotImplementedError('tzrangebase is an abstract base class')

    def utcoffset(self, dt):
        """Return the total UTC offset for ``dt`` (DST or standard)."""
        isdst = self._isdst(dt)

        if isdst is None:
            return None
        elif isdst:
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        """Return the DST component of the offset; ZERO outside DST."""
        isdst = self._isdst(dt)

        if isdst is None:
            return None
        elif isdst:
            return self._dst_base_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        """Return the short zone abbreviation appropriate for ``dt``."""
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr

    def fromutc(self, dt):
        """ Given a datetime in UTC, return local time """
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")

        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        # Get transitions - if there are none, fixed offset
        transitions = self.transitions(dt.year)
        if transitions is None:
            return dt + self.utcoffset(dt)

        # Get the transition times in UTC
        dston, dstoff = transitions

        dston -= self._std_offset
        dstoff -= self._std_offset

        utc_transitions = (dston, dstoff)
        dt_utc = dt.replace(tzinfo=None)

        isdst = self._naive_isdst(dt_utc, utc_transitions)

        if isdst:
            dt_wall = dt + self._dst_offset
        else:
            dt_wall = dt + self._std_offset

        # A standard-time wall clock reading that is also reachable as a DST
        # reading is ambiguous; mark it as the second occurrence (fold=1).
        _fold = int(not isdst and self.is_ambiguous(dt_wall))

        return enfold(dt_wall, fold=_fold)

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.
        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        if not self.hasdst:
            return False

        start, end = self.transitions(dt.year)

        dt = dt.replace(tzinfo=None)

        # Only wall times in [end, end + dst_base_offset) occur twice.
        return (end <= dt < end + self._dst_base_offset)

    def _isdst(self, dt):
        """Return True/False for DST status, or None when ``dt`` is None."""
        if not self.hasdst:
            return False
        elif dt is None:
            return None

        transitions = self.transitions(dt.year)

        if transitions is None:
            return False

        dt = dt.replace(tzinfo=None)

        isdst = self._naive_isdst(dt, transitions)

        # Handle ambiguous dates
        if not isdst and self.is_ambiguous(dt):
            # PEP 495: fold=0 (the first occurrence) is the DST reading.
            return not self._fold(dt)
        else:
            return isdst

    def _naive_isdst(self, dt, transitions):
        """Test a naive datetime against the (on, off) transition pair."""
        dston, dstoff = transitions

        dt = dt.replace(tzinfo=None)

        if dston < dstoff:
            # DST interval lies within the calendar year (northern style).
            isdst = dston <= dt < dstoff
        else:
            # DST interval wraps the year boundary (southern style).
            isdst = not dstoff <= dt < dston

        return isdst

    @property
    def _dst_base_offset(self):
        # Amount the wall clock jumps at the DST transition.
        return self._dst_offset - self._std_offset

    # Instances compare by value but are not hashable.
    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s(...)" % self.__class__.__name__

    __reduce__ = object.__reduce__
|
dateutil/tz/_common.py
|
codereval_python_data_95
|
Return a version of this object represented entirely using integer
values for the relative attributes.
>>> relativedelta(days=1.5, hours=2).normalized()
relativedelta(days=+1, hours=+14)
:return:
Returns a :class:`dateutil.relativedelta.relativedelta` object.
def normalized(self):
    """
    Return a version of this object represented entirely using integer
    values for the relative attributes.

    >>> relativedelta(days=1.5, hours=2).normalized()
    relativedelta(days=+1, hours=+14)

    :return:
        Returns a :class:`dateutil.relativedelta.relativedelta` object.
    """
    # Truncate each unit to a whole number and cascade the fractional
    # remainder into the next-smaller unit, rounding to suppress float noise.
    whole_days = int(self.days)
    frac_hours = round(self.hours + 24 * (self.days - whole_days), 11)
    whole_hours = int(frac_hours)
    frac_minutes = round(self.minutes + 60 * (frac_hours - whole_hours), 10)
    whole_minutes = int(frac_minutes)
    frac_seconds = round(self.seconds + 60 * (frac_minutes - whole_minutes), 8)
    whole_seconds = int(frac_seconds)
    whole_microseconds = round(
        self.microseconds + 1e6 * (frac_seconds - whole_seconds))

    # The constructor calls _fix(), which carries any overflow back upward.
    return self.__class__(
        years=self.years, months=self.months, days=whole_days,
        hours=whole_hours, minutes=whole_minutes, seconds=whole_seconds,
        microseconds=whole_microseconds, leapdays=self.leapdays,
        year=self.year, month=self.month, day=self.day,
        weekday=self.weekday, hour=self.hour, minute=self.minute,
        second=self.second, microsecond=self.microsecond)
# -*- coding: utf-8 -*-
import datetime
import calendar
import operator
from math import copysign
from six import integer_types
from warnings import warn
from ._common import weekday
# Weekday singletons (usable as e.g. ``weekday=MO(+1)``); ``weekdays`` also
# keeps them indexable by number, 0=MO .. 6=SU.
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))

__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class relativedelta(object):
    """
    The relativedelta type is designed to be applied to an existing datetime and
    can replace specific components of that datetime, or represents an interval
    of time.

    It is based on the specification of the excellent work done by M.-A. Lemburg
    in his
    `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
    However, notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There are two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes::

        relativedelta(datetime1, datetime2)

    The second one is passing it any number of the following keyword arguments::

        relativedelta(arg1=x,arg2=y,arg3=z...)

        year, month, day, hour, minute, second, microsecond:
            Absolute information (argument is singular); adding or subtracting a
            relativedelta with absolute information does not perform an arithmetic
            operation, but rather REPLACES the corresponding value in the
            original datetime with the value(s) in relativedelta.

        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative (argument is plural); adding
            or subtracting a relativedelta with relative information performs
            the corresponding arithmetic operation on the original datetime value
            with the information in the relativedelta.

        weekday:
            One of the weekday instances (MO, TU, etc) available in the
            relativedelta module. These instances may receive a parameter N,
            specifying the Nth weekday, which could be positive or negative
            (like MO(+1) or MO(-2)). Not specifying it is the same as specifying
            +1. You can also use an integer, where 0=MO. This argument is always
            relative e.g. if the calculated date is already Monday, using MO(1)
            or MO(-1) won't change the day. To effectively make it absolute, use
            it in combination with the day argument (e.g. day=1, MO(1) for first
            Monday of the month).

        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of february.

        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.

    There are relative and absolute forms of the keyword
    arguments. The plural is relative, and the singular is
    absolute. For each argument in the order below, the absolute form
    is applied first (by setting each attribute to that value) and
    then the relative form (by adding the value to the attribute).

    The order of attributes considered when this relativedelta is
    added to a datetime is:

    1. Year
    2. Month
    3. Day
    4. Hours
    5. Minutes
    6. Seconds
    7. Microseconds

    Finally, weekday is applied, using the rule described above.

    For example

    >>> from datetime import datetime
    >>> from dateutil.relativedelta import relativedelta, MO
    >>> dt = datetime(2018, 4, 9, 13, 37, 0)
    >>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
    >>> dt + delta
    datetime.datetime(2018, 4, 2, 14, 37)

    First, the day is set to 1 (the first of the month), then 25 hours
    are added, to get to the 2nd day and 14th hour, finally the
    weekday is applied, but since the 2nd is already a Monday there is
    no effect.
    """

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):

        if dt1 and dt2:
            # Difference mode: compute the relativedelta between two dates.
            # datetime is a subclass of date. So both must be date
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")

            # We allow two dates, or two datetimes, so we coerce them to be
            # of the same type
            if (isinstance(dt1, datetime.datetime) !=
                    isinstance(dt2, datetime.datetime)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())

            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # Get year / month delta between the two
            months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
            self._set_months(months)

            # Remove the year/month delta so the timedelta is just well-defined
            # time units (seconds, days and microseconds)
            dtm = self.__radd__(dt2)

            # If we've overshot our target, make an adjustment
            if dt1 < dt2:
                compare = operator.gt
                increment = 1
            else:
                compare = operator.lt
                increment = -1

            while compare(dt1, dtm):
                months += increment
                self._set_months(months)
                dtm = self.__radd__(dt2)

            # Get the timedelta between the "months-adjusted" date and dt1
            delta = dt1 - dtm
            self.seconds = delta.seconds + delta.days * 86400
            self.microseconds = delta.microseconds
        else:
            # Keyword mode: store the supplied relative/absolute components.
            # Check for non-integer values in integer-only quantities
            if any(x is not None and x != int(x) for x in (years, months)):
                raise ValueError("Non-integer years and months are "
                                 "ambiguous and not currently supported.")

            # Relative information
            self.years = int(years)
            self.months = int(months)
            self.days = days + weeks * 7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds

            # Absolute information
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if any(x is not None and int(x) != x
                   for x in (year, month, day, hour,
                             minute, second, microsecond)):
                # For now we'll deprecate floats - later it'll be an error.
                warn("Non-integer value passed as absolute information. " +
                     "This is not a well-defined condition and will raise " +
                     "errors in future versions.", DeprecationWarning)

            if isinstance(weekday, integer_types):
                # Allow a plain integer (0=MO .. 6=SU) as weekday shorthand.
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    # Day numbers past Feb 28 must skip the leap day.
                    self.leapdays = -1

            if yday:
                # Convert a day-of-year number into month/day via cumulative
                # month-end day counts (leap-year table).
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = _sign(self.microseconds)
div, mod = divmod(self.microseconds * s, 1000000)
self.microseconds = mod * s
self.seconds += div * s
if abs(self.seconds) > 59:
s = _sign(self.seconds)
div, mod = divmod(self.seconds * s, 60)
self.seconds = mod * s
self.minutes += div * s
if abs(self.minutes) > 59:
s = _sign(self.minutes)
div, mod = divmod(self.minutes * s, 60)
self.minutes = mod * s
self.hours += div * s
if abs(self.hours) > 23:
s = _sign(self.hours)
div, mod = divmod(self.hours * s, 24)
self.hours = mod * s
self.days += div * s
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years += div * s
if (self.hours or self.minutes or self.seconds or self.microseconds
or self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
@property
def weeks(self):
    # Whole weeks contained in ``days`` (float division truncated toward
    # zero, which differs from ``//`` for negative day counts).
    return int(self.days / 7.0)

@weeks.setter
def weeks(self, value):
    # Replace the current whole-week component with ``value`` weeks,
    # keeping any leftover (non-week) days intact.
    self.days = self.days - (self.weeks * 7) + value * 7
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years = div * s
else:
self.years = 0
def normalized(self):
    """
    Return a version of this object represented entirely using integer
    values for the relative attributes.

    >>> relativedelta(days=1.5, hours=2).normalized()
    relativedelta(days=+1, hours=+14)

    :return:
        Returns a :class:`dateutil.relativedelta.relativedelta` object.
    """
    # Cascade remainders down (rounding each to roughly nearest microsecond)
    days = int(self.days)
    hours_f = round(self.hours + 24 * (self.days - days), 11)
    hours = int(hours_f)
    minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
    minutes = int(minutes_f)
    seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
    seconds = int(seconds_f)
    microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))

    # Constructor carries overflow back up with call to _fix()
    return self.__class__(years=self.years, months=self.months,
                          days=days, hours=hours, minutes=minutes,
                          seconds=seconds, microseconds=microseconds,
                          leapdays=self.leapdays, year=self.year,
                          month=self.month, day=self.day,
                          weekday=self.weekday, hour=self.hour,
                          minute=self.minute, second=self.second,
                          microsecond=self.microsecond)
def __add__(self, other):
    # relativedelta + relativedelta: sum relative fields; for absolute
    # fields and leapdays, ``other`` wins when it has a value.
    if isinstance(other, relativedelta):
        return self.__class__(years=other.years + self.years,
                              months=other.months + self.months,
                              days=other.days + self.days,
                              hours=other.hours + self.hours,
                              minutes=other.minutes + self.minutes,
                              seconds=other.seconds + self.seconds,
                              microseconds=(other.microseconds +
                                            self.microseconds),
                              leapdays=other.leapdays or self.leapdays,
                              year=(other.year if other.year is not None
                                    else self.year),
                              month=(other.month if other.month is not None
                                     else self.month),
                              day=(other.day if other.day is not None
                                   else self.day),
                              weekday=(other.weekday if other.weekday is not None
                                       else self.weekday),
                              hour=(other.hour if other.hour is not None
                                    else self.hour),
                              minute=(other.minute if other.minute is not None
                                      else self.minute),
                              second=(other.second if other.second is not None
                                      else self.second),
                              microsecond=(other.microsecond if other.microsecond
                                           is not None else
                                           self.microsecond))
    # relativedelta + timedelta: fold the timedelta into the relative fields.
    if isinstance(other, datetime.timedelta):
        return self.__class__(years=self.years,
                              months=self.months,
                              days=self.days + other.days,
                              hours=self.hours,
                              minutes=self.minutes,
                              seconds=self.seconds + other.seconds,
                              microseconds=self.microseconds + other.microseconds,
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)
    if not isinstance(other, datetime.date):
        return NotImplemented
    elif self._has_time and not isinstance(other, datetime.datetime):
        # Promote a bare date to a datetime when time-of-day info is present.
        other = datetime.datetime.fromordinal(other.toordinal())
    # Apply absolute values first, then the relative offsets (see class docs).
    year = (self.year or other.year)+self.years
    month = self.month or other.month
    if self.months:
        assert 1 <= abs(self.months) <= 12
        month += self.months
        if month > 12:
            year += 1
            month -= 12
        elif month < 1:
            year -= 1
            month += 12
    # Clamp the day to the target month's length (e.g. Jan 31 + 1 month).
    day = min(calendar.monthrange(year, month)[1],
              self.day or other.day)
    repl = {"year": year, "month": month, "day": day}
    for attr in ["hour", "minute", "second", "microsecond"]:
        value = getattr(self, attr)
        if value is not None:
            repl[attr] = value
    days = self.days
    if self.leapdays and month > 2 and calendar.isleap(year):
        days += self.leapdays
    ret = (other.replace(**repl)
           + datetime.timedelta(days=days,
                                hours=self.hours,
                                minutes=self.minutes,
                                seconds=self.seconds,
                                microseconds=self.microseconds))
    if self.weekday:
        # Jump forward (n > 0) or backward (n < 0) to the Nth given weekday.
        weekday, nth = self.weekday.weekday, self.weekday.n or 1
        jumpdays = (abs(nth) - 1) * 7
        if nth > 0:
            jumpdays += (7 - ret.weekday() + weekday) % 7
        else:
            jumpdays += (ret.weekday() - weekday) % 7
            jumpdays *= -1
        ret += datetime.timedelta(days=jumpdays)
    return ret
def __radd__(self, other):
    # Addition is symmetric here; delegate to __add__.
    return self.__add__(other)

def __rsub__(self, other):
    # date - relativedelta  ==  date + (-relativedelta)
    return self.__neg__().__radd__(other)
def __sub__(self, other):
    """Subtract another relativedelta field by field.

    Relative fields are subtracted numerically; each absolute field keeps
    ``self``'s value, falling back to ``other``'s when ``self``'s is unset.
    """
    if not isinstance(other, relativedelta):
        return NotImplemented   # In case the other object defines __rsub__
    kwargs = {
        attr: getattr(self, attr) - getattr(other, attr)
        for attr in ("years", "months", "days", "hours",
                     "minutes", "seconds", "microseconds")
    }
    kwargs["leapdays"] = self.leapdays or other.leapdays
    for attr in ("year", "month", "day", "weekday",
                 "hour", "minute", "second", "microsecond"):
        mine = getattr(self, attr)
        kwargs[attr] = mine if mine is not None else getattr(other, attr)
    return self.__class__(**kwargs)
def __abs__(self):
    """Return a copy whose relative fields are replaced by their absolute values."""
    kwargs = {attr: abs(getattr(self, attr))
              for attr in ("years", "months", "days", "hours",
                           "minutes", "seconds", "microseconds")}
    # Absolute fields and leapdays are copied through unchanged.
    for attr in ("leapdays", "year", "month", "day", "weekday",
                 "hour", "minute", "second", "microsecond"):
        kwargs[attr] = getattr(self, attr)
    return self.__class__(**kwargs)
def __neg__(self):
    """Return a copy with every relative field negated."""
    kwargs = {attr: -getattr(self, attr)
              for attr in ("years", "months", "days", "hours",
                           "minutes", "seconds", "microseconds")}
    # Absolute fields and leapdays are copied through unchanged.
    for attr in ("leapdays", "year", "month", "day", "weekday",
                 "hour", "minute", "second", "microsecond"):
        kwargs[attr] = getattr(self, attr)
    return self.__class__(**kwargs)
def __bool__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
# Compatibility with Python 2.x
__nonzero__ = __bool__
def __mul__(self, other):
    """Scale the relative fields by a number, truncating each to an integer."""
    try:
        factor = float(other)
    except TypeError:
        # Not a number-like object; let the other operand try.
        return NotImplemented

    kwargs = {attr: int(getattr(self, attr) * factor)
              for attr in ("years", "months", "days", "hours",
                           "minutes", "seconds", "microseconds")}
    # Absolute fields and leapdays are copied through unchanged.
    for attr in ("leapdays", "year", "month", "day", "weekday",
                 "hour", "minute", "second", "microsecond"):
        kwargs[attr] = getattr(self, attr)
    return self.__class__(**kwargs)

__rmul__ = __mul__
def __eq__(self, other):
    """Structural equality; ``MO`` and ``MO(+1)`` weekdays compare equal."""
    if not isinstance(other, relativedelta):
        return NotImplemented
    if self.weekday or other.weekday:
        # Both must have a weekday, on the same day of the week.
        if not self.weekday or not other.weekday:
            return False
        if self.weekday.weekday != other.weekday.weekday:
            return False
        n1, n2 = self.weekday.n, other.weekday.n
        # Treat an unspecified n and n == 1 as equivalent (MO == MO(+1)).
        if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
            return False
    return (self.years == other.years and
            self.months == other.months and
            self.days == other.days and
            self.hours == other.hours and
            self.minutes == other.minutes and
            self.seconds == other.seconds and
            self.microseconds == other.microseconds and
            self.leapdays == other.leapdays and
            self.year == other.year and
            self.month == other.month and
            self.day == other.day and
            self.hour == other.hour and
            self.minute == other.minute and
            self.second == other.second and
            self.microsecond == other.microsecond)
def __hash__(self):
    # NOTE(review): ``weekday`` is hashed directly here, while __eq__
    # treats MO and MO(+1) as equal — confirm weekday.__hash__ is
    # consistent with that equivalence so equal objects hash equally.
    return hash((
        self.weekday,
        self.years,
        self.months,
        self.days,
        self.hours,
        self.minutes,
        self.seconds,
        self.microseconds,
        self.leapdays,
        self.year,
        self.month,
        self.day,
        self.hour,
        self.minute,
        self.second,
        self.microsecond,
    ))
def __ne__(self, other):
    # NOTE(review): when __eq__ returns NotImplemented this evaluates
    # ``not NotImplemented`` (False) rather than deferring to ``other`` —
    # confirm whether that is intended before changing.
    return not self.__eq__(other)

def __div__(self, other):
    # Python 2 division protocol; implemented as multiplication by the
    # reciprocal.  Division by zero raises ZeroDivisionError (uncaught).
    try:
        reciprocal = 1 / float(other)
    except TypeError:
        return NotImplemented

    return self.__mul__(reciprocal)

__truediv__ = __div__
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("{attr}={value:+g}".format(attr=attr, value=value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("{attr}={value}".format(attr=attr, value=repr(value)))
return "{classname}({attrs})".format(classname=self.__class__.__name__,
attrs=", ".join(l))
def _sign(x):
    # Return -1 or 1 matching the sign of ``x``; copysign makes this
    # well-defined even for IEEE negative zero.
    return int(copysign(1, x))
# vim:ts=4:sw=4:et
|
dateutil/relativedelta.py
|
codereval_python_data_96
|
Change unicode output into bytestrings in Python 2
tzname() API changed in Python 3. It used to return bytes, but was changed
to unicode strings
def tzname_in_python2(namefunc):
    """Change unicode output into bytestrings in Python 2

    tzname() API changed in Python 3. It used to return bytes, but was changed
    to unicode strings
    """
    if PY2:
        @wraps(namefunc)
        def adjust_encoding(*args, **kwargs):
            name = namefunc(*args, **kwargs)
            if name is not None:
                # Python 2 expects tzname() to return bytes.
                name = name.encode()

            return name

        return adjust_encoding
    else:
        # Python 3: unicode is already the correct return type.
        return namefunc
from six import PY2
from functools import wraps
from datetime import datetime, timedelta, tzinfo
# A zero-length timedelta, shared wherever "no offset / no DST" is needed.
ZERO = timedelta(0)

# Public names re-exported from this helper module.
__all__ = ['tzname_in_python2', 'enfold']
def tzname_in_python2(namefunc):
    """Change unicode output into bytestrings in Python 2

    tzname() API changed in Python 3. It used to return bytes, but was changed
    to unicode strings
    """
    if PY2:
        @wraps(namefunc)
        def adjust_encoding(*args, **kwargs):
            name = namefunc(*args, **kwargs)
            if name is not None:
                # Python 2 expects tzname() to return bytes.
                name = name.encode()

            return name

        return adjust_encoding
    else:
        # Python 3: unicode is already the correct return type.
        return namefunc
# The following is adapted from Alexander Belopolsky's tz library
# https://github.com/abalkin/tz

if hasattr(datetime, 'fold'):
    # Python 3.6+ (PEP 495): datetime has a native ``fold`` attribute, so
    # ``enfold`` can simply delegate to ``datetime.replace``.
    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        return dt.replace(fold=fold)

else:
    # Pre-3.6: emulate the fold with a dedicated datetime subclass.
    class _DatetimeWithFold(datetime):
        """
        This is a class designed to provide a PEP 495-compliant interface for
        Python versions before 3.6. It is used only for dates in a fold, so
        the ``fold`` attribute is fixed at ``1``.

        .. versionadded:: 2.6.0
        """
        __slots__ = ()

        def replace(self, *args, **kwargs):
            """
            Return a datetime with the same attributes, except for those
            attributes given new values by whichever keyword arguments are
            specified. Note that tzinfo=None can be specified to create a naive
            datetime from an aware datetime with no conversion of date and time
            data.

            This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
            return a ``datetime.datetime`` even if ``fold`` is unchanged.
            """
            argnames = (
                'year', 'month', 'day', 'hour', 'minute', 'second',
                'microsecond', 'tzinfo'
            )

            # Positional args fill the slots in declaration order; a keyword
            # duplicate of a positional argument is an error.
            for arg, argname in zip(args, argnames):
                if argname in kwargs:
                    raise TypeError('Duplicate argument: {}'.format(argname))

                kwargs[argname] = arg

            # Fill every unspecified field from the current instance.
            for argname in argnames:
                if argname not in kwargs:
                    kwargs[argname] = getattr(self, argname)

            # Dropping out of the fold (fold=0) yields a plain datetime.
            dt_class = self.__class__ if kwargs.get('fold', 1) else datetime

            return dt_class(**kwargs)

        @property
        def fold(self):
            return 1

    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        # Already carries the requested fold value; return unchanged.
        if getattr(dt, 'fold', 0) == fold:
            return dt

        args = dt.timetuple()[:6]
        args += (dt.microsecond, dt.tzinfo)

        if fold:
            return _DatetimeWithFold(*args)
        else:
            return datetime(*args)
def _validate_fromutc_inputs(f):
"""
The CPython version of ``fromutc`` checks that the input is a ``datetime``
object and that ``self`` is attached as its ``tzinfo``.
"""
@wraps(f)
def fromutc(self, dt):
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
return f(self, dt)
return fromutc
class _tzinfo(tzinfo):
    """
    Base class for all ``dateutil`` ``tzinfo`` objects.
    """

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.
        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        dt = dt.replace(tzinfo=self)

        # Interpret the same wall time on both sides of the fold...
        wall_0 = enfold(dt, fold=0)
        wall_1 = enfold(dt, fold=1)

        # ...it is ambiguous iff the wall clock reads the same while the UTC
        # offsets differ.
        same_offset = wall_0.utcoffset() == wall_1.utcoffset()
        same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)

        return same_dt and not same_offset

    def _fold_status(self, dt_utc, dt_wall):
        """
        Determine the fold status of a "wall" datetime, given a representation
        of the same datetime as a (naive) UTC datetime. This is calculated based
        on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
        datetimes, and that this offset is the actual number of hours separating
        ``dt_utc`` and ``dt_wall``.

        :param dt_utc:
            Representation of the datetime as UTC

        :param dt_wall:
            Representation of the datetime as "wall time". This parameter must
            either have a `fold` attribute or have a fold-naive
            :class:`datetime.tzinfo` attached, otherwise the calculation may
            fail.
        """
        if self.is_ambiguous(dt_wall):
            # The wall time matches the standard-offset reading exactly when
            # it is the second (fold=1) occurrence.
            delta_wall = dt_wall - dt_utc
            _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
        else:
            _fold = 0

        return _fold

    def _fold(self, dt):
        # PEP-495 ``fold`` value, defaulting to 0 when the attribute is absent.
        return getattr(dt, 'fold', 0)

    def _fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """
        # Re-implement the algorithm from Python's datetime.py
        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")

        # The original datetime.py code assumes that `dst()` defaults to
        # zero during ambiguous times. PEP 495 inverts this presumption, so
        # for pre-PEP 495 versions of python, we need to tweak the algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        # Shift by the standard (non-DST) part of the offset first; the DST
        # component is applied below, evaluated at the shifted local time.
        delta = dtoff - dtdst
        dt += delta
        # Set fold=1 so we can default to being in the fold for
        # ambiguous dates.
        dtdst = enfold(dt, fold=1).dst()
        if dtdst is None:
            raise ValueError("fromutc(): dt.dst gave inconsistent "
                             "results; cannot convert")
        return dt + dtdst

    @_validate_fromutc_inputs
    def fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """
        # The decorator guarantees ``dt`` is a datetime with tzinfo is self.
        dt_wall = self._fromutc(dt)

        # Calculate the fold status given the two datetimes.
        _fold = self._fold_status(dt, dt_wall)

        # Set the default fold value for ambiguous dates
        return enfold(dt_wall, fold=_fold)
class tzrangebase(_tzinfo):
    """
    This is an abstract base class for time zones represented by an annual
    transition into and out of DST. Child classes should implement the following
    methods:

        * ``__init__(self, *args, **kwargs)``
        * ``transitions(self, year)`` - this is expected to return a tuple of
          datetimes representing the DST on and off transitions in standard
          time.

    A fully initialized ``tzrangebase`` subclass should also provide the
    following attributes:

        * ``hasdst``: Boolean whether or not the zone uses DST.
        * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
          representing the respective UTC offsets.
        * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
          abbreviations in DST and STD, respectively.
        * ``_hasdst``: Whether or not the zone has DST.

    .. versionadded:: 2.6.0
    """
    def __init__(self):
        # Subclasses must supply their own constructor; this base class only
        # provides shared behavior on top of the attributes listed above.
        raise NotImplementedError('tzrangebase is an abstract base class')

    def utcoffset(self, dt):
        """Return the total UTC offset for ``dt`` (DST or standard)."""
        isdst = self._isdst(dt)

        if isdst is None:
            return None
        elif isdst:
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        """Return the DST component of the offset; ZERO outside DST."""
        isdst = self._isdst(dt)

        if isdst is None:
            return None
        elif isdst:
            return self._dst_base_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        """Return the short zone abbreviation appropriate for ``dt``."""
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr

    def fromutc(self, dt):
        """ Given a datetime in UTC, return local time """
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")

        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        # Get transitions - if there are none, fixed offset
        transitions = self.transitions(dt.year)
        if transitions is None:
            return dt + self.utcoffset(dt)

        # Get the transition times in UTC
        dston, dstoff = transitions

        dston -= self._std_offset
        dstoff -= self._std_offset

        utc_transitions = (dston, dstoff)
        dt_utc = dt.replace(tzinfo=None)

        isdst = self._naive_isdst(dt_utc, utc_transitions)

        if isdst:
            dt_wall = dt + self._dst_offset
        else:
            dt_wall = dt + self._std_offset

        # A standard-time wall clock reading that is also reachable as a DST
        # reading is ambiguous; mark it as the second occurrence (fold=1).
        _fold = int(not isdst and self.is_ambiguous(dt_wall))

        return enfold(dt_wall, fold=_fold)

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.
        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        if not self.hasdst:
            return False

        start, end = self.transitions(dt.year)

        dt = dt.replace(tzinfo=None)

        # Only wall times in [end, end + dst_base_offset) occur twice.
        return (end <= dt < end + self._dst_base_offset)

    def _isdst(self, dt):
        """Return True/False for DST status, or None when ``dt`` is None."""
        if not self.hasdst:
            return False
        elif dt is None:
            return None

        transitions = self.transitions(dt.year)

        if transitions is None:
            return False

        dt = dt.replace(tzinfo=None)

        isdst = self._naive_isdst(dt, transitions)

        # Handle ambiguous dates
        if not isdst and self.is_ambiguous(dt):
            # PEP 495: fold=0 (the first occurrence) is the DST reading.
            return not self._fold(dt)
        else:
            return isdst

    def _naive_isdst(self, dt, transitions):
        """Test a naive datetime against the (on, off) transition pair."""
        dston, dstoff = transitions

        dt = dt.replace(tzinfo=None)

        if dston < dstoff:
            # DST interval lies within the calendar year (northern style).
            isdst = dston <= dt < dstoff
        else:
            # DST interval wraps the year boundary (southern style).
            isdst = not dstoff <= dt < dston

        return isdst

    @property
    def _dst_base_offset(self):
        # Amount the wall clock jumps at the DST transition.
        return self._dst_offset - self._std_offset

    # Instances compare by value but are not hashable.
    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s(...)" % self.__class__.__name__

    __reduce__ = object.__reduce__
|
dateutil/tz/_common.py
|
codereval_python_data_97
|
Get version information or return default if unable to do so.
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: expanded git-archive keywords (works without a .git dir).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    # Locate the project root by stripping one path component per segment of
    # versionfile_source from this file's absolute path.
    try:
        root = os.path.realpath(__file__)
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }

    # Strategy 2: ask git directly.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: fall back to the parent directory name (sdist unpack dir).
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import functools
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {
        "refnames": git_refnames,
        "full": git_full,
        "date": git_date,
    }
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances are plain attribute bags; get_config() populates the
    attributes (VCS, style, tag_prefix, parentdir_prefix,
    versionfile_source, verbose).
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "src/prestoplot/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Raised by the individual version-discovery strategies so that
    get_versions() can fall through to the next strategy.
    """
# NOTE(review): not referenced elsewhere in this module; kept for
# versioneer's public API.
LONG_VERSION_PY: Dict[str, str] = {}
# Registry filled in by @register_vcs_handler:
# maps vcs name -> method name -> handler callable.
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""

    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s).

    Try each executable name in *commands* until one can be spawned, run it
    with *args*, and return a ``(stdout, returncode)`` tuple.  Returns
    ``(None, None)`` when no executable could be launched at all, and
    ``(None, returncode)`` when the command ran but exited non-zero.

    :param commands: list of candidate executable names (e.g. ``["git"]``).
    :param args: argument list passed to the executable.
    :param cwd: working directory for the child process.
    :param verbose: print diagnostics on failure.
    :param hide_stderr: capture (and discard) stderr instead of inheriting it.
    :param env: environment mapping for the child process.
    """
    assert isinstance(commands, list)
    process = None

    popen_kwargs = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo

    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [command] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
                **popen_kwargs
            )
            break
        except OSError as e:  # modern form of the old sys.exc_info()[1] idiom
            if e.errno == errno.ENOENT:
                # Executable not found: try the next candidate name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None

    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        leaf = os.path.basename(root)
        if leaf.startswith(parentdir_prefix):
            # Whatever follows the prefix is taken as the version string.
            return {
                "version": leaf[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level

    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    # Map the grepped-for variable prefix to the key it is stored under.
    wanted = (
        ("git_refnames =", "refnames"),
        ("git_full =", "full"),
        ("git_date =", "date"),
    )
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                for prefix, key in wanted:
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except OSError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line. Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]
        # Normalize git's "%ci" (ISO-8601-like) output into a compliant
        # ISO-8601 datestamp: "YYYY-MM-DD HH:MM:SS +ZZZZ" becomes
        # "YYYY-MM-DDTHH:MM:SS+ZZZZ".  ("%cI" would give this directly but
        # only exists in git >= 2.2.0.)
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {ref.strip() for ref in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Either git < 1.8.3 (whose %d expansion strips the refs/tags/
        # prefix), or there really are no tags.  Heuristic: assume version
        # tags contain a digit, which filters out common branch names like
        # "release", "stabilization", "HEAD" and "master".
        tags = {ref for ref in refs if re.search(r"\d", ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        # Filter out refs that exactly match prefix or that don't start
        # with a number once the prefix is stripped (mostly a concern
        # when prefix is '')
        if not re.match(r"\d", r):
            continue
        if verbose:
            print("picking %s" % r)
        return {
            "version": r,
            "full-revisionid": keywords["full"].strip(),
            "dirty": False,
            "error": None,
            "date": date,
        }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    :param tag_prefix: prefix release tags carry (may be "").
    :param root: directory expected to be inside the git work tree.
    :param verbose: print diagnostics on failure.
    :param runner: command-execution callable (defaults to run_command).
    :return: dict of "pieces" consumed by the render_* functions; on a
        parse problem the dict carries an "error" entry instead.
    :raises NotThisMethod: if git is unavailable or root is not a work tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # GIT_DIR can interfere with correct operation of Versioneer.
    # It may be intended to be passed to the Versioneer-versioned project,
    # but that should not change where we get our version from.
    env = os.environ.copy()
    env.pop("GIT_DIR", None)
    runner = functools.partial(runner, env=env)
    # Sanity check: bail out early (NotThisMethod) if root is not a git tree.
    _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else []
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(
        GITS,
        ["describe", "--tags", "--dirty", "--always", "--long", *MATCH_ARGS],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()
    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")
        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)
        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]
    pieces["branch"] = branch_name
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    Robust against ``pieces["closest-tag"]`` being present but ``None``
    (how git_pieces_from_vcs records "no tag"), which would otherwise make
    the ``in`` test raise TypeError.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    distance, dirty = pieces["distance"], pieces["dirty"]

    if tag:
        version = tag
        if distance or dirty:
            version += plus_or_dot(pieces)
            version += "%d.g%s" % (distance, pieces["short"])
            if dirty:
                version += ".dirty"
        return version

    # exception #1: no tag anywhere in history
    version = "0+untagged.%d.g%s" % (distance, pieces["short"])
    if dirty:
        version += ".dirty"
    return version
def render_pep440_branch(pieces):
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    distance, dirty = pieces["distance"], pieces["dirty"]
    off_master = pieces["branch"] != "master"

    if tag:
        version = tag
        if distance or dirty:
            if off_master:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "%d.g%s" % (distance, pieces["short"])
            if dirty:
                version += ".dirty"
        return version

    # exception #1
    version = "0"
    if off_master:
        version += ".dev0"
    version += "+untagged.%d.g%s" % (distance, pieces["short"])
    if dirty:
        version += ".dirty"
    return version
def pep440_split_post(ver):
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number (or ``None`` if no post-release segment is
    present; a bare trailing ".post" counts as 0).
    """
    parts = ver.split(".post")
    if len(parts) != 2:
        return parts[0], None
    return parts[0], int(parts[1] or 0)
def render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]

    if not tag:
        # exception #1
        return "0.post0.dev%d" % distance
    if not distance:
        # sitting exactly on the tag: the tag itself is the version
        return tag

    # bump (or start) the post-release segment and mark as a dev build
    base, post = pep440_split_post(tag)
    if post is None:
        return base + ".post0.dev%d" % distance
    return base + ".post%d.dev%d" % (post + 1, distance)
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    distance, dirty = pieces["distance"], pieces["dirty"]

    if not tag:
        # exception #1
        version = "0.post%d" % distance
        if dirty:
            version += ".dev0"
        version += "+g%s" % pieces["short"]
        return version

    version = tag
    if distance or dirty:
        version += ".post%d" % distance
        if dirty:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    distance, dirty = pieces["distance"], pieces["dirty"]
    off_master = pieces["branch"] != "master"

    if tag:
        version = tag
        if distance or dirty:
            version += ".post%d" % distance
            if off_master:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
            if dirty:
                version += ".dirty"
        return version

    # exception #1
    version = "0.post%d" % distance
    if off_master:
        version += ".dev0"
    version += "+g%s" % pieces["short"]
    if dirty:
        version += ".dirty"
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    distance, dirty = pieces["distance"], pieces["dirty"]

    if tag:
        version = tag
        if distance or dirty:
            version += ".post%d" % distance
            if dirty:
                version += ".dev0"
        return version

    # exception #1
    version = "0.post%d" % distance
    if dirty:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table instead of an if/elif chain.
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: expanded git-archive keywords (works without a .git dir).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    # Locate the project root by stripping one path component per segment of
    # versionfile_source from this file's absolute path.
    try:
        root = os.path.realpath(__file__)
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }

    # Strategy 2: ask git directly.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: fall back to the parent directory name (sdist unpack dir).
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
|
src/prestoplot/_version.py
|
codereval_python_data_98
|
Render the given version pieces into the requested style.
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table instead of an if/elif chain.
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import functools
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {
        "refnames": git_refnames,
        "full": git_full,
        "date": git_date,
    }
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances are plain attribute bags; get_config() populates the
    attributes (VCS, style, tag_prefix, parentdir_prefix,
    versionfile_source, verbose).
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "src/prestoplot/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Raised by the individual version-discovery strategies so that
    get_versions() can fall through to the next strategy.
    """
# NOTE(review): not referenced elsewhere in this module; kept for
# versioneer's public API.
LONG_VERSION_PY: Dict[str, str] = {}
# Registry filled in by @register_vcs_handler:
# maps vcs name -> method name -> handler callable.
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""

    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s).

    Try each executable name in *commands* until one can be spawned, run it
    with *args*, and return a ``(stdout, returncode)`` tuple.  Returns
    ``(None, None)`` when no executable could be launched at all, and
    ``(None, returncode)`` when the command ran but exited non-zero.

    :param commands: list of candidate executable names (e.g. ``["git"]``).
    :param args: argument list passed to the executable.
    :param cwd: working directory for the child process.
    :param verbose: print diagnostics on failure.
    :param hide_stderr: capture (and discard) stderr instead of inheriting it.
    :param env: environment mapping for the child process.
    """
    assert isinstance(commands, list)
    process = None

    popen_kwargs = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo

    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [command] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
                **popen_kwargs
            )
            break
        except OSError as e:  # modern form of the old sys.exc_info()[1] idiom
            if e.errno == errno.ENOENT:
                # Executable not found: try the next candidate name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None

    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        leaf = os.path.basename(root)
        if leaf.startswith(parentdir_prefix):
            # Whatever follows the prefix is taken as the version string.
            return {
                "version": leaf[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level

    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    # Map the grepped-for variable prefix to the key it is stored under.
    wanted = (
        ("git_refnames =", "refnames"),
        ("git_full =", "full"),
        ("git_date =", "date"),
    )
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                for prefix, key in wanted:
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except OSError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line. Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]
        # Normalize git's "%ci" (ISO-8601-like) output into a compliant
        # ISO-8601 datestamp: "YYYY-MM-DD HH:MM:SS +ZZZZ" becomes
        # "YYYY-MM-DDTHH:MM:SS+ZZZZ".  ("%cI" would give this directly but
        # only exists in git >= 2.2.0.)
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {ref.strip() for ref in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Either git < 1.8.3 (whose %d expansion strips the refs/tags/
        # prefix), or there really are no tags.  Heuristic: assume version
        # tags contain a digit, which filters out common branch names like
        # "release", "stabilization", "HEAD" and "master".
        tags = {ref for ref in refs if re.search(r"\d", ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        # Filter out refs that exactly match prefix or that don't start
        # with a number once the prefix is stripped (mostly a concern
        # when prefix is '')
        if not re.match(r"\d", r):
            continue
        if verbose:
            print("picking %s" % r)
        return {
            "version": r,
            "full-revisionid": keywords["full"].strip(),
            "dirty": False,
            "error": None,
            "date": date,
        }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    :param tag_prefix: prefix release tags carry (may be "").
    :param root: directory expected to be inside the git work tree.
    :param verbose: print diagnostics on failure.
    :param runner: command-execution callable (defaults to run_command).
    :return: dict of "pieces" consumed by the render_* functions; on a
        parse problem the dict carries an "error" entry instead.
    :raises NotThisMethod: if git is unavailable or root is not a work tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # GIT_DIR can interfere with correct operation of Versioneer.
    # It may be intended to be passed to the Versioneer-versioned project,
    # but that should not change where we get our version from.
    env = os.environ.copy()
    env.pop("GIT_DIR", None)
    runner = functools.partial(runner, env=env)
    # Sanity check: bail out early (NotThisMethod) if root is not a git tree.
    _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else []
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(
        GITS,
        ["describe", "--tags", "--dirty", "--always", "--long", *MATCH_ARGS],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()
    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")
        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)
        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]
    pieces["branch"] = branch_name
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    PEP 440 local version labels begin with "+" and use "." as the
    internal separator, so the first appended local segment gets "+"
    and every later one gets ".".

    :param pieces: version-pieces dict; only "closest-tag" is read.
    :return: "." if the closest tag already contains a "+", else "+".
    """
    # "closest-tag" may be present but None (no tag found); the plain
    # pieces.get("closest-tag", "") would return that None and make the
    # `in` test raise TypeError, so coalesce with `or ""`.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_branch(pieces):
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags at all
        version = "0"
        if pieces["branch"] != "master":
            version += ".dev0"
        version += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        if pieces["branch"] != "master":
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def pep440_split_post(ver):
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release, and the
    post-release version number — None when no ".post" segment is
    present, 0 when the segment is present but empty (e.g. "1.0.post").
    (The code has always returned None, not -1, for the missing case;
    the docstring previously said -1.)

    :param ver: a PEP 440 version string such as "1.0" or "1.0.post2".
    :return: tuple (release_string, post_number_or_None)
    """
    vc = ver.split(".post")
    return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing tagged yet
        return "0.post0.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # sitting exactly on the tag: the tag is the version
        return tag
    # bump (or start) the post-release segment, then add the .dev marker
    base, post = pep440_split_post(tag)
    if post is None:
        return base + ".post0.dev%d" % pieces["distance"]
    return base + ".post%d.dev%d" % (post + 1, pieces["distance"])
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags anywhere in history
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags anywhere in history
        version = "0.post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            version += ".dev0"
        version += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        # Clean build sitting exactly on a tag: the tag is the version.
        return tag
    version = (tag if tag else "0") + ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: fall back to the bare short hash
        version = pieces["short"]
    elif pieces["distance"]:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        version = tag
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare short hash
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style.

    :param pieces: dict produced by git_pieces_from_vcs() (or equivalent).
    :param style: one of the pep440*/git-describe* style names, or
        ""/"default" for "pep440".
    :return: dict with version/full-revisionid/dirty/error/date keys.
    :raises ValueError: for an unrecognized style name.
    """
    if pieces["error"]:
        # An earlier stage failed; report an unknown version with the error.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    chosen = style
    if not chosen or chosen == "default":
        chosen = "pep440"  # the default

    # Dispatch kept as an if/elif chain so unknown styles fail fast
    # without touching any renderer.
    if chosen == "pep440":
        version = render_pep440(pieces)
    elif chosen == "pep440-branch":
        version = render_pep440_branch(pieces)
    elif chosen == "pep440-pre":
        version = render_pep440_pre(pieces)
    elif chosen == "pep440-post":
        version = render_pep440_post(pieces)
    elif chosen == "pep440-post-branch":
        version = render_pep440_post_branch(pieces)
    elif chosen == "pep440-old":
        version = render_pep440_old(pieces)
    elif chosen == "git-describe":
        version = render_git_describe(pieces)
    elif chosen == "git-describe-long":
        version = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % chosen)

    return {
        "version": version,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    def _unknown(error):
        # Shared shape for every "could not determine version" result.
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": error,
            "date": None,
        }

    # Strategy 1: expanded git-archive keywords.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    # Locate the project root by walking up from this file.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return _unknown("unable to find root of source tree")

    # Strategy 2: ask git directly.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: parse the version out of the unpacked directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return _unknown("unable to compute version")
|
src/prestoplot/_version.py
|
codereval_python_data_99
|
Return a + if we don't already have one, else return a .
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    PEP 440 local version labels begin with "+" and use "." as the
    internal separator, so the first appended local segment gets "+"
    and every later one gets ".".

    :param pieces: version-pieces dict; only "closest-tag" is read.
    :return: "." if the closest tag already contains a "+", else "+".
    """
    # "closest-tag" may be present but None (no tag found); the plain
    # pieces.get("closest-tag", "") would return that None and make the
    # `in` test raise TypeError, so coalesce with `or ""`.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import functools
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {
        "refnames": git_refnames,
        "full": git_full,
        "date": git_date,
    }
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These settings were baked in when 'setup.py versioneer' generated
    # this _version.py for the project.
    config = VersioneerConfig()
    config.VCS = "git"
    config.style = "pep440"
    config.tag_prefix = ""
    config.parentdir_prefix = "None"
    config.versionfile_source = "src/prestoplot/_version.py"
    config.verbose = False
    return config
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow between the version-discovery strategies in
    get_versions(): each strategy raises this to mean "try the next one".
    """
# Template registry (VCS name -> long _version.py source text). Left empty
# in this generated module; appears unused at runtime here — presumably
# kept for compatibility with versioneer's own machinery.
LONG_VERSION_PY: Dict[str, str] = {}
# Handler registry: HANDLERS[vcs][method] -> callable, populated by the
# @register_vcs_handler decorator.
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS.

    :param vcs: VCS name, e.g. "git".
    :param method: handler slot name, e.g. "get_keywords".
    :return: decorator that records the function in HANDLERS and
        returns it unchanged.
    """

    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s).

    Tries each candidate binary in *commands* until one launches, then
    runs it with *args*.

    :param commands: list of candidate executable names, tried in order.
    :param args: argument list appended to the chosen command.
    :param cwd: working directory for the child process.
    :param verbose: print diagnostics on failure.
    :param hide_stderr: capture (and discard) the child's stderr.
    :param env: environment mapping for the child process.
    :return: (stdout_text, returncode); (None, None) if no candidate could
        be launched, (None, returncode) if the command exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    popen_kwargs = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo

    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [command] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
                **popen_kwargs
            )
            break
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                # This candidate is not installed; try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(exc)
            return None, None
    else:
        # Loop exhausted without a successful Popen.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None

    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory

    :param parentdir_prefix: expected directory-name prefix, e.g. "proj-".
    :param root: starting directory; itself and up to two ancestors are tried.
    :param verbose: print the directories tried on failure.
    :raises NotThisMethod: when no candidate directory matches the prefix.
    """
    tried = []
    for _ in range(3):
        candidate = os.path.basename(root)
        if candidate.startswith(parentdir_prefix):
            return {
                "version": candidate[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # walk one level up
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else []
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
["describe", "--tags", "--dirty", "--always", "--long", *MATCH_ARGS],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    PEP 440 local version labels begin with "+" and use "." as the
    internal separator, so the first appended local segment gets "+"
    and every later one gets ".".

    :param pieces: version-pieces dict; only "closest-tag" is read.
    :return: "." if the closest tag already contains a "+", else "+".
    """
    # "closest-tag" may be present but None (no tag found); the plain
    # pieces.get("closest-tag", "") would return that None and make the
    # `in` test raise TypeError, so coalesce with `or ""`.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release, and the
    post-release version number — None when no ".post" segment is
    present, 0 when the segment is present but empty (e.g. "1.0.post").
    (The code has always returned None, not -1, for the missing case;
    the docstring previously said -1.)

    :param ver: a PEP 440 version string such as "1.0" or "1.0.post2".
    :return: tuple (release_string, post_number_or_None)
    """
    vc = ver.split(".post")
    return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
|
src/prestoplot/_version.py
|
codereval_python_data_100
|
Call the given command(s).
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen(
[command] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
**popen_kwargs
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import functools
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "src/prestoplot/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen(
[command] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
**popen_kwargs
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string. Also search up to two
    directory levels above for an appropriately named parent directory.
    """
    tried = []
    candidate = root
    for _ in range(3):
        basename = os.path.basename(candidate)
        if basename.startswith(parentdir_prefix):
            return {
                "version": basename[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(candidate)
        candidate = os.path.dirname(candidate)  # move up one level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else []
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
["describe", "--tags", "--dirty", "--always", "--long", *MATCH_ARGS],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a ".".

    :param pieces: version pieces dict; "closest-tag" may be absent or None
    :return: separator to place before a local-version segment
    """
    # "closest-tag" can be present but set to None, in which case the
    # .get() default is never used and `"+" in None` would raise
    # TypeError. Coalesce to "" before the substring test.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in the history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_branch(pieces):
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in the history
        version = "0"
        if pieces["branch"] != "master":
            version += ".dev0"
        version += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        if pieces["branch"] != "master":
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def pep440_split_post(ver):
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number (or -1 if no post-release segment is present).
    """
    parts = ver.split(".post")
    if len(parts) == 2:
        # ".post" with no number counts as post-release 0
        return parts[0], int(parts[1] or 0)
    return parts[0], None
def render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in the history
        return "0.post0.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # sitting exactly on the tag: use it verbatim
        return tag
    # commits past the tag: bump (or introduce) the post-release segment
    base, post = pep440_split_post(tag)
    if post is None:
        return base + ".post0.dev%d" % pieces["distance"]
    return base + ".post%d.dev%d" % (post + 1, pieces["distance"])
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in the history
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces) + "g%s" % pieces["short"]
    return version
def render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in the history
        version = "0.post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            version += ".dev0"
        version += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
        return version
    # exception #1: no tag anywhere in the history
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hash
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # distance and hash are always appended, even at distance 0
        version = tag + "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hash
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Upstream discovery failed; report the error, not a version.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table instead of an if/elif chain.
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we
    # have __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in
    # which case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: expanded git-archive substitution keywords.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    # Locate the project root by walking up from this file's location.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the
        # source tree (where the .git directory might live) to this file.
        # Invert this to find the root from __file__.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }

    # Strategy 2: ask git directly via 'git describe'.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    # Everything failed: report an unknown version.
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
|
src/prestoplot/_version.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.